Console Output (Success)

Skipping 1,797 KB..
 latency-svc-32z7p [223.004468ms]
Feb  7 20:26:17.773: INFO: Created: latency-svc-4l5n3
Feb  7 20:26:17.776: INFO: Got endpoints: latency-svc-0s7jp [148.381543ms]
Feb  7 20:26:17.791: INFO: Created: latency-svc-xffzs
Feb  7 20:26:17.792: INFO: Got endpoints: latency-svc-wbnw8 [251.534783ms]
Feb  7 20:26:17.803: INFO: Created: latency-svc-64js8
Feb  7 20:26:17.811: INFO: Got endpoints: latency-svc-ws036 [217.166018ms]
Feb  7 20:26:17.816: INFO: Got endpoints: latency-svc-fplg2 [252.57744ms]
Feb  7 20:26:17.829: INFO: Created: latency-svc-2km3k
Feb  7 20:26:17.855: INFO: Created: latency-svc-rk54n
Feb  7 20:26:17.856: INFO: Got endpoints: latency-svc-4l5n3 [257.734497ms]
Feb  7 20:26:17.856: INFO: Got endpoints: latency-svc-xffzs [239.681975ms]
Feb  7 20:26:17.871: INFO: Created: latency-svc-w9h4d
Feb  7 20:26:17.882: INFO: Got endpoints: latency-svc-64js8 [265.502641ms]
Feb  7 20:26:17.899: INFO: Created: latency-svc-c960c
Feb  7 20:26:17.935: INFO: Created: latency-svc-blx9v
Feb  7 20:26:17.948: INFO: Got endpoints: latency-svc-rk54n [289.612552ms]
Feb  7 20:26:17.949: INFO: Got endpoints: latency-svc-2km3k [173.126251ms]
Feb  7 20:26:18.031: INFO: Created: latency-svc-mvdx9
Feb  7 20:26:18.066: INFO: Got endpoints: latency-svc-c960c [381.306615ms]
Feb  7 20:26:18.075: INFO: Got endpoints: latency-svc-w9h4d [395.627635ms]
Feb  7 20:26:18.080: INFO: Created: latency-svc-4st4p
Feb  7 20:26:18.084: INFO: Got endpoints: latency-svc-blx9v [358.817889ms]
Feb  7 20:26:18.095: INFO: Created: latency-svc-jjg5d
Feb  7 20:26:18.101: INFO: Got endpoints: latency-svc-mvdx9 [374.857685ms]
Feb  7 20:26:18.112: INFO: Created: latency-svc-dkpl5
Feb  7 20:26:18.121: INFO: Created: latency-svc-2rz07
Feb  7 20:26:18.148: INFO: Created: latency-svc-jv6sc
Feb  7 20:26:18.148: INFO: Got endpoints: latency-svc-4st4p [398.567748ms]
Feb  7 20:26:18.152: INFO: Got endpoints: latency-svc-jjg5d [392.071567ms]
Feb  7 20:26:18.157: INFO: Created: latency-svc-8gzzn
Feb  7 20:26:18.165: INFO: Created: latency-svc-5wz9g
Feb  7 20:26:18.172: INFO: Got endpoints: latency-svc-dkpl5 [653.387041ms]
Feb  7 20:26:18.184: INFO: Got endpoints: latency-svc-2rz07 [118.154767ms]
Feb  7 20:26:18.187: INFO: Created: latency-svc-fl1bq
Feb  7 20:26:18.197: INFO: Created: latency-svc-7gq7x
Feb  7 20:26:18.220: INFO: Created: latency-svc-qcgjp
Feb  7 20:26:18.228: INFO: Got endpoints: latency-svc-8gzzn [411.495681ms]
Feb  7 20:26:18.253: INFO: Created: latency-svc-7dnzv
Feb  7 20:26:18.254: INFO: Got endpoints: latency-svc-5wz9g [398.406167ms]
Feb  7 20:26:18.254: INFO: Got endpoints: latency-svc-jv6sc [443.532333ms]
Feb  7 20:26:18.267: INFO: Got endpoints: latency-svc-7gq7x [384.765043ms]
Feb  7 20:26:18.272: INFO: Created: latency-svc-pb37n
Feb  7 20:26:18.278: INFO: Got endpoints: latency-svc-fl1bq [422.279597ms]
Feb  7 20:26:18.290: INFO: Created: latency-svc-cw65k
Feb  7 20:26:18.305: INFO: Created: latency-svc-t1n1g
Feb  7 20:26:18.306: INFO: Got endpoints: latency-svc-qcgjp [358.020188ms]
Feb  7 20:26:18.321: INFO: Got endpoints: latency-svc-7dnzv [371.52961ms]
Feb  7 20:26:18.327: INFO: Created: latency-svc-nxg1l
Feb  7 20:26:18.338: INFO: Got endpoints: latency-svc-pb37n [545.568715ms]
Feb  7 20:26:18.345: INFO: Created: latency-svc-6msmf
Feb  7 20:26:18.353: INFO: Got endpoints: latency-svc-cw65k [125.05557ms]
Feb  7 20:26:18.359: INFO: Created: latency-svc-kzmzp
Feb  7 20:26:18.370: INFO: Created: latency-svc-n8zds
Feb  7 20:26:18.381: INFO: Got endpoints: latency-svc-t1n1g [296.833425ms]
Feb  7 20:26:18.385: INFO: Created: latency-svc-lkp0r
Feb  7 20:26:18.388: INFO: Got endpoints: latency-svc-nxg1l [286.370846ms]
Feb  7 20:26:18.404: INFO: Got endpoints: latency-svc-6msmf [255.569562ms]
Feb  7 20:26:18.410: INFO: Created: latency-svc-l4nk9
Feb  7 20:26:18.431: INFO: Got endpoints: latency-svc-kzmzp [279.144609ms]
Feb  7 20:26:18.432: INFO: Created: latency-svc-nfbvd
Feb  7 20:26:18.435: INFO: Got endpoints: latency-svc-n8zds [263.040206ms]
Feb  7 20:26:18.446: INFO: Created: latency-svc-7gpnf
Feb  7 20:26:18.458: INFO: Created: latency-svc-27pct
Feb  7 20:26:18.472: INFO: Created: latency-svc-fn1l5
Feb  7 20:26:18.481: INFO: Got endpoints: latency-svc-lkp0r [297.166383ms]
Feb  7 20:26:18.486: INFO: Created: latency-svc-3f2pm
Feb  7 20:26:18.504: INFO: Got endpoints: latency-svc-l4nk9 [429.640729ms]
Feb  7 20:26:18.505: INFO: Created: latency-svc-s7mgz
Feb  7 20:26:18.523: INFO: Created: latency-svc-0qv2t
Feb  7 20:26:18.530: INFO: Got endpoints: latency-svc-7gpnf [275.20134ms]
Feb  7 20:26:18.530: INFO: Got endpoints: latency-svc-nfbvd [276.217176ms]
Feb  7 20:26:18.540: INFO: Created: latency-svc-s6ppr
Feb  7 20:26:18.551: INFO: Created: latency-svc-pgv8w
Feb  7 20:26:18.564: INFO: Got endpoints: latency-svc-fn1l5 [285.572606ms]
Feb  7 20:26:18.567: INFO: Created: latency-svc-k5251
Feb  7 20:26:18.568: INFO: Got endpoints: latency-svc-27pct [301.591872ms]
Feb  7 20:26:18.591: INFO: Created: latency-svc-99mpf
Feb  7 20:26:18.597: INFO: Got endpoints: latency-svc-3f2pm [162.003282ms]
Feb  7 20:26:18.607: INFO: Created: latency-svc-lx53t
Feb  7 20:26:18.612: INFO: Got endpoints: latency-svc-0qv2t [274.713147ms]
Feb  7 20:26:18.613: INFO: Got endpoints: latency-svc-s7mgz [292.186778ms]
Feb  7 20:26:18.627: INFO: Created: latency-svc-lk929
Feb  7 20:26:18.652: INFO: Got endpoints: latency-svc-pgv8w [271.092821ms]
Feb  7 20:26:18.655: INFO: Created: latency-svc-98hhv
Feb  7 20:26:18.669: INFO: Got endpoints: latency-svc-k5251 [280.816975ms]
Feb  7 20:26:18.669: INFO: Got endpoints: latency-svc-s6ppr [315.748807ms]
Feb  7 20:26:18.679: INFO: Created: latency-svc-73m0h
Feb  7 20:26:18.684: INFO: Got endpoints: latency-svc-99mpf [279.904162ms]
Feb  7 20:26:18.721: INFO: Got endpoints: latency-svc-lx53t [290.026632ms]
Feb  7 20:26:18.732: INFO: Created: latency-svc-qmxp9
Feb  7 20:26:18.747: INFO: Got endpoints: latency-svc-lk929 [440.620455ms]
Feb  7 20:26:18.760: INFO: Got endpoints: latency-svc-98hhv [278.982514ms]
Feb  7 20:26:18.760: INFO: Created: latency-svc-vbfmr
Feb  7 20:26:18.777: INFO: Got endpoints: latency-svc-73m0h [272.805626ms]
Feb  7 20:26:18.779: INFO: Created: latency-svc-61th3
Feb  7 20:26:18.804: INFO: Created: latency-svc-bnqzg
Feb  7 20:26:18.810: INFO: Got endpoints: latency-svc-qmxp9 [279.873721ms]
Feb  7 20:26:18.817: INFO: Created: latency-svc-519cz
Feb  7 20:26:18.839: INFO: Got endpoints: latency-svc-vbfmr [308.462154ms]
Feb  7 20:26:18.840: INFO: Created: latency-svc-bh1nd
Feb  7 20:26:18.854: INFO: Got endpoints: latency-svc-61th3 [290.388891ms]
Feb  7 20:26:18.876: INFO: Created: latency-svc-jbztv
Feb  7 20:26:18.880: INFO: Got endpoints: latency-svc-bnqzg [312.101877ms]
Feb  7 20:26:18.883: INFO: Got endpoints: latency-svc-519cz [285.651229ms]
Feb  7 20:26:18.896: INFO: Created: latency-svc-tcwdc
Feb  7 20:26:18.910: INFO: Got endpoints: latency-svc-bh1nd [297.276103ms]
Feb  7 20:26:18.913: INFO: Created: latency-svc-wfc8z
Feb  7 20:26:18.933: INFO: Got endpoints: latency-svc-jbztv [320.341005ms]
Feb  7 20:26:18.940: INFO: Created: latency-svc-s07vm
Feb  7 20:26:18.975: INFO: Created: latency-svc-b0bw9
Feb  7 20:26:18.980: INFO: Got endpoints: latency-svc-tcwdc [327.791052ms]
Feb  7 20:26:19.009: INFO: Created: latency-svc-n7vlv
Feb  7 20:26:19.027: INFO: Got endpoints: latency-svc-wfc8z [358.309492ms]
Feb  7 20:26:19.027: INFO: Got endpoints: latency-svc-s07vm [358.415878ms]
Feb  7 20:26:19.040: INFO: Created: latency-svc-1f1wd
Feb  7 20:26:19.053: INFO: Created: latency-svc-9nvdp
Feb  7 20:26:19.079: INFO: Created: latency-svc-4pl4p
Feb  7 20:26:19.081: INFO: Got endpoints: latency-svc-b0bw9 [397.376579ms]
Feb  7 20:26:19.083: INFO: Created: latency-svc-gccn1
Feb  7 20:26:19.096: INFO: Got endpoints: latency-svc-n7vlv [162.23739ms]
Feb  7 20:26:19.096: INFO: Created: latency-svc-6p96n
Feb  7 20:26:19.133: INFO: Created: latency-svc-1mmsf
Feb  7 20:26:19.149: INFO: Created: latency-svc-6w25b
Feb  7 20:26:19.154: INFO: Got endpoints: latency-svc-1f1wd [407.403182ms]
Feb  7 20:26:19.159: INFO: Got endpoints: latency-svc-4pl4p [382.034099ms]
Feb  7 20:26:19.188: INFO: Created: latency-svc-xmdq7
Feb  7 20:26:19.188: INFO: Got endpoints: latency-svc-6p96n [348.765463ms]
Feb  7 20:26:19.188: INFO: Got endpoints: latency-svc-9nvdp [427.831828ms]
Feb  7 20:26:19.236: INFO: Created: latency-svc-tg255
Feb  7 20:26:19.236: INFO: Created: latency-svc-fx9k5
Feb  7 20:26:19.245: INFO: Got endpoints: latency-svc-gccn1 [434.960892ms]
Feb  7 20:26:19.268: INFO: Created: latency-svc-1br2h
Feb  7 20:26:19.308: INFO: Created: latency-svc-mfkj8
Feb  7 20:26:19.316: INFO: Created: latency-svc-9r0gd
Feb  7 20:26:19.321: INFO: Got endpoints: latency-svc-xmdq7 [437.922747ms]
Feb  7 20:26:19.321: INFO: Got endpoints: latency-svc-1mmsf [467.01594ms]
Feb  7 20:26:19.336: INFO: Created: latency-svc-krt62
Feb  7 20:26:19.347: INFO: Got endpoints: latency-svc-6w25b [466.418068ms]
Feb  7 20:26:19.363: INFO: Created: latency-svc-3xqdx
Feb  7 20:26:19.399: INFO: Created: latency-svc-gdh18
Feb  7 20:26:19.402: INFO: Got endpoints: latency-svc-fx9k5 [491.932771ms]
Feb  7 20:26:19.402: INFO: Got endpoints: latency-svc-tg255 [680.43603ms]
Feb  7 20:26:19.408: INFO: Got endpoints: latency-svc-1br2h [427.966966ms]
Feb  7 20:26:19.426: INFO: Created: latency-svc-ms8qm
Feb  7 20:26:19.446: INFO: Got endpoints: latency-svc-mfkj8 [418.918042ms]
Feb  7 20:26:19.454: INFO: Created: latency-svc-87qns
Feb  7 20:26:19.457: INFO: Got endpoints: latency-svc-9r0gd [430.16927ms]
Feb  7 20:26:19.541: INFO: Created: latency-svc-4fzkz
Feb  7 20:26:19.557: INFO: Got endpoints: latency-svc-krt62 [475.304969ms]
Feb  7 20:26:19.579: INFO: Created: latency-svc-s9gph
Feb  7 20:26:19.583: INFO: Got endpoints: latency-svc-gdh18 [429.186635ms]
Feb  7 20:26:19.599: INFO: Got endpoints: latency-svc-3xqdx [503.317538ms]
Feb  7 20:26:19.612: INFO: Created: latency-svc-hvlnn
Feb  7 20:26:19.620: INFO: Got endpoints: latency-svc-ms8qm [460.757804ms]
Feb  7 20:26:19.628: INFO: Got endpoints: latency-svc-87qns [440.492374ms]
Feb  7 20:26:19.638: INFO: Created: latency-svc-jf92z
Feb  7 20:26:19.662: INFO: Got endpoints: latency-svc-s9gph [417.211206ms]
Feb  7 20:26:19.664: INFO: Got endpoints: latency-svc-4fzkz [476.118037ms]
Feb  7 20:26:19.686: INFO: Created: latency-svc-jzp40
Feb  7 20:26:19.698: INFO: Got endpoints: latency-svc-hvlnn [377.30251ms]
Feb  7 20:26:19.706: INFO: Created: latency-svc-c6j3k
Feb  7 20:26:19.712: INFO: Created: latency-svc-njjwr
Feb  7 20:26:19.715: INFO: Got endpoints: latency-svc-jf92z [393.225067ms]
Feb  7 20:26:19.822: INFO: Created: latency-svc-txnhm
Feb  7 20:26:19.877: INFO: Got endpoints: latency-svc-jzp40 [529.584908ms]
Feb  7 20:26:19.877: INFO: Got endpoints: latency-svc-c6j3k [475.153909ms]
Feb  7 20:26:19.918: INFO: Created: latency-svc-cflf1
Feb  7 20:26:19.970: INFO: Got endpoints: latency-svc-njjwr [561.403416ms]
Feb  7 20:26:19.990: INFO: Created: latency-svc-gm0r8
Feb  7 20:26:20.061: INFO: Got endpoints: latency-svc-txnhm [658.880253ms]
Feb  7 20:26:20.133: INFO: Created: latency-svc-2hfdh
Feb  7 20:26:20.157: INFO: Got endpoints: latency-svc-cflf1 [711.299476ms]
Feb  7 20:26:20.159: INFO: Got endpoints: latency-svc-gm0r8 [701.265004ms]
Feb  7 20:26:20.229: INFO: Created: latency-svc-n5sgv
Feb  7 20:26:20.282: INFO: Created: latency-svc-zdb0d
Feb  7 20:26:20.288: INFO: Got endpoints: latency-svc-2hfdh [731.222848ms]
Feb  7 20:26:20.308: INFO: Created: latency-svc-7jp9d
Feb  7 20:26:20.350: INFO: Created: latency-svc-03vfz
Feb  7 20:26:20.355: INFO: Got endpoints: latency-svc-n5sgv [294.070868ms]
Feb  7 20:26:20.374: INFO: Created: latency-svc-1gzwq
Feb  7 20:26:20.384: INFO: Got endpoints: latency-svc-zdb0d [785.459252ms]
Feb  7 20:26:20.390: INFO: Created: latency-svc-flf0h
Feb  7 20:26:20.442: INFO: Created: latency-svc-grx4m
Feb  7 20:26:20.445: INFO: Got endpoints: latency-svc-7jp9d [825.370585ms]
Feb  7 20:26:20.470: INFO: Created: latency-svc-l6nnk
Feb  7 20:26:20.477: INFO: Got endpoints: latency-svc-03vfz [848.920099ms]
Feb  7 20:26:20.493: INFO: Created: latency-svc-gvtfq
Feb  7 20:26:20.512: INFO: Got endpoints: latency-svc-1gzwq [850.279752ms]
Feb  7 20:26:20.512: INFO: Got endpoints: latency-svc-flf0h [848.234741ms]
Feb  7 20:26:20.531: INFO: Created: latency-svc-mm01g
Feb  7 20:26:20.533: INFO: Got endpoints: latency-svc-grx4m [834.878627ms]
Feb  7 20:26:20.549: INFO: Created: latency-svc-zb5ps
Feb  7 20:26:20.554: INFO: Got endpoints: latency-svc-l6nnk [169.117173ms]
Feb  7 20:26:20.563: INFO: Got endpoints: latency-svc-gvtfq [686.04986ms]
Feb  7 20:26:20.567: INFO: Created: latency-svc-9khk1
Feb  7 20:26:20.594: INFO: Created: latency-svc-bb45j
Feb  7 20:26:20.617: INFO: Got endpoints: latency-svc-mm01g [740.669827ms]
Feb  7 20:26:20.621: INFO: Created: latency-svc-kgbgt
Feb  7 20:26:20.636: INFO: Created: latency-svc-k3xsk
Feb  7 20:26:20.655: INFO: Created: latency-svc-4p2mk
Feb  7 20:26:20.680: INFO: Created: latency-svc-kf6rx
Feb  7 20:26:20.686: INFO: Got endpoints: latency-svc-9khk1 [1.102435518s]
Feb  7 20:26:20.708: INFO: Created: latency-svc-8h5kl
Feb  7 20:26:20.715: INFO: Got endpoints: latency-svc-bb45j [557.383647ms]
Feb  7 20:26:20.715: INFO: Got endpoints: latency-svc-zb5ps [745.085678ms]
Feb  7 20:26:20.725: INFO: Created: latency-svc-h6md7
Feb  7 20:26:20.755: INFO: Created: latency-svc-0t39r
Feb  7 20:26:20.771: INFO: Created: latency-svc-xr36v
Feb  7 20:26:20.782: INFO: Got endpoints: latency-svc-k3xsk [493.643558ms]
Feb  7 20:26:20.782: INFO: Got endpoints: latency-svc-kgbgt [623.015622ms]
Feb  7 20:26:20.801: INFO: Created: latency-svc-jc7cx
Feb  7 20:26:20.832: INFO: Created: latency-svc-gjdh2
Feb  7 20:26:20.836: INFO: Got endpoints: latency-svc-4p2mk [481.451207ms]
Feb  7 20:26:20.848: INFO: Created: latency-svc-53j20
Feb  7 20:26:20.863: INFO: Got endpoints: latency-svc-kf6rx [1.148849092s]
Feb  7 20:26:20.864: INFO: Got endpoints: latency-svc-8h5kl [418.100022ms]
Feb  7 20:26:20.865: INFO: Created: latency-svc-nthmg
Feb  7 20:26:20.866: INFO: Got endpoints: latency-svc-h6md7 [389.0623ms]
Feb  7 20:26:20.895: INFO: Got endpoints: latency-svc-0t39r [383.159971ms]
Feb  7 20:26:20.906: INFO: Created: latency-svc-4pf14
Feb  7 20:26:20.936: INFO: Got endpoints: latency-svc-jc7cx [221.531541ms]
Feb  7 20:26:20.936: INFO: Created: latency-svc-0bk0j
Feb  7 20:26:20.944: INFO: Got endpoints: latency-svc-xr36v [431.511384ms]
Feb  7 20:26:20.956: INFO: Created: latency-svc-7rj9v
Feb  7 20:26:20.964: INFO: Got endpoints: latency-svc-gjdh2 [410.546994ms]
Feb  7 20:26:20.973: INFO: Got endpoints: latency-svc-53j20 [409.862532ms]
Feb  7 20:26:20.987: INFO: Created: latency-svc-krbj1
Feb  7 20:26:20.999: INFO: Got endpoints: latency-svc-nthmg [381.496731ms]
Feb  7 20:26:21.008: INFO: Created: latency-svc-vmzs4
Feb  7 20:26:21.017: INFO: Created: latency-svc-v3kk8
Feb  7 20:26:21.033: INFO: Got endpoints: latency-svc-7rj9v [318.216897ms]
Feb  7 20:26:21.037: INFO: Created: latency-svc-r1j61
Feb  7 20:26:21.052: INFO: Created: latency-svc-c0vf9
Feb  7 20:26:21.067: INFO: Got endpoints: latency-svc-4pf14 [381.328241ms]
Feb  7 20:26:21.088: INFO: Created: latency-svc-93sf1
Feb  7 20:26:21.097: INFO: Got endpoints: latency-svc-krbj1 [315.709819ms]
Feb  7 20:26:21.098: INFO: Got endpoints: latency-svc-0bk0j [564.176888ms]
Feb  7 20:26:21.116: INFO: Created: latency-svc-l81sh
Feb  7 20:26:21.121: INFO: Got endpoints: latency-svc-vmzs4 [339.430703ms]
Feb  7 20:26:21.163: INFO: Created: latency-svc-0c680
Feb  7 20:26:21.170: INFO: Got endpoints: latency-svc-v3kk8 [333.755924ms]
Feb  7 20:26:21.178: INFO: Created: latency-svc-9z1w0
Feb  7 20:26:21.179: INFO: Got endpoints: latency-svc-93sf1 [315.52797ms]
Feb  7 20:26:21.197: INFO: Created: latency-svc-m2vdm
Feb  7 20:26:21.216: INFO: Got endpoints: latency-svc-l81sh [320.257668ms]
Feb  7 20:26:21.224: INFO: Created: latency-svc-np04p
Feb  7 20:26:21.235: INFO: Got endpoints: latency-svc-c0vf9 [368.507601ms]
Feb  7 20:26:21.236: INFO: Got endpoints: latency-svc-r1j61 [373.048305ms]
Feb  7 20:26:21.252: INFO: Created: latency-svc-2f104
Feb  7 20:26:21.277: INFO: Created: latency-svc-zrcq7
Feb  7 20:26:21.284: INFO: Got endpoints: latency-svc-9z1w0 [340.536575ms]
Feb  7 20:26:21.293: INFO: Got endpoints: latency-svc-0c680 [356.937677ms]
Feb  7 20:26:21.308: INFO: Created: latency-svc-g4cdz
Feb  7 20:26:21.327: INFO: Created: latency-svc-2tf84
Feb  7 20:26:21.345: INFO: Got endpoints: latency-svc-np04p [372.869481ms]
Feb  7 20:26:21.348: INFO: Created: latency-svc-fqtd1
Feb  7 20:26:21.351: INFO: Got endpoints: latency-svc-m2vdm [387.271425ms]
Feb  7 20:26:21.361: INFO: Created: latency-svc-pt13d
Feb  7 20:26:21.361: INFO: Got endpoints: latency-svc-2f104 [191.29567ms]
Feb  7 20:26:21.376: INFO: Created: latency-svc-bwhll
Feb  7 20:26:21.402: INFO: Created: latency-svc-f2wqk
Feb  7 20:26:21.409: INFO: Created: latency-svc-b95qc
Feb  7 20:26:21.422: INFO: Got endpoints: latency-svc-zrcq7 [388.526471ms]
Feb  7 20:26:21.425: INFO: Got endpoints: latency-svc-g4cdz [190.584495ms]
Feb  7 20:26:21.440: INFO: Got endpoints: latency-svc-pt13d [318.306861ms]
Feb  7 20:26:21.461: INFO: Created: latency-svc-56hx5
Feb  7 20:26:21.476: INFO: Created: latency-svc-14tcz
Feb  7 20:26:21.489: INFO: Got endpoints: latency-svc-fqtd1 [391.370782ms]
Feb  7 20:26:21.498: INFO: Got endpoints: latency-svc-2tf84 [400.374756ms]
Feb  7 20:26:21.498: INFO: Created: latency-svc-1pg09
Feb  7 20:26:21.506: INFO: Got endpoints: latency-svc-bwhll [507.158573ms]
Feb  7 20:26:21.534: INFO: Created: latency-svc-9vgzs
Feb  7 20:26:21.604: INFO: Created: latency-svc-5vgh1
Feb  7 20:26:21.608: INFO: Got endpoints: latency-svc-b95qc [262.160631ms]
Feb  7 20:26:21.620: INFO: Got endpoints: latency-svc-f2wqk [441.188263ms]
Feb  7 20:26:21.629: INFO: Got endpoints: latency-svc-56hx5 [392.335166ms]
Feb  7 20:26:21.636: INFO: Got endpoints: latency-svc-14tcz [568.303461ms]
Feb  7 20:26:21.645: INFO: Got endpoints: latency-svc-1pg09 [360.863636ms]
Feb  7 20:26:21.657: INFO: Got endpoints: latency-svc-9vgzs [364.112717ms]
Feb  7 20:26:21.660: INFO: Got endpoints: latency-svc-5vgh1 [444.837824ms]
Feb  7 20:26:21.660: INFO: Latencies: [118.154767ms 125.05557ms 148.381543ms 151.576618ms 157.53887ms 162.003282ms 162.23739ms 169.117173ms 172.083292ms 173.126251ms 178.459909ms 178.774661ms 190.584495ms 191.29567ms 217.166018ms 221.531541ms 223.004468ms 230.930349ms 231.598598ms 237.905131ms 239.681975ms 243.361169ms 248.692421ms 251.534783ms 252.57744ms 255.569562ms 257.734497ms 260.345283ms 262.160631ms 262.922565ms 263.040206ms 265.502641ms 266.134121ms 266.666716ms 268.864765ms 271.092821ms 272.805626ms 273.60198ms 274.713147ms 275.071006ms 275.20134ms 275.436506ms 276.217176ms 278.982514ms 279.144609ms 279.367472ms 279.873721ms 279.904162ms 279.90944ms 280.816975ms 281.576912ms 285.572606ms 285.651229ms 286.370846ms 286.563818ms 288.840201ms 289.466224ms 289.612552ms 290.026632ms 290.388891ms 292.173136ms 292.186778ms 294.070868ms 296.833425ms 297.166383ms 297.276103ms 297.612146ms 301.591872ms 302.798039ms 303.398484ms 304.749555ms 305.668541ms 308.462154ms 308.663092ms 312.01749ms 312.101877ms 315.52797ms 315.709819ms 315.748807ms 316.288895ms 318.216897ms 318.306861ms 318.490542ms 318.798854ms 319.071806ms 319.561485ms 320.192429ms 320.257668ms 320.341005ms 323.600008ms 327.791052ms 329.495836ms 330.054151ms 333.755924ms 333.841465ms 334.755404ms 335.385362ms 336.815208ms 337.097107ms 339.430703ms 340.536575ms 341.790825ms 344.801643ms 345.952219ms 347.871314ms 348.765463ms 355.0399ms 356.937677ms 358.020188ms 358.309492ms 358.415878ms 358.817889ms 359.480874ms 360.863636ms 364.112717ms 364.441349ms 368.507601ms 371.52961ms 372.869481ms 373.048305ms 374.857685ms 377.30251ms 381.306615ms 381.328241ms 381.496731ms 382.034099ms 383.159971ms 384.765043ms 387.271425ms 388.526471ms 389.0623ms 391.370782ms 392.071567ms 392.335166ms 393.225067ms 395.627635ms 397.376579ms 398.406167ms 398.567748ms 400.374756ms 407.403182ms 409.862532ms 410.546994ms 411.495681ms 417.211206ms 418.100022ms 418.918042ms 422.279597ms 422.917607ms 423.048772ms 427.831828ms 427.966966ms 429.186635ms 429.640729ms 430.16927ms 431.511384ms 434.960892ms 437.922747ms 440.492374ms 440.620455ms 441.188263ms 443.532333ms 444.837824ms 460.757804ms 466.418068ms 467.01594ms 471.866144ms 475.153909ms 475.304969ms 476.118037ms 481.451207ms 483.463868ms 491.932771ms 493.643558ms 503.317538ms 507.158573ms 529.584908ms 545.568715ms 557.383647ms 561.403416ms 564.176888ms 568.303461ms 623.015622ms 653.387041ms 658.880253ms 680.43603ms 686.04986ms 701.265004ms 711.299476ms 731.222848ms 740.669827ms 745.085678ms 785.459252ms 825.370585ms 834.878627ms 848.234741ms 848.920099ms 850.279752ms 1.102435518s 1.148849092s]
Feb  7 20:26:21.661: INFO: 50 %ile: 340.536575ms
Feb  7 20:26:21.661: INFO: 90 %ile: 564.176888ms
Feb  7 20:26:21.661: INFO: 99 %ile: 1.102435518s
Feb  7 20:26:21.661: INFO: Total sample count: 200
[AfterEach] [k8s.io] Service endpoints latency
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:142
Feb  7 20:26:21.661: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-svc-latency-3qh24" for this suite.
Feb  7 20:26:47.574: INFO: namespace: e2e-tests-svc-latency-3qh24, resource: bindings, ignored listing per whitelist

• [SLOW TEST:35.247 seconds]
[k8s.io] Service endpoints latency
/data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:826
  should not be very high [Conformance]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/service_latency.go:116
------------------------------
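Each Created/Got endpoints pair above times one service: the test creates 200 services and measures how long each takes to get endpoints, with the bracketed duration being that gap. The summary then sorts all 200 samples and reads three percentiles out of the sorted slice. A stdlib-only sketch of the percentile step (the helper name is ours, not the framework's; the index rule shown is consistent with the three logged values, which sit at elements 101, 181 and 199 of the 200 sorted samples):

    package main

    import (
        "fmt"
        "sort"
        "time"
    )

    // percentile sorts a copy of the samples ascending and returns
    // element n*p/100 (0-indexed).
    func percentile(samples []time.Duration, p int) time.Duration {
        sorted := append([]time.Duration(nil), samples...)
        sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
        return sorted[len(sorted)*p/100]
    }

    func main() {
        // Stand-in data; over the full 200-sample set in the log this rule
        // yields 340.536575ms, 564.176888ms and 1.102435518s.
        samples := []time.Duration{
            118 * time.Millisecond, 251 * time.Millisecond, 340 * time.Millisecond,
            564 * time.Millisecond, 1102 * time.Millisecond,
        }
        for _, p := range []int{50, 90, 99} {
            fmt.Printf("%d %%ile: %v\n", p, percentile(samples, p))
        }
    }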
SSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[k8s.io] Namespaces [Serial] 
  should ensure that all pods are removed when a namespace is deleted.
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/namespace.go:216
[BeforeEach] [Top Level]
  /data/src/github.com/openshift/origin/test/extended/util/test.go:47
[BeforeEach] [k8s.io] Namespaces [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:141
STEP: Creating a kubernetes client
Feb  7 20:26:47.939: INFO: >>> kubeConfig: /tmp/cluster-admin.kubeconfig

STEP: Building a namespace api object
Feb  7 20:26:48.190: INFO: About to run a Kube e2e test, ensuring namespace is privileged
STEP: Waiting for a default service account to be provisioned in namespace
[It] should ensure that all pods are removed when a namespace is deleted.
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/namespace.go:216
STEP: Creating a test namespace
Feb  7 20:26:48.688: INFO: About to run a Kube e2e test, ensuring namespace is privileged
STEP: Waiting for a default service account to be provisioned in namespace
STEP: Creating a pod in the namespace
STEP: Waiting for the pod to have running status
STEP: Deleting the namespace
STEP: Waiting for the namespace to be removed.
STEP: Verifying there is no pod in the namespace
[AfterEach] [k8s.io] Namespaces [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:142
Feb  7 20:27:10.534: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-namespaces-15wzx" for this suite.
Feb  7 20:27:21.461: INFO: namespace: e2e-tests-namespaces-15wzx, resource: bindings, ignored listing per whitelist
STEP: Destroying namespace "e2e-tests-nsdeletetest-b9h7g" for this suite.
Feb  7 20:27:21.860: INFO: Namespace e2e-tests-nsdeletetest-b9h7g was already deleted

• [SLOW TEST:33.922 seconds]
[k8s.io] Namespaces [Serial]
/data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:826
  should ensure that all pods are removed when a namespace is deleted.
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/namespace.go:216
------------------------------
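The STEP sequence above (create namespace, create pod, wait for Running, delete namespace, wait for removal, verify no pods) is straightforward to reproduce outside the framework. A minimal sketch with a current client-go, noting that the vendored 1.5-era client used by this run had different method signatures (no context arguments), and that the pod image is illustrative; the services test later in this log follows the same shape:

    package main

    import (
        "context"
        "fmt"
        "time"

        corev1 "k8s.io/api/core/v1"
        apierrors "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/wait"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        cfg, err := clientcmd.BuildConfigFromFlags("", "/tmp/cluster-admin.kubeconfig")
        if err != nil {
            panic(err)
        }
        cs := kubernetes.NewForConfigOrDie(cfg)
        ctx := context.Background()

        // Creating a test namespace.
        ns, err := cs.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{
            ObjectMeta: metav1.ObjectMeta{GenerateName: "nsdeletetest-"},
        }, metav1.CreateOptions{})
        if err != nil {
            panic(err)
        }

        // Creating a pod in the namespace, then waiting for Running status.
        pod, err := cs.CoreV1().Pods(ns.Name).Create(ctx, &corev1.Pod{
            ObjectMeta: metav1.ObjectMeta{Name: "test-pod"},
            Spec: corev1.PodSpec{Containers: []corev1.Container{{
                Name:  "pause",
                Image: "gcr.io/google_containers/pause:3.0", // illustrative image
            }}},
        }, metav1.CreateOptions{})
        if err != nil {
            panic(err)
        }
        _ = wait.PollImmediate(time.Second, 2*time.Minute, func() (bool, error) {
            p, err := cs.CoreV1().Pods(ns.Name).Get(ctx, pod.Name, metav1.GetOptions{})
            return err == nil && p.Status.Phase == corev1.PodRunning, nil
        })

        // Deleting the namespace and waiting for it to be removed; the
        // namespace controller tears down everything inside it.
        _ = cs.CoreV1().Namespaces().Delete(ctx, ns.Name, metav1.DeleteOptions{})
        _ = wait.PollImmediate(2*time.Second, 3*time.Minute, func() (bool, error) {
            _, err := cs.CoreV1().Namespaces().Get(ctx, ns.Name, metav1.GetOptions{})
            return apierrors.IsNotFound(err), nil
        })

        // Verifying there is no pod in the namespace.
        _, err = cs.CoreV1().Pods(ns.Name).Get(ctx, pod.Name, metav1.GetOptions{})
        fmt.Println("pod removed with namespace:", apierrors.IsNotFound(err))
    }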
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[k8s.io] SchedulerPredicates [Serial] 
  validates that embedding the JSON NodeAffinity setting as a string in the annotation value work
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:398
[BeforeEach] [Top Level]
  /data/src/github.com/openshift/origin/test/extended/util/test.go:47
[BeforeEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:141
STEP: Creating a kubernetes client
Feb  7 20:27:21.861: INFO: >>> kubeConfig: /tmp/cluster-admin.kubeconfig

STEP: Building a namespace api object
Feb  7 20:27:22.098: INFO: About to run a Kube e2e test, ensuring namespace is privileged
STEP: Waiting for a default service account to be provisioned in namespace
[BeforeEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:100
Feb  7 20:27:22.499: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
Feb  7 20:27:22.562: INFO: Waiting for terminating namespaces to be deleted...
Feb  7 20:27:22.653: INFO: Waiting up to 5m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready
Feb  7 20:27:22.683: INFO: Waiting for pods to enter Success, but no pods in "kube-system" match label map[name:e2e-image-puller]
Feb  7 20:27:22.744: INFO: 0 / 0 pods in namespace 'kube-system' are running and ready (0 seconds elapsed)
Feb  7 20:27:22.744: INFO: expected 0 pod replicas in namespace 'kube-system', 0 are Running and Ready.
Feb  7 20:27:22.744: INFO: 
Logging pods the kubelet thinks is on node ci-pr104-ig-m-pr2w before test
Feb  7 20:27:22.806: INFO: docker-registry-3-vgj9z from default started at 2017-02-07 19:52:38 -0500 EST (1 container statuses recorded)
Feb  7 20:27:22.806: INFO: 	Container registry ready: true, restart count 0
Feb  7 20:27:22.806: INFO: registry-console-1-93942 from default started at 2017-02-07 19:52:13 -0500 EST (1 container statuses recorded)
Feb  7 20:27:22.806: INFO: 	Container registry-console ready: true, restart count 0
Feb  7 20:27:22.806: INFO: router-1-t1x6w from default started at 2017-02-07 19:51:44 -0500 EST (1 container statuses recorded)
Feb  7 20:27:22.806: INFO: 	Container router ready: true, restart count 0
Feb  7 20:27:22.806: INFO: 
Logging pods the kubelet thinks is on node ci-pr104-ig-n-3z1d before test
Feb  7 20:27:22.869: INFO: 
Logging pods the kubelet thinks is on node ci-pr104-ig-n-lffc before test
Feb  7 20:27:22.931: INFO: 
Logging pods the kubelet thinks is on node ci-pr104-ig-n-rjnv before test
[It] validates that embedding the JSON NodeAffinity setting as a string in the annotation value work
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:398
STEP: Trying to launch a pod without a label to get a node which can launch it.
STEP: Explicitly delete pod here to free the resource it takes.
STEP: Trying to apply a label with fake az info on the found node.
STEP: verifying the node has the label kubernetes.io/e2e-az-name e2e-az1
STEP: Trying to launch a pod that with NodeAffinity setting as embedded JSON string in the annotation value.
STEP: removing the label kubernetes.io/e2e-az-name off the node ci-pr104-ig-n-3z1d
STEP: verifying the node doesn't have the label kubernetes.io/e2e-az-name
[AfterEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:142
Feb  7 20:27:28.276: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-sched-pred-vx8kp" for this suite.
Feb  7 20:27:54.204: INFO: namespace: e2e-tests-sched-pred-vx8kp, resource: bindings, ignored listing per whitelist
[AfterEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:67
I0207 20:27:54.568798   22630 request.go:769] Error in request: resource name may not be empty

• [SLOW TEST:32.707 seconds]
[k8s.io] SchedulerPredicates [Serial]
/data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:826
  validates that embedding the JSON NodeAffinity setting as a string in the annotation value work
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:398
------------------------------
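This test exercises the pre-1.6 representation of affinity: rather than a first-class field in the pod spec, the NodeAffinity requirement is serialized to JSON and carried in the alpha scheduler.alpha.kubernetes.io/affinity annotation, matching the kubernetes.io/e2e-az-name=e2e-az1 label applied above. A sketch of such a pod (pod name and image are illustrative; imports as in the earlier sketch):

    // Imports as in the namespace sketch above (corev1, metav1).
    func affinityAnnotationPod() *corev1.Pod {
        affinity := `{
          "nodeAffinity": {
            "requiredDuringSchedulingIgnoredDuringExecution": {
              "nodeSelectorTerms": [{
                "matchExpressions": [{
                  "key": "kubernetes.io/e2e-az-name",
                  "operator": "In",
                  "values": ["e2e-az1"]
                }]
              }]
            }
          }
        }`
        return &corev1.Pod{
            ObjectMeta: metav1.ObjectMeta{
                Name: "with-labels",
                // The whole NodeAffinity requirement rides along as a JSON
                // string; schedulers of that era parsed it from here.
                Annotations: map[string]string{
                    "scheduler.alpha.kubernetes.io/affinity": affinity,
                },
            },
            Spec: corev1.PodSpec{
                Containers: []corev1.Container{{
                    Name: "with-labels", Image: "gcr.io/google_containers/pause:3.0",
                }},
            },
        }
    }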
SSSSSSSSSSS
------------------------------
[k8s.io] Namespaces [Serial] 
  should ensure that all services are removed when a namespace is deleted.
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/namespace.go:219
[BeforeEach] [Top Level]
  /data/src/github.com/openshift/origin/test/extended/util/test.go:47
[BeforeEach] [k8s.io] Namespaces [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:141
STEP: Creating a kubernetes client
Feb  7 20:27:54.569: INFO: >>> kubeConfig: /tmp/cluster-admin.kubeconfig

STEP: Building a namespace api object
Feb  7 20:27:54.690: INFO: About to run a Kube e2e test, ensuring namespace is privileged
STEP: Waiting for a default service account to be provisioned in namespace
[It] should ensure that all services are removed when a namespace is deleted.
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/namespace.go:219
STEP: Creating a test namespace
Feb  7 20:27:55.291: INFO: About to run a Kube e2e test, ensuring namespace is privileged
STEP: Waiting for a default service account to be provisioned in namespace
STEP: Creating a service in the namespace
STEP: Deleting the namespace
STEP: Waiting for the namespace to be removed.
STEP: Verifying there is no service in the namespace
[AfterEach] [k8s.io] Namespaces [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:142
Feb  7 20:28:00.780: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-namespaces-xvdt5" for this suite.
Feb  7 20:28:11.712: INFO: namespace: e2e-tests-namespaces-xvdt5, resource: bindings, ignored listing per whitelist
STEP: Destroying namespace "e2e-tests-nsdeletetest-2ff33" for this suite.
Feb  7 20:28:12.106: INFO: Namespace e2e-tests-nsdeletetest-2ff33 was already deleted

• [SLOW TEST:17.538 seconds]
[k8s.io] Namespaces [Serial]
/data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:826
  should ensure that all services are removed when a namespace is deleted.
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/namespace.go:219
------------------------------
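The services variant runs the same create/delete/wait cycle as the pods test above; only the final verification differs. A one-function sketch (imports and client as before):

    // Verifying there is no service in the namespace: once the namespace
    // delete has completed, the service must be gone too.
    func serviceGone(ctx context.Context, cs kubernetes.Interface, ns, name string) bool {
        _, err := cs.CoreV1().Services(ns).Get(ctx, name, metav1.GetOptions{})
        return apierrors.IsNotFound(err)
    }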
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[k8s.io] SchedulerPredicates [Serial] 
  validates that taints-tolerations is respected if matching
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:716
[BeforeEach] [Top Level]
  /data/src/github.com/openshift/origin/test/extended/util/test.go:47
[BeforeEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:141
STEP: Creating a kubernetes client
Feb  7 20:28:12.107: INFO: >>> kubeConfig: /tmp/cluster-admin.kubeconfig

STEP: Building a namespace api object
Feb  7 20:28:12.305: INFO: About to run a Kube e2e test, ensuring namespace is privileged
STEP: Waiting for a default service account to be provisioned in namespace
[BeforeEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:100
Feb  7 20:28:12.713: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
Feb  7 20:28:12.777: INFO: Waiting for terminating namespaces to be deleted...
Feb  7 20:28:12.867: INFO: Waiting up to 5m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready
Feb  7 20:28:12.897: INFO: Waiting for pods to enter Success, but no pods in "kube-system" match label map[name:e2e-image-puller]
Feb  7 20:28:12.957: INFO: 0 / 0 pods in namespace 'kube-system' are running and ready (0 seconds elapsed)
Feb  7 20:28:12.957: INFO: expected 0 pod replicas in namespace 'kube-system', 0 are Running and Ready.
Feb  7 20:28:12.957: INFO: 
Logging pods the kubelet thinks is on node ci-pr104-ig-m-pr2w before test
Feb  7 20:28:13.019: INFO: docker-registry-3-vgj9z from default started at 2017-02-07 19:52:38 -0500 EST (1 container statuses recorded)
Feb  7 20:28:13.019: INFO: 	Container registry ready: true, restart count 0
Feb  7 20:28:13.019: INFO: registry-console-1-93942 from default started at 2017-02-07 19:52:13 -0500 EST (1 container statuses recorded)
Feb  7 20:28:13.019: INFO: 	Container registry-console ready: true, restart count 0
Feb  7 20:28:13.019: INFO: router-1-t1x6w from default started at 2017-02-07 19:51:44 -0500 EST (1 container statuses recorded)
Feb  7 20:28:13.019: INFO: 	Container router ready: true, restart count 0
Feb  7 20:28:13.019: INFO: 
Logging pods the kubelet thinks is on node ci-pr104-ig-n-3z1d before test
Feb  7 20:28:13.082: INFO: 
Logging pods the kubelet thinks is on node ci-pr104-ig-n-lffc before test
Feb  7 20:28:13.144: INFO: 
Logging pods the kubelet thinks is on node ci-pr104-ig-n-rjnv before test
[It] validates that taints-tolerations is respected if matching
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:716
STEP: Trying to launch a pod without a toleration to get a node which can launch it.
STEP: Explicitly delete pod here to free the resource it takes.
STEP: Trying to apply a random taint on the found node.
STEP: verifying the node has the taint kubernetes.io/e2e-taint-key-dd2ae049-ed9d-11e6-8acb-0ee8138d8aac=testing-taint-value:NoSchedule
STEP: Trying to apply a random label on the found node.
STEP: verifying the node has the label kubernetes.io/e2e-label-key-dd3a0829-ed9d-11e6-8acb-0ee8138d8aac testing-label-value
STEP: Trying to relaunch the pod, now with tolerations.
STEP: removing the label kubernetes.io/e2e-label-key-dd3a0829-ed9d-11e6-8acb-0ee8138d8aac off the node ci-pr104-ig-n-lffc
STEP: verifying the node doesn't have the label kubernetes.io/e2e-label-key-dd3a0829-ed9d-11e6-8acb-0ee8138d8aac
STEP: removing the taint kubernetes.io/e2e-taint-key-dd2ae049-ed9d-11e6-8acb-0ee8138d8aac=testing-taint-value:NoSchedule off the node ci-pr104-ig-n-lffc
STEP: verifying the node doesn't have the taint kubernetes.io/e2e-taint-key-dd2ae049-ed9d-11e6-8acb-0ee8138d8aac=testing-taint-value:NoSchedule
[AfterEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:142
Feb  7 20:28:18.305: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-sched-pred-jx0fq" for this suite.
Feb  7 20:28:44.236: INFO: namespace: e2e-tests-sched-pred-jx0fq, resource: bindings, ignored listing per whitelist
[AfterEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:67
I0207 20:28:44.603768   22630 request.go:769] Error in request: resource name may not be empty

• [SLOW TEST:32.496 seconds]
[k8s.io] SchedulerPredicates [Serial]
/data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:826
  validates that taints-tolerations is respected if matching
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:716
------------------------------
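Step by step this is a matched pair: a random NoSchedule taint goes onto the found node, and the relaunched pod carries a toleration agreeing with it on key, value and effect. With the current first-class fields (the 1.5-era code under test expressed tolerations via annotations) the pair looks roughly like this, using the taint key from the log; imports as above:

    // Builds the NoSchedule taint applied to the found node and gives the
    // pod the toleration that lets it land there anyway.
    func addMatchingToleration(pod *corev1.Pod) corev1.Taint {
        const taintKey = "kubernetes.io/e2e-taint-key-dd2ae049-ed9d-11e6-8acb-0ee8138d8aac"
        taint := corev1.Taint{
            Key:    taintKey,
            Value:  "testing-taint-value",
            Effect: corev1.TaintEffectNoSchedule, // keeps non-tolerating pods off the node
        }
        pod.Spec.Tolerations = append(pod.Spec.Tolerations, corev1.Toleration{
            Key:      taintKey,
            Operator: corev1.TolerationOpEqual,
            Value:    "testing-taint-value",
            Effect:   corev1.TaintEffectNoSchedule,
        })
        return taint
    }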
SSSSSSSSSSS
------------------------------
[k8s.io] SchedulerPredicates [Serial] 
  validates that NodeAffinity is respected if not matching
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:327
[BeforeEach] [Top Level]
  /data/src/github.com/openshift/origin/test/extended/util/test.go:47
[BeforeEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:141
STEP: Creating a kubernetes client
Feb  7 20:28:44.604: INFO: >>> kubeConfig: /tmp/cluster-admin.kubeconfig

STEP: Building a namespace api object
Feb  7 20:28:44.768: INFO: About to run a Kube e2e test, ensuring namespace is privileged
STEP: Waiting for a default service account to be provisioned in namespace
[BeforeEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:100
Feb  7 20:28:45.171: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
Feb  7 20:28:45.234: INFO: Waiting for terminating namespaces to be deleted...
Feb  7 20:28:45.325: INFO: Waiting up to 5m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready
Feb  7 20:28:45.356: INFO: Waiting for pods to enter Success, but no pods in "kube-system" match label map[name:e2e-image-puller]
Feb  7 20:28:45.417: INFO: 0 / 0 pods in namespace 'kube-system' are running and ready (0 seconds elapsed)
Feb  7 20:28:45.417: INFO: expected 0 pod replicas in namespace 'kube-system', 0 are Running and Ready.
Feb  7 20:28:45.417: INFO: 
Logging pods the kubelet thinks is on node ci-pr104-ig-m-pr2w before test
Feb  7 20:28:45.479: INFO: registry-console-1-93942 from default started at 2017-02-07 19:52:13 -0500 EST (1 container statuses recorded)
Feb  7 20:28:45.479: INFO: 	Container registry-console ready: true, restart count 0
Feb  7 20:28:45.479: INFO: router-1-t1x6w from default started at 2017-02-07 19:51:44 -0500 EST (1 container statuses recorded)
Feb  7 20:28:45.479: INFO: 	Container router ready: true, restart count 0
Feb  7 20:28:45.479: INFO: docker-registry-3-vgj9z from default started at 2017-02-07 19:52:38 -0500 EST (1 container statuses recorded)
Feb  7 20:28:45.479: INFO: 	Container registry ready: true, restart count 0
Feb  7 20:28:45.479: INFO: 
Logging pods the kubelet thinks is on node ci-pr104-ig-n-3z1d before test
Feb  7 20:28:45.552: INFO: 
Logging pods the kubelet thinks is on node ci-pr104-ig-n-lffc before test
Feb  7 20:28:45.616: INFO: 
Logging pods the kubelet thinks is on node ci-pr104-ig-n-rjnv before test
[It] validates that NodeAffinity is respected if not matching
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:327
STEP: Trying to schedule Pod with nonempty NodeSelector.
Feb  7 20:28:45.777: INFO: Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.
[AfterEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:142
Feb  7 20:28:55.809: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-sched-pred-f3ztz" for this suite.
Feb  7 20:29:21.735: INFO: namespace: e2e-tests-sched-pred-f3ztz, resource: bindings, ignored listing per whitelist
[AfterEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:67
I0207 20:29:22.098778   22630 request.go:769] Error in request: resource name may not be empty

• [SLOW TEST:37.495 seconds]
[k8s.io] SchedulerPredicates [Serial]
/data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:826
  validates that NodeAffinity is respected if not matching
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:327
------------------------------
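The negative predicate check is deliberately blunt: give a pod a NodeSelector no node carries, wait out the 10-second "crossing our fingers" window, and confirm it was never bound to a node. A sketch of that check (selector and image are illustrative; imports as above):

    func podStaysPending(ctx context.Context, cs kubernetes.Interface, ns string) (bool, error) {
        pod := &corev1.Pod{
            ObjectMeta: metav1.ObjectMeta{Name: "restricted-pod"},
            Spec: corev1.PodSpec{
                NodeSelector: map[string]string{"label": "nonempty"}, // matches no node
                Containers: []corev1.Container{{
                    Name: "pause", Image: "gcr.io/google_containers/pause:3.0",
                }},
            },
        }
        if _, err := cs.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}); err != nil {
            return false, err
        }
        time.Sleep(10 * time.Second) // the test's scheduling window
        got, err := cs.CoreV1().Pods(ns).Get(ctx, pod.Name, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
        return got.Spec.NodeName == "", nil // still unscheduled => predicate respected
    }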
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[k8s.io] Daemon set [Serial] 
  should run and stop complex daemon with node affinity
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/daemon_set.go:272
[BeforeEach] [Top Level]
  /data/src/github.com/openshift/origin/test/extended/util/test.go:47
[BeforeEach] [k8s.io] Daemon set [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:141
STEP: Creating a kubernetes client
Feb  7 20:29:22.099: INFO: >>> kubeConfig: /tmp/cluster-admin.kubeconfig

STEP: Building a namespace api object
Feb  7 20:29:22.249: INFO: About to run a Kube e2e test, ensuring namespace is privileged
STEP: Waiting for a default service account to be provisioned in namespace
[BeforeEach] [k8s.io] Daemon set [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/daemon_set.go:89
[It] should run and stop complex daemon with node affinity
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/daemon_set.go:272
Feb  7 20:29:30.741: INFO: Creating daemon with a node affinity daemon-set
STEP: Initially, daemon pods should not be running on any nodes.
Feb  7 20:29:32.805: INFO: nodesToPodCount: map[string]int{}
STEP: Change label of node, check that daemon pod is launched.
Feb  7 20:29:36.961: INFO: nodesToPodCount: map[string]int{"ci-pr104-ig-m-pr2w":1}
STEP: remove the node selector and wait for daemons to be unscheduled
Feb  7 20:29:41.088: INFO: nodesToPodCount: map[string]int{}
STEP: We should now be able to delete the daemon set.
[AfterEach] [k8s.io] Daemon set [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/daemon_set.go:73
Feb  7 20:29:41.151: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"extensions/v1beta1","metadata":{"selfLink":"/apis/extensions/v1beta1/namespaces/e2e-tests-daemonsets-r2clv/daemonsets","resourceVersion":"22046"},"items":null}

Feb  7 20:29:41.182: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/e2e-tests-daemonsets-r2clv/pods","resourceVersion":"22046"},"items":null}

[AfterEach] [k8s.io] Daemon set [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:142
Feb  7 20:29:49.365: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-daemonsets-r2clv" for this suite.
Feb  7 20:30:00.290: INFO: namespace: e2e-tests-daemonsets-r2clv, resource: bindings, ignored listing per whitelist

• [SLOW TEST:38.553 seconds]
[k8s.io] Daemon set [Serial]
/data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:826
  should run and stop complex daemon with node affinity
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/daemon_set.go:272
------------------------------
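The nodesToPodCount maps above track exactly what the DaemonSet controller should do as the node label flips: no matching node gives an empty map, applying the label puts one pod on that node, and removing it empties the map again. A sketch of a DaemonSet template that drives this behavior; the actual test embeds a NodeAffinity requirement, while a plain NodeSelector (shown here, with an illustrative label) produces the same label-driven scheduling, and the run above used extensions/v1beta1 rather than apps/v1:

    // Imports: appsv1 "k8s.io/api/apps/v1" plus corev1/metav1 as above.
    func nodeSelectorDaemonSet() *appsv1.DaemonSet {
        labels := map[string]string{"daemon-set": "daemon-set"}
        return &appsv1.DaemonSet{
            ObjectMeta: metav1.ObjectMeta{Name: "daemon-set"},
            Spec: appsv1.DaemonSetSpec{
                Selector: &metav1.LabelSelector{MatchLabels: labels},
                Template: corev1.PodTemplateSpec{
                    ObjectMeta: metav1.ObjectMeta{Labels: labels},
                    Spec: corev1.PodSpec{
                        // Only nodes carrying this label run a daemon pod;
                        // relabeling a node adds or removes its pod, as the
                        // nodesToPodCount transitions above show.
                        NodeSelector: map[string]string{"color": "blue"},
                        Containers: []corev1.Container{{
                            Name: "app", Image: "gcr.io/google_containers/pause:3.0",
                        }},
                    },
                },
            },
        }
    }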
[k8s.io] Namespaces [Serial] 
  should delete fast enough (90 percent of 100 namespaces in 150 seconds)
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/namespace.go:222
[BeforeEach] [Top Level]
  /data/src/github.com/openshift/origin/test/extended/util/test.go:47
[BeforeEach] [k8s.io] Namespaces [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:141
STEP: Creating a kubernetes client
Feb  7 20:30:00.653: INFO: >>> kubeConfig: /tmp/cluster-admin.kubeconfig

STEP: Building a namespace api object
STEP: Waiting for a default service account to be provisioned in namespace
[It] should delete fast enough (90 percent of 100 namespaces in 150 seconds)
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/namespace.go:222
STEP: Creating testing namespaces
I0207 20:30:00.904548   22630 request.go:632] Throttling request took 60.684495ms, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:00.954542   22630 request.go:632] Throttling request took 110.675602ms, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:01.004568   22630 request.go:632] Throttling request took 160.699776ms, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:01.054541   22630 request.go:632] Throttling request took 210.66154ms, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:01.104543   22630 request.go:632] Throttling request took 260.642481ms, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:01.154546   22630 request.go:632] Throttling request took 310.645503ms, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:01.204550   22630 request.go:632] Throttling request took 360.643025ms, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:01.254541   22630 request.go:632] Throttling request took 410.618805ms, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:01.304459   22630 request.go:632] Throttling request took 460.51756ms, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:01.354555   22630 request.go:632] Throttling request took 510.610735ms, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:01.404561   22630 request.go:632] Throttling request took 560.613046ms, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:01.454569   22630 request.go:632] Throttling request took 610.595101ms, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:01.504564   22630 request.go:632] Throttling request took 660.57355ms, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:01.554591   22630 request.go:632] Throttling request took 710.595365ms, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:01.604561   22630 request.go:632] Throttling request took 760.563669ms, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:01.654560   22630 request.go:632] Throttling request took 810.542628ms, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:01.704557   22630 request.go:632] Throttling request took 860.533158ms, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:01.754547   22630 request.go:632] Throttling request took 910.518805ms, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:01.804554   22630 request.go:632] Throttling request took 960.528648ms, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:01.854551   22630 request.go:632] Throttling request took 1.010506167s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:01.904547   22630 request.go:632] Throttling request took 1.060481197s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:01.954552   22630 request.go:632] Throttling request took 1.110480429s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:02.004548   22630 request.go:632] Throttling request took 1.160480209s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:02.054544   22630 request.go:632] Throttling request took 1.210456151s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:02.104541   22630 request.go:632] Throttling request took 1.260442196s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:02.154552   22630 request.go:632] Throttling request took 1.31044729s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:02.204553   22630 request.go:632] Throttling request took 1.36044318s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:02.254548   22630 request.go:632] Throttling request took 1.410421045s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:02.304465   22630 request.go:632] Throttling request took 1.460326878s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:02.354546   22630 request.go:632] Throttling request took 1.510400066s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:02.404547   22630 request.go:632] Throttling request took 1.560400148s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:02.454547   22630 request.go:632] Throttling request took 1.610378188s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:02.504564   22630 request.go:632] Throttling request took 1.660387441s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:02.554562   22630 request.go:632] Throttling request took 1.710381423s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:02.604542   22630 request.go:632] Throttling request took 1.760358151s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:02.654578   22630 request.go:632] Throttling request took 1.810358845s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:02.704546   22630 request.go:632] Throttling request took 1.860332875s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:02.754543   22630 request.go:632] Throttling request took 1.910322761s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:02.804549   22630 request.go:632] Throttling request took 1.960301766s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:02.854551   22630 request.go:632] Throttling request took 2.010303505s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:02.904540   22630 request.go:632] Throttling request took 2.060273652s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:02.954535   22630 request.go:632] Throttling request took 2.110244225s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:03.004547   22630 request.go:632] Throttling request took 2.160252488s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:03.054548   22630 request.go:632] Throttling request took 2.210235903s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:03.104571   22630 request.go:632] Throttling request took 2.260228129s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:03.154561   22630 request.go:632] Throttling request took 2.310198964s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:03.204550   22630 request.go:632] Throttling request took 2.360182633s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:03.254547   22630 request.go:632] Throttling request took 2.410164814s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
I0207 20:30:03.304460   22630 request.go:632] Throttling request took 2.460053967s, request: POST:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces
STEP: Waiting 10 seconds
STEP: Deleting namespaces
Feb  7 20:30:25.427: INFO: namespace : e2e-tests-nslifetest-39-6tb11 api call to delete is complete 
Feb  7 20:30:25.431: INFO: namespace : e2e-tests-nslifetest-20-q60z0 api call to delete is complete 
Feb  7 20:30:25.431: INFO: namespace : e2e-tests-nslifetest-10-1m3zd api call to delete is complete 
Feb  7 20:30:25.431: INFO: namespace : e2e-tests-nslifetest-38-dxtfd api call to delete is complete 
Feb  7 20:30:25.431: INFO: namespace : e2e-tests-nslifetest-99-8l1w9 api call to delete is complete 
Feb  7 20:30:25.431: INFO: namespace : e2e-tests-nslifetest-13-wnt33 api call to delete is complete 
Feb  7 20:30:25.432: INFO: namespace : e2e-tests-nslifetest-56-bmq8n api call to delete is complete 
Feb  7 20:30:25.432: INFO: namespace : e2e-tests-nslifetest-19-02msh api call to delete is complete 
Feb  7 20:30:25.432: INFO: namespace : e2e-tests-nslifetest-11-r48zx api call to delete is complete 
I0207 20:30:25.454552   22630 request.go:632] Throttling request took 67.80617ms, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-48-gbdb9
Feb  7 20:30:25.503: INFO: namespace : e2e-tests-nslifetest-42-mv9dk api call to delete is complete 
Feb  7 20:30:25.503: INFO: namespace : e2e-tests-nslifetest-37-1vkq8 api call to delete is complete 
Feb  7 20:30:25.503: INFO: namespace : e2e-tests-nslifetest-36-7dtjn api call to delete is complete 
Feb  7 20:30:25.503: INFO: namespace : e2e-tests-nslifetest-62-b4h3p api call to delete is complete 
Feb  7 20:30:25.503: INFO: namespace : e2e-tests-nslifetest-27-qp1r1 api call to delete is complete 
Feb  7 20:30:25.503: INFO: namespace : e2e-tests-nslifetest-14-t7v38 api call to delete is complete 
Feb  7 20:30:25.503: INFO: namespace : e2e-tests-nslifetest-47-4k11m api call to delete is complete 
Feb  7 20:30:25.503: INFO: namespace : e2e-tests-nslifetest-0-hfwxw api call to delete is complete 
Feb  7 20:30:25.503: INFO: namespace : e2e-tests-nslifetest-45-td152 api call to delete is complete 
Feb  7 20:30:25.503: INFO: namespace : e2e-tests-nslifetest-64-v0815 api call to delete is complete 
Feb  7 20:30:25.503: INFO: namespace : e2e-tests-nslifetest-25-77tts api call to delete is complete 
Feb  7 20:30:25.503: INFO: namespace : e2e-tests-nslifetest-4-fmptn api call to delete is complete 
Feb  7 20:30:25.503: INFO: namespace : e2e-tests-nslifetest-6-bhq6v api call to delete is complete 
Feb  7 20:30:25.503: INFO: namespace : e2e-tests-nslifetest-61-7t9lt api call to delete is complete 
Feb  7 20:30:25.503: INFO: namespace : e2e-tests-nslifetest-41-21sr6 api call to delete is complete 
Feb  7 20:30:25.504: INFO: namespace : e2e-tests-nslifetest-65-k6141 api call to delete is complete 
Feb  7 20:30:25.504: INFO: namespace : e2e-tests-nslifetest-66-72hf9 api call to delete is complete 
Feb  7 20:30:25.504: INFO: namespace : e2e-tests-nslifetest-58-n8pq8 api call to delete is complete 
Feb  7 20:30:25.504: INFO: namespace : e2e-tests-nslifetest-35-kdvc9 api call to delete is complete 
Feb  7 20:30:25.504: INFO: namespace : e2e-tests-nslifetest-17-v0lqq api call to delete is complete 
Feb  7 20:30:25.504: INFO: namespace : e2e-tests-nslifetest-59-28155 api call to delete is complete 
Feb  7 20:30:25.504: INFO: namespace : e2e-tests-nslifetest-60-rgzn4 api call to delete is complete 
Feb  7 20:30:25.504: INFO: namespace : e2e-tests-nslifetest-18-8x2kv api call to delete is complete 
Feb  7 20:30:25.504: INFO: namespace : e2e-tests-nslifetest-68-zc8x1 api call to delete is complete 
Feb  7 20:30:25.504: INFO: namespace : e2e-tests-nslifetest-40-pw2w5 api call to delete is complete 
I0207 20:30:25.504567   22630 request.go:632] Throttling request took 117.80721ms, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-7-4mg8b
Feb  7 20:30:25.505: INFO: namespace : e2e-tests-nslifetest-46-tss49 api call to delete is complete 
Feb  7 20:30:25.506: INFO: namespace : e2e-tests-nslifetest-63-nh3nh api call to delete is complete 
Feb  7 20:30:25.507: INFO: namespace : e2e-tests-nslifetest-12-40891 api call to delete is complete 
Feb  7 20:30:25.507: INFO: namespace : e2e-tests-nslifetest-26-6r7t0 api call to delete is complete 
Feb  7 20:30:25.508: INFO: namespace : e2e-tests-nslifetest-15-ggfvk api call to delete is complete 
Feb  7 20:30:25.508: INFO: namespace : e2e-tests-nslifetest-16-x3fd0 api call to delete is complete 
Feb  7 20:30:25.508: INFO: namespace : e2e-tests-nslifetest-67-4l1t6 api call to delete is complete 
Feb  7 20:30:25.508: INFO: namespace : e2e-tests-nslifetest-24-q11rv api call to delete is complete 
Feb  7 20:30:25.508: INFO: namespace : e2e-tests-nslifetest-2-l1lrj api call to delete is complete 
Feb  7 20:30:25.509: INFO: namespace : e2e-tests-nslifetest-57-ws685 api call to delete is complete 
Feb  7 20:30:25.531: INFO: namespace : e2e-tests-nslifetest-23-k1vgk api call to delete is complete 
Feb  7 20:30:25.531: INFO: namespace : e2e-tests-nslifetest-22-03fs0 api call to delete is complete 
Feb  7 20:30:25.531: INFO: namespace : e2e-tests-nslifetest-1-d52s2 api call to delete is complete 
Feb  7 20:30:25.531: INFO: namespace : e2e-tests-nslifetest-43-xrj3w api call to delete is complete 
Feb  7 20:30:25.532: INFO: namespace : e2e-tests-nslifetest-69-1rcx9 api call to delete is complete 
Feb  7 20:30:25.532: INFO: namespace : e2e-tests-nslifetest-44-50l19 api call to delete is complete 
Feb  7 20:30:25.532: INFO: namespace : e2e-tests-nslifetest-21-twb9h api call to delete is complete 
Feb  7 20:30:25.532: INFO: namespace : e2e-tests-nslifetest-48-gbdb9 api call to delete is complete 
Feb  7 20:30:25.539: INFO: namespace : e2e-tests-nslifetest-7-4mg8b api call to delete is complete 
I0207 20:30:25.554538   22630 request.go:632] Throttling request took 167.777462ms, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-49-m10tg
Feb  7 20:30:25.592: INFO: namespace : e2e-tests-nslifetest-49-m10tg api call to delete is complete 
I0207 20:30:25.604539   22630 request.go:632] Throttling request took 217.770717ms, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-28-31w58
Feb  7 20:30:25.638: INFO: namespace : e2e-tests-nslifetest-28-31w58 api call to delete is complete 
I0207 20:30:25.654542   22630 request.go:632] Throttling request took 267.770971ms, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-70-gvrcj
Feb  7 20:30:25.688: INFO: namespace : e2e-tests-nslifetest-70-gvrcj api call to delete is complete 
I0207 20:30:25.704544   22630 request.go:632] Throttling request took 317.77056ms, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-5-8zndq
Feb  7 20:30:25.739: INFO: namespace : e2e-tests-nslifetest-5-8zndq api call to delete is complete 
I0207 20:30:25.754542   22630 request.go:632] Throttling request took 367.76199ms, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-29-3s61j
Feb  7 20:30:25.788: INFO: namespace : e2e-tests-nslifetest-29-3s61j api call to delete is complete 
I0207 20:30:25.804553   22630 request.go:632] Throttling request took 417.763643ms, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-71-kzpg0
Feb  7 20:30:25.837: INFO: namespace : e2e-tests-nslifetest-71-kzpg0 api call to delete is complete 
I0207 20:30:25.854555   22630 request.go:632] Throttling request took 467.759325ms, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-50-72c4m
Feb  7 20:30:25.887: INFO: namespace : e2e-tests-nslifetest-50-72c4m api call to delete is complete 
I0207 20:30:25.904548   22630 request.go:632] Throttling request took 517.749386ms, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-3-tx4gv
Feb  7 20:30:25.938: INFO: namespace : e2e-tests-nslifetest-3-tx4gv api call to delete is complete 
I0207 20:30:25.954549   22630 request.go:632] Throttling request took 567.743611ms, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-72-65xjp
Feb  7 20:30:25.987: INFO: namespace : e2e-tests-nslifetest-72-65xjp api call to delete is complete 
I0207 20:30:26.004555   22630 request.go:632] Throttling request took 617.74856ms, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-51-rznmv
Feb  7 20:30:26.037: INFO: namespace : e2e-tests-nslifetest-51-rznmv api call to delete is complete 
I0207 20:30:26.054546   22630 request.go:632] Throttling request took 667.733263ms, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-30-vlcgd
Feb  7 20:30:26.088: INFO: namespace : e2e-tests-nslifetest-30-vlcgd api call to delete is complete 
I0207 20:30:26.104556   22630 request.go:632] Throttling request took 717.73557ms, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-73-wg3x9
Feb  7 20:30:26.137: INFO: namespace : e2e-tests-nslifetest-73-wg3x9 api call to delete is complete 
I0207 20:30:26.154554   22630 request.go:632] Throttling request took 767.733ms, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-52-6h405
Feb  7 20:30:26.187: INFO: namespace : e2e-tests-nslifetest-52-6h405 api call to delete is complete 
I0207 20:30:26.204555   22630 request.go:632] Throttling request took 817.72703ms, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-31-82gz8
Feb  7 20:30:26.238: INFO: namespace : e2e-tests-nslifetest-31-82gz8 api call to delete is complete 
I0207 20:30:26.254567   22630 request.go:632] Throttling request took 867.733868ms, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-74-xjwm9
Feb  7 20:30:26.289: INFO: namespace : e2e-tests-nslifetest-74-xjwm9 api call to delete is complete 
I0207 20:30:26.304581   22630 request.go:632] Throttling request took 917.740275ms, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-53-s497s
Feb  7 20:30:26.340: INFO: namespace : e2e-tests-nslifetest-53-s497s api call to delete is complete 
I0207 20:30:26.354576   22630 request.go:632] Throttling request took 967.723185ms, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-32-cp7sv
Feb  7 20:30:26.389: INFO: namespace : e2e-tests-nslifetest-32-cp7sv api call to delete is complete 
I0207 20:30:26.404558   22630 request.go:632] Throttling request took 1.017710522s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-75-rxmqb
Feb  7 20:30:26.438: INFO: namespace : e2e-tests-nslifetest-75-rxmqb api call to delete is complete 
I0207 20:30:26.454559   22630 request.go:632] Throttling request took 1.06770663s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-54-c0v6x
Feb  7 20:30:26.488: INFO: namespace : e2e-tests-nslifetest-54-c0v6x api call to delete is complete 
I0207 20:30:26.504554   22630 request.go:632] Throttling request took 1.117696763s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-33-tpbcp
Feb  7 20:30:26.538: INFO: namespace : e2e-tests-nslifetest-33-tpbcp api call to delete is complete 
I0207 20:30:26.554558   22630 request.go:632] Throttling request took 1.167700315s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-76-rxl7h
Feb  7 20:30:26.587: INFO: namespace : e2e-tests-nslifetest-76-rxl7h api call to delete is complete 
I0207 20:30:26.604538   22630 request.go:632] Throttling request took 1.217673662s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-55-qth3r
Feb  7 20:30:26.637: INFO: namespace : e2e-tests-nslifetest-55-qth3r api call to delete is complete 
I0207 20:30:26.654554   22630 request.go:632] Throttling request took 1.267683169s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-34-g10xp
Feb  7 20:30:26.687: INFO: namespace : e2e-tests-nslifetest-34-g10xp api call to delete is complete 
I0207 20:30:26.704545   22630 request.go:632] Throttling request took 1.317671723s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-77-8710d
Feb  7 20:30:26.737: INFO: namespace : e2e-tests-nslifetest-77-8710d api call to delete is complete 
I0207 20:30:26.754551   22630 request.go:632] Throttling request took 1.367672134s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-88-b13r1
Feb  7 20:30:26.790: INFO: namespace : e2e-tests-nslifetest-88-b13r1 api call to delete is complete 
I0207 20:30:26.804552   22630 request.go:632] Throttling request took 1.417661137s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-89-stx2l
Feb  7 20:30:26.839: INFO: namespace : e2e-tests-nslifetest-89-stx2l api call to delete is complete 
I0207 20:30:26.854544   22630 request.go:632] Throttling request took 1.467659538s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-82-d869z
Feb  7 20:30:26.887: INFO: namespace : e2e-tests-nslifetest-82-d869z api call to delete is complete 
I0207 20:30:26.904565   22630 request.go:632] Throttling request took 1.517664943s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-83-qdtqd
Feb  7 20:30:26.937: INFO: namespace : e2e-tests-nslifetest-83-qdtqd api call to delete is complete 
I0207 20:30:26.954557   22630 request.go:632] Throttling request took 1.567654895s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-9-t5gxv
Feb  7 20:30:26.987: INFO: namespace : e2e-tests-nslifetest-9-t5gxv api call to delete is complete 
I0207 20:30:27.004547   22630 request.go:632] Throttling request took 1.617645714s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-78-c59hk
Feb  7 20:30:27.038: INFO: namespace : e2e-tests-nslifetest-78-c59hk api call to delete is complete 
I0207 20:30:27.054552   22630 request.go:632] Throttling request took 1.667643436s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-84-0dzvn
Feb  7 20:30:27.087: INFO: namespace : e2e-tests-nslifetest-84-0dzvn api call to delete is complete 
I0207 20:30:27.104563   22630 request.go:632] Throttling request took 1.717642883s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-90-9xvp3
Feb  7 20:30:27.138: INFO: namespace : e2e-tests-nslifetest-90-9xvp3 api call to delete is complete 
I0207 20:30:27.154561   22630 request.go:632] Throttling request took 1.767639551s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-79-tzgx5
Feb  7 20:30:27.187: INFO: namespace : e2e-tests-nslifetest-79-tzgx5 api call to delete is complete 
I0207 20:30:27.204578   22630 request.go:632] Throttling request took 1.817652672s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-85-xb051
Feb  7 20:30:27.237: INFO: namespace : e2e-tests-nslifetest-85-xb051 api call to delete is complete 
I0207 20:30:27.254563   22630 request.go:632] Throttling request took 1.8676318s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-91-lntpv
Feb  7 20:30:27.287: INFO: namespace : e2e-tests-nslifetest-91-lntpv api call to delete is complete 
I0207 20:30:27.304558   22630 request.go:632] Throttling request took 1.917615424s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-8-ph3q3
Feb  7 20:30:27.338: INFO: namespace : e2e-tests-nslifetest-8-ph3q3 api call to delete is complete 
I0207 20:30:27.354572   22630 request.go:632] Throttling request took 1.967629671s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-86-b92fq
Feb  7 20:30:27.387: INFO: namespace : e2e-tests-nslifetest-86-b92fq api call to delete is complete 
I0207 20:30:27.404569   22630 request.go:632] Throttling request took 2.017619496s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-92-5g5m1
Feb  7 20:30:27.437: INFO: namespace : e2e-tests-nslifetest-92-5g5m1 api call to delete is complete 
I0207 20:30:27.454557   22630 request.go:632] Throttling request took 2.067604755s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-80-6g36j
Feb  7 20:30:27.487: INFO: namespace : e2e-tests-nslifetest-80-6g36j api call to delete is complete 
I0207 20:30:27.504563   22630 request.go:632] Throttling request took 2.117600401s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-87-9h8mj
Feb  7 20:30:27.537: INFO: namespace : e2e-tests-nslifetest-87-9h8mj api call to delete is complete 
I0207 20:30:27.554563   22630 request.go:632] Throttling request took 2.167607316s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-93-3b426
Feb  7 20:30:27.587: INFO: namespace : e2e-tests-nslifetest-93-3b426 api call to delete is complete 
I0207 20:30:27.604548   22630 request.go:632] Throttling request took 2.217586691s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-81-rvzcm
Feb  7 20:30:27.638: INFO: namespace : e2e-tests-nslifetest-81-rvzcm api call to delete is complete 
I0207 20:30:27.654560   22630 request.go:632] Throttling request took 2.267584361s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-94-x55wm
Feb  7 20:30:27.687: INFO: namespace : e2e-tests-nslifetest-94-x55wm api call to delete is complete 
I0207 20:30:27.704560   22630 request.go:632] Throttling request took 2.317579823s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-97-l96rc
Feb  7 20:30:27.737: INFO: namespace : e2e-tests-nslifetest-97-l96rc api call to delete is complete 
I0207 20:30:27.754586   22630 request.go:632] Throttling request took 2.367602567s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-96-drk75
Feb  7 20:30:27.789: INFO: namespace : e2e-tests-nslifetest-96-drk75 api call to delete is complete 
I0207 20:30:27.804570   22630 request.go:632] Throttling request took 2.417573715s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-98-ng0ln
Feb  7 20:30:27.838: INFO: namespace : e2e-tests-nslifetest-98-ng0ln api call to delete is complete 
I0207 20:30:27.854560   22630 request.go:632] Throttling request took 2.467556383s, request: DELETE:https://internal-api.pr104.origin-ci-int-gce.dev.rhcloud.com:8443/api/v1/namespaces/e2e-tests-nslifetest-95-m5kpt
Feb  7 20:30:27.887: INFO: namespace : e2e-tests-nslifetest-95-m5kpt api call to delete is complete 
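The "Throttling request took ..." lines above are client-side rate limiting, not server errors: each queued DELETE waits for a token, and the reported wait grows by roughly 50ms per request ahead of it in line, consistent with a token-bucket limiter of about 20 requests per second. A minimal Go sketch of that behavior (an illustration, not the client-go implementation; the 20 QPS figure is inferred from the ~50ms spacing):

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Assumed 20 QPS token bucket (inferred from the ~50ms per-request
	// spacing in the log); burst of 1, so every queued request must wait.
	limiter := rate.NewLimiter(20, 1)
	for i := 0; i < 5; i++ {
		start := time.Now()
		_ = limiter.Wait(context.Background()) // blocks until a token is available
		fmt.Printf("Throttling request took %v\n", time.Since(start))
	}
}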
STEP: Waiting for namespaces to vanish
Feb  7 20:30:29.983: INFO: Remaining namespaces : 100
Feb  7 20:30:31.982: INFO: Remaining namespaces : 94
Feb  7 20:30:34.008: INFO: Remaining namespaces : 88
Feb  7 20:30:35.960: INFO: Remaining namespaces : 80
Feb  7 20:30:37.956: INFO: Remaining namespaces : 74
Feb  7 20:30:39.955: INFO: Remaining namespaces : 67
Feb  7 20:30:41.952: INFO: Remaining namespaces : 59
Feb  7 20:30:43.959: INFO: Remaining namespaces : 52
Feb  7 20:30:45.954: INFO: Remaining namespaces : 44
Feb  7 20:30:47.958: INFO: Remaining namespaces : 38
Feb  7 20:30:49.949: INFO: Remaining namespaces : 30
Feb  7 20:30:51.966: INFO: Remaining namespaces : 22
Feb  7 20:30:53.925: INFO: Remaining namespaces : 16
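The "Remaining namespaces" countdown above is produced by a simple poll: count the surviving nslifetest namespaces every ~2 seconds and pass once at least 90 percent of the 100 are gone within the 150-second budget (the criterion named in the spec summary further down). A minimal self-contained Go sketch, with countRemaining as a hypothetical stand-in for the namespace list call:

package main

import (
	"fmt"
	"time"
)

// waitForNamespacesToVanish polls every 2s until at most 10% of the
// namespaces remain or the budget is exhausted.
func waitForNamespacesToVanish(countRemaining func() int, total int, timeout time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		remaining := countRemaining()
		fmt.Printf("Remaining namespaces : %d\n", remaining)
		if remaining <= total/10 { // 90 percent deleted is a pass
			return true
		}
		time.Sleep(2 * time.Second)
	}
	return false
}

func main() {
	remaining := 100
	fake := func() int { remaining -= 8; return remaining } // simulated deletions
	fmt.Println(waitForNamespacesToVanish(fake, 100, 150*time.Second))
}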
[AfterEach] [k8s.io] Namespaces [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:142
Feb  7 20:30:55.921: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-namespaces-w73p4" for this suite.
Feb  7 20:31:06.875: INFO: namespace: e2e-tests-namespaces-w73p4, resource: bindings, ignored listing per whitelist
STEP: Destroying namespace "e2e-tests-nslifetest-99-8l1w9" for this suite.
Feb  7 20:31:07.277: INFO: Namespace e2e-tests-nslifetest-99-8l1w9 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-23-k1vgk" for this suite.
Feb  7 20:31:07.308: INFO: Namespace e2e-tests-nslifetest-23-k1vgk was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-48-gbdb9" for this suite.
Feb  7 20:31:07.338: INFO: Namespace e2e-tests-nslifetest-48-gbdb9 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-24-q11rv" for this suite.
Feb  7 20:31:07.369: INFO: Namespace e2e-tests-nslifetest-24-q11rv was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-0-hfwxw" for this suite.
Feb  7 20:31:07.400: INFO: Namespace e2e-tests-nslifetest-0-hfwxw was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-1-d52s2" for this suite.
Feb  7 20:31:07.431: INFO: Namespace e2e-tests-nslifetest-1-d52s2 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-25-77tts" for this suite.
Feb  7 20:31:07.461: INFO: Namespace e2e-tests-nslifetest-25-77tts was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-50-72c4m" for this suite.
Feb  7 20:31:07.493: INFO: Namespace e2e-tests-nslifetest-50-72c4m was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-26-6r7t0" for this suite.
Feb  7 20:31:07.524: INFO: Namespace e2e-tests-nslifetest-26-6r7t0 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-49-m10tg" for this suite.
Feb  7 20:31:07.555: INFO: Namespace e2e-tests-nslifetest-49-m10tg was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-2-l1lrj" for this suite.
Feb  7 20:31:07.588: INFO: Namespace e2e-tests-nslifetest-2-l1lrj was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-3-tx4gv" for this suite.
Feb  7 20:31:07.618: INFO: Namespace e2e-tests-nslifetest-3-tx4gv was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-51-rznmv" for this suite.
Feb  7 20:31:07.649: INFO: Namespace e2e-tests-nslifetest-51-rznmv was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-52-6h405" for this suite.
Feb  7 20:31:07.680: INFO: Namespace e2e-tests-nslifetest-52-6h405 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-27-qp1r1" for this suite.
Feb  7 20:31:07.711: INFO: Namespace e2e-tests-nslifetest-27-qp1r1 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-29-3s61j" for this suite.
Feb  7 20:31:07.742: INFO: Namespace e2e-tests-nslifetest-29-3s61j was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-8-ph3q3" for this suite.
Feb  7 20:31:07.773: INFO: Namespace e2e-tests-nslifetest-8-ph3q3 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-53-s497s" for this suite.
Feb  7 20:31:07.804: INFO: Namespace e2e-tests-nslifetest-53-s497s was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-28-31w58" for this suite.
Feb  7 20:31:07.835: INFO: Namespace e2e-tests-nslifetest-28-31w58 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-36-7dtjn" for this suite.
Feb  7 20:31:07.866: INFO: Namespace e2e-tests-nslifetest-36-7dtjn was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-5-8zndq" for this suite.
Feb  7 20:31:07.896: INFO: Namespace e2e-tests-nslifetest-5-8zndq was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-4-fmptn" for this suite.
Feb  7 20:31:07.927: INFO: Namespace e2e-tests-nslifetest-4-fmptn was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-54-c0v6x" for this suite.
Feb  7 20:31:07.958: INFO: Namespace e2e-tests-nslifetest-54-c0v6x was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-56-bmq8n" for this suite.
Feb  7 20:31:07.989: INFO: Namespace e2e-tests-nslifetest-56-bmq8n was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-55-qth3r" for this suite.
Feb  7 20:31:08.020: INFO: Namespace e2e-tests-nslifetest-55-qth3r was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-6-bhq6v" for this suite.
Feb  7 20:31:08.050: INFO: Namespace e2e-tests-nslifetest-6-bhq6v was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-30-vlcgd" for this suite.
Feb  7 20:31:08.080: INFO: Namespace e2e-tests-nslifetest-30-vlcgd was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-31-82gz8" for this suite.
Feb  7 20:31:08.111: INFO: Namespace e2e-tests-nslifetest-31-82gz8 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-7-4mg8b" for this suite.
Feb  7 20:31:08.144: INFO: Namespace e2e-tests-nslifetest-7-4mg8b was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-57-ws685" for this suite.
Feb  7 20:31:08.174: INFO: Namespace e2e-tests-nslifetest-57-ws685 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-58-n8pq8" for this suite.
Feb  7 20:31:08.205: INFO: Namespace e2e-tests-nslifetest-58-n8pq8 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-40-pw2w5" for this suite.
Feb  7 20:31:08.235: INFO: Namespace e2e-tests-nslifetest-40-pw2w5 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-14-t7v38" for this suite.
Feb  7 20:31:08.266: INFO: Namespace e2e-tests-nslifetest-14-t7v38 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-63-nh3nh" for this suite.
Feb  7 20:31:08.297: INFO: Namespace e2e-tests-nslifetest-63-nh3nh was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-61-7t9lt" for this suite.
Feb  7 20:31:08.327: INFO: Namespace e2e-tests-nslifetest-61-7t9lt was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-59-28155" for this suite.
Feb  7 20:31:08.358: INFO: Namespace e2e-tests-nslifetest-59-28155 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-39-6tb11" for this suite.
Feb  7 20:31:08.395: INFO: Namespace e2e-tests-nslifetest-39-6tb11 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-9-t5gxv" for this suite.
Feb  7 20:31:08.426: INFO: Namespace e2e-tests-nslifetest-9-t5gxv was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-41-21sr6" for this suite.
Feb  7 20:31:08.456: INFO: Namespace e2e-tests-nslifetest-41-21sr6 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-42-mv9dk" for this suite.
Feb  7 20:31:08.487: INFO: Namespace e2e-tests-nslifetest-42-mv9dk was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-43-xrj3w" for this suite.
Feb  7 20:31:08.518: INFO: Namespace e2e-tests-nslifetest-43-xrj3w was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-32-cp7sv" for this suite.
Feb  7 20:31:08.548: INFO: Namespace e2e-tests-nslifetest-32-cp7sv was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-13-wnt33" for this suite.
Feb  7 20:31:08.579: INFO: Namespace e2e-tests-nslifetest-13-wnt33 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-10-1m3zd" for this suite.
Feb  7 20:31:08.617: INFO: Namespace e2e-tests-nslifetest-10-1m3zd was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-60-rgzn4" for this suite.
Feb  7 20:31:08.648: INFO: Namespace e2e-tests-nslifetest-60-rgzn4 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-35-kdvc9" for this suite.
Feb  7 20:31:08.679: INFO: Namespace e2e-tests-nslifetest-35-kdvc9 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-62-b4h3p" for this suite.
Feb  7 20:31:08.709: INFO: Namespace e2e-tests-nslifetest-62-b4h3p was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-33-tpbcp" for this suite.
Feb  7 20:31:08.740: INFO: Namespace e2e-tests-nslifetest-33-tpbcp was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-11-r48zx" for this suite.
Feb  7 20:31:08.771: INFO: Namespace e2e-tests-nslifetest-11-r48zx was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-64-v0815" for this suite.
Feb  7 20:31:08.802: INFO: Namespace e2e-tests-nslifetest-64-v0815 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-34-g10xp" for this suite.
Feb  7 20:31:08.833: INFO: Namespace e2e-tests-nslifetest-34-g10xp was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-44-50l19" for this suite.
Feb  7 20:31:08.863: INFO: Namespace e2e-tests-nslifetest-44-50l19 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-37-1vkq8" for this suite.
Feb  7 20:31:08.894: INFO: Namespace e2e-tests-nslifetest-37-1vkq8 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-12-40891" for this suite.
Feb  7 20:31:08.925: INFO: Namespace e2e-tests-nslifetest-12-40891 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-47-4k11m" for this suite.
Feb  7 20:31:08.956: INFO: Namespace e2e-tests-nslifetest-47-4k11m was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-15-ggfvk" for this suite.
Feb  7 20:31:08.986: INFO: Namespace e2e-tests-nslifetest-15-ggfvk was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-45-td152" for this suite.
Feb  7 20:31:09.018: INFO: Namespace e2e-tests-nslifetest-45-td152 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-65-k6141" for this suite.
Feb  7 20:31:09.049: INFO: Namespace e2e-tests-nslifetest-65-k6141 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-46-tss49" for this suite.
Feb  7 20:31:09.080: INFO: Namespace e2e-tests-nslifetest-46-tss49 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-38-dxtfd" for this suite.
Feb  7 20:31:09.111: INFO: Namespace e2e-tests-nslifetest-38-dxtfd was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-19-02msh" for this suite.
Feb  7 20:31:09.142: INFO: Namespace e2e-tests-nslifetest-19-02msh was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-20-q60z0" for this suite.
Feb  7 20:31:09.172: INFO: Namespace e2e-tests-nslifetest-20-q60z0 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-66-72hf9" for this suite.
Feb  7 20:31:09.203: INFO: Namespace e2e-tests-nslifetest-66-72hf9 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-17-v0lqq" for this suite.
Feb  7 20:31:09.234: INFO: Namespace e2e-tests-nslifetest-17-v0lqq was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-21-twb9h" for this suite.
Feb  7 20:31:09.264: INFO: Namespace e2e-tests-nslifetest-21-twb9h was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-67-4l1t6" for this suite.
Feb  7 20:31:09.295: INFO: Namespace e2e-tests-nslifetest-67-4l1t6 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-68-zc8x1" for this suite.
Feb  7 20:31:09.327: INFO: Namespace e2e-tests-nslifetest-68-zc8x1 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-88-b13r1" for this suite.
Feb  7 20:31:09.358: INFO: Namespace e2e-tests-nslifetest-88-b13r1 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-18-8x2kv" for this suite.
Feb  7 20:31:09.389: INFO: Namespace e2e-tests-nslifetest-18-8x2kv was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-69-1rcx9" for this suite.
Feb  7 20:31:09.420: INFO: Namespace e2e-tests-nslifetest-69-1rcx9 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-16-x3fd0" for this suite.
Feb  7 20:31:09.450: INFO: Namespace e2e-tests-nslifetest-16-x3fd0 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-83-qdtqd" for this suite.
Feb  7 20:31:09.481: INFO: Namespace e2e-tests-nslifetest-83-qdtqd was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-85-xb051" for this suite.
Feb  7 20:31:09.512: INFO: Namespace e2e-tests-nslifetest-85-xb051 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-22-03fs0" for this suite.
Feb  7 20:31:09.543: INFO: Namespace e2e-tests-nslifetest-22-03fs0 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-84-0dzvn" for this suite.
Feb  7 20:31:09.573: INFO: Namespace e2e-tests-nslifetest-84-0dzvn was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-86-b92fq" for this suite.
Feb  7 20:31:09.604: INFO: Namespace e2e-tests-nslifetest-86-b92fq was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-92-5g5m1" for this suite.
Feb  7 20:31:09.635: INFO: Namespace e2e-tests-nslifetest-92-5g5m1 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-70-gvrcj" for this suite.
Feb  7 20:31:09.665: INFO: Namespace e2e-tests-nslifetest-70-gvrcj was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-95-m5kpt" for this suite.
Feb  7 20:31:09.696: INFO: Namespace e2e-tests-nslifetest-95-m5kpt was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-91-lntpv" for this suite.
Feb  7 20:31:09.727: INFO: Namespace e2e-tests-nslifetest-91-lntpv was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-93-3b426" for this suite.
Feb  7 20:31:09.758: INFO: Namespace e2e-tests-nslifetest-93-3b426 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-89-stx2l" for this suite.
Feb  7 20:31:09.789: INFO: Namespace e2e-tests-nslifetest-89-stx2l was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-94-x55wm" for this suite.
Feb  7 20:31:09.819: INFO: Namespace e2e-tests-nslifetest-94-x55wm was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-96-drk75" for this suite.
Feb  7 20:31:09.855: INFO: Namespace e2e-tests-nslifetest-96-drk75 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-72-65xjp" for this suite.
Feb  7 20:31:09.886: INFO: Namespace e2e-tests-nslifetest-72-65xjp was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-90-9xvp3" for this suite.
Feb  7 20:31:09.919: INFO: Namespace e2e-tests-nslifetest-90-9xvp3 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-97-l96rc" for this suite.
Feb  7 20:31:09.950: INFO: Namespace e2e-tests-nslifetest-97-l96rc was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-77-8710d" for this suite.
Feb  7 20:31:09.980: INFO: Namespace e2e-tests-nslifetest-77-8710d was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-78-c59hk" for this suite.
Feb  7 20:31:10.011: INFO: Namespace e2e-tests-nslifetest-78-c59hk was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-74-xjwm9" for this suite.
Feb  7 20:31:10.042: INFO: Namespace e2e-tests-nslifetest-74-xjwm9 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-75-rxmqb" for this suite.
Feb  7 20:31:10.072: INFO: Namespace e2e-tests-nslifetest-75-rxmqb was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-79-tzgx5" for this suite.
Feb  7 20:31:10.103: INFO: Namespace e2e-tests-nslifetest-79-tzgx5 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-71-kzpg0" for this suite.
Feb  7 20:31:10.134: INFO: Namespace e2e-tests-nslifetest-71-kzpg0 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-73-wg3x9" for this suite.
Feb  7 20:31:10.164: INFO: Namespace e2e-tests-nslifetest-73-wg3x9 was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-87-9h8mj" for this suite.
Feb  7 20:31:10.195: INFO: Namespace e2e-tests-nslifetest-87-9h8mj was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-76-rxl7h" for this suite.
Feb  7 20:31:10.226: INFO: Namespace e2e-tests-nslifetest-76-rxl7h was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-80-6g36j" for this suite.
Feb  7 20:31:10.257: INFO: Namespace e2e-tests-nslifetest-80-6g36j was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-98-ng0ln" for this suite.
Feb  7 20:31:10.288: INFO: Namespace e2e-tests-nslifetest-98-ng0ln was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-81-rvzcm" for this suite.
Feb  7 20:31:10.318: INFO: Namespace e2e-tests-nslifetest-81-rvzcm was already deleted
STEP: Destroying namespace "e2e-tests-nslifetest-82-d869z" for this suite.
Feb  7 20:31:10.349: INFO: Namespace e2e-tests-nslifetest-82-d869z was already deleted

• [SLOW TEST:69.696 seconds]
[k8s.io] Namespaces [Serial]
/data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:826
  should delete fast enough (90 percent of 100 namespaces in 150 seconds)
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/namespace.go:222
------------------------------
SSSSSSSSSSSSSS
------------------------------
[k8s.io] Daemon set [Serial] 
  should run and stop simple daemon
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/daemon_set.go:148
[BeforeEach] [Top Level]
  /data/src/github.com/openshift/origin/test/extended/util/test.go:47
[BeforeEach] [k8s.io] Daemon set [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:141
STEP: Creating a kubernetes client
Feb  7 20:31:10.349: INFO: >>> kubeConfig: /tmp/cluster-admin.kubeconfig

STEP: Building a namespace api object
Feb  7 20:31:10.545: INFO: About to run a Kube e2e test, ensuring namespace is privileged
STEP: Waiting for a default service account to be provisioned in namespace
[BeforeEach] [k8s.io] Daemon set [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/daemon_set.go:89
[It] should run and stop simple daemon
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/daemon_set.go:148
Feb  7 20:31:19.065: INFO: Creating simple daemon set daemon-set
STEP: Check that daemon pods launch on every node of the cluster.
Feb  7 20:31:21.189: INFO: nodesToPodCount: map[string]int{"ci-pr104-ig-n-3z1d":1, "ci-pr104-ig-n-lffc":1, "ci-pr104-ig-m-pr2w":1, "ci-pr104-ig-n-rjnv":1}
STEP: Stop a daemon pod, check that the daemon pod is revived.
Feb  7 20:31:23.380: INFO: nodesToPodCount: map[string]int{"ci-pr104-ig-m-pr2w":1, "ci-pr104-ig-n-rjnv":1, "ci-pr104-ig-n-3z1d":1, "ci-pr104-ig-n-lffc":1}
Feb  7 20:31:23.380: INFO: Check that reaper kills all daemon pods for daemon-set
Feb  7 20:31:27.539: INFO: nodesToPodCount: map[string]int{}
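The nodesToPodCount maps above are the spec's three checkpoints: one running daemon pod per node after creation, still one per node after a pod is deleted and revived, and an empty map once the reaper removes the daemon set. A minimal self-contained Go sketch of that bookkeeping (Pod here is a hypothetical stand-in for the API object, keeping only the fields the check needs):

package main

import "fmt"

// Pod is a hypothetical stand-in for the API pod object.
type Pod struct {
	NodeName string
	Running  bool
}

// nodesToPodCount counts running daemon pods per node, as in the maps above.
func nodesToPodCount(pods []Pod) map[string]int {
	counts := map[string]int{}
	for _, p := range pods {
		if p.Running {
			counts[p.NodeName]++
		}
	}
	return counts
}

// allNodesHaveOnePod is the launch check: exactly one daemon pod per node.
func allNodesHaveOnePod(counts map[string]int, nodes []string) bool {
	for _, n := range nodes {
		if counts[n] != 1 {
			return false
		}
	}
	return true
}

func main() {
	nodes := []string{"ci-pr104-ig-m-pr2w", "ci-pr104-ig-n-rjnv"}
	pods := []Pod{{"ci-pr104-ig-m-pr2w", true}, {"ci-pr104-ig-n-rjnv", true}}
	counts := nodesToPodCount(pods)
	fmt.Println(counts, allNodesHaveOnePod(counts, nodes))
}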
[AfterEach] [k8s.io] Daemon set [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/daemon_set.go:73
Feb  7 20:31:27.570: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"extensions/v1beta1","metadata":{"selfLink":"/apis/extensions/v1beta1/namespaces/e2e-tests-daemonsets-l2g2q/daemonsets","resourceVersion":"26731"},"items":null}

Feb  7 20:31:27.601: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/e2e-tests-daemonsets-l2g2q/pods","resourceVersion":"26731"},"items":null}

[AfterEach] [k8s.io] Daemon set [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:142
Feb  7 20:31:35.789: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-daemonsets-l2g2q" for this suite.
Feb  7 20:31:46.727: INFO: namespace: e2e-tests-daemonsets-l2g2q, resource: bindings, ignored listing per whitelist

• [SLOW TEST:36.746 seconds]
[k8s.io] Daemon set [Serial]
/data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:826
  should run and stop simple daemon
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/daemon_set.go:148
------------------------------
SSSSSSSS
------------------------------
[k8s.io] SchedulerPredicates [Serial] 
  validates that a pod with an invalid NodeAffinity is rejected
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:258
[BeforeEach] [Top Level]
  /data/src/github.com/openshift/origin/test/extended/util/test.go:47
[BeforeEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:141
STEP: Creating a kubernetes client
Feb  7 20:31:47.096: INFO: >>> kubeConfig: /tmp/cluster-admin.kubeconfig

STEP: Building a namespace api object
Feb  7 20:31:47.266: INFO: About to run a Kube e2e test, ensuring namespace is privileged
STEP: Waiting for a default service account to be provisioned in namespace
[BeforeEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:100
Feb  7 20:31:47.689: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
Feb  7 20:31:47.753: INFO: Waiting for terminating namespaces to be deleted...
Feb  7 20:31:47.844: INFO: Waiting up to 5m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready
Feb  7 20:31:47.875: INFO: Waiting for pods to enter Success, but no pods in "kube-system" match label map[name:e2e-image-puller]
Feb  7 20:31:47.937: INFO: 0 / 0 pods in namespace 'kube-system' are running and ready (0 seconds elapsed)
Feb  7 20:31:47.937: INFO: expected 0 pod replicas in namespace 'kube-system', 0 are Running and Ready.
Feb  7 20:31:47.937: INFO: Logging pods the kubelet thinks are on node ci-pr104-ig-m-pr2w before test
Feb  7 20:31:48.004: INFO: docker-registry-3-vgj9z from default started at 2017-02-07 19:52:38 -0500 EST (1 container statuses recorded)
Feb  7 20:31:48.004: INFO: 	Container registry ready: true, restart count 0
Feb  7 20:31:48.004: INFO: router-1-t1x6w from default started at 2017-02-07 19:51:44 -0500 EST (1 container statuses recorded)
Feb  7 20:31:48.004: INFO: 	Container router ready: true, restart count 0
Feb  7 20:31:48.004: INFO: registry-console-1-93942 from default started at 2017-02-07 19:52:13 -0500 EST (1 container statuses recorded)
Feb  7 20:31:48.004: INFO: 	Container registry-console ready: true, restart count 0
Feb  7 20:31:48.004: INFO: Logging pods the kubelet thinks are on node ci-pr104-ig-n-3z1d before test
Feb  7 20:31:48.068: INFO: Logging pods the kubelet thinks are on node ci-pr104-ig-n-lffc before test
Feb  7 20:31:48.132: INFO: Logging pods the kubelet thinks are on node ci-pr104-ig-n-rjnv before test
[It] validates that a pod with an invalid NodeAffinity is rejected
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:258
STEP: Trying to launch a pod with an invalid Affinity data.
Feb  7 20:31:48.260: INFO: Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.
[AfterEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:142
Feb  7 20:31:58.260: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-sched-pred-9b5tc" for this suite.
Feb  7 20:32:09.199: INFO: namespace: e2e-tests-sched-pred-9b5tc, resource: bindings, ignored listing per whitelist
[AfterEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:67
I0207 20:32:09.567413   22630 request.go:769] Error in request: resource name may not be empty

• [SLOW TEST:22.471 seconds]
[k8s.io] SchedulerPredicates [Serial]
/data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:826
  validates that a pod with an invalid NodeAffinity is rejected
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:258
------------------------------
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[k8s.io] SchedulerPredicates [Serial] 
  validates resource limits of pods that are allowed to run [Conformance]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:214
[BeforeEach] [Top Level]
  /data/src/github.com/openshift/origin/test/extended/util/test.go:47
[BeforeEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:141
STEP: Creating a kubernetes client
Feb  7 20:32:09.568: INFO: >>> kubeConfig: /tmp/cluster-admin.kubeconfig

STEP: Building a namespace api object
Feb  7 20:32:09.888: INFO: About to run a Kube e2e test, ensuring namespace is privileged
STEP: Waiting for a default service account to be provisioned in namespace
[BeforeEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:100
Feb  7 20:32:10.291: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
Feb  7 20:32:10.354: INFO: Waiting for terminating namespaces to be deleted...
Feb  7 20:32:10.445: INFO: Waiting up to 5m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready
Feb  7 20:32:10.476: INFO: Waiting for pods to enter Success, but no pods in "kube-system" match label map[name:e2e-image-puller]
Feb  7 20:32:10.536: INFO: 0 / 0 pods in namespace 'kube-system' are running and ready (0 seconds elapsed)
Feb  7 20:32:10.536: INFO: expected 0 pod replicas in namespace 'kube-system', 0 are Running and Ready.
Feb  7 20:32:10.537: INFO: Logging pods the kubelet thinks are on node ci-pr104-ig-m-pr2w before test
Feb  7 20:32:10.600: INFO: router-1-t1x6w from default started at 2017-02-07 19:51:44 -0500 EST (1 container statuses recorded)
Feb  7 20:32:10.600: INFO: 	Container router ready: true, restart count 0
Feb  7 20:32:10.600: INFO: registry-console-1-93942 from default started at 2017-02-07 19:52:13 -0500 EST (1 container statuses recorded)
Feb  7 20:32:10.600: INFO: 	Container registry-console ready: true, restart count 0
Feb  7 20:32:10.600: INFO: docker-registry-3-vgj9z from default started at 2017-02-07 19:52:38 -0500 EST (1 container statuses recorded)
Feb  7 20:32:10.600: INFO: 	Container registry ready: true, restart count 0
Feb  7 20:32:10.600: INFO: Logging pods the kubelet thinks are on node ci-pr104-ig-n-3z1d before test
Feb  7 20:32:10.664: INFO: Logging pods the kubelet thinks are on node ci-pr104-ig-n-lffc before test
Feb  7 20:32:10.727: INFO: Logging pods the kubelet thinks are on node ci-pr104-ig-n-rjnv before test
[It] validates resource limits of pods that are allowed to run [Conformance]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:214
Feb  7 20:32:10.856: INFO: Pod docker-registry-3-vgj9z requesting resource cpu=100m on Node ci-pr104-ig-m-pr2w
Feb  7 20:32:10.856: INFO: Pod registry-console-1-93942 requesting resource cpu=0m on Node ci-pr104-ig-m-pr2w
Feb  7 20:32:10.856: INFO: Pod router-1-t1x6w requesting resource cpu=100m on Node ci-pr104-ig-m-pr2w
Feb  7 20:32:10.856: INFO: Using pod capacity: 500m
Feb  7 20:32:10.856: INFO: Node: ci-pr104-ig-m-pr2w has cpu capacity: 1800m
Feb  7 20:32:10.856: INFO: Node: ci-pr104-ig-n-3z1d has cpu capacity: 2000m
Feb  7 20:32:10.856: INFO: Node: ci-pr104-ig-n-lffc has cpu capacity: 2000m
Feb  7 20:32:10.856: INFO: Node: ci-pr104-ig-n-rjnv has cpu capacity: 2000m
STEP: Starting additional 15 Pods to fully saturate the cluster CPU and trying to start another one
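The count of 15 follows from the figures just logged: ci-pr104-ig-m-pr2w already has 200m requested, so (1800-200)/500 = 3 test pods fit there, and 2000/500 = 4 fit on each of the other three nodes, 15 in total; the "another one" is then expected to stay Pending. The arithmetic as a runnable sketch:

package main

import "fmt"

func main() {
	// Millicore capacities and pre-existing requests from the log above.
	capacity := map[string]int{
		"ci-pr104-ig-m-pr2w": 1800,
		"ci-pr104-ig-n-3z1d": 2000,
		"ci-pr104-ig-n-lffc": 2000,
		"ci-pr104-ig-n-rjnv": 2000,
	}
	requested := map[string]int{"ci-pr104-ig-m-pr2w": 200} // registry 100m + router 100m
	perPod := 500                                          // "Using pod capacity: 500m"

	fit := 0
	for node, milli := range capacity {
		fit += (milli - requested[node]) / perPod
	}
	fmt.Println("pods that fit:", fit) // 3 + 4 + 4 + 4 = 15
}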
Feb  7 20:32:11.425: INFO: Waiting for running...
I0207 20:32:11.426044   22630 reflector.go:196] Starting reflector *api.Pod (0) from github.com/openshift/origin/vendor/k8s.io/kubernetes/test/utils/pod_store.go:52
I0207 20:32:11.426102   22630 reflector.go:234] Listing and watching *api.Pod from github.com/openshift/origin/vendor/k8s.io/kubernetes/test/utils/pod_store.go:52
Feb  7 20:32:21.492: INFO: Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.
[AfterEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:142
Feb  7 20:32:31.557: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-sched-pred-0q1nk" for this suite.
Feb  7 20:32:57.464: INFO: namespace: e2e-tests-sched-pred-0q1nk, resource: bindings, ignored listing per whitelist
[AfterEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:67
I0207 20:32:57.831544   22630 request.go:769] Error in request: resource name may not be empty

• [SLOW TEST:48.263 seconds]
[k8s.io] SchedulerPredicates [Serial]
/data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:826
  validates resource limits of pods that are allowed to run [Conformance]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:214
------------------------------
SSSSSSSSSSSSSSSSSSSSS
------------------------------
[k8s.io] SchedulerPredicates [Serial] 
  validates that Inter-pod-Affinity is respected if not matching
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:461
[BeforeEach] [Top Level]
  /data/src/github.com/openshift/origin/test/extended/util/test.go:47
[BeforeEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:141
STEP: Creating a kubernetes client
Feb  7 20:32:57.831: INFO: >>> kubeConfig: /tmp/cluster-admin.kubeconfig

STEP: Building a namespace api object
Feb  7 20:32:58.022: INFO: About to run a Kube e2e test, ensuring namespace is privileged
STEP: Waiting for a default service account to be provisioned in namespace
[BeforeEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:100
Feb  7 20:32:58.448: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
Feb  7 20:32:58.513: INFO: Waiting for terminating namespaces to be deleted...
Feb  7 20:32:58.604: INFO: Waiting up to 5m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready
Feb  7 20:32:58.635: INFO: Waiting for pods to enter Success, but no pods in "kube-system" match label map[name:e2e-image-puller]
Feb  7 20:32:58.697: INFO: 0 / 0 pods in namespace 'kube-system' are running and ready (0 seconds elapsed)
Feb  7 20:32:58.697: INFO: expected 0 pod replicas in namespace 'kube-system', 0 are Running and Ready.
Feb  7 20:32:58.697: INFO: Logging pods the kubelet thinks are on node ci-pr104-ig-m-pr2w before test
Feb  7 20:32:58.759: INFO: docker-registry-3-vgj9z from default started at 2017-02-07 19:52:38 -0500 EST (1 container statuses recorded)
Feb  7 20:32:58.759: INFO: 	Container registry ready: true, restart count 0
Feb  7 20:32:58.759: INFO: registry-console-1-93942 from default started at 2017-02-07 19:52:13 -0500 EST (1 container statuses recorded)
Feb  7 20:32:58.759: INFO: 	Container registry-console ready: true, restart count 0
Feb  7 20:32:58.759: INFO: router-1-t1x6w from default started at 2017-02-07 19:51:44 -0500 EST (1 container statuses recorded)
Feb  7 20:32:58.759: INFO: 	Container router ready: true, restart count 0
Feb  7 20:32:58.759: INFO: Logging pods the kubelet thinks are on node ci-pr104-ig-n-3z1d before test
Feb  7 20:32:58.822: INFO: Logging pods the kubelet thinks are on node ci-pr104-ig-n-lffc before test
Feb  7 20:32:58.885: INFO: Logging pods the kubelet thinks are on node ci-pr104-ig-n-rjnv before test
[It] validates that Inter-pod-Affinity is respected if not matching
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:461
STEP: Trying to schedule Pod with nonempty Pod Affinity.
Feb  7 20:32:59.052: INFO: Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.
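The pod being scheduled here carries a required inter-pod affinity term whose label selector matches no running pod, so no node can satisfy it and the pod should remain Pending through the 10-second window. A hedged sketch of such a term in the modern typed API (this run still carried affinity in the earlier alpha-annotation encoding; the selector values below are hypothetical):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Required pod-affinity term that no running pod satisfies (the
	// selector is hypothetical), leaving the new pod Pending.
	term := corev1.PodAffinityTerm{
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"service": "securityscan"},
		},
		TopologyKey: "kubernetes.io/hostname",
	}
	fmt.Printf("%+v\n", term)
}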
[AfterEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:142
Feb  7 20:33:09.084: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-sched-pred-bljdg" for this suite.
Feb  7 20:33:35.021: INFO: namespace: e2e-tests-sched-pred-bljdg, resource: bindings, ignored listing per whitelist
[AfterEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:67
I0207 20:33:35.389153   22630 request.go:769] Error in request: resource name may not be empty

• [SLOW TEST:37.557 seconds]
[k8s.io] SchedulerPredicates [Serial]
/data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:826
  validates that Inter-pod-Affinity is respected if not matching
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:461
------------------------------
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[k8s.io] SchedulerPredicates [Serial] 
  validates that required NodeAffinity setting is respected if matching
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:373
[BeforeEach] [Top Level]
  /data/src/github.com/openshift/origin/test/extended/util/test.go:47
[BeforeEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:141
STEP: Creating a kubernetes client
Feb  7 20:33:35.390: INFO: >>> kubeConfig: /tmp/cluster-admin.kubeconfig

STEP: Building a namespace api object
Feb  7 20:33:35.510: INFO: About to run a Kube e2e test, ensuring namespace is privileged
STEP: Waiting for a default service account to be provisioned in namespace
[BeforeEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:100
Feb  7 20:33:35.915: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
Feb  7 20:33:35.979: INFO: Waiting for terminating namespaces to be deleted...
Feb  7 20:33:36.070: INFO: Waiting up to 5m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready
Feb  7 20:33:36.101: INFO: Waiting for pods to enter Success, but no pods in "kube-system" match label map[name:e2e-image-puller]
Feb  7 20:33:36.162: INFO: 0 / 0 pods in namespace 'kube-system' are running and ready (0 seconds elapsed)
Feb  7 20:33:36.162: INFO: expected 0 pod replicas in namespace 'kube-system', 0 are Running and Ready.
Feb  7 20:33:36.162: INFO: Logging pods the kubelet thinks are on node ci-pr104-ig-m-pr2w before test
Feb  7 20:33:36.225: INFO: docker-registry-3-vgj9z from default started at 2017-02-07 19:52:38 -0500 EST (1 container statuses recorded)
Feb  7 20:33:36.225: INFO: 	Container registry ready: true, restart count 0
Feb  7 20:33:36.225: INFO: registry-console-1-93942 from default started at 2017-02-07 19:52:13 -0500 EST (1 container statuses recorded)
Feb  7 20:33:36.225: INFO: 	Container registry-console ready: true, restart count 0
Feb  7 20:33:36.225: INFO: router-1-t1x6w from default started at 2017-02-07 19:51:44 -0500 EST (1 container statuses recorded)
Feb  7 20:33:36.225: INFO: 	Container router ready: true, restart count 0
Feb  7 20:33:36.225: INFO: Logging pods the kubelet thinks are on node ci-pr104-ig-n-3z1d before test
Feb  7 20:33:36.288: INFO: Logging pods the kubelet thinks are on node ci-pr104-ig-n-lffc before test
Feb  7 20:33:36.352: INFO: Logging pods the kubelet thinks are on node ci-pr104-ig-n-rjnv before test
[It] validates that required NodeAffinity setting is respected if matching
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:373
STEP: Trying to launch a pod without a label to get a node which can launch it.
STEP: Explicitly delete pod here to free the resource it takes.
STEP: Trying to apply a random label on the found node.
STEP: verifying the node has the label kubernetes.io/e2e-9da9f9ea-ed9e-11e6-8acb-0ee8138d8aac 42
STEP: Trying to relaunch the pod, now with labels.
STEP: removing the label kubernetes.io/e2e-9da9f9ea-ed9e-11e6-8acb-0ee8138d8aac off the node ci-pr104-ig-n-rjnv
STEP: verifying the node doesn't have the label kubernetes.io/e2e-9da9f9ea-ed9e-11e6-8acb-0ee8138d8aac
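The relaunched pod's required node affinity keys on the random label just applied to ci-pr104-ig-n-rjnv, so only that node qualifies. A hedged sketch of the equivalent requirement in the modern typed API (this run appears to predate the typed field and expressed affinity differently):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Only a node carrying the test's random label (value "42") matches.
	affinity := &corev1.Affinity{
		NodeAffinity: &corev1.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
				NodeSelectorTerms: []corev1.NodeSelectorTerm{{
					MatchExpressions: []corev1.NodeSelectorRequirement{{
						Key:      "kubernetes.io/e2e-9da9f9ea-ed9e-11e6-8acb-0ee8138d8aac",
						Operator: corev1.NodeSelectorOpIn,
						Values:   []string{"42"},
					}},
				}},
			},
		},
	}
	fmt.Printf("%+v\n", affinity)
}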
[AfterEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:142
Feb  7 20:33:41.015: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-sched-pred-zv3dx" for this suite.
Feb  7 20:34:06.947: INFO: namespace: e2e-tests-sched-pred-zv3dx, resource: bindings, ignored listing per whitelist
[AfterEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:67
I0207 20:34:07.315728   22630 request.go:769] Error in request: resource name may not be empty

• [SLOW TEST:31.925 seconds]
[k8s.io] SchedulerPredicates [Serial]
/data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:826
  validates that required NodeAffinity setting is respected if matching
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:373
------------------------------
SSSSS
------------------------------
[k8s.io] kubelet [k8s.io] Clean up pods on node 
  kubelet should be able to delete 10 pods per node in 1m0s.
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/kubelet.go:226
[BeforeEach] [Top Level]
  /data/src/github.com/openshift/origin/test/extended/util/test.go:47
[BeforeEach] [k8s.io] kubelet
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:141
STEP: Creating a kubernetes client
Feb  7 20:34:07.315: INFO: >>> kubeConfig: /tmp/cluster-admin.kubeconfig

STEP: Building a namespace api object
Feb  7 20:34:07.471: INFO: About to run a Kube e2e test, ensuring namespace is privileged
STEP: Waiting for a default service account to be provisioned in namespace
[BeforeEach] [k8s.io] kubelet
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/kubelet.go:165
[It] kubelet should be able to delete 10 pods per node in 1m0s.
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/kubelet.go:226
STEP: Creating a RC of 40 pods and waiting until all pods of this RC are running
STEP: creating replication controller cleanup40-af408f69-ed9e-11e6-8acb-0ee8138d8aac in namespace e2e-tests-kubelet-dlz1x
I0207 20:34:08.304884   22630 runners.go:103] Created replication controller with name: cleanup40-af408f69-ed9e-11e6-8acb-0ee8138d8aac, namespace: e2e-tests-kubelet-dlz1x, replica count: 40
I0207 20:34:08.304947   22630 reflector.go:196] Starting reflector *api.Pod (0) from github.com/openshift/origin/vendor/k8s.io/kubernetes/test/utils/pod_store.go:52
I0207 20:34:08.305005   22630 reflector.go:234] Listing and watching *api.Pod from github.com/openshift/origin/vendor/k8s.io/kubernetes/test/utils/pod_store.go:52
I0207 20:34:18.305184   22630 runners.go:103] cleanup40-af408f69-ed9e-11e6-8acb-0ee8138d8aac Pods: 40 out of 40 created, 0 running, 40 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady 
I0207 20:34:28.305459   22630 runners.go:103] cleanup40-af408f69-ed9e-11e6-8acb-0ee8138d8aac Pods: 40 out of 40 created, 40 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady 
Feb  7 20:34:29.305: INFO: Checking pods on node ci-pr104-ig-n-rjnv via /runningpods endpoint
Feb  7 20:34:29.305: INFO: Checking pods on node ci-pr104-ig-m-pr2w via /runningpods endpoint
Feb  7 20:34:29.305: INFO: Checking pods on node ci-pr104-ig-n-3z1d via /runningpods endpoint
Feb  7 20:34:29.305: INFO: Checking pods on node ci-pr104-ig-n-lffc via /runningpods endpoint
Feb  7 20:34:29.444: INFO: Resource usage on node "ci-pr104-ig-n-3z1d":
container cpu(cores) memory_working_set(MB) memory_rss(MB)
"/"       0.738      997.87                 118.38
"runtime" 0.357      108.89                 96.69
"kubelet" 0.159      63.56                  62.98

Resource usage on node "ci-pr104-ig-n-lffc":
container cpu(cores) memory_working_set(MB) memory_rss(MB)
"/"       0.644      1026.55                120.72
"runtime" 0.225      106.68                 94.95
"kubelet" 0.195      65.25                  64.88

Resource usage on node "ci-pr104-ig-n-rjnv":
container cpu(cores) memory_working_set(MB) memory_rss(MB)
"/"       0.570      1027.04                116.22
"runtime" 0.461      116.21                 104.69
"kubelet" 0.173      72.85                  72.22

Resource usage on node "ci-pr104-ig-m-pr2w":
container cpu(cores) memory_working_set(MB) memory_rss(MB)
"/"       1.074      2018.09                277.94
"runtime" 0.550      119.34                 110.26
"kubelet" 0.087      66.30                  65.88

STEP: Deleting the RC
STEP: deleting replication controller cleanup40-af408f69-ed9e-11e6-8acb-0ee8138d8aac in namespace e2e-tests-kubelet-dlz1x
I0207 20:34:29.474521   22630 reflector.go:196] Starting reflector *api.Pod (0) from github.com/openshift/origin/vendor/k8s.io/kubernetes/test/utils/pod_store.go:52
I0207 20:34:29.474581   22630 reflector.go:234] Listing and watching *api.Pod from github.com/openshift/origin/vendor/k8s.io/kubernetes/test/utils/pod_store.go:52
Feb  7 20:34:31.731: INFO: Deleting RC cleanup40-af408f69-ed9e-11e6-8acb-0ee8138d8aac took: 1.2568449s
Feb  7 20:34:31.731: INFO: Terminating RC cleanup40-af408f69-ed9e-11e6-8acb-0ee8138d8aac pods took: 38.051µs
Feb  7 20:34:42.732: INFO: Checking pods on node ci-pr104-ig-n-rjnv via /runningpods endpoint
Feb  7 20:34:42.732: INFO: Checking pods on node ci-pr104-ig-m-pr2w via /runningpods endpoint
Feb  7 20:34:42.732: INFO: Checking pods on node ci-pr104-ig-n-3z1d via /runningpods endpoint
Feb  7 20:34:42.732: INFO: Checking pods on node ci-pr104-ig-n-lffc via /runningpods endpoint
Feb  7 20:34:42.822: INFO: Deleting 40 pods on 4 nodes completed in 1.090896227s after the RC was deleted
Feb  7 20:34:42.822: INFO: CPU usage of containers on node "ci-pr104-ig-m-pr2w":
container  5th%  20th% 50th% 70th% 90th% 95th% 99th%
"/"       0.000 0.000 0.000 0.000 0.000 0.000 0.000
"runtime" 0.000 0.000 0.113 0.113 0.113 0.113 0.113
"kubelet" 0.000 0.000 0.087 0.087 0.087 0.087 0.087

CPU usage of containers on node "ci-pr104-ig-n-3z1d":
container  5th%  20th% 50th% 70th% 90th% 95th% 99th%
"/"       0.000 0.000 0.738 0.738 0.738 0.738 0.738
"runtime" 0.000 0.000 0.357 0.357 0.357 0.357 0.357
"kubelet" 0.000 0.000 0.159 0.159 0.159 0.159 0.159

CPU usage of containers on node "ci-pr104-ig-n-lffc":
container  5th%  20th% 50th% 70th% 90th% 95th% 99th%
"/"       0.000 0.000 0.644 0.644 0.644 0.644 0.644
"runtime" 0.000 0.000 0.225 0.225 0.225 0.225 0.225
"kubelet" 0.000 0.000 0.095 0.095 0.095 0.095 0.095

CPU usage of containers on node "ci-pr104-ig-n-rjnv":
container  5th%  20th% 50th% 70th% 90th% 95th% 99th%
"/"       0.000 0.000 0.529 0.529 0.529 0.529 0.529
"runtime" 0.000 0.000 0.196 0.196 0.196 0.196 0.196
"kubelet" 0.000 0.000 0.130 0.130 0.130 0.130 0.130

[AfterEach] [k8s.io] kubelet
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:142
Feb  7 20:34:42.822: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-kubelet-dlz1x" for this suite.
Feb  7 20:34:53.737: INFO: namespace: e2e-tests-kubelet-dlz1x, resource: bindings, ignored listing per whitelist
[AfterEach] [k8s.io] kubelet
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/kubelet.go:173

• [SLOW TEST:47.057 seconds]
[k8s.io] kubelet
/data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:826
  [k8s.io] Clean up pods on node
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:826
    kubelet should be able to delete 10 pods per node in 1m0s.
    /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/kubelet.go:226
------------------------------
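
A minimal sketch of the /runningpods checks the spec above performs, assuming cluster-admin credentials: the kubelet's debug endpoint is reached through the API-server node proxy, the same path used for the per-node metrics gathered at the end of this log.

for node in $(oc get nodes -o name | cut -d/ -f2); do
  # ask each kubelet which pods it still believes are running
  oc get --raw "/api/v1/nodes/${node}/proxy/runningpods" > "/tmp/${node}-runningpods.json"
done
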
SSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[k8s.io] SchedulerPredicates [Serial] 
  validates that embedding the JSON PodAffinity and PodAntiAffinity setting as a string in the annotation value work
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:664
[BeforeEach] [Top Level]
  /data/src/github.com/openshift/origin/test/extended/util/test.go:47
[BeforeEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:141
STEP: Creating a kubernetes client
Feb  7 20:34:54.373: INFO: >>> kubeConfig: /tmp/cluster-admin.kubeconfig

STEP: Building a namespace api object
Feb  7 20:34:54.560: INFO: About to run a Kube e2e test, ensuring namespace is privileged
STEP: Waiting for a default service account to be provisioned in namespace
[BeforeEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:100
Feb  7 20:34:54.971: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
Feb  7 20:34:55.035: INFO: Waiting for terminating namespaces to be deleted...
Feb  7 20:34:55.129: INFO: Waiting up to 5m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready
Feb  7 20:34:55.161: INFO: Waiting for pods to enter Success, but no pods in "kube-system" match label map[name:e2e-image-puller]
Feb  7 20:34:55.221: INFO: 0 / 0 pods in namespace 'kube-system' are running and ready (0 seconds elapsed)
Feb  7 20:34:55.221: INFO: expected 0 pod replicas in namespace 'kube-system', 0 are Running and Ready.
Feb  7 20:34:55.221: INFO: 
Logging pods the kubelet thinks are on node ci-pr104-ig-m-pr2w before test
Feb  7 20:34:55.284: INFO: registry-console-1-93942 from default started at 2017-02-07 19:52:13 -0500 EST (1 container statuses recorded)
Feb  7 20:34:55.284: INFO: 	Container registry-console ready: true, restart count 0
Feb  7 20:34:55.284: INFO: router-1-t1x6w from default started at 2017-02-07 19:51:44 -0500 EST (1 container statuses recorded)
Feb  7 20:34:55.284: INFO: 	Container router ready: true, restart count 0
Feb  7 20:34:55.284: INFO: docker-registry-3-vgj9z from default started at 2017-02-07 19:52:38 -0500 EST (1 container statuses recorded)
Feb  7 20:34:55.284: INFO: 	Container registry ready: true, restart count 0
Feb  7 20:34:55.284: INFO: 
Logging pods the kubelet thinks are on node ci-pr104-ig-n-3z1d before test
Feb  7 20:34:55.346: INFO: 
Logging pods the kubelet thinks are on node ci-pr104-ig-n-lffc before test
Feb  7 20:34:55.407: INFO: 
Logging pods the kubelet thinks are on node ci-pr104-ig-n-rjnv before test
[It] validates that embedding the JSON PodAffinity and PodAntiAffinity setting as a string in the annotation value work
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:664
STEP: Trying to launch a pod with a label to get a node which can launch it.
STEP: Trying to apply a label with fake az info on the found node.
STEP: verifying the node has the label e2e.inter-pod-affinity.kubernetes.io/zone e2e-az1
STEP: Trying to launch a pod with its PodAffinity & PodAntiAffinity settings embedded as a JSON string in the annotation value.
STEP: removing the label e2e.inter-pod-affinity.kubernetes.io/zone off the node ci-pr104-ig-n-rjnv
STEP: verifying the node doesn't have the label e2e.inter-pod-affinity.kubernetes.io/zone
[AfterEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:142
Feb  7 20:35:00.773: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-sched-pred-99fpj" for this suite.
Feb  7 20:35:26.703: INFO: namespace: e2e-tests-sched-pred-99fpj, resource: bindings, ignored listing per whitelist
[AfterEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:67
I0207 20:35:27.072508   22630 request.go:769] Error in request: resource name may not be empty

• [SLOW TEST:32.699 seconds]
[k8s.io] SchedulerPredicates [Serial]
/data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:826
  validates that embedding the JSON PodAffinity and PodAntiAffinity setting as a string in the annotation value work
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:664
------------------------------
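
The "embedded JSON" wording above refers to the 1.5-era alpha API: pod affinity was not yet a typed pod-spec field, so the spec carries it as a JSON string in an annotation. A minimal sketch with a hypothetical pod name and label selector; the topologyKey reuses the fake zone label applied in the test above.

cat <<'EOF' | kubectl create -f -
apiVersion: v1
kind: Pod
metadata:
  name: annotation-affinity-demo
  annotations:
    scheduler.alpha.kubernetes.io/affinity: >
      {"podAffinity": {"requiredDuringSchedulingIgnoredDuringExecution": [{
        "labelSelector": {"matchLabels": {"security": "S1"}},
        "topologyKey": "e2e.inter-pod-affinity.kubernetes.io/zone"}]}}
spec:
  containers:
  - name: pause
    image: k8s.gcr.io/pause:3.1
EOF
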
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[k8s.io] SchedulerPredicates [Serial] 
  validates that InterPodAffinity is respected if matching with multiple Affinities
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:615
[BeforeEach] [Top Level]
  /data/src/github.com/openshift/origin/test/extended/util/test.go:47
[BeforeEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:141
STEP: Creating a kubernetes client
Feb  7 20:35:27.080: INFO: >>> kubeConfig: /tmp/cluster-admin.kubeconfig

STEP: Building a namespace api object
Feb  7 20:35:27.251: INFO: About to run a Kube e2e test, ensuring namespace is privileged
STEP: Waiting for a default service account to be provisioned in namespace
[BeforeEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:100
Feb  7 20:35:27.663: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
Feb  7 20:35:27.727: INFO: Waiting for terminating namespaces to be deleted...
Feb  7 20:35:27.817: INFO: Waiting up to 5m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready
Feb  7 20:35:27.848: INFO: Waiting for pods to enter Success, but no pods in "kube-system" match label map[name:e2e-image-puller]
Feb  7 20:35:27.908: INFO: 0 / 0 pods in namespace 'kube-system' are running and ready (0 seconds elapsed)
Feb  7 20:35:27.908: INFO: expected 0 pod replicas in namespace 'kube-system', 0 are Running and Ready.
Feb  7 20:35:27.908: INFO: 
Logging pods the kubelet thinks are on node ci-pr104-ig-m-pr2w before test
Feb  7 20:35:27.971: INFO: registry-console-1-93942 from default started at 2017-02-07 19:52:13 -0500 EST (1 container statuses recorded)
Feb  7 20:35:27.971: INFO: 	Container registry-console ready: true, restart count 0
Feb  7 20:35:27.971: INFO: router-1-t1x6w from default started at 2017-02-07 19:51:44 -0500 EST (1 container statuses recorded)
Feb  7 20:35:27.971: INFO: 	Container router ready: true, restart count 0
Feb  7 20:35:27.971: INFO: docker-registry-3-vgj9z from default started at 2017-02-07 19:52:38 -0500 EST (1 container statuses recorded)
Feb  7 20:35:27.971: INFO: 	Container registry ready: true, restart count 0
Feb  7 20:35:27.971: INFO: 
Logging pods the kubelet thinks are on node ci-pr104-ig-n-3z1d before test
Feb  7 20:35:28.033: INFO: 
Logging pods the kubelet thinks are on node ci-pr104-ig-n-lffc before test
Feb  7 20:35:28.096: INFO: 
Logging pods the kubelet thinks are on node ci-pr104-ig-n-rjnv before test
[It] validates that InterPodAffinity is respected if matching with multiple Affinities
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:615
STEP: Trying to launch a pod with a label to get a node which can launch it.
STEP: Trying to apply a random label on the found node.
STEP: verifying the node has the label e2e.inter-pod-affinity.kubernetes.io/zone kubernetes-e2e
STEP: Trying to launch the pod, now with multiple pod affinities with diff LabelOperators.
STEP: removing the label e2e.inter-pod-affinity.kubernetes.io/zone off the node ci-pr104-ig-n-3z1d
STEP: verifying the node doesn't have the label e2e.inter-pod-affinity.kubernetes.io/zone
[AfterEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:142
Feb  7 20:35:32.852: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-sched-pred-0rst9" for this suite.
Feb  7 20:35:58.785: INFO: namespace: e2e-tests-sched-pred-0rst9, resource: bindings, ignored listing per whitelist
[AfterEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:67
I0207 20:35:59.151654   22630 request.go:769] Error in request: resource name may not be empty

• [SLOW TEST:32.071 seconds]
[k8s.io] SchedulerPredicates [Serial]
/data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:826
  validates that InterPodAffinity is respected if matching with multiple Affinities
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:615
------------------------------
SSSSSSS
------------------------------
[k8s.io] SchedulerPredicates [Serial] 
  validates that taints-tolerations is respected if not matching
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:757
[BeforeEach] [Top Level]
  /data/src/github.com/openshift/origin/test/extended/util/test.go:47
[BeforeEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:141
STEP: Creating a kubernetes client
Feb  7 20:35:59.151: INFO: >>> kubeConfig: /tmp/cluster-admin.kubeconfig

STEP: Building a namespace api object
Feb  7 20:35:59.278: INFO: About to run a Kube e2e test, ensuring namespace is privileged
STEP: Waiting for a default service account to be provisioned in namespace
[BeforeEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:100
Feb  7 20:35:59.687: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
Feb  7 20:35:59.751: INFO: Waiting for terminating namespaces to be deleted...
Feb  7 20:35:59.842: INFO: Waiting up to 5m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready
Feb  7 20:35:59.873: INFO: Waiting for pods to enter Success, but no pods in "kube-system" match label map[name:e2e-image-puller]
Feb  7 20:35:59.934: INFO: 0 / 0 pods in namespace 'kube-system' are running and ready (0 seconds elapsed)
Feb  7 20:35:59.934: INFO: expected 0 pod replicas in namespace 'kube-system', 0 are Running and Ready.
Feb  7 20:35:59.934: INFO: 
Logging pods the kubelet thinks are on node ci-pr104-ig-m-pr2w before test
Feb  7 20:35:59.996: INFO: registry-console-1-93942 from default started at 2017-02-07 19:52:13 -0500 EST (1 container statuses recorded)
Feb  7 20:35:59.996: INFO: 	Container registry-console ready: true, restart count 0
Feb  7 20:35:59.996: INFO: router-1-t1x6w from default started at 2017-02-07 19:51:44 -0500 EST (1 container statuses recorded)
Feb  7 20:35:59.996: INFO: 	Container router ready: true, restart count 0
Feb  7 20:35:59.996: INFO: docker-registry-3-vgj9z from default started at 2017-02-07 19:52:38 -0500 EST (1 container statuses recorded)
Feb  7 20:35:59.996: INFO: 	Container registry ready: true, restart count 0
Feb  7 20:35:59.996: INFO: 
Logging pods the kubelet thinks are on node ci-pr104-ig-n-3z1d before test
Feb  7 20:36:00.059: INFO: 
Logging pods the kubelet thinks are on node ci-pr104-ig-n-lffc before test
Feb  7 20:36:00.123: INFO: 
Logging pods the kubelet thinks are on node ci-pr104-ig-n-rjnv before test
[It] validates that taints-tolerations is respected if not matching
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:757
STEP: Trying to launch a pod without a toleration to get a node which can launch it.
STEP: Explicitly delete pod here to free the resource it takes.
STEP: Trying to apply a random taint on the found node.
STEP: verifying the node has the taint kubernetes.io/e2e-taint-key-f3ae8c72-ed9e-11e6-8acb-0ee8138d8aac=testing-taint-value:NoSchedule
STEP: Trying to apply a random label on the found node.
STEP: verifying the node has the label kubernetes.io/e2e-label-key-f3bda536-ed9e-11e6-8acb-0ee8138d8aac testing-label-value
STEP: Trying to relaunch the pod, still no tolerations.
Feb  7 20:36:03.259: INFO: Sleeping 10 seconds and crossing our fingers that the scheduler will run in that time.
STEP: Removing taint off the node
STEP: removing the taint kubernetes.io/e2e-taint-key-f3ae8c72-ed9e-11e6-8acb-0ee8138d8aac=testing-taint-value:NoSchedule off the node ci-pr104-ig-n-lffc
STEP: verifying the node doesn't have the taint kubernetes.io/e2e-taint-key-f3ae8c72-ed9e-11e6-8acb-0ee8138d8aac=testing-taint-value:NoSchedule
Feb  7 20:36:13.389: INFO: Sleeping 10 seconds and crossing our fingers that the scheduler will run in that time.
STEP: removing the label kubernetes.io/e2e-label-key-f3bda536-ed9e-11e6-8acb-0ee8138d8aac off the node ci-pr104-ig-n-lffc
STEP: verifying the node doesn't have the label kubernetes.io/e2e-label-key-f3bda536-ed9e-11e6-8acb-0ee8138d8aac
STEP: removing the taint kubernetes.io/e2e-taint-key-f3ae8c72-ed9e-11e6-8acb-0ee8138d8aac=testing-taint-value:NoSchedule off the node ci-pr104-ig-n-lffc
[AfterEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:142
Feb  7 20:36:23.545: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-sched-pred-n8dmk" for this suite.
Feb  7 20:36:49.453: INFO: namespace: e2e-tests-sched-pred-n8dmk, resource: bindings, ignored listing per whitelist
[AfterEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:67
I0207 20:36:49.810628   22630 request.go:769] Error in request: resource name may not be empty

• [SLOW TEST:50.659 seconds]
[k8s.io] SchedulerPredicates [Serial]
/data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:826
  validates that taints-tolerations is respected if not matching
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:757
------------------------------
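
The taint apply/remove steps above map directly onto kubectl's taint subcommand. A minimal sketch with a hypothetical key and value; the trailing dash removes the taint again, mirroring the cleanup step.

kubectl taint nodes ci-pr104-ig-n-lffc example.e2e/taint=testing-taint-value:NoSchedule
kubectl taint nodes ci-pr104-ig-n-lffc example.e2e/taint:NoSchedule-
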
SSSSSSS
------------------------------
[k8s.io] SchedulerPredicates [Serial] 
  validates that InterPodAffinity is respected if matching
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:504
[BeforeEach] [Top Level]
  /data/src/github.com/openshift/origin/test/extended/util/test.go:47
[BeforeEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:141
STEP: Creating a kubernetes client
Feb  7 20:36:49.810: INFO: >>> kubeConfig: /tmp/cluster-admin.kubeconfig

STEP: Building a namespace api object
Feb  7 20:36:49.905: INFO: About to run a Kube e2e test, ensuring namespace is privileged
STEP: Waiting for a default service account to be provisioned in namespace
[BeforeEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:100
Feb  7 20:36:50.321: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
Feb  7 20:36:50.385: INFO: Waiting for terminating namespaces to be deleted...
Feb  7 20:36:50.473: INFO: Waiting up to 5m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready
Feb  7 20:36:50.503: INFO: Waiting for pods to enter Success, but no pods in "kube-system" match label map[name:e2e-image-puller]
Feb  7 20:36:50.562: INFO: 0 / 0 pods in namespace 'kube-system' are running and ready (0 seconds elapsed)
Feb  7 20:36:50.562: INFO: expected 0 pod replicas in namespace 'kube-system', 0 are Running and Ready.
Feb  7 20:36:50.562: INFO: 
Logging pods the kubelet thinks are on node ci-pr104-ig-m-pr2w before test
Feb  7 20:36:50.625: INFO: docker-registry-3-vgj9z from default started at 2017-02-07 19:52:38 -0500 EST (1 container statuses recorded)
Feb  7 20:36:50.625: INFO: 	Container registry ready: true, restart count 0
Feb  7 20:36:50.625: INFO: registry-console-1-93942 from default started at 2017-02-07 19:52:13 -0500 EST (1 container statuses recorded)
Feb  7 20:36:50.625: INFO: 	Container registry-console ready: true, restart count 0
Feb  7 20:36:50.625: INFO: router-1-t1x6w from default started at 2017-02-07 19:51:44 -0500 EST (1 container statuses recorded)
Feb  7 20:36:50.625: INFO: 	Container router ready: true, restart count 0
Feb  7 20:36:50.625: INFO: 
Logging pods the kubelet thinks are on node ci-pr104-ig-n-3z1d before test
Feb  7 20:36:50.689: INFO: 
Logging pods the kubelet thinks are on node ci-pr104-ig-n-lffc before test
Feb  7 20:36:50.753: INFO: 
Logging pods the kubelet thinks are on node ci-pr104-ig-n-rjnv before test
[It] validates that InterPodAffinity is respected if matching
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:504
STEP: Trying to launch a pod with a label to get a node which can launch it.
STEP: Trying to apply a random label on the found node.
STEP: verifying the node has the label e2e.inter-pod-affinity.kubernetes.io/zone china-e2etest
STEP: Trying to launch the pod, now with podAffinity.
STEP: removing the label e2e.inter-pod-affinity.kubernetes.io/zone off the node ci-pr104-ig-n-rjnv
STEP: verifying the node doesn't have the label e2e.inter-pod-affinity.kubernetes.io/zone
[AfterEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:142
Feb  7 20:36:55.525: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-sched-pred-wtbnt" for this suite.
Feb  7 20:37:21.429: INFO: namespace: e2e-tests-sched-pred-wtbnt, resource: bindings, ignored listing per whitelist
[AfterEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:67
I0207 20:37:21.786194   22630 request.go:769] Error in request: resource name may not be empty

• [SLOW TEST:31.975 seconds]
[k8s.io] SchedulerPredicates [Serial]
/data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:826
  validates that InterPodAffinity is respected if matching
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:504
------------------------------
SSSSSSSSSSSS
------------------------------
[k8s.io] Daemon set [Serial] 
  should run and stop complex daemon
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/daemon_set.go:204
[BeforeEach] [Top Level]
  /data/src/github.com/openshift/origin/test/extended/util/test.go:47
[BeforeEach] [k8s.io] Daemon set [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:141
STEP: Creating a kubernetes client
Feb  7 20:37:21.786: INFO: >>> kubeConfig: /tmp/cluster-admin.kubeconfig

STEP: Building a namespace api object
Feb  7 20:37:21.903: INFO: About to run a Kube e2e test, ensuring namespace is privileged
STEP: Waiting for a default service account to be provisioned in namespace
[BeforeEach] [k8s.io] Daemon set [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/daemon_set.go:89
[It] should run and stop complex daemon
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/daemon_set.go:204
Feb  7 20:37:30.411: INFO: Creating daemon with a node selector daemon-set
STEP: Initially, daemon pods should not be running on any nodes.
Feb  7 20:37:32.474: INFO: nodesToPodCount: map[string]int{}
STEP: Change label of node, check that daemon pod is launched.
Feb  7 20:37:36.626: INFO: nodesToPodCount: map[string]int{"ci-pr104-ig-m-pr2w":1}
STEP: remove the node selector and wait for daemons to be unscheduled
Feb  7 20:37:40.749: INFO: nodesToPodCount: map[string]int{}
STEP: We should now be able to delete the daemon set.
[AfterEach] [k8s.io] Daemon set [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/daemon_set.go:73
Feb  7 20:37:40.811: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"extensions/v1beta1","metadata":{"selfLink":"/apis/extensions/v1beta1/namespaces/e2e-tests-daemonsets-21jnp/daemonsets","resourceVersion":"28832"},"items":null}

Feb  7 20:37:40.842: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/e2e-tests-daemonsets-21jnp/pods","resourceVersion":"28833"},"items":null}

[AfterEach] [k8s.io] Daemon set [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:142
Feb  7 20:37:49.023: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-daemonsets-21jnp" for this suite.
Feb  7 20:37:59.928: INFO: namespace: e2e-tests-daemonsets-21jnp, resource: bindings, ignored listing per whitelist

• [SLOW TEST:38.500 seconds]
[k8s.io] Daemon set [Serial]
/data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:826
  should run and stop complex daemon
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/daemon_set.go:204
------------------------------
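
The "complex daemon" spec above drives scheduling purely through node labels: a DaemonSet whose pod template carries a nodeSelector gains a pod when a node acquires the matching label and loses it when the label is removed. A minimal sketch with a hypothetical label key.

kubectl label node ci-pr104-ig-m-pr2w daemon-group=blue    # daemon pod is launched on this node
kubectl label node ci-pr104-ig-m-pr2w daemon-group-        # label removed; the daemon pod is deleted
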
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[k8s.io] SchedulerPredicates [Serial] 
  validates that NodeSelector is respected if matching [Conformance]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:289
[BeforeEach] [Top Level]
  /data/src/github.com/openshift/origin/test/extended/util/test.go:47
[BeforeEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:141
STEP: Creating a kubernetes client
Feb  7 20:38:00.287: INFO: >>> kubeConfig: /tmp/cluster-admin.kubeconfig

STEP: Building a namespace api object
Feb  7 20:38:00.465: INFO: About to run a Kube e2e test, ensuring namespace is privileged
STEP: Waiting for a default service account to be provisioned in namespace
[BeforeEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:100
Feb  7 20:38:00.998: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
Feb  7 20:38:01.066: INFO: Waiting for terminating namespaces to be deleted...
Feb  7 20:38:01.162: INFO: Waiting up to 5m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready
Feb  7 20:38:01.192: INFO: Waiting for pods to enter Success, but no pods in "kube-system" match label map[name:e2e-image-puller]
Feb  7 20:38:01.252: INFO: 0 / 0 pods in namespace 'kube-system' are running and ready (0 seconds elapsed)
Feb  7 20:38:01.252: INFO: expected 0 pod replicas in namespace 'kube-system', 0 are Running and Ready.
Feb  7 20:38:01.252: INFO: 
Logging pods the kubelet thinks are on node ci-pr104-ig-m-pr2w before test
Feb  7 20:38:01.313: INFO: docker-registry-3-vgj9z from default started at 2017-02-07 19:52:38 -0500 EST (1 container statuses recorded)
Feb  7 20:38:01.313: INFO: 	Container registry ready: true, restart count 0
Feb  7 20:38:01.313: INFO: registry-console-1-93942 from default started at 2017-02-07 19:52:13 -0500 EST (1 container statuses recorded)
Feb  7 20:38:01.313: INFO: 	Container registry-console ready: true, restart count 0
Feb  7 20:38:01.313: INFO: router-1-t1x6w from default started at 2017-02-07 19:51:44 -0500 EST (1 container statuses recorded)
Feb  7 20:38:01.313: INFO: 	Container router ready: true, restart count 0
Feb  7 20:38:01.313: INFO: 
Logging pods the kubelet thinks are on node ci-pr104-ig-n-3z1d before test
Feb  7 20:38:01.373: INFO: 
Logging pods the kubelet thinks are on node ci-pr104-ig-n-lffc before test
Feb  7 20:38:01.434: INFO: 
Logging pods the kubelet thinks are on node ci-pr104-ig-n-rjnv before test
[It] validates that NodeSelector is respected if matching [Conformance]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:289
STEP: Trying to launch a pod without a label to get a node which can launch it.
STEP: Explicitly delete pod here to free the resource it takes.
STEP: Trying to apply a random label on the found node.
STEP: verifying the node has the label kubernetes.io/e2e-3bf96f30-ed9f-11e6-8acb-0ee8138d8aac 42
STEP: Trying to relaunch the pod, now with labels.
STEP: removing the label kubernetes.io/e2e-3bf96f30-ed9f-11e6-8acb-0ee8138d8aac off the node ci-pr104-ig-n-3z1d
STEP: verifying the node doesn't have the label kubernetes.io/e2e-3bf96f30-ed9f-11e6-8acb-0ee8138d8aac
[AfterEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:142
Feb  7 20:38:07.620: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-sched-pred-j4mgq" for this suite.
Feb  7 20:38:33.529: INFO: namespace: e2e-tests-sched-pred-j4mgq, resource: bindings, ignored listing per whitelist
[AfterEach] [k8s.io] SchedulerPredicates [Serial]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:67
I0207 20:38:33.896432   22630 request.go:769] Error in request: resource name may not be empty

• [SLOW TEST:33.609 seconds]
[k8s.io] SchedulerPredicates [Serial]
/data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:826
  validates that NodeSelector is respected if matching [Conformance]
  /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/scheduler_predicates.go:289
------------------------------
SSSSSSSSSSSSSSSSSSSSSSSS
Ran 22 of 694 Specs in 801.733 seconds
SUCCESS! -- 22 Passed | 0 Failed | 0 Pending | 672 Skipped
Feb  7 20:38:33.902: INFO: Error running cluster/log-dump.sh: fork/exec /data/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/cluster/log-dump.sh: no such file or directory
PASS

Ginkgo ran 1 suite in 13m22.581656541s
Test Suite Passed

real	44m11.553s
user	60m42.973s
sys	3m22.830s
+ [[ branch_success == \b\r\a\n\c\h\_\s\u\c\c\e\s\s ]]
+ [[ '' != 1 ]]
+ [[ 1 == 1 ]]
+ to=openshift/origin-gce:latest
+ sudo docker tag openshift/origin-gce:latest openshift/origin-gce:latest
+ sudo docker push openshift/origin-gce:latest
The push refers to a repository [docker.io/openshift/origin-gce]
74ef162b05f6: Preparing
a88d8d803cb0: Preparing
5a25472f261c: Preparing
60021516d912: Preparing
34e7b85d83e4: Preparing
60021516d912: Mounted from openshift/origin-base
34e7b85d83e4: Mounted from openshift/origin-base
74ef162b05f6: Pushed
a88d8d803cb0: Pushed
5a25472f261c: Pushed
latest: digest: sha256:f3b146407cbd434c6851e7ef4871f733ff7c3bb5a2f4bbedc7f7913c67c77aeb size: 1377
+ exit 0
+ gather
+ hack/build-go.sh cmd/oc
++ Building go targets for linux/amd64: cmd/oc
hack/build-go.sh took 3 seconds
++ pwd
+ export PATH=/data/src/github.com/openshift/origin/_output/local/bin/linux/amd64:/var/lib/jenkins/jobs/zz_origin_gce_image/workspace/_output/local/bin/linux/amd64:/usr/lib64/qt-3.3/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/home/ec2-user/.local/bin:/home/ec2-user/bin
+ PATH=/data/src/github.com/openshift/origin/_output/local/bin/linux/amd64:/var/lib/jenkins/jobs/zz_origin_gce_image/workspace/_output/local/bin/linux/amd64:/usr/lib64/qt-3.3/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/home/ec2-user/.local/bin:/home/ec2-user/bin
+ oc get --raw /metrics
+ oc get nodes --template '{{ range .items }}{{ .metadata.name }} {{ end }}'
+ xargs -n1 -I '{}' bash -c 'oc get --raw /api/v1/nodes/{}/proxy/metrics > /tmp/artifacts/{}.metrics' ''
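
The trace lines above dump the API server's /metrics and each node's kubelet metrics into the artifacts directory; spelled out as a plain loop, the same gathering looks roughly like this (a sketch, assuming the same paths).

oc get --raw /metrics > /tmp/artifacts/master.metrics
for node in $(oc get nodes --template '{{ range .items }}{{ .metadata.name }} {{ end }}'); do
  oc get --raw "/api/v1/nodes/${node}/proxy/metrics" > "/tmp/artifacts/${node}.metrics"
done
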
[PostBuildScript] - Executing post-build scripts.
[workspace] $ /bin/bash /tmp/hudson2678755231611814597.sh
~/jobs/zz_origin_gce_image/workspace ~/jobs/zz_origin_gce_image/workspace
Activated service account credentials for: [jenkins-ci-provisioner@openshift-gce-devel.iam.gserviceaccount.com]

PLAY [Terminate running cluster and remove all supporting resources in GCE] ****

TASK [setup] *******************************************************************
Wednesday 08 February 2017  01:39:28 +0000 (0:00:00.033)       0:00:00.033 **** 
ok: [localhost]

TASK [deprovision : Templatize de-provision script] ****************************
Wednesday 08 February 2017  01:39:29 +0000 (0:00:01.199)       0:00:01.232 **** 
changed: [localhost]

TASK [deprovision : De-provision GCE resources] ********************************
Wednesday 08 February 2017  01:39:29 +0000 (0:00:00.431)       0:00:01.664 **** 
changed: [localhost]

PLAY RECAP *********************************************************************
localhost                  : ok=3    changed=2    unreachable=0    failed=0   

Wednesday 08 February 2017  01:47:00 +0000 (0:07:31.063)       0:07:32.727 **** 
=============================================================================== 
deprovision : De-provision GCE resources ------------------------------ 451.06s
setup ------------------------------------------------------------------- 1.20s
deprovision : Templatize de-provision script ---------------------------- 0.43s
/var/lib/jenkins/jobs/zz_origin_gce_image/workspace/artifacts
/var/lib/jenkins/jobs/zz_origin_gce_image/workspace/artifacts/junit
/var/lib/jenkins/jobs/zz_origin_gce_image/workspace/artifacts/junit/junit.xml
/var/lib/jenkins/jobs/zz_origin_gce_image/workspace/artifacts/ci-pr104-ig-m-pr2w
/var/lib/jenkins/jobs/zz_origin_gce_image/workspace/artifacts/master.metrics

PLAYBOOK: main.yml *************************************************************
4 plays in /var/lib/jenkins/jobs/zz_origin_gce_image/workspace/.venv/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml

PLAY [ensure we have the parameters necessary to deprovision virtual hosts] ****

TASK [ensure all required variables are set] ***********************************
task path: /var/lib/jenkins/jobs/zz_origin_gce_image/workspace/.venv/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:9
skipping: [localhost] => (item=origin_ci_inventory_dir)  => {
    "changed": false, 
    "item": "origin_ci_inventory_dir", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_region)  => {
    "changed": false, 
    "item": "origin_ci_aws_region", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}

PLAY [deprovision virtual hosts in EC2] ****************************************

TASK [Gathering Facts] *********************************************************
ok: [localhost]

TASK [deprovision a virtual EC2 host] ******************************************
task path: /var/lib/jenkins/jobs/zz_origin_gce_image/workspace/.venv/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:28
included: /var/lib/jenkins/jobs/zz_origin_gce_image/workspace/.venv/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml for localhost

TASK [update the SSH configuration to remove AWS EC2 specifics] ****************
task path: /var/lib/jenkins/jobs/zz_origin_gce_image/workspace/.venv/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:2
ok: [localhost] => {
    "changed": false, 
    "msg": ""
}

TASK [rename EC2 instance for termination reaper] ******************************
task path: /var/lib/jenkins/jobs/zz_origin_gce_image/workspace/.venv/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:8
changed: [localhost] => {
    "changed": true, 
    "msg": "Tags {'Name': 'terminate'} created for resource i-0a950257192242858."
}

TASK [tear down the EC2 instance] **********************************************
task path: /var/lib/jenkins/jobs/zz_origin_gce_image/workspace/.venv/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:15
changed: [localhost] => {
    "changed": true, 
    "instance_ids": [
        "i-0a950257192242858"
    ], 
    "instances": [
        {
            "ami_launch_index": "0", 
            "architecture": "x86_64", 
            "block_device_mapping": {
                "/dev/sda1": {
                    "delete_on_termination": true, 
                    "status": "attached", 
                    "volume_id": "vol-029d090fc25af06b7"
                }, 
                "/dev/sdb": {
                    "delete_on_termination": true, 
                    "status": "attached", 
                    "volume_id": "vol-0c53d812de34eb942"
                }
            }, 
            "dns_name": "ec2-52-90-253-216.compute-1.amazonaws.com", 
            "ebs_optimized": false, 
            "groups": {
                "sg-7e73221a": "default"
            }, 
            "hypervisor": "xen", 
            "id": "i-0a950257192242858", 
            "image_id": "ami-f1b3a8e6", 
            "instance_type": "m4.xlarge", 
            "kernel": null, 
            "key_name": "libra", 
            "launch_time": "2017-02-08T00:27:26.000Z", 
            "placement": "us-east-1d", 
            "private_dns_name": "ip-172-18-10-225.ec2.internal", 
            "private_ip": "172.18.10.225", 
            "public_dns_name": "ec2-52-90-253-216.compute-1.amazonaws.com", 
            "public_ip": "52.90.253.216", 
            "ramdisk": null, 
            "region": "us-east-1", 
            "root_device_name": "/dev/sda1", 
            "root_device_type": "ebs", 
            "state": "running", 
            "state_code": 16, 
            "tags": {
                "Name": "terminate", 
                "openshift_etcd": "", 
                "openshift_master": "", 
                "openshift_node": ""
            }, 
            "tenancy": "default", 
            "virtualization_type": "hvm"
        }
    ], 
    "tagged_instances": []
}

TASK [remove the serialized host variables] ************************************
task path: /var/lib/jenkins/jobs/zz_origin_gce_image/workspace/.venv/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:21
changed: [localhost] => {
    "changed": true, 
    "path": "/var/lib/jenkins/jobs/zz_origin_gce_image/workspace/.config/origin-ci-tool/inventory/host_vars/172.18.10.225.yml", 
    "state": "absent"
}

PLAY [deprovision virtual hosts locally managed by Vagrant] ********************

PLAY [clean up local configuration for deprovisioned instances] ****************

TASK [remove inventory configuration directory] ********************************
task path: /var/lib/jenkins/jobs/zz_origin_gce_image/workspace/.venv/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:61
changed: [localhost] => {
    "changed": true, 
    "path": "/var/lib/jenkins/jobs/zz_origin_gce_image/workspace/.config/origin-ci-tool/inventory", 
    "state": "absent"
}

PLAY RECAP *********************************************************************
localhost                  : ok=7    changed=4    unreachable=0    failed=0   

~/jobs/zz_origin_gce_image/workspace
Recording test results
Archiving artifacts
Finished: SUCCESS