
 cookie=0xfb5876b8, duration=761.693s, table=90, n_packets=0, n_bytes=0, priority=100,ip,nw_dst=10.130.0.0/23 actions=move:NXM_NX_REG0[]->NXM_NX_TUN_ID[0..31],set_field:172.17.0.2->tun_dst,output:1
 cookie=0x0, duration=761.923s, table=90, n_packets=0, n_bytes=0, priority=0 actions=drop
 cookie=0x0, duration=761.919s, table=100, n_packets=0, n_bytes=0, priority=0 actions=goto_table:101
 cookie=0x0, duration=761.910s, table=101, n_packets=0, n_bytes=0, priority=51,tcp,nw_dst=172.17.0.3,tp_dst=53 actions=output:2
 cookie=0x0, duration=761.902s, table=101, n_packets=0, n_bytes=0, priority=51,udp,nw_dst=172.17.0.3,tp_dst=53 actions=output:2
 cookie=0x0, duration=761.895s, table=101, n_packets=0, n_bytes=0, priority=0 actions=output:2
 cookie=0x0, duration=761.888s, table=110, n_packets=0, n_bytes=0, priority=0 actions=drop
 cookie=0x0, duration=761.689s, table=111, n_packets=10, n_bytes=768, priority=100 actions=move:NXM_NX_REG0[]->NXM_NX_TUN_ID[0..31],set_field:172.17.0.2->tun_dst,output:1,set_field:172.17.0.4->tun_dst,output:1,goto_table:120
 cookie=0x0, duration=761.878s, table=120, n_packets=0, n_bytes=0, priority=0 actions=drop
 cookie=0x0, duration=761.873s, table=253, n_packets=0, n_bytes=0, actions=note:01.07.00.00.00.00


Apr  5 20:47:13.684: INFO: Running '/data/src/github.com/openshift/origin/_output/local/bin/linux/amd64/kubectl --server=https://172.17.0.2:8443 --kubeconfig=/tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig exec --namespace=e2e-tests-net-services2-gzdwh debugpod-sourceip-nettest-node-16qbrg -- /bin/sh -c iptables-save'
Apr  5 20:47:13.981: INFO: stderr: ""
Apr  5 20:47:13.981: INFO: DEBUG:
# Generated by iptables-save v1.4.21 on Thu Apr  5 20:47:13 2018
*nat
:PREROUTING ACCEPT [4:240]
:INPUT ACCEPT [4:240]
:OUTPUT ACCEPT [28:1824]
:POSTROUTING ACCEPT [28:1824]
:DOCKER - [0:0]
:KUBE-HOSTPORTS - [0:0]
:KUBE-MARK-DROP - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-NODEPORT-CONTAINER - [0:0]
:KUBE-NODEPORT-HOST - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-PORTALS-CONTAINER - [0:0]
:KUBE-PORTALS-HOST - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-SEP-77DLEKBM3D5CRC3D - [0:0]
:KUBE-SEP-CKTKXEMIKRIIY55M - [0:0]
:KUBE-SEP-EZ5ESXJRZ36JV4D4 - [0:0]
:KUBE-SEP-PATXOTJBHFPU4CNS - [0:0]
:KUBE-SERVICES - [0:0]
:KUBE-SVC-2SDL4S5W77TQOHLU - [0:0]
:KUBE-SVC-3VQ6B3MLH7E2SZT4 - [0:0]
:KUBE-SVC-BA6I5HTZKAAAJT56 - [0:0]
:KUBE-SVC-NPX46M4PTMTKRN6Y - [0:0]
:OPENSHIFT-MASQUERADE - [0:0]
-A PREROUTING -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A PREROUTING -m comment --comment "handle ClusterIPs; NOTE: this must be before the NodePort rules" -j KUBE-PORTALS-CONTAINER
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
-A PREROUTING -m addrtype --dst-type LOCAL -m comment --comment "handle service NodePorts; NOTE: this must be the last rule in the chain" -j KUBE-NODEPORT-CONTAINER
-A PREROUTING -m comment --comment "kube hostport portals" -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS
-A OUTPUT -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A OUTPUT -m comment --comment "handle ClusterIPs; NOTE: this must be before the NodePort rules" -j KUBE-PORTALS-HOST
-A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER
-A OUTPUT -m addrtype --dst-type LOCAL -m comment --comment "handle service NodePorts; NOTE: this must be the last rule in the chain" -j KUBE-NODEPORT-HOST
-A OUTPUT -m comment --comment "kube hostport portals" -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS
-A POSTROUTING -m comment --comment "rules for masquerading OpenShift traffic" -j OPENSHIFT-MASQUERADE
-A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING
-A POSTROUTING -s 172.19.0.0/16 ! -o docker0 -j MASQUERADE
-A POSTROUTING -s 127.0.0.0/8 -o tun0 -m comment --comment "SNAT for localhost access to hostports" -j MASQUERADE
-A DOCKER -i docker0 -j RETURN
-A KUBE-MARK-DROP -j MARK --set-xmark 0x8000/0x8000
-A KUBE-MARK-MASQ -j MARK --set-xmark 0x1/0x1
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -m mark --mark 0x1/0x1 -j MASQUERADE
-A KUBE-SEP-77DLEKBM3D5CRC3D -s 10.128.0.62/32 -m comment --comment "e2e-tests-net-services1-pmlk5/service-mtrjj:" -j KUBE-MARK-MASQ
-A KUBE-SEP-77DLEKBM3D5CRC3D -p tcp -m comment --comment "e2e-tests-net-services1-pmlk5/service-mtrjj:" -m tcp -j DNAT --to-destination 10.128.0.62:8080
-A KUBE-SEP-CKTKXEMIKRIIY55M -s 172.17.0.2/32 -m comment --comment "default/kubernetes:dns" -j KUBE-MARK-MASQ
-A KUBE-SEP-CKTKXEMIKRIIY55M -p udp -m comment --comment "default/kubernetes:dns" -m recent --set --name KUBE-SEP-CKTKXEMIKRIIY55M --mask 255.255.255.255 --rsource -m udp -j DNAT --to-destination 172.17.0.2:8053
-A KUBE-SEP-EZ5ESXJRZ36JV4D4 -s 172.17.0.2/32 -m comment --comment "default/kubernetes:dns-tcp" -j KUBE-MARK-MASQ
-A KUBE-SEP-EZ5ESXJRZ36JV4D4 -p tcp -m comment --comment "default/kubernetes:dns-tcp" -m recent --set --name KUBE-SEP-EZ5ESXJRZ36JV4D4 --mask 255.255.255.255 --rsource -m tcp -j DNAT --to-destination 172.17.0.2:8053
-A KUBE-SEP-PATXOTJBHFPU4CNS -s 172.17.0.2/32 -m comment --comment "default/kubernetes:https" -j KUBE-MARK-MASQ
-A KUBE-SEP-PATXOTJBHFPU4CNS -p tcp -m comment --comment "default/kubernetes:https" -m recent --set --name KUBE-SEP-PATXOTJBHFPU4CNS --mask 255.255.255.255 --rsource -m tcp -j DNAT --to-destination 172.17.0.2:8443
-A KUBE-SERVICES -d 172.30.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-SVC-NPX46M4PTMTKRN6Y
-A KUBE-SERVICES -d 172.30.0.1/32 -p udp -m comment --comment "default/kubernetes:dns cluster IP" -m udp --dport 53 -j KUBE-SVC-3VQ6B3MLH7E2SZT4
-A KUBE-SERVICES -d 172.30.0.1/32 -p tcp -m comment --comment "default/kubernetes:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-SVC-BA6I5HTZKAAAJT56
-A KUBE-SERVICES -d 172.30.7.22/32 -p tcp -m comment --comment "e2e-tests-net-services1-pmlk5/service-mtrjj: cluster IP" -m tcp --dport 8080 -j KUBE-SVC-2SDL4S5W77TQOHLU
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
-A KUBE-SVC-2SDL4S5W77TQOHLU -m comment --comment "e2e-tests-net-services1-pmlk5/service-mtrjj:" -j KUBE-SEP-77DLEKBM3D5CRC3D
-A KUBE-SVC-3VQ6B3MLH7E2SZT4 -m comment --comment "default/kubernetes:dns" -m recent --rcheck --seconds 10800 --reap --name KUBE-SEP-CKTKXEMIKRIIY55M --mask 255.255.255.255 --rsource -j KUBE-SEP-CKTKXEMIKRIIY55M
-A KUBE-SVC-3VQ6B3MLH7E2SZT4 -m comment --comment "default/kubernetes:dns" -j KUBE-SEP-CKTKXEMIKRIIY55M
-A KUBE-SVC-BA6I5HTZKAAAJT56 -m comment --comment "default/kubernetes:dns-tcp" -m recent --rcheck --seconds 10800 --reap --name KUBE-SEP-EZ5ESXJRZ36JV4D4 --mask 255.255.255.255 --rsource -j KUBE-SEP-EZ5ESXJRZ36JV4D4
-A KUBE-SVC-BA6I5HTZKAAAJT56 -m comment --comment "default/kubernetes:dns-tcp" -j KUBE-SEP-EZ5ESXJRZ36JV4D4
-A KUBE-SVC-NPX46M4PTMTKRN6Y -m comment --comment "default/kubernetes:https" -m recent --rcheck --seconds 10800 --reap --name KUBE-SEP-PATXOTJBHFPU4CNS --mask 255.255.255.255 --rsource -j KUBE-SEP-PATXOTJBHFPU4CNS
-A KUBE-SVC-NPX46M4PTMTKRN6Y -m comment --comment "default/kubernetes:https" -j KUBE-SEP-PATXOTJBHFPU4CNS
-A OPENSHIFT-MASQUERADE -s 10.128.0.0/14 -m comment --comment "masquerade pod-to-service and pod-to-external traffic" -j MASQUERADE
COMMIT
# Completed on Thu Apr  5 20:47:13 2018
# Generated by iptables-save v1.4.21 on Thu Apr  5 20:47:13 2018
*filter
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [5225:267934]
:DOCKER - [0:0]
:DOCKER-ISOLATION - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FIREWALL - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORT-NON-LOCAL - [0:0]
:KUBE-SERVICES - [0:0]
:OPENSHIFT-ADMIN-OUTPUT-RULES - [0:0]
:OPENSHIFT-FIREWALL-ALLOW - [0:0]
:OPENSHIFT-FIREWALL-FORWARD - [0:0]
-A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES
-A INPUT -m comment --comment "Ensure that non-local NodePort traffic can flow" -j KUBE-NODEPORT-NON-LOCAL
-A INPUT -m comment --comment "firewall overrides" -j OPENSHIFT-FIREWALL-ALLOW
-A INPUT -j KUBE-FIREWALL
-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
-A INPUT -p icmp -j ACCEPT
-A INPUT -i lo -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 10250 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 22 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 80 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 1936 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 443 -j ACCEPT
-A INPUT -j REJECT --reject-with icmp-host-prohibited
-A FORWARD -m comment --comment "kubernetes forwarding rules" -j KUBE-FORWARD
-A FORWARD -i tun0 ! -o tun0 -m comment --comment "administrator overrides" -j OPENSHIFT-ADMIN-OUTPUT-RULES
-A FORWARD -m comment --comment "firewall overrides" -j OPENSHIFT-FIREWALL-FORWARD
-A FORWARD -j DOCKER-ISOLATION
-A FORWARD -o docker0 -j DOCKER
-A FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -i docker0 ! -o docker0 -j ACCEPT
-A FORWARD -i docker0 -o docker0 -j ACCEPT
-A FORWARD -j REJECT --reject-with icmp-host-prohibited
-A OUTPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A OUTPUT -j KUBE-FIREWALL
-A DOCKER-ISOLATION -j RETURN
-A KUBE-FIREWALL -m comment --comment "kubernetes firewall for dropping marked packets" -m mark --mark 0x8000/0x8000 -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x1/0x1 -j ACCEPT
-A OPENSHIFT-FIREWALL-ALLOW -p udp -m udp --dport 4789 -m comment --comment "VXLAN incoming" -j ACCEPT
-A OPENSHIFT-FIREWALL-ALLOW -i tun0 -m comment --comment "from SDN to localhost" -j ACCEPT
-A OPENSHIFT-FIREWALL-ALLOW -i docker0 -m comment --comment "from docker to localhost" -j ACCEPT
-A OPENSHIFT-FIREWALL-FORWARD -s 10.128.0.0/14 -m comment --comment "attempted resend after connection close" -m conntrack --ctstate INVALID -j DROP
-A OPENSHIFT-FIREWALL-FORWARD -d 10.128.0.0/14 -m comment --comment "forward traffic from SDN" -j ACCEPT
-A OPENSHIFT-FIREWALL-FORWARD -s 10.128.0.0/14 -m comment --comment "forward traffic to SDN" -j ACCEPT
COMMIT
# Completed on Thu Apr  5 20:47:13 2018


Apr  5 20:47:13.981: INFO: Running '/data/src/github.com/openshift/origin/_output/local/bin/linux/amd64/kubectl --server=https://172.17.0.2:8443 --kubeconfig=/tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig exec --namespace=e2e-tests-net-services2-gzdwh debugpod-sourceip-nettest-node-16qbrg -- /bin/sh -c ss -ant'
Apr  5 20:47:14.288: INFO: stderr: ""
Apr  5 20:47:14.288: INFO: DEBUG:
State      Recv-Q Send-Q Local Address:Port               Peer Address:Port              
LISTEN     0      128          *:22                       *:*                  
TIME-WAIT  0      0      172.17.0.3:39682              52.216.229.83:443                
ESTAB      0      0      172.17.0.3:51576              172.17.0.2:8443               
TIME-WAIT  0      0      172.17.0.3:39680              52.216.229.83:443                
LISTEN     0      128         :::10256                   :::*                  
LISTEN     0      128         :::22                      :::*                  
LISTEN     0      128         :::10250                   :::*                  
ESTAB      0      0        ::ffff:172.17.0.3:10250                ::ffff:172.17.0.2:52316              
ESTAB      0      0        ::ffff:172.17.0.3:10250                ::ffff:172.17.0.2:52686              


Apr  5 20:47:14.293: INFO: Cleaning up the exec pod
[AfterEach] when using a plugin that isolates namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:135
Apr  5 20:47:14.320: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-net-services1-pmlk5" for this suite.
Apr  5 20:47:20.334: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
Apr  5 20:47:20.415: INFO: namespace: e2e-tests-net-services1-pmlk5, resource: bindings, ignored listing per whitelist
Apr  5 20:47:20.439: INFO: namespace e2e-tests-net-services1-pmlk5 deletion completed in 6.115284974s
[AfterEach] when using a plugin that isolates namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:135
Apr  5 20:47:20.439: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-net-services2-gzdwh" for this suite.
Apr  5 20:47:26.447: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
Apr  5 20:47:26.534: INFO: namespace: e2e-tests-net-services2-gzdwh, resource: bindings, ignored listing per whitelist
Apr  5 20:47:26.552: INFO: namespace e2e-tests-net-services2-gzdwh deletion completed in 6.111500027s

• [SLOW TEST:64.818 seconds]
[Area:Networking] services
/tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/services.go:10
  when using a plugin that isolates namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/util.go:415
    should prevent connections to pods in different namespaces on the same node via service IPs [Suite:openshift/conformance/parallel]
    /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/services.go:40
------------------------------
S
------------------------------
[Area:Networking] network isolation when using a plugin that isolates namespaces by default 
  should prevent communication between pods in different namespaces on the same node [Suite:openshift/conformance/parallel]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/isolation.go:28
[BeforeEach] [Top Level]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/util/test.go:53
[BeforeEach] when using a plugin that isolates namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/util.go:416
[BeforeEach] when using a plugin that isolates namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:134
STEP: Creating a kubernetes client
Apr  5 20:47:26.552: INFO: >>> kubeConfig: /tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig
STEP: Building a namespace api object
STEP: Waiting for a default service account to be provisioned in namespace
[BeforeEach] when using a plugin that isolates namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:134
STEP: Creating a kubernetes client
Apr  5 20:47:26.631: INFO: >>> kubeConfig: /tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig
STEP: Building a namespace api object
STEP: Waiting for a default service account to be provisioned in namespace
[It] should prevent communication between pods in different namespaces on the same node [Suite:openshift/conformance/parallel]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/isolation.go:28
Apr  5 20:47:26.743: INFO: Using nettest-node-1 for test ([nettest-node-1 nettest-node-2] out of [nettest-node-1 nettest-node-2])
Apr  5 20:47:30.757: INFO: Target pod IP:port is 10.128.0.64:8080
Apr  5 20:47:30.757: INFO: Creating an exec pod on node nettest-node-1
Apr  5 20:47:30.757: INFO: Creating new exec pod
Apr  5 20:47:34.769: INFO: Waiting up to 10s to wget 10.128.0.64:8080
Apr  5 20:47:34.770: INFO: Running '/data/src/github.com/openshift/origin/_output/local/bin/linux/amd64/kubectl --server=https://172.17.0.2:8443 --kubeconfig=/tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig exec --namespace=e2e-tests-net-isolation2-wm7tf execpod-sourceip-nettest-node-1z8zpw -- /bin/sh -c wget -T 30 -qO- 10.128.0.64:8080'
Apr  5 20:48:05.071: INFO: rc: 127
Apr  5 20:48:05.072: INFO: got err: error running &{/data/src/github.com/openshift/origin/_output/local/bin/linux/amd64/kubectl [kubectl --server=https://172.17.0.2:8443 --kubeconfig=/tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig exec --namespace=e2e-tests-net-isolation2-wm7tf execpod-sourceip-nettest-node-1z8zpw -- /bin/sh -c wget -T 30 -qO- 10.128.0.64:8080] []  <nil>  wget: download timed out
command terminated with exit code 1
 [] <nil> 0xc421414ea0 exit status 1 <nil> <nil> true [0xc420df30c0 0xc420df30e8 0xc420df3110] [0xc420df30c0 0xc420df30e8 0xc420df3110] [0xc420df30d8 0xc420df3108] [0x989690 0x989690] 0xc4219cce40 <nil>}:
Command stdout:

stderr:
wget: download timed out
command terminated with exit code 1

error:
exit status 1
, retry until timeout
Apr  5 20:48:05.072: INFO: Creating new exec pod
Apr  5 20:48:07.088: INFO: Running '/data/src/github.com/openshift/origin/_output/local/bin/linux/amd64/kubectl --server=https://172.17.0.2:8443 --kubeconfig=/tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig exec --namespace=e2e-tests-net-isolation2-wm7tf debugpod-sourceip-nettest-node-1f6lsg -- /bin/sh -c ovs-ofctl -O OpenFlow13 dump-flows br0'
Apr  5 20:48:07.375: INFO: stderr: ""
Apr  5 20:48:07.375: INFO: DEBUG:
OFPST_FLOW reply (OF1.3) (xid=0x2):
 cookie=0x0, duration=815.824s, table=0, n_packets=0, n_bytes=0, priority=250,ip,in_port=2,nw_dst=224.0.0.0/4 actions=drop
 cookie=0x0, duration=815.841s, table=0, n_packets=16, n_bytes=672, priority=200,arp,in_port=1,arp_spa=10.128.0.0/14,arp_tpa=10.128.0.0/23 actions=move:NXM_NX_TUN_ID[0..31]->NXM_NX_REG0[],goto_table:10
 cookie=0x0, duration=815.837s, table=0, n_packets=582, n_bytes=67551, priority=200,ip,in_port=1,nw_src=10.128.0.0/14 actions=move:NXM_NX_TUN_ID[0..31]->NXM_NX_REG0[],goto_table:10
 cookie=0x0, duration=815.833s, table=0, n_packets=0, n_bytes=0, priority=200,ip,in_port=1,nw_dst=10.128.0.0/14 actions=move:NXM_NX_TUN_ID[0..31]->NXM_NX_REG0[],goto_table:10
 cookie=0x0, duration=815.818s, table=0, n_packets=18, n_bytes=756, priority=200,arp,in_port=2,arp_spa=10.128.0.1,arp_tpa=10.128.0.0/14 actions=goto_table:30
 cookie=0x0, duration=815.812s, table=0, n_packets=186, n_bytes=52052, priority=200,ip,in_port=2 actions=goto_table:30
 cookie=0x0, duration=815.828s, table=0, n_packets=0, n_bytes=0, priority=150,in_port=1 actions=drop
 cookie=0x0, duration=815.800s, table=0, n_packets=8, n_bytes=648, priority=150,in_port=2 actions=drop
 cookie=0x0, duration=815.791s, table=0, n_packets=42, n_bytes=1764, priority=100,arp actions=goto_table:20
 cookie=0x0, duration=815.785s, table=0, n_packets=618, n_bytes=78343, priority=100,ip actions=goto_table:20
 cookie=0x0, duration=815.778s, table=0, n_packets=357, n_bytes=29154, priority=0 actions=drop
 cookie=0xfaa865db, duration=815.411s, table=10, n_packets=598, n_bytes=68223, priority=100,tun_src=172.17.0.4 actions=goto_table:30
 cookie=0xfb5876b8, duration=815.391s, table=10, n_packets=0, n_bytes=0, priority=100,tun_src=172.17.0.2 actions=goto_table:30
 cookie=0x0, duration=815.770s, table=10, n_packets=0, n_bytes=0, priority=0 actions=drop
 cookie=0x0, duration=39.432s, table=20, n_packets=1, n_bytes=42, priority=100,arp,in_port=65,arp_spa=10.128.0.64,arp_sha=00:00:0a:80:00:40/00:00:ff:ff:ff:ff actions=load:0xb218c3->NXM_NX_REG0[],goto_table:21
 cookie=0x0, duration=36.028s, table=20, n_packets=1, n_bytes=42, priority=100,arp,in_port=66,arp_spa=10.128.0.65,arp_sha=00:00:0a:80:00:41/00:00:ff:ff:ff:ff actions=load:0xa673dd->NXM_NX_REG0[],goto_table:21
 cookie=0x0, duration=39.428s, table=20, n_packets=0, n_bytes=0, priority=100,ip,in_port=65,nw_src=10.128.0.64 actions=load:0xb218c3->NXM_NX_REG0[],goto_table:21
 cookie=0x0, duration=36.024s, table=20, n_packets=5, n_bytes=370, priority=100,ip,in_port=66,nw_src=10.128.0.65 actions=load:0xa673dd->NXM_NX_REG0[],goto_table:21
 cookie=0x0, duration=815.762s, table=20, n_packets=0, n_bytes=0, priority=0 actions=drop
 cookie=0x0, duration=815.749s, table=21, n_packets=660, n_bytes=80107, priority=0 actions=goto_table:30
 cookie=0x0, duration=815.740s, table=30, n_packets=12, n_bytes=504, priority=300,arp,arp_tpa=10.128.0.1 actions=output:2
 cookie=0x0, duration=815.714s, table=30, n_packets=47, n_bytes=4369, priority=300,ip,nw_dst=10.128.0.1 actions=output:2
 cookie=0x0, duration=815.731s, table=30, n_packets=48, n_bytes=2016, priority=200,arp,arp_tpa=10.128.0.0/23 actions=goto_table:40
 cookie=0x0, duration=815.699s, table=30, n_packets=814, n_bytes=124181, priority=200,ip,nw_dst=10.128.0.0/23 actions=goto_table:70
 cookie=0x0, duration=815.718s, table=30, n_packets=16, n_bytes=672, priority=100,arp,arp_tpa=10.128.0.0/14 actions=goto_table:50
 cookie=0x0, duration=815.692s, table=30, n_packets=364, n_bytes=46550, priority=100,ip,nw_dst=10.128.0.0/14 actions=goto_table:90
 cookie=0x0, duration=815.705s, table=30, n_packets=146, n_bytes=21694, priority=100,ip,nw_dst=172.30.0.0/16 actions=goto_table:60
 cookie=0x0, duration=815.684s, table=30, n_packets=5, n_bytes=384, priority=50,ip,in_port=1,nw_dst=224.0.0.0/4 actions=goto_table:120
 cookie=0x0, duration=815.676s, table=30, n_packets=10, n_bytes=768, priority=25,ip,nw_dst=224.0.0.0/4 actions=goto_table:110
 cookie=0x0, duration=815.670s, table=30, n_packets=0, n_bytes=0, priority=0,ip actions=goto_table:100
 cookie=0x0, duration=815.666s, table=30, n_packets=0, n_bytes=0, priority=0,arp actions=drop
 cookie=0x0, duration=39.425s, table=40, n_packets=1, n_bytes=42, priority=100,arp,arp_tpa=10.128.0.64 actions=output:65
 cookie=0x0, duration=36.020s, table=40, n_packets=1, n_bytes=42, priority=100,arp,arp_tpa=10.128.0.65 actions=output:66
 cookie=0x0, duration=815.661s, table=40, n_packets=6, n_bytes=252, priority=0 actions=drop
 cookie=0xfaa865db, duration=815.407s, table=50, n_packets=16, n_bytes=672, priority=100,arp,arp_tpa=10.129.0.0/23 actions=move:NXM_NX_REG0[]->NXM_NX_TUN_ID[0..31],set_field:172.17.0.4->tun_dst,output:1
 cookie=0xfb5876b8, duration=815.388s, table=50, n_packets=0, n_bytes=0, priority=100,arp,arp_tpa=10.130.0.0/23 actions=move:NXM_NX_REG0[]->NXM_NX_TUN_ID[0..31],set_field:172.17.0.2->tun_dst,output:1
 cookie=0x0, duration=815.651s, table=50, n_packets=0, n_bytes=0, priority=0 actions=drop
 cookie=0x0, duration=815.643s, table=60, n_packets=5, n_bytes=419, priority=200,reg0=0 actions=output:2
 cookie=0x0, duration=815.414s, table=60, n_packets=0, n_bytes=0, priority=100,ip,nw_dst=172.30.0.1,nw_frag=later actions=load:0->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80
 cookie=0x0, duration=815.410s, table=60, n_packets=131, n_bytes=20485, priority=100,tcp,nw_dst=172.30.0.1,tp_dst=443 actions=load:0->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80
 cookie=0x0, duration=815.405s, table=60, n_packets=0, n_bytes=0, priority=100,udp,nw_dst=172.30.0.1,tp_dst=53 actions=load:0->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80
 cookie=0x0, duration=815.397s, table=60, n_packets=0, n_bytes=0, priority=100,tcp,nw_dst=172.30.0.1,tp_dst=53 actions=load:0->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80
 cookie=0x0, duration=815.632s, table=60, n_packets=4, n_bytes=346, priority=0 actions=drop
 cookie=0x0, duration=39.418s, table=70, n_packets=5, n_bytes=370, priority=100,ip,nw_dst=10.128.0.64 actions=load:0xb218c3->NXM_NX_REG1[],load:0x41->NXM_NX_REG2[],goto_table:80
 cookie=0x0, duration=36.018s, table=70, n_packets=0, n_bytes=0, priority=100,ip,nw_dst=10.128.0.65 actions=load:0xa673dd->NXM_NX_REG1[],load:0x42->NXM_NX_REG2[],goto_table:80
 cookie=0x0, duration=815.626s, table=70, n_packets=0, n_bytes=0, priority=0 actions=drop
 cookie=0x0, duration=815.622s, table=80, n_packets=48, n_bytes=4207, priority=300,ip,nw_src=10.128.0.1 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=815.550s, table=80, n_packets=197, n_bytes=52941, priority=200,reg0=0 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=815.545s, table=80, n_packets=142, n_bytes=21452, priority=200,reg1=0 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=805.966s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x815ec7,reg1=0x815ec7 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=805.865s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x64c0eb,reg1=0x64c0eb actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=585.259s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x672905,reg1=0x672905 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=585.156s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x7c1b53,reg1=0x7c1b53 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=562.482s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x99b8bd,reg1=0x99b8bd actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=562.382s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x53f6d9,reg1=0x53f6d9 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=530.639s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x3fcd53,reg1=0x3fcd53 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=524.413s, table=80, n_packets=497, n_bytes=59170, priority=100,reg0=0x74e5be,reg1=0x74e5be actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=507.780s, table=80, n_packets=8, n_bytes=939, priority=100,reg0=0x576047,reg1=0x576047 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=455.051s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0xceaaf,reg1=0xceaaf actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=454.964s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x608c4b,reg1=0x608c4b actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=432.211s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0xa6a87b,reg1=0xa6a87b actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=412.602s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x1a123d,reg1=0x1a123d actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=412.526s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0xb56ceb,reg1=0xb56ceb actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=384.794s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x50ae27,reg1=0x50ae27 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=384.698s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x6eb4eb,reg1=0x6eb4eb actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=358.982s, table=80, n_packets=14, n_bytes=1309, priority=100,reg0=0x6d89c7,reg1=0x6d89c7 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=310.510s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0xa0447d,reg1=0xa0447d actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=300.217s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0xf1bdb3,reg1=0xf1bdb3 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=300.124s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0xee6f88,reg1=0xee6f88 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=279.461s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0xd10031,reg1=0xd10031 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=230.984s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0xc2138e,reg1=0xc2138e actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=212.367s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x1ca9c5,reg1=0x1ca9c5 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=206.106s, table=80, n_packets=20, n_bytes=2096, priority=100,reg0=0x6b4fd3,reg1=0x6b4fd3 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=162.296s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0xfc881e,reg1=0xfc881e actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=156.075s, table=80, n_packets=15, n_bytes=2256, priority=100,reg0=0x17488a,reg1=0x17488a actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=136.829s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x5aa8fe,reg1=0x5aa8fe actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=130.586s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x6648c9,reg1=0x6648c9 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=124.296s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x515a0c,reg1=0x515a0c actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=118.040s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x33855f,reg1=0x33855f actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=111.824s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x7dd785,reg1=0x7dd785 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=105.606s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x29803e,reg1=0x29803e actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=105.454s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x96439b,reg1=0x96439b actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=40.787s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0xb218c3,reg1=0xb218c3 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=40.698s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0xa673dd,reg1=0xa673dd actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=815.618s, table=80, n_packets=10, n_bytes=740, priority=0 actions=drop
 cookie=0xfaa865db, duration=815.401s, table=90, n_packets=364, n_bytes=46550, priority=100,ip,nw_dst=10.129.0.0/23 actions=move:NXM_NX_REG0[]->NXM_NX_TUN_ID[0..31],set_field:172.17.0.4->tun_dst,output:1
 cookie=0xfb5876b8, duration=815.385s, table=90, n_packets=0, n_bytes=0, priority=100,ip,nw_dst=10.130.0.0/23 actions=move:NXM_NX_REG0[]->NXM_NX_TUN_ID[0..31],set_field:172.17.0.2->tun_dst,output:1
 cookie=0x0, duration=815.615s, table=90, n_packets=0, n_bytes=0, priority=0 actions=drop
 cookie=0x0, duration=815.611s, table=100, n_packets=0, n_bytes=0, priority=0 actions=goto_table:101
 cookie=0x0, duration=815.602s, table=101, n_packets=0, n_bytes=0, priority=51,tcp,nw_dst=172.17.0.3,tp_dst=53 actions=output:2
 cookie=0x0, duration=815.594s, table=101, n_packets=0, n_bytes=0, priority=51,udp,nw_dst=172.17.0.3,tp_dst=53 actions=output:2
 cookie=0x0, duration=815.587s, table=101, n_packets=0, n_bytes=0, priority=0 actions=output:2
 cookie=0x0, duration=815.580s, table=110, n_packets=0, n_bytes=0, priority=0 actions=drop
 cookie=0x0, duration=815.381s, table=111, n_packets=10, n_bytes=768, priority=100 actions=move:NXM_NX_REG0[]->NXM_NX_TUN_ID[0..31],set_field:172.17.0.2->tun_dst,output:1,set_field:172.17.0.4->tun_dst,output:1,goto_table:120
 cookie=0x0, duration=815.570s, table=120, n_packets=0, n_bytes=0, priority=0 actions=drop
 cookie=0x0, duration=815.565s, table=253, n_packets=0, n_bytes=0, actions=note:01.07.00.00.00.00


Apr  5 20:48:07.375: INFO: Running '/data/src/github.com/openshift/origin/_output/local/bin/linux/amd64/kubectl --server=https://172.17.0.2:8443 --kubeconfig=/tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig exec --namespace=e2e-tests-net-isolation2-wm7tf debugpod-sourceip-nettest-node-1f6lsg -- /bin/sh -c iptables-save'
Apr  5 20:48:07.669: INFO: stderr: ""
Apr  5 20:48:07.669: INFO: DEBUG:
# Generated by iptables-save v1.4.21 on Thu Apr  5 20:48:07 2018
*nat
:PREROUTING ACCEPT [4:240]
:INPUT ACCEPT [4:240]
:OUTPUT ACCEPT [18:1128]
:POSTROUTING ACCEPT [18:1128]
:DOCKER - [0:0]
:KUBE-HOSTPORTS - [0:0]
:KUBE-MARK-DROP - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-NODEPORT-CONTAINER - [0:0]
:KUBE-NODEPORT-HOST - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-PORTALS-CONTAINER - [0:0]
:KUBE-PORTALS-HOST - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-SEP-CKTKXEMIKRIIY55M - [0:0]
:KUBE-SEP-EZ5ESXJRZ36JV4D4 - [0:0]
:KUBE-SEP-PATXOTJBHFPU4CNS - [0:0]
:KUBE-SERVICES - [0:0]
:KUBE-SVC-3VQ6B3MLH7E2SZT4 - [0:0]
:KUBE-SVC-BA6I5HTZKAAAJT56 - [0:0]
:KUBE-SVC-NPX46M4PTMTKRN6Y - [0:0]
:OPENSHIFT-MASQUERADE - [0:0]
-A PREROUTING -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A PREROUTING -m comment --comment "handle ClusterIPs; NOTE: this must be before the NodePort rules" -j KUBE-PORTALS-CONTAINER
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
-A PREROUTING -m addrtype --dst-type LOCAL -m comment --comment "handle service NodePorts; NOTE: this must be the last rule in the chain" -j KUBE-NODEPORT-CONTAINER
-A PREROUTING -m comment --comment "kube hostport portals" -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS
-A OUTPUT -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A OUTPUT -m comment --comment "handle ClusterIPs; NOTE: this must be before the NodePort rules" -j KUBE-PORTALS-HOST
-A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER
-A OUTPUT -m addrtype --dst-type LOCAL -m comment --comment "handle service NodePorts; NOTE: this must be the last rule in the chain" -j KUBE-NODEPORT-HOST
-A OUTPUT -m comment --comment "kube hostport portals" -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS
-A POSTROUTING -m comment --comment "rules for masquerading OpenShift traffic" -j OPENSHIFT-MASQUERADE
-A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING
-A POSTROUTING -s 172.19.0.0/16 ! -o docker0 -j MASQUERADE
-A POSTROUTING -s 127.0.0.0/8 -o tun0 -m comment --comment "SNAT for localhost access to hostports" -j MASQUERADE
-A DOCKER -i docker0 -j RETURN
-A KUBE-MARK-DROP -j MARK --set-xmark 0x8000/0x8000
-A KUBE-MARK-MASQ -j MARK --set-xmark 0x1/0x1
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -m mark --mark 0x1/0x1 -j MASQUERADE
-A KUBE-SEP-CKTKXEMIKRIIY55M -s 172.17.0.2/32 -m comment --comment "default/kubernetes:dns" -j KUBE-MARK-MASQ
-A KUBE-SEP-CKTKXEMIKRIIY55M -p udp -m comment --comment "default/kubernetes:dns" -m recent --set --name KUBE-SEP-CKTKXEMIKRIIY55M --mask 255.255.255.255 --rsource -m udp -j DNAT --to-destination 172.17.0.2:8053
-A KUBE-SEP-EZ5ESXJRZ36JV4D4 -s 172.17.0.2/32 -m comment --comment "default/kubernetes:dns-tcp" -j KUBE-MARK-MASQ
-A KUBE-SEP-EZ5ESXJRZ36JV4D4 -p tcp -m comment --comment "default/kubernetes:dns-tcp" -m recent --set --name KUBE-SEP-EZ5ESXJRZ36JV4D4 --mask 255.255.255.255 --rsource -m tcp -j DNAT --to-destination 172.17.0.2:8053
-A KUBE-SEP-PATXOTJBHFPU4CNS -s 172.17.0.2/32 -m comment --comment "default/kubernetes:https" -j KUBE-MARK-MASQ
-A KUBE-SEP-PATXOTJBHFPU4CNS -p tcp -m comment --comment "default/kubernetes:https" -m recent --set --name KUBE-SEP-PATXOTJBHFPU4CNS --mask 255.255.255.255 --rsource -m tcp -j DNAT --to-destination 172.17.0.2:8443
-A KUBE-SERVICES -d 172.30.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-SVC-NPX46M4PTMTKRN6Y
-A KUBE-SERVICES -d 172.30.0.1/32 -p udp -m comment --comment "default/kubernetes:dns cluster IP" -m udp --dport 53 -j KUBE-SVC-3VQ6B3MLH7E2SZT4
-A KUBE-SERVICES -d 172.30.0.1/32 -p tcp -m comment --comment "default/kubernetes:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-SVC-BA6I5HTZKAAAJT56
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
-A KUBE-SVC-3VQ6B3MLH7E2SZT4 -m comment --comment "default/kubernetes:dns" -m recent --rcheck --seconds 10800 --reap --name KUBE-SEP-CKTKXEMIKRIIY55M --mask 255.255.255.255 --rsource -j KUBE-SEP-CKTKXEMIKRIIY55M
-A KUBE-SVC-3VQ6B3MLH7E2SZT4 -m comment --comment "default/kubernetes:dns" -j KUBE-SEP-CKTKXEMIKRIIY55M
-A KUBE-SVC-BA6I5HTZKAAAJT56 -m comment --comment "default/kubernetes:dns-tcp" -m recent --rcheck --seconds 10800 --reap --name KUBE-SEP-EZ5ESXJRZ36JV4D4 --mask 255.255.255.255 --rsource -j KUBE-SEP-EZ5ESXJRZ36JV4D4
-A KUBE-SVC-BA6I5HTZKAAAJT56 -m comment --comment "default/kubernetes:dns-tcp" -j KUBE-SEP-EZ5ESXJRZ36JV4D4
-A KUBE-SVC-NPX46M4PTMTKRN6Y -m comment --comment "default/kubernetes:https" -m recent --rcheck --seconds 10800 --reap --name KUBE-SEP-PATXOTJBHFPU4CNS --mask 255.255.255.255 --rsource -j KUBE-SEP-PATXOTJBHFPU4CNS
-A KUBE-SVC-NPX46M4PTMTKRN6Y -m comment --comment "default/kubernetes:https" -j KUBE-SEP-PATXOTJBHFPU4CNS
-A OPENSHIFT-MASQUERADE -s 10.128.0.0/14 -m comment --comment "masquerade pod-to-service and pod-to-external traffic" -j MASQUERADE
COMMIT
# Completed on Thu Apr  5 20:48:07 2018
# Generated by iptables-save v1.4.21 on Thu Apr  5 20:48:07 2018
*filter
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [280:54480]
:DOCKER - [0:0]
:DOCKER-ISOLATION - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FIREWALL - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORT-NON-LOCAL - [0:0]
:KUBE-SERVICES - [0:0]
:OPENSHIFT-ADMIN-OUTPUT-RULES - [0:0]
:OPENSHIFT-FIREWALL-ALLOW - [0:0]
:OPENSHIFT-FIREWALL-FORWARD - [0:0]
-A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES
-A INPUT -m comment --comment "Ensure that non-local NodePort traffic can flow" -j KUBE-NODEPORT-NON-LOCAL
-A INPUT -m comment --comment "firewall overrides" -j OPENSHIFT-FIREWALL-ALLOW
-A INPUT -j KUBE-FIREWALL
-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
-A INPUT -p icmp -j ACCEPT
-A INPUT -i lo -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 10250 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 22 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 80 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 1936 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 443 -j ACCEPT
-A INPUT -j REJECT --reject-with icmp-host-prohibited
-A FORWARD -m comment --comment "kubernetes forwarding rules" -j KUBE-FORWARD
-A FORWARD -i tun0 ! -o tun0 -m comment --comment "administrator overrides" -j OPENSHIFT-ADMIN-OUTPUT-RULES
-A FORWARD -m comment --comment "firewall overrides" -j OPENSHIFT-FIREWALL-FORWARD
-A FORWARD -j DOCKER-ISOLATION
-A FORWARD -o docker0 -j DOCKER
-A FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -i docker0 ! -o docker0 -j ACCEPT
-A FORWARD -i docker0 -o docker0 -j ACCEPT
-A FORWARD -j REJECT --reject-with icmp-host-prohibited
-A OUTPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A OUTPUT -j KUBE-FIREWALL
-A DOCKER-ISOLATION -j RETURN
-A KUBE-FIREWALL -m comment --comment "kubernetes firewall for dropping marked packets" -m mark --mark 0x8000/0x8000 -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x1/0x1 -j ACCEPT
-A OPENSHIFT-FIREWALL-ALLOW -p udp -m udp --dport 4789 -m comment --comment "VXLAN incoming" -j ACCEPT
-A OPENSHIFT-FIREWALL-ALLOW -i tun0 -m comment --comment "from SDN to localhost" -j ACCEPT
-A OPENSHIFT-FIREWALL-ALLOW -i docker0 -m comment --comment "from docker to localhost" -j ACCEPT
-A OPENSHIFT-FIREWALL-FORWARD -s 10.128.0.0/14 -m comment --comment "attempted resend after connection close" -m conntrack --ctstate INVALID -j DROP
-A OPENSHIFT-FIREWALL-FORWARD -d 10.128.0.0/14 -m comment --comment "forward traffic from SDN" -j ACCEPT
-A OPENSHIFT-FIREWALL-FORWARD -s 10.128.0.0/14 -m comment --comment "forward traffic to SDN" -j ACCEPT
COMMIT
# Completed on Thu Apr  5 20:48:07 2018


Apr  5 20:48:07.669: INFO: Running '/data/src/github.com/openshift/origin/_output/local/bin/linux/amd64/kubectl --server=https://172.17.0.2:8443 --kubeconfig=/tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig exec --namespace=e2e-tests-net-isolation2-wm7tf debugpod-sourceip-nettest-node-1f6lsg -- /bin/sh -c ss -ant'
Apr  5 20:48:07.960: INFO: stderr: ""
Apr  5 20:48:07.960: INFO: DEBUG:
State      Recv-Q Send-Q Local Address:Port               Peer Address:Port              
LISTEN     0      128          *:22                       *:*                  
ESTAB      0      0      172.17.0.3:51576              172.17.0.2:8443               
LISTEN     0      128         :::10256                   :::*                  
LISTEN     0      128         :::22                      :::*                  
LISTEN     0      128         :::10250                   :::*                  
ESTAB      0      0        ::ffff:172.17.0.3:10250                ::ffff:172.17.0.2:52316              
ESTAB      0      29       ::ffff:172.17.0.3:10250                ::ffff:172.17.0.2:52748              


Apr  5 20:48:07.965: INFO: Cleaning up the exec pod
[AfterEach] when using a plugin that isolates namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:135
Apr  5 20:48:07.975: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-net-isolation1-6mdnb" for this suite.
Apr  5 20:48:19.986: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
Apr  5 20:48:20.057: INFO: namespace: e2e-tests-net-isolation1-6mdnb, resource: bindings, ignored listing per whitelist
Apr  5 20:48:20.089: INFO: namespace e2e-tests-net-isolation1-6mdnb deletion completed in 12.111603652s
[AfterEach] when using a plugin that isolates namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:135
Apr  5 20:48:20.089: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-net-isolation2-wm7tf" for this suite.
Apr  5 20:48:26.097: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
Apr  5 20:48:26.162: INFO: namespace: e2e-tests-net-isolation2-wm7tf, resource: bindings, ignored listing per whitelist
Apr  5 20:48:26.200: INFO: namespace e2e-tests-net-isolation2-wm7tf deletion completed in 6.109550706s

• [SLOW TEST:59.648 seconds]
[Area:Networking] network isolation
/tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/isolation.go:10
  when using a plugin that isolates namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/util.go:415
    should prevent communication between pods in different namespaces on the same node [Suite:openshift/conformance/parallel]
    /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/isolation.go:28
------------------------------
SSSS
------------------------------
[sig-network] Networking Granular Checks: Pods 
  should function for intra-pod communication: udp  [Conformance] [Suite:openshift/conformance/parallel] [Suite:k8s]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:648
[BeforeEach] [Top Level]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/util/test.go:53
[BeforeEach] [sig-network] Networking
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:134
STEP: Creating a kubernetes client
Apr  5 20:48:26.200: INFO: >>> kubeConfig: /tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig
STEP: Building a namespace api object
Apr  5 20:48:26.266: INFO: About to run a Kube e2e test, ensuring namespace is privileged
STEP: Waiting for a default service account to be provisioned in namespace
[It] should function for intra-pod communication: udp  [Conformance] [Suite:openshift/conformance/parallel] [Suite:k8s]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:648
STEP: Performing setup for networking test in namespace e2e-tests-pod-network-test-cj6w8
STEP: creating a selector
STEP: Creating the service pods in kubernetes
Apr  5 20:48:26.317: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable
STEP: Creating test pods
Apr  5 20:48:46.365: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.128.0.67:8080/dial?request=hostName&protocol=udp&host=10.129.0.16&port=8081&tries=1'] Namespace:e2e-tests-pod-network-test-cj6w8 PodName:host-test-container-pod ContainerName:hostexec Stdin:<nil> CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
Apr  5 20:48:46.365: INFO: >>> kubeConfig: /tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig
Apr  5 20:48:46.459: INFO: Waiting for endpoints: map[]
Apr  5 20:48:46.461: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.128.0.67:8080/dial?request=hostName&protocol=udp&host=10.128.0.66&port=8081&tries=1'] Namespace:e2e-tests-pod-network-test-cj6w8 PodName:host-test-container-pod ContainerName:hostexec Stdin:<nil> CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
Apr  5 20:48:46.461: INFO: >>> kubeConfig: /tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig
Apr  5 20:48:46.544: INFO: Waiting for endpoints: map[]
[AfterEach] [sig-network] Networking
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:135
Apr  5 20:48:46.545: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-pod-network-test-cj6w8" for this suite.
Apr  5 20:49:08.556: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
Apr  5 20:49:08.631: INFO: namespace: e2e-tests-pod-network-test-cj6w8, resource: bindings, ignored listing per whitelist
Apr  5 20:49:08.658: INFO: namespace e2e-tests-pod-network-test-cj6w8 deletion completed in 22.111298999s

• [SLOW TEST:42.458 seconds]
[sig-network] Networking
/tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/common/networking.go:25
  Granular Checks: Pods
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/common/networking.go:28
    should function for intra-pod communication: udp  [Conformance] [Suite:openshift/conformance/parallel] [Suite:k8s]
    /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:648
------------------------------
S
------------------------------
NetworkPolicy when using a plugin that implements NetworkPolicy 
  should enforce multiple, stacked policies with overlapping podSelectors [Feature:OSNetworkPolicy] [Suite:openshift/conformance/parallel]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/networkpolicy.go:177
[BeforeEach] [Top Level]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/util/test.go:53
[BeforeEach] when using a plugin that implements NetworkPolicy
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/util.go:428
Apr  5 20:49:08.659: INFO: This plugin does not implement NetworkPolicy.
[AfterEach] when using a plugin that implements NetworkPolicy
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:135
Apr  5 20:49:08.659: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready

S [SKIPPING] in Spec Setup (BeforeEach) [0.001 seconds]
NetworkPolicy
/tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/networkpolicy.go:48
  when using a plugin that implements NetworkPolicy
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/util.go:427
    should enforce multiple, stacked policies with overlapping podSelectors [Feature:OSNetworkPolicy] [Suite:openshift/conformance/parallel] [BeforeEach]
    /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/networkpolicy.go:177

    Apr  5 20:49:08.659: This plugin does not implement NetworkPolicy.

    /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/util.go:289
------------------------------
S
------------------------------
[Conformance][Area:Networking][Feature:Router] The HAProxy router 
  should set Forwarded headers appropriately [Suite:openshift/conformance/parallel]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/router/headers.go:42
[BeforeEach] [Top Level]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/util/test.go:53
[BeforeEach] [Conformance][Area:Networking][Feature:Router]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:134
STEP: Creating a kubernetes client
Apr  5 20:49:08.660: INFO: >>> kubeConfig: /tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig
STEP: Building a namespace api object
Apr  5 20:49:08.679: INFO: configPath is now "/tmp/extended-test-router-headers-rhvpt-d2rjn-user.kubeconfig"
Apr  5 20:49:08.679: INFO: The user is now "extended-test-router-headers-rhvpt-d2rjn-user"
Apr  5 20:49:08.679: INFO: Creating project "extended-test-router-headers-rhvpt-d2rjn"
Apr  5 20:49:08.729: INFO: Waiting on permissions in project "extended-test-router-headers-rhvpt-d2rjn" ...
STEP: Waiting for a default service account to be provisioned in namespace
[BeforeEach] [Conformance][Area:Networking][Feature:Router]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/router/headers.go:30
[AfterEach] [Conformance][Area:Networking][Feature:Router]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:135
Apr  5 20:49:08.754: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "extended-test-router-headers-rhvpt-d2rjn" for this suite.
Apr  5 20:49:14.783: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
Apr  5 20:49:14.865: INFO: namespace: extended-test-router-headers-rhvpt-d2rjn, resource: bindings, ignored listing per whitelist
Apr  5 20:49:14.894: INFO: namespace extended-test-router-headers-rhvpt-d2rjn deletion completed in 6.136991378s

S [SKIPPING] in Spec Setup (BeforeEach) [6.234 seconds]
[Conformance][Area:Networking][Feature:Router]
/tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/router/headers.go:21
  The HAProxy router [BeforeEach]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/router/headers.go:41
    should set Forwarded headers appropriately [Suite:openshift/conformance/parallel]
    /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/router/headers.go:42

    no router installed on the cluster

    /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/router/headers.go:33
------------------------------
SSSSSSSSSSSSSS
------------------------------
[Area:Networking] multicast when using one of the plugins 'redhat/openshift-ovs-subnet' 
  should block multicast traffic [Suite:openshift/conformance/parallel]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/multicast.go:31
[BeforeEach] [Top Level]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/util/test.go:53
[BeforeEach] when using one of the plugins 'redhat/openshift-ovs-subnet'
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/util.go:441
Apr  5 20:49:14.894: INFO: Not using one of the specified plugins
[AfterEach] when using one of the plugins 'redhat/openshift-ovs-subnet'
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:135
Apr  5 20:49:14.895: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready

S [SKIPPING] in Spec Setup (BeforeEach) [0.001 seconds]
[Area:Networking] multicast
/tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/multicast.go:21
  when using one of the plugins 'redhat/openshift-ovs-subnet'
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/util.go:439
    should block multicast traffic [Suite:openshift/conformance/parallel] [BeforeEach]
    /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/multicast.go:31

    Apr  5 20:49:14.894: Not using one of the specified plugins

    /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/util.go:289
------------------------------
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
NetworkPolicy when using a plugin that implements NetworkPolicy 
  should enforce policy based on PodSelector [Feature:OSNetworkPolicy] [Suite:openshift/conformance/parallel]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/networkpolicy.go:86
[BeforeEach] [Top Level]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/util/test.go:53
[BeforeEach] when using a plugin that implements NetworkPolicy
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/util.go:428
Apr  5 20:49:14.896: INFO: This plugin does not implement NetworkPolicy.
[AfterEach] when using a plugin that implements NetworkPolicy
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:135
Apr  5 20:49:14.896: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready

S [SKIPPING] in Spec Setup (BeforeEach) [0.001 seconds]
NetworkPolicy
/tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/networkpolicy.go:48
  when using a plugin that implements NetworkPolicy
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/util.go:427
    should enforce policy based on PodSelector [Feature:OSNetworkPolicy] [Suite:openshift/conformance/parallel] [BeforeEach]
    /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/networkpolicy.go:86

    Apr  5 20:49:14.896: This plugin does not implement NetworkPolicy.

    /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/util.go:289
------------------------------
SSSSSSSSSSSSSSSSSSS
------------------------------
[Area:Networking] services when using a plugin that does not isolate namespaces by default 
  should allow connections to pods in different namespaces on the same node via service IPs [Suite:openshift/conformance/parallel]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/services.go:27
[BeforeEach] [Top Level]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/util/test.go:53
[BeforeEach] when using a plugin that does not isolate namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/util.go:404
Apr  5 20:49:14.897: INFO: This plugin isolates namespaces by default.
[AfterEach] when using a plugin that does not isolate namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:135
Apr  5 20:49:14.897: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
[AfterEach] when using a plugin that does not isolate namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:135
Apr  5 20:49:14.898: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready

S [SKIPPING] in Spec Setup (BeforeEach) [0.001 seconds]
[Area:Networking] services
/tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/services.go:10
  when using a plugin that does not isolate namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/util.go:403
    should allow connections to pods in different namespaces on the same node via service IPs [Suite:openshift/conformance/parallel] [BeforeEach]
    /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/services.go:27

    Apr  5 20:49:14.897: This plugin isolates namespaces by default.

    /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/util.go:289
------------------------------
SSSSSSSSS
------------------------------
[Area:Networking] multicast when using one of the plugins 'redhat/openshift-ovs-multitenant, redhat/openshift-ovs-networkpolicy' 
  should block multicast traffic in namespaces where it is disabled [Suite:openshift/conformance/parallel]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/multicast.go:42
[BeforeEach] [Top Level]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/util/test.go:53
[BeforeEach] when using one of the plugins 'redhat/openshift-ovs-multitenant, redhat/openshift-ovs-networkpolicy'
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/util.go:441
[BeforeEach] when using one of the plugins 'redhat/openshift-ovs-multitenant, redhat/openshift-ovs-networkpolicy'
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:134
STEP: Creating a kubernetes client
Apr  5 20:49:14.898: INFO: >>> kubeConfig: /tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig
STEP: Building a namespace api object
Apr  5 20:49:14.917: INFO: configPath is now "/tmp/extended-test-multicast-bfjdr-jdcf2-user.kubeconfig"
Apr  5 20:49:14.917: INFO: The user is now "extended-test-multicast-bfjdr-jdcf2-user"
Apr  5 20:49:14.917: INFO: Creating project "extended-test-multicast-bfjdr-jdcf2"
Apr  5 20:49:14.946: INFO: Waiting on permissions in project "extended-test-multicast-bfjdr-jdcf2" ...
STEP: Waiting for a default service account to be provisioned in namespace
[It] should block multicast traffic in namespaces where it is disabled [Suite:openshift/conformance/parallel]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/multicast.go:42
Apr  5 20:49:15.001: INFO: Using nettest-node-1 and nettest-node-2 for test ([nettest-node-1 nettest-node-2] out of [nettest-node-1 nettest-node-2])
Apr  5 20:49:15.014: INFO: Waiting up to 5m0s for pod multicast-0 status to be running
Apr  5 20:49:15.019: INFO: Waiting for pod multicast-0 in namespace 'extended-test-multicast-bfjdr-jdcf2' status to be 'running'(found phase: "Pending", readiness: false) (4.863674ms elapsed)
Apr  5 20:49:20.026: INFO: Waiting up to 5m0s for pod multicast-1 status to be running
Apr  5 20:49:20.028: INFO: Waiting for pod multicast-1 in namespace 'extended-test-multicast-bfjdr-jdcf2' status to be 'running'(found phase: "Pending", readiness: false) (2.187374ms elapsed)
Apr  5 20:49:25.036: INFO: Waiting up to 5m0s for pod multicast-2 status to be running
Apr  5 20:49:25.038: INFO: Waiting for pod multicast-2 in namespace 'extended-test-multicast-bfjdr-jdcf2' status to be 'running'(found phase: "Pending", readiness: false) (2.31206ms elapsed)
Apr  5 20:49:30.041: INFO: Running 'oc exec --config=/tmp/extended-test-multicast-bfjdr-jdcf2-user.kubeconfig --namespace=extended-test-multicast-bfjdr-jdcf2 multicast-0 -- omping -c 1 -T 60 -q -q 10.128.0.68 10.128.0.69 10.129.0.17'
Apr  5 20:49:30.041: INFO: Running 'oc exec --config=/tmp/extended-test-multicast-bfjdr-jdcf2-user.kubeconfig --namespace=extended-test-multicast-bfjdr-jdcf2 multicast-1 -- omping -c 1 -T 60 -q -q 10.128.0.68 10.128.0.69 10.129.0.17'
Apr  5 20:49:30.041: INFO: Running 'oc exec --config=/tmp/extended-test-multicast-bfjdr-jdcf2-user.kubeconfig --namespace=extended-test-multicast-bfjdr-jdcf2 multicast-2 -- omping -c 1 -T 60 -q -q 10.128.0.68 10.128.0.69 10.129.0.17'
[AfterEach] when using one of the plugins 'redhat/openshift-ovs-multitenant, redhat/openshift-ovs-networkpolicy'
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:135
Apr  5 20:49:36.448: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "extended-test-multicast-bfjdr-jdcf2" for this suite.
Apr  5 20:49:42.460: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
Apr  5 20:49:42.520: INFO: namespace: extended-test-multicast-bfjdr-jdcf2, resource: bindings, ignored listing per whitelist
Apr  5 20:49:42.569: INFO: namespace extended-test-multicast-bfjdr-jdcf2 deletion completed in 6.118772988s

• [SLOW TEST:27.671 seconds]
[Area:Networking] multicast
/tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/multicast.go:21
  when using one of the plugins 'redhat/openshift-ovs-multitenant, redhat/openshift-ovs-networkpolicy'
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/util.go:439
    should block multicast traffic in namespaces where it is disabled [Suite:openshift/conformance/parallel]
    /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/multicast.go:42
------------------------------
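The spec above passes when multicast stays blocked: omping is run concurrently in all three pods and none of them should receive multicast replies from its peers. Purely as an illustration (this is not the suite's code), here is a minimal Go sketch that reruns those same `oc exec ... omping` probes and prints the multicast summary lines. The kubeconfig path, namespace, pod names, and IPs are copied from the log above and assume the cluster is still running; the "multicast, xmt/rcv/%loss" summary line is an assumption about omping's output format.

```go
// Hedged sketch (not part of the suite): rerun the omping probes from the log
// above concurrently and print each pod's multicast summary lines.
package main

import (
	"fmt"
	"os/exec"
	"strings"
	"sync"
)

func main() {
	// Values copied from the log above; adjust for your own cluster.
	kubeconfig := "/tmp/extended-test-multicast-bfjdr-jdcf2-user.kubeconfig"
	ns := "extended-test-multicast-bfjdr-jdcf2"
	pods := []string{"multicast-0", "multicast-1", "multicast-2"}
	ips := []string{"10.128.0.68", "10.128.0.69", "10.129.0.17"}

	var wg sync.WaitGroup
	outputs := make([]string, len(pods))
	for i, pod := range pods {
		wg.Add(1)
		go func(i int, pod string) {
			defer wg.Done()
			args := []string{"exec", "--config=" + kubeconfig, "--namespace=" + ns,
				pod, "--", "omping", "-c", "1", "-T", "60", "-q", "-q"}
			args = append(args, ips...)
			out, _ := exec.Command("oc", args...).CombinedOutput()
			outputs[i] = string(out)
		}(i, pod)
	}
	wg.Wait()

	// Assumed omping summary format: one "multicast, xmt/rcv/%loss" line per
	// peer. For the "multicast disabled" spec the expectation is 0 received
	// (100% loss) on every one of them.
	for i, out := range outputs {
		for _, line := range strings.Split(out, "\n") {
			if strings.Contains(line, "multicast") {
				fmt.Printf("%s: %s\n", pods[i], strings.TrimSpace(line))
			}
		}
	}
}
```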
SSSS
------------------------------
[Area:Networking] network isolation when using a plugin that isolates namespaces by default 
  should allow communication from non-default to default namespace on a different node [Suite:openshift/conformance/parallel]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/isolation.go:53
[BeforeEach] [Top Level]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/util/test.go:53
[BeforeEach] when using a plugin that isolates namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/util.go:416
[BeforeEach] when using a plugin that isolates namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:134
STEP: Creating a kubernetes client
Apr  5 20:49:42.569: INFO: >>> kubeConfig: /tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig
STEP: Building a namespace api object
STEP: Waiting for a default service account to be provisioned in namespace
[BeforeEach] when using a plugin that isolates namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:134
STEP: Creating a kubernetes client
Apr  5 20:49:42.665: INFO: >>> kubeConfig: /tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig
STEP: Building a namespace api object
STEP: Waiting for a default service account to be provisioned in namespace
[It] should allow communication from non-default to default namespace on a different node [Suite:openshift/conformance/parallel]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/isolation.go:53
Apr  5 20:49:42.800: INFO: Using nettest-node-1 and nettest-node-2 for test ([nettest-node-1 nettest-node-2] out of [nettest-node-1 nettest-node-2])
Apr  5 20:49:46.821: INFO: Target pod IP:port is 10.128.0.70:8080
Apr  5 20:49:46.821: INFO: Creating an exec pod on node nettest-node-2
Apr  5 20:49:46.821: INFO: Creating new exec pod
Apr  5 20:49:50.832: INFO: Waiting up to 10s to wget 10.128.0.70:8080
Apr  5 20:49:50.832: INFO: Running '/data/src/github.com/openshift/origin/_output/local/bin/linux/amd64/kubectl --server=https://172.17.0.2:8443 --kubeconfig=/tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig exec --namespace=e2e-tests-net-isolation2-g72b7 execpod-sourceip-nettest-node-26nh8l -- /bin/sh -c wget -T 30 -qO- 10.128.0.70:8080'
Apr  5 20:49:51.127: INFO: stderr: ""
Apr  5 20:49:51.127: INFO: Cleaning up the exec pod
[AfterEach] when using a plugin that isolates namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:135
Apr  5 20:49:51.136: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-net-isolation1-r8n4k" for this suite.
Apr  5 20:49:57.148: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
Apr  5 20:49:57.193: INFO: namespace: e2e-tests-net-isolation1-r8n4k, resource: bindings, ignored listing per whitelist
Apr  5 20:49:57.252: INFO: namespace e2e-tests-net-isolation1-r8n4k deletion completed in 6.112024968s
[AfterEach] when using a plugin that isolates namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:135
Apr  5 20:49:57.252: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-net-isolation2-g72b7" for this suite.
Apr  5 20:50:03.261: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
Apr  5 20:50:03.305: INFO: namespace: e2e-tests-net-isolation2-g72b7, resource: bindings, ignored listing per whitelist
Apr  5 20:50:03.370: INFO: namespace e2e-tests-net-isolation2-g72b7 deletion completed in 6.116097126s

• [SLOW TEST:20.801 seconds]
[Area:Networking] network isolation
/tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/isolation.go:10
  when using a plugin that isolates namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/util.go:415
    should allow communication from non-default to default namespace on a different node [Suite:openshift/conformance/parallel]
    /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/isolation.go:53
------------------------------
SSSSS
------------------------------
[Area:Networking] services when using a plugin that isolates namespaces by default 
  should allow connections to services in the default namespace from a pod in another namespace on a different node [Suite:openshift/conformance/parallel]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/services.go:52
[BeforeEach] [Top Level]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/util/test.go:53
[BeforeEach] when using a plugin that isolates namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/util.go:416
[BeforeEach] when using a plugin that isolates namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:134
STEP: Creating a kubernetes client
Apr  5 20:50:03.370: INFO: >>> kubeConfig: /tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig
STEP: Building a namespace api object
STEP: Waiting for a default service account to be provisioned in namespace
[BeforeEach] when using a plugin that isolates namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:134
STEP: Creating a kubernetes client
Apr  5 20:50:03.460: INFO: >>> kubeConfig: /tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig
STEP: Building a namespace api object
STEP: Waiting for a default service account to be provisioned in namespace
[It] should allow connections to services in the default namespace from a pod in another namespace on a different node [Suite:openshift/conformance/parallel]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/services.go:52
Apr  5 20:50:03.585: INFO: Using nettest-node-1 and nettest-node-2 for test ([nettest-node-1 nettest-node-2] out of [nettest-node-1 nettest-node-2])
Apr  5 20:50:07.603: INFO: Target pod IP:port is 10.128.0.71:8080
Apr  5 20:50:07.617: INFO: Endpoint e2e-tests-net-services1-phn65/service-st94m is not ready yet
Apr  5 20:50:12.620: INFO: Target service IP:port is 172.30.154.5:8080
Apr  5 20:50:12.620: INFO: Creating an exec pod on node nettest-node-2
Apr  5 20:50:12.620: INFO: Creating new exec pod
Apr  5 20:50:16.633: INFO: Waiting up to 10s to wget 172.30.154.5:8080
Apr  5 20:50:16.633: INFO: Running '/data/src/github.com/openshift/origin/_output/local/bin/linux/amd64/kubectl --server=https://172.17.0.2:8443 --kubeconfig=/tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig exec --namespace=e2e-tests-net-services2-8l9kk execpod-sourceip-nettest-node-2sc6bc -- /bin/sh -c wget -T 30 -qO- 172.30.154.5:8080'
Apr  5 20:50:16.934: INFO: stderr: ""
Apr  5 20:50:16.934: INFO: Cleaning up the exec pod
[AfterEach] when using a plugin that isolates namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:135
Apr  5 20:50:16.961: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-net-services1-phn65" for this suite.
Apr  5 20:50:22.981: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
Apr  5 20:50:23.032: INFO: namespace: e2e-tests-net-services1-phn65, resource: bindings, ignored listing per whitelist
Apr  5 20:50:23.083: INFO: namespace e2e-tests-net-services1-phn65 deletion completed in 6.11722691s
[AfterEach] when using a plugin that isolates namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:135
Apr  5 20:50:23.083: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-net-services2-8l9kk" for this suite.
Apr  5 20:50:29.092: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
Apr  5 20:50:29.144: INFO: namespace: e2e-tests-net-services2-8l9kk, resource: bindings, ignored listing per whitelist
Apr  5 20:50:29.195: INFO: namespace e2e-tests-net-services2-8l9kk deletion completed in 6.109795311s

• [SLOW TEST:25.825 seconds]
[Area:Networking] services
/tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/services.go:10
  when using a plugin that isolates namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/util.go:415
    should allow connections to services in the default namespace from a pod in another namespace on a different node [Suite:openshift/conformance/parallel]
    /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/services.go:52
------------------------------
SSSSSSSSSSS
------------------------------
NetworkPolicy when using a plugin that implements NetworkPolicy 
  should enforce policy based on Ports [Feature:OSNetworkPolicy] [Suite:openshift/conformance/parallel]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/networkpolicy.go:132
[BeforeEach] [Top Level]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/util/test.go:53
[BeforeEach] when using a plugin that implements NetworkPolicy
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/util.go:428
Apr  5 20:50:29.195: INFO: This plugin does not implement NetworkPolicy.
[AfterEach] when using a plugin that implements NetworkPolicy
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:135
Apr  5 20:50:29.196: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready

S [SKIPPING] in Spec Setup (BeforeEach) [0.001 seconds]
NetworkPolicy
/tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/networkpolicy.go:48
  when using a plugin that implements NetworkPolicy
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/util.go:427
    should enforce policy based on Ports [Feature:OSNetworkPolicy] [Suite:openshift/conformance/parallel] [BeforeEach]
    /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/networkpolicy.go:132

    Apr  5 20:50:29.195: This plugin does not implement NetworkPolicy.

    /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/util.go:289
------------------------------
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[Area:Networking] network isolation when using a plugin that isolates namespaces by default 
  should prevent communication between pods in different namespaces on different nodes [Suite:openshift/conformance/parallel]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/isolation.go:32
[BeforeEach] [Top Level]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/util/test.go:53
[BeforeEach] when using a plugin that isolates namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/util.go:416
[BeforeEach] when using a plugin that isolates namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:134
STEP: Creating a kubernetes client
Apr  5 20:50:29.197: INFO: >>> kubeConfig: /tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig
STEP: Building a namespace api object
STEP: Waiting for a default service account to be provisioned in namespace
[BeforeEach] when using a plugin that isolates namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:134
STEP: Creating a kubernetes client
Apr  5 20:50:29.265: INFO: >>> kubeConfig: /tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig
STEP: Building a namespace api object
STEP: Waiting for a default service account to be provisioned in namespace
[It] should prevent communication between pods in different namespaces on different nodes [Suite:openshift/conformance/parallel]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/isolation.go:32
Apr  5 20:50:29.401: INFO: Using nettest-node-1 and nettest-node-2 for test ([nettest-node-1 nettest-node-2] out of [nettest-node-1 nettest-node-2])
Apr  5 20:50:33.414: INFO: Target pod IP:port is 10.128.0.72:8080
Apr  5 20:50:33.414: INFO: Creating an exec pod on node nettest-node-2
Apr  5 20:50:33.414: INFO: Creating new exec pod
Apr  5 20:50:39.427: INFO: Waiting up to 10s to wget 10.128.0.72:8080
Apr  5 20:50:39.427: INFO: Running '/data/src/github.com/openshift/origin/_output/local/bin/linux/amd64/kubectl --server=https://172.17.0.2:8443 --kubeconfig=/tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig exec --namespace=e2e-tests-net-isolation2-jv47p execpod-sourceip-nettest-node-2lrsdj -- /bin/sh -c wget -T 30 -qO- 10.128.0.72:8080'
Apr  5 20:51:09.707: INFO: rc: 127
Apr  5 20:51:09.708: INFO: got err: error running &{/data/src/github.com/openshift/origin/_output/local/bin/linux/amd64/kubectl [kubectl --server=https://172.17.0.2:8443 --kubeconfig=/tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig exec --namespace=e2e-tests-net-isolation2-jv47p execpod-sourceip-nettest-node-2lrsdj -- /bin/sh -c wget -T 30 -qO- 10.128.0.72:8080] []  <nil>  wget: download timed out
command terminated with exit code 1
 [] <nil> 0xc4213e0180 exit status 1 <nil> <nil> true [0xc420df2bb8 0xc420df2bf0 0xc420df2c18] [0xc420df2bb8 0xc420df2bf0 0xc420df2c18] [0xc420df2bd0 0xc420df2c10] [0x989690 0x989690] 0xc421971560 <nil>}:
Command stdout:

stderr:
wget: download timed out
command terminated with exit code 1

error:
exit status 1
, retry until timeout
Apr  5 20:51:09.708: INFO: Creating new exec pod
Apr  5 20:51:11.721: INFO: Running '/data/src/github.com/openshift/origin/_output/local/bin/linux/amd64/kubectl --server=https://172.17.0.2:8443 --kubeconfig=/tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig exec --namespace=e2e-tests-net-isolation2-jv47p debugpod-sourceip-nettest-node-2stfgw -- /bin/sh -c ovs-ofctl -O OpenFlow13 dump-flows br0'
Apr  5 20:51:12.010: INFO: stderr: ""
Apr  5 20:51:12.010: INFO: DEBUG:
OFPST_FLOW reply (OF1.3) (xid=0x2):
 cookie=0x0, duration=1000.505s, table=0, n_packets=0, n_bytes=0, priority=250,ip,in_port=2,nw_dst=224.0.0.0/4 actions=drop
 cookie=0x0, duration=1000.538s, table=0, n_packets=26, n_bytes=1092, priority=200,arp,in_port=1,arp_spa=10.128.0.0/14,arp_tpa=10.129.0.0/23 actions=move:NXM_NX_TUN_ID[0..31]->NXM_NX_REG0[],goto_table:10
 cookie=0x0, duration=1000.532s, table=0, n_packets=403, n_bytes=50218, priority=200,ip,in_port=1,nw_src=10.128.0.0/14 actions=move:NXM_NX_TUN_ID[0..31]->NXM_NX_REG0[],goto_table:10
 cookie=0x0, duration=1000.518s, table=0, n_packets=0, n_bytes=0, priority=200,ip,in_port=1,nw_dst=10.128.0.0/14 actions=move:NXM_NX_TUN_ID[0..31]->NXM_NX_REG0[],goto_table:10
 cookie=0x0, duration=1000.488s, table=0, n_packets=26, n_bytes=1092, priority=200,arp,in_port=2,arp_spa=10.129.0.1,arp_tpa=10.128.0.0/14 actions=goto_table:30
 cookie=0x0, duration=1000.483s, table=0, n_packets=1054, n_bytes=160054, priority=200,ip,in_port=2 actions=goto_table:30
 cookie=0x0, duration=1000.511s, table=0, n_packets=0, n_bytes=0, priority=150,in_port=1 actions=drop
 cookie=0x0, duration=1000.472s, table=0, n_packets=8, n_bytes=648, priority=150,in_port=2 actions=drop
 cookie=0x0, duration=1000.465s, table=0, n_packets=33, n_bytes=1386, priority=100,arp actions=goto_table:20
 cookie=0x0, duration=1000.457s, table=0, n_packets=1326, n_bytes=187090, priority=100,ip actions=goto_table:20
 cookie=0x0, duration=1000.448s, table=0, n_packets=121, n_bytes=9754, priority=0 actions=drop
 cookie=0x80c973ba, duration=1000.112s, table=10, n_packets=429, n_bytes=51310, priority=100,tun_src=172.17.0.3 actions=goto_table:30
 cookie=0xfb5876b8, duration=1000.087s, table=10, n_packets=0, n_bytes=0, priority=100,tun_src=172.17.0.2 actions=goto_table:30
 cookie=0x0, duration=1000.439s, table=10, n_packets=0, n_bytes=0, priority=0 actions=drop
 cookie=0x0, duration=37.396s, table=20, n_packets=1, n_bytes=42, priority=100,arp,in_port=21,arp_spa=10.129.0.20,arp_sha=00:00:0a:81:00:14/00:00:ff:ff:ff:ff actions=load:0xdf4a8e->NXM_NX_REG0[],goto_table:21
 cookie=0x0, duration=37.392s, table=20, n_packets=5, n_bytes=370, priority=100,ip,in_port=21,nw_src=10.129.0.20 actions=load:0xdf4a8e->NXM_NX_REG0[],goto_table:21
 cookie=0x0, duration=1000.432s, table=20, n_packets=0, n_bytes=0, priority=0 actions=drop
 cookie=0x0, duration=1000.424s, table=21, n_packets=1359, n_bytes=188476, priority=0 actions=goto_table:30
 cookie=0x0, duration=1000.416s, table=30, n_packets=17, n_bytes=714, priority=300,arp,arp_tpa=10.129.0.1 actions=output:2
 cookie=0x0, duration=1000.394s, table=30, n_packets=458, n_bytes=91054, priority=300,ip,nw_dst=10.129.0.1 actions=output:2
 cookie=0x0, duration=1000.409s, table=30, n_packets=42, n_bytes=1764, priority=200,arp,arp_tpa=10.129.0.0/23 actions=goto_table:40
 cookie=0x0, duration=1000.384s, table=30, n_packets=1375, n_bytes=197108, priority=200,ip,nw_dst=10.129.0.0/23 actions=goto_table:70
 cookie=0x0, duration=1000.402s, table=30, n_packets=26, n_bytes=1092, priority=100,arp,arp_tpa=10.128.0.0/14 actions=goto_table:50
 cookie=0x0, duration=1000.377s, table=30, n_packets=615, n_bytes=70593, priority=100,ip,nw_dst=10.128.0.0/14 actions=goto_table:90
 cookie=0x0, duration=1000.389s, table=30, n_packets=314, n_bytes=37017, priority=100,ip,nw_dst=172.30.0.0/16 actions=goto_table:60
 cookie=0x0, duration=1000.373s, table=30, n_packets=10, n_bytes=768, priority=50,ip,in_port=1,nw_dst=224.0.0.0/4 actions=goto_table:120
 cookie=0x0, duration=1000.364s, table=30, n_packets=11, n_bytes=822, priority=25,ip,nw_dst=224.0.0.0/4 actions=goto_table:110
 cookie=0x0, duration=1000.358s, table=30, n_packets=0, n_bytes=0, priority=0,ip actions=goto_table:100
 cookie=0x0, duration=1000.353s, table=30, n_packets=0, n_bytes=0, priority=0,arp actions=drop
 cookie=0x0, duration=37.387s, table=40, n_packets=1, n_bytes=42, priority=100,arp,arp_tpa=10.129.0.20 actions=output:21
 cookie=0x0, duration=1000.345s, table=40, n_packets=9, n_bytes=378, priority=0 actions=drop
 cookie=0x80c973ba, duration=1000.107s, table=50, n_packets=26, n_bytes=1092, priority=100,arp,arp_tpa=10.128.0.0/23 actions=move:NXM_NX_REG0[]->NXM_NX_TUN_ID[0..31],set_field:172.17.0.3->tun_dst,output:1
 cookie=0xfb5876b8, duration=1000.082s, table=50, n_packets=0, n_bytes=0, priority=100,arp,arp_tpa=10.130.0.0/23 actions=move:NXM_NX_REG0[]->NXM_NX_TUN_ID[0..31],set_field:172.17.0.2->tun_dst,output:1
 cookie=0x0, duration=1000.339s, table=50, n_packets=0, n_bytes=0, priority=0 actions=drop
 cookie=0x0, duration=1000.332s, table=60, n_packets=6, n_bytes=484, priority=200,reg0=0 actions=output:2
 cookie=0x0, duration=1000.117s, table=60, n_packets=0, n_bytes=0, priority=100,ip,nw_dst=172.30.0.1,nw_frag=later actions=load:0->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80
 cookie=0x0, duration=1000.112s, table=60, n_packets=293, n_bytes=35327, priority=100,tcp,nw_dst=172.30.0.1,tp_dst=443 actions=load:0->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80
 cookie=0x0, duration=1000.103s, table=60, n_packets=0, n_bytes=0, priority=100,udp,nw_dst=172.30.0.1,tp_dst=53 actions=load:0->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80
 cookie=0x0, duration=1000.096s, table=60, n_packets=0, n_bytes=0, priority=100,tcp,nw_dst=172.30.0.1,tp_dst=53 actions=load:0->NXM_NX_REG1[],load:0x2->NXM_NX_REG2[],goto_table:80
 cookie=0x0, duration=1000.327s, table=60, n_packets=8, n_bytes=688, priority=0 actions=drop
 cookie=0x0, duration=37.383s, table=70, n_packets=0, n_bytes=0, priority=100,ip,nw_dst=10.129.0.20 actions=load:0xdf4a8e->NXM_NX_REG1[],load:0x15->NXM_NX_REG2[],goto_table:80
 cookie=0x0, duration=1000.318s, table=70, n_packets=0, n_bytes=0, priority=0 actions=drop
 cookie=0x0, duration=1000.312s, table=80, n_packets=734, n_bytes=60761, priority=300,ip,nw_src=10.129.0.1 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=1000.226s, table=80, n_packets=264, n_bytes=94459, priority=200,reg0=0 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=1000.220s, table=80, n_packets=299, n_bytes=35877, priority=200,reg1=0 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=990.604s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x815ec7,reg1=0x815ec7 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=990.497s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x64c0eb,reg1=0x64c0eb actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=769.888s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x672905,reg1=0x672905 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=769.809s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x7c1b53,reg1=0x7c1b53 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=747.123s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x99b8bd,reg1=0x99b8bd actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=747.027s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x53f6d9,reg1=0x53f6d9 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=715.275s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x3fcd53,reg1=0x3fcd53 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=709.047s, table=80, n_packets=336, n_bytes=37682, priority=100,reg0=0x74e5be,reg1=0x74e5be actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=692.423s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x576047,reg1=0x576047 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=639.686s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0xceaaf,reg1=0xceaaf actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=639.596s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x608c4b,reg1=0x608c4b actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=616.851s, table=80, n_packets=1, n_bytes=74, priority=100,reg0=0xa6a87b,reg1=0xa6a87b actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=597.237s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x1a123d,reg1=0x1a123d actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=597.153s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0xb56ceb,reg1=0xb56ceb actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=569.419s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x50ae27,reg1=0x50ae27 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=569.350s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x6eb4eb,reg1=0x6eb4eb actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=543.609s, table=80, n_packets=6, n_bytes=508, priority=100,reg0=0x6d89c7,reg1=0x6d89c7 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=495.147s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0xa0447d,reg1=0xa0447d actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=484.847s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0xf1bdb3,reg1=0xf1bdb3 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=484.750s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0xee6f88,reg1=0xee6f88 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=464.103s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0xd10031,reg1=0xd10031 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=415.619s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0xc2138e,reg1=0xc2138e actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=397.006s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x1ca9c5,reg1=0x1ca9c5 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=390.751s, table=80, n_packets=10, n_bytes=1090, priority=100,reg0=0x6b4fd3,reg1=0x6b4fd3 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=346.942s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0xfc881e,reg1=0xfc881e actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=340.713s, table=80, n_packets=9, n_bytes=1020, priority=100,reg0=0x17488a,reg1=0x17488a actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=321.462s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x5aa8fe,reg1=0x5aa8fe actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=315.227s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x6648c9,reg1=0x6648c9 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=308.929s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x515a0c,reg1=0x515a0c actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=302.673s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x33855f,reg1=0x33855f actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=296.460s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x7dd785,reg1=0x7dd785 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=290.241s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x29803e,reg1=0x29803e actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=290.082s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x96439b,reg1=0x96439b actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=225.427s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0xb218c3,reg1=0xb218c3 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=225.332s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0xa673dd,reg1=0xa673dd actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=165.772s, table=80, n_packets=1, n_bytes=50, priority=100,reg0=0xe6552d,reg1=0xe6552d actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=123.277s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x5ac7d2,reg1=0x5ac7d2 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=117.056s, table=80, n_packets=10, n_bytes=1062, priority=100,reg0=0xa2f50a,reg1=0xa2f50a actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=89.404s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x892e91,reg1=0x892e91 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=89.299s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x3bc336,reg1=0x3bc336 actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=68.612s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0x1fd91d,reg1=0x1fd91d actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=68.498s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0xdf772c,reg1=0xdf772c actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=42.776s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0xeec91b,reg1=0xeec91b actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=42.694s, table=80, n_packets=0, n_bytes=0, priority=100,reg0=0xdf4a8e,reg1=0xdf4a8e actions=output:NXM_NX_REG2[]
 cookie=0x0, duration=1000.303s, table=80, n_packets=5, n_bytes=370, priority=0 actions=drop
 cookie=0x80c973ba, duration=1000.103s, table=90, n_packets=615, n_bytes=70593, priority=100,ip,nw_dst=10.128.0.0/23 actions=move:NXM_NX_REG0[]->NXM_NX_TUN_ID[0..31],set_field:172.17.0.3->tun_dst,output:1
 cookie=0xfb5876b8, duration=1000.070s, table=90, n_packets=0, n_bytes=0, priority=100,ip,nw_dst=10.130.0.0/23 actions=move:NXM_NX_REG0[]->NXM_NX_TUN_ID[0..31],set_field:172.17.0.2->tun_dst,output:1
 cookie=0x0, duration=1000.296s, table=90, n_packets=0, n_bytes=0, priority=0 actions=drop
 cookie=0x0, duration=1000.290s, table=100, n_packets=0, n_bytes=0, priority=0 actions=goto_table:101
 cookie=0x0, duration=1000.285s, table=101, n_packets=0, n_bytes=0, priority=51,tcp,nw_dst=172.17.0.4,tp_dst=53 actions=output:2
 cookie=0x0, duration=1000.275s, table=101, n_packets=0, n_bytes=0, priority=51,udp,nw_dst=172.17.0.4,tp_dst=53 actions=output:2
 cookie=0x0, duration=1000.265s, table=101, n_packets=0, n_bytes=0, priority=0 actions=output:2
 cookie=0x0, duration=1000.261s, table=110, n_packets=6, n_bytes=438, priority=0 actions=drop
 cookie=0x0, duration=1000.065s, table=111, n_packets=5, n_bytes=384, priority=100 actions=move:NXM_NX_REG0[]->NXM_NX_TUN_ID[0..31],set_field:172.17.0.2->tun_dst,output:1,set_field:172.17.0.3->tun_dst,output:1,goto_table:120
 cookie=0x0, duration=1000.246s, table=120, n_packets=0, n_bytes=0, priority=0 actions=drop
 cookie=0x0, duration=1000.235s, table=253, n_packets=0, n_bytes=0, actions=note:01.07.00.00.00.00


Apr  5 20:51:12.010: INFO: Running '/data/src/github.com/openshift/origin/_output/local/bin/linux/amd64/kubectl --server=https://172.17.0.2:8443 --kubeconfig=/tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig exec --namespace=e2e-tests-net-isolation2-jv47p debugpod-sourceip-nettest-node-2stfgw -- /bin/sh -c iptables-save'
Apr  5 20:51:12.300: INFO: stderr: ""
Apr  5 20:51:12.300: INFO: DEBUG:
# Generated by iptables-save v1.4.21 on Thu Apr  5 20:51:12 2018
*nat
:PREROUTING ACCEPT [4:240]
:INPUT ACCEPT [4:240]
:OUTPUT ACCEPT [18:1128]
:POSTROUTING ACCEPT [18:1128]
:DOCKER - [0:0]
:KUBE-HOSTPORTS - [0:0]
:KUBE-MARK-DROP - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-NODEPORT-CONTAINER - [0:0]
:KUBE-NODEPORT-HOST - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-PORTALS-CONTAINER - [0:0]
:KUBE-PORTALS-HOST - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-SEP-CKTKXEMIKRIIY55M - [0:0]
:KUBE-SEP-EZ5ESXJRZ36JV4D4 - [0:0]
:KUBE-SEP-PATXOTJBHFPU4CNS - [0:0]
:KUBE-SERVICES - [0:0]
:KUBE-SVC-3VQ6B3MLH7E2SZT4 - [0:0]
:KUBE-SVC-BA6I5HTZKAAAJT56 - [0:0]
:KUBE-SVC-NPX46M4PTMTKRN6Y - [0:0]
:OPENSHIFT-MASQUERADE - [0:0]
-A PREROUTING -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A PREROUTING -m comment --comment "handle ClusterIPs; NOTE: this must be before the NodePort rules" -j KUBE-PORTALS-CONTAINER
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
-A PREROUTING -m addrtype --dst-type LOCAL -m comment --comment "handle service NodePorts; NOTE: this must be the last rule in the chain" -j KUBE-NODEPORT-CONTAINER
-A PREROUTING -m comment --comment "kube hostport portals" -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS
-A OUTPUT -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A OUTPUT -m comment --comment "handle ClusterIPs; NOTE: this must be before the NodePort rules" -j KUBE-PORTALS-HOST
-A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER
-A OUTPUT -m addrtype --dst-type LOCAL -m comment --comment "handle service NodePorts; NOTE: this must be the last rule in the chain" -j KUBE-NODEPORT-HOST
-A OUTPUT -m comment --comment "kube hostport portals" -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS
-A POSTROUTING -m comment --comment "rules for masquerading OpenShift traffic" -j OPENSHIFT-MASQUERADE
-A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING
-A POSTROUTING -s 172.19.0.0/16 ! -o docker0 -j MASQUERADE
-A POSTROUTING -s 127.0.0.0/8 -o tun0 -m comment --comment "SNAT for localhost access to hostports" -j MASQUERADE
-A DOCKER -i docker0 -j RETURN
-A KUBE-MARK-DROP -j MARK --set-xmark 0x8000/0x8000
-A KUBE-MARK-MASQ -j MARK --set-xmark 0x1/0x1
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -m mark --mark 0x1/0x1 -j MASQUERADE
-A KUBE-SEP-CKTKXEMIKRIIY55M -s 172.17.0.2/32 -m comment --comment "default/kubernetes:dns" -j KUBE-MARK-MASQ
-A KUBE-SEP-CKTKXEMIKRIIY55M -p udp -m comment --comment "default/kubernetes:dns" -m recent --set --name KUBE-SEP-CKTKXEMIKRIIY55M --mask 255.255.255.255 --rsource -m udp -j DNAT --to-destination 172.17.0.2:8053
-A KUBE-SEP-EZ5ESXJRZ36JV4D4 -s 172.17.0.2/32 -m comment --comment "default/kubernetes:dns-tcp" -j KUBE-MARK-MASQ
-A KUBE-SEP-EZ5ESXJRZ36JV4D4 -p tcp -m comment --comment "default/kubernetes:dns-tcp" -m recent --set --name KUBE-SEP-EZ5ESXJRZ36JV4D4 --mask 255.255.255.255 --rsource -m tcp -j DNAT --to-destination 172.17.0.2:8053
-A KUBE-SEP-PATXOTJBHFPU4CNS -s 172.17.0.2/32 -m comment --comment "default/kubernetes:https" -j KUBE-MARK-MASQ
-A KUBE-SEP-PATXOTJBHFPU4CNS -p tcp -m comment --comment "default/kubernetes:https" -m recent --set --name KUBE-SEP-PATXOTJBHFPU4CNS --mask 255.255.255.255 --rsource -m tcp -j DNAT --to-destination 172.17.0.2:8443
-A KUBE-SERVICES -d 172.30.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-SVC-NPX46M4PTMTKRN6Y
-A KUBE-SERVICES -d 172.30.0.1/32 -p udp -m comment --comment "default/kubernetes:dns cluster IP" -m udp --dport 53 -j KUBE-SVC-3VQ6B3MLH7E2SZT4
-A KUBE-SERVICES -d 172.30.0.1/32 -p tcp -m comment --comment "default/kubernetes:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-SVC-BA6I5HTZKAAAJT56
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
-A KUBE-SVC-3VQ6B3MLH7E2SZT4 -m comment --comment "default/kubernetes:dns" -m recent --rcheck --seconds 10800 --reap --name KUBE-SEP-CKTKXEMIKRIIY55M --mask 255.255.255.255 --rsource -j KUBE-SEP-CKTKXEMIKRIIY55M
-A KUBE-SVC-3VQ6B3MLH7E2SZT4 -m comment --comment "default/kubernetes:dns" -j KUBE-SEP-CKTKXEMIKRIIY55M
-A KUBE-SVC-BA6I5HTZKAAAJT56 -m comment --comment "default/kubernetes:dns-tcp" -m recent --rcheck --seconds 10800 --reap --name KUBE-SEP-EZ5ESXJRZ36JV4D4 --mask 255.255.255.255 --rsource -j KUBE-SEP-EZ5ESXJRZ36JV4D4
-A KUBE-SVC-BA6I5HTZKAAAJT56 -m comment --comment "default/kubernetes:dns-tcp" -j KUBE-SEP-EZ5ESXJRZ36JV4D4
-A KUBE-SVC-NPX46M4PTMTKRN6Y -m comment --comment "default/kubernetes:https" -m recent --rcheck --seconds 10800 --reap --name KUBE-SEP-PATXOTJBHFPU4CNS --mask 255.255.255.255 --rsource -j KUBE-SEP-PATXOTJBHFPU4CNS
-A KUBE-SVC-NPX46M4PTMTKRN6Y -m comment --comment "default/kubernetes:https" -j KUBE-SEP-PATXOTJBHFPU4CNS
-A OPENSHIFT-MASQUERADE -s 10.128.0.0/14 -m comment --comment "masquerade pod-to-service and pod-to-external traffic" -j MASQUERADE
COMMIT
# Completed on Thu Apr  5 20:51:12 2018
# Generated by iptables-save v1.4.21 on Thu Apr  5 20:51:12 2018
*filter
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [289:56327]
:DOCKER - [0:0]
:DOCKER-ISOLATION - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FIREWALL - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORT-NON-LOCAL - [0:0]
:KUBE-SERVICES - [0:0]
:OPENSHIFT-ADMIN-OUTPUT-RULES - [0:0]
:OPENSHIFT-FIREWALL-ALLOW - [0:0]
:OPENSHIFT-FIREWALL-FORWARD - [0:0]
-A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES
-A INPUT -m comment --comment "Ensure that non-local NodePort traffic can flow" -j KUBE-NODEPORT-NON-LOCAL
-A INPUT -m comment --comment "firewall overrides" -j OPENSHIFT-FIREWALL-ALLOW
-A INPUT -j KUBE-FIREWALL
-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
-A INPUT -p icmp -j ACCEPT
-A INPUT -i lo -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 10250 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 22 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 80 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 1936 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 443 -j ACCEPT
-A INPUT -j REJECT --reject-with icmp-host-prohibited
-A FORWARD -m comment --comment "kubernetes forwarding rules" -j KUBE-FORWARD
-A FORWARD -i tun0 ! -o tun0 -m comment --comment "administrator overrides" -j OPENSHIFT-ADMIN-OUTPUT-RULES
-A FORWARD -m comment --comment "firewall overrides" -j OPENSHIFT-FIREWALL-FORWARD
-A FORWARD -j DOCKER-ISOLATION
-A FORWARD -o docker0 -j DOCKER
-A FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -i docker0 ! -o docker0 -j ACCEPT
-A FORWARD -i docker0 -o docker0 -j ACCEPT
-A FORWARD -j REJECT --reject-with icmp-host-prohibited
-A OUTPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A OUTPUT -j KUBE-FIREWALL
-A DOCKER-ISOLATION -j RETURN
-A KUBE-FIREWALL -m comment --comment "kubernetes firewall for dropping marked packets" -m mark --mark 0x8000/0x8000 -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x1/0x1 -j ACCEPT
-A OPENSHIFT-FIREWALL-ALLOW -p udp -m udp --dport 4789 -m comment --comment "VXLAN incoming" -j ACCEPT
-A OPENSHIFT-FIREWALL-ALLOW -i tun0 -m comment --comment "from SDN to localhost" -j ACCEPT
-A OPENSHIFT-FIREWALL-ALLOW -i docker0 -m comment --comment "from docker to localhost" -j ACCEPT
-A OPENSHIFT-FIREWALL-FORWARD -s 10.128.0.0/14 -m comment --comment "attempted resend after connection close" -m conntrack --ctstate INVALID -j DROP
-A OPENSHIFT-FIREWALL-FORWARD -d 10.128.0.0/14 -m comment --comment "forward traffic from SDN" -j ACCEPT
-A OPENSHIFT-FIREWALL-FORWARD -s 10.128.0.0/14 -m comment --comment "forward traffic to SDN" -j ACCEPT
COMMIT
# Completed on Thu Apr  5 20:51:12 2018


Apr  5 20:51:12.300: INFO: Running '/data/src/github.com/openshift/origin/_output/local/bin/linux/amd64/kubectl --server=https://172.17.0.2:8443 --kubeconfig=/tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig exec --namespace=e2e-tests-net-isolation2-jv47p debugpod-sourceip-nettest-node-2stfgw -- /bin/sh -c ss -ant'
Apr  5 20:51:12.590: INFO: stderr: ""
Apr  5 20:51:12.590: INFO: DEBUG:
State      Recv-Q Send-Q Local Address:Port               Peer Address:Port              
LISTEN     0      128          *:22                       *:*                  
ESTAB      0      0      172.17.0.4:46188              172.17.0.2:8443               
LISTEN     0      128         :::10250                   :::*                  
LISTEN     0      128         :::10256                   :::*                  
LISTEN     0      128         :::22                      :::*                  
ESTAB      0      29       ::ffff:172.17.0.4:10250                ::ffff:172.17.0.2:43134              


Apr  5 20:51:12.597: INFO: Cleaning up the exec pod
[AfterEach] when using a plugin that isolates namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:135
Apr  5 20:51:12.606: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-net-isolation1-nfcl6" for this suite.
Apr  5 20:51:18.617: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
Apr  5 20:51:18.694: INFO: namespace: e2e-tests-net-isolation1-nfcl6, resource: bindings, ignored listing per whitelist
Apr  5 20:51:18.717: INFO: namespace e2e-tests-net-isolation1-nfcl6 deletion completed in 6.108927904s
[AfterEach] when using a plugin that isolates namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:135
Apr  5 20:51:18.717: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-net-isolation2-jv47p" for this suite.
Apr  5 20:51:24.726: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
Apr  5 20:51:24.770: INFO: namespace: e2e-tests-net-isolation2-jv47p, resource: bindings, ignored listing per whitelist
Apr  5 20:51:24.832: INFO: namespace e2e-tests-net-isolation2-jv47p deletion completed in 6.112940675s

• [SLOW TEST:55.635 seconds]
[Area:Networking] network isolation
/tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/isolation.go:10
  when using a plugin that isolates namespaces by default
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/util.go:415
    should prevent communication between pods in different namespaces on different nodes [Suite:openshift/conformance/parallel]
    /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/isolation.go:32
------------------------------
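In the spec above the wget timeout is the desired outcome: with a namespace-isolating plugin the cross-namespace probe must fail, and the OVS, iptables, and ss dumps that follow are debug output gathered after the probe times out. As a rough sketch only (not the suite's own helper), the expect-failure probe could be reproduced by hand like this in Go; the kubeconfig, namespace, pod name, and target values are copied from the log and assume the cluster from this run is still up.

```go
// Hedged sketch: expects the cross-namespace wget to FAIL (time out).
package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// Values copied from the log above; adjust for your own cluster.
	kubeconfig := "/tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig"
	ns := "e2e-tests-net-isolation2-jv47p"
	pod := "execpod-sourceip-nettest-node-2lrsdj"
	target := "10.128.0.72:8080"

	cmd := exec.Command("kubectl",
		"--kubeconfig="+kubeconfig,
		"exec", "--namespace="+ns, pod, "--",
		"/bin/sh", "-c", "wget -T 30 -qO- "+target)
	out, err := cmd.CombinedOutput()
	if err != nil {
		// Non-zero exit (wget timeout) is the desired result: isolation holds.
		fmt.Printf("probe failed as expected (%v): %s\n", err, out)
		return
	}
	fmt.Println("probe unexpectedly succeeded; namespaces are NOT isolated")
	os.Exit(1)
}
```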
SSSSSSSSSSSSSSSSSS
------------------------------
[Area:Networking] services basic functionality 
  should allow connections to another pod on the same node via a service IP [Suite:openshift/conformance/parallel]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/services.go:14
[BeforeEach] [Top Level]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/util/test.go:53
[BeforeEach] basic functionality
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:134
STEP: Creating a kubernetes client
Apr  5 20:51:24.832: INFO: >>> kubeConfig: /tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig
STEP: Building a namespace api object
STEP: Waiting for a default service account to be provisioned in namespace
[It] should allow connections to another pod on the same node via a service IP [Suite:openshift/conformance/parallel]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/services.go:14
Apr  5 20:51:24.929: INFO: Using nettest-node-1 for test ([nettest-node-1 nettest-node-2] out of [nettest-node-1 nettest-node-2])
Apr  5 20:51:28.941: INFO: Target pod IP:port is 10.128.0.73:8080
Apr  5 20:51:28.958: INFO: Endpoint e2e-tests-net-services1-l8jpx/service-dn2sc is not ready yet
Apr  5 20:51:33.961: INFO: Target service IP:port is 172.30.225.74:8080
Apr  5 20:51:33.961: INFO: Creating an exec pod on node nettest-node-1
Apr  5 20:51:33.961: INFO: Creating new exec pod
Apr  5 20:51:39.972: INFO: Waiting up to 10s to wget 172.30.225.74:8080
Apr  5 20:51:39.972: INFO: Running '/data/src/github.com/openshift/origin/_output/local/bin/linux/amd64/kubectl --server=https://172.17.0.2:8443 --kubeconfig=/tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig exec --namespace=e2e-tests-net-services1-l8jpx execpod-sourceip-nettest-node-17gg8k -- /bin/sh -c wget -T 30 -qO- 172.30.225.74:8080'
Apr  5 20:51:40.266: INFO: stderr: ""
Apr  5 20:51:40.266: INFO: Cleaning up the exec pod
[AfterEach] basic functionality
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:135
Apr  5 20:51:40.293: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-net-services1-l8jpx" for this suite.
Apr  5 20:51:50.312: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
Apr  5 20:51:50.414: INFO: namespace: e2e-tests-net-services1-l8jpx, resource: bindings, ignored listing per whitelist
Apr  5 20:51:50.414: INFO: namespace e2e-tests-net-services1-l8jpx deletion completed in 10.116055653s

• [SLOW TEST:25.582 seconds]
[Area:Networking] services
/tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/services.go:10
  basic functionality
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/services.go:11
    should allow connections to another pod on the same node via a service IP [Suite:openshift/conformance/parallel]
    /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/networking/services.go:14
------------------------------
[sig-network] Networking Granular Checks: Pods 
  should function for node-pod communication: http  [Conformance] [Suite:openshift/conformance/parallel] [Suite:k8s]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:648
[BeforeEach] [Top Level]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/test/extended/util/test.go:53
[BeforeEach] [sig-network] Networking
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:134
STEP: Creating a kubernetes client
Apr  5 20:51:50.414: INFO: >>> kubeConfig: /tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig
STEP: Building a namespace api object
Apr  5 20:51:50.490: INFO: About to run a Kube e2e test, ensuring namespace is privileged
STEP: Waiting for a default service account to be provisioned in namespace
[It] should function for node-pod communication: http  [Conformance] [Suite:openshift/conformance/parallel] [Suite:k8s]
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:648
STEP: Performing setup for networking test in namespace e2e-tests-pod-network-test-m94tm
STEP: creating a selector
STEP: Creating the service pods in kubernetes
Apr  5 20:51:50.535: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable
STEP: Creating test pods
Apr  5 20:52:16.587: INFO: ExecWithOptions {Command:[/bin/sh -c timeout -t 15 curl -g -q -s --connect-timeout 1 http://10.129.0.21:8080/hostName | grep -v '^\s*$'] Namespace:e2e-tests-pod-network-test-m94tm PodName:host-test-container-pod ContainerName:hostexec Stdin:<nil> CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
Apr  5 20:52:16.587: INFO: >>> kubeConfig: /tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig
Apr  5 20:52:16.669: INFO: Found all expected endpoints: [netserver-0]
Apr  5 20:52:16.671: INFO: ExecWithOptions {Command:[/bin/sh -c timeout -t 15 curl -g -q -s --connect-timeout 1 http://10.128.0.75:8080/hostName | grep -v '^\s*$'] Namespace:e2e-tests-pod-network-test-m94tm PodName:host-test-container-pod ContainerName:hostexec Stdin:<nil> CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
Apr  5 20:52:16.671: INFO: >>> kubeConfig: /tmp/openshift/networking/multitenant/openshift.local.config/master/admin.kubeconfig
Apr  5 20:52:16.755: INFO: Found all expected endpoints: [netserver-1]
[AfterEach] [sig-network] Networking
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:135
Apr  5 20:52:16.755: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-pod-network-test-m94tm" for this suite.
Apr  5 20:52:38.764: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
Apr  5 20:52:38.811: INFO: namespace: e2e-tests-pod-network-test-m94tm, resource: bindings, ignored listing per whitelist
Apr  5 20:52:38.865: INFO: namespace e2e-tests-pod-network-test-m94tm deletion completed in 22.108787028s

• [SLOW TEST:48.451 seconds]
[sig-network] Networking
/tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/common/networking.go:25
  Granular Checks: Pods
  /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/common/networking.go:28
    should function for node-pod communication: http  [Conformance] [Suite:openshift/conformance/parallel] [Suite:k8s]
    /tmp/openshift/build-rpms/rpm/BUILD/origin-3.10.0/_output/local/go/src/github.com/openshift/origin/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go:648
------------------------------
SSSSSSSSSSSSSSSSSSS
Apr  5 20:52:38.866: INFO: Running AfterSuite actions on all node
Apr  5 20:52:38.866: INFO: Running AfterSuite actions on node 1

Ran 24 of 440 Specs in 1077.748 seconds
SUCCESS! -- 24 Passed | 0 Failed | 0 Pending | 416 Skipped Apr  5 20:52:38.868: INFO: Dumping logs locally to: /data/src/github.com/openshift/origin/_output/scripts/networking/artifacts/junit
Apr  5 20:52:38.869: INFO: Error running cluster/log-dump/log-dump.sh: fork/exec cluster/log-dump/log-dump.sh: no such file or directory
--- PASS: TestExtended (1077.84s)
PASS
[INFO] [20:52:38+0000] Saving container logs
[INFO] [20:52:41+0000] Shutting down docker-in-docker cluster for the multitenant plugin
Stopping dind cluster 'nettest'
[INFO] [20:52:51+0000] Re-enabling selinux enforcement
[INFO] [20:52:51+0000] No test failures were detected
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: RUN EXTENDED TESTS [01h 01m 05s] ##########
[PostBuildScript] - Execution post build scripts.
[workspace] $ /bin/bash /tmp/jenkins5001537850450372782.sh
########## STARTING STAGE: DOWNLOAD ARTIFACTS FROM THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/activate ]]
+ source /var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66
++ export PATH=/var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/.config
+ trap 'exit 0' EXIT
++ pwd
+ ARTIFACT_DIR=/var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/artifacts/gathered
+ rm -rf /var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/artifacts/gathered
+ mkdir -p /var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/artifacts/gathered
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo stat /data/src/github.com/openshift/origin/_output/scripts
  File: ‘/data/src/github.com/openshift/origin/_output/scripts’
  Size: 62        	Blocks: 0          IO Block: 4096   directory
Device: ca02h/51714d	Inode: 188918570   Links: 5
Access: (2755/drwxr-sr-x)  Uid: ( 1001/  origin)   Gid: ( 1003/origin-git)
Context: unconfined_u:object_r:svirt_sandbox_file_t:s0
Access: 2018-04-05 19:27:05.157183044 +0000
Modify: 2018-04-05 19:53:04.972963202 +0000
Change: 2018-04-05 19:53:04.972963202 +0000
 Birth: -
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo chmod -R o+rX /data/src/github.com/openshift/origin/_output/scripts
+ scp -r -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel:/data/src/github.com/openshift/origin/_output/scripts /var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/artifacts/gathered
+ tree /var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/artifacts/gathered
/var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/artifacts/gathered
└── scripts
    ├── build-base-images
    │   ├── artifacts
    │   ├── logs
    │   └── openshift.local.home
    ├── networking
    │   ├── artifacts
    │   │   ├── junit
    │   │   │   ├── networking_multitenant_01.xml
    │   │   │   ├── networking_networkpolicy_01.xml
    │   │   │   └── networking_subnet_01.xml
    │   │   ├── multitenant
    │   │   │   ├── nettest-master
    │   │   │   │   └── hosts
    │   │   │   ├── nettest-node-1
    │   │   │   │   └── hosts
    │   │   │   ├── nettest-node-2
    │   │   │   │   └── hosts
    │   │   │   └── openshift.local.config
    │   │   │       ├── master
    │   │   │       │   ├── admin.crt
    │   │   │       │   ├── admin.key
    │   │   │       │   ├── admin.kubeconfig
    │   │   │       │   ├── ca-bundle.crt
    │   │   │       │   ├── ca.crt
    │   │   │       │   ├── ca.key
    │   │   │       │   ├── ca.serial.txt
    │   │   │       │   ├── etcd.server.crt
    │   │   │       │   ├── etcd.server.key
    │   │   │       │   ├── frontproxy-ca.crt
    │   │   │       │   ├── frontproxy-ca.key
    │   │   │       │   ├── frontproxy-ca.serial.txt
    │   │   │       │   ├── master-config.yaml
    │   │   │       │   ├── master.etcd-client.crt
    │   │   │       │   ├── master.etcd-client.key
    │   │   │       │   ├── master.kubelet-client.crt
    │   │   │       │   ├── master.kubelet-client.key
    │   │   │       │   ├── master.proxy-client.crt
    │   │   │       │   ├── master.proxy-client.key
    │   │   │       │   ├── master.server.crt
    │   │   │       │   ├── master.server.key
    │   │   │       │   ├── openshift-aggregator.crt
    │   │   │       │   ├── openshift-aggregator.key
    │   │   │       │   ├── openshift-master.crt
    │   │   │       │   ├── openshift-master.key
    │   │   │       │   ├── openshift-master.kubeconfig
    │   │   │       │   ├── serviceaccounts.private.key
    │   │   │       │   ├── serviceaccounts.public.key
    │   │   │       │   ├── service-signer.crt
    │   │   │       │   └── service-signer.key
    │   │   │       ├── node-nettest-master-node
    │   │   │       │   ├── ca.crt
    │   │   │       │   ├── master-client.crt
    │   │   │       │   ├── master-client.key
    │   │   │       │   ├── node-client-ca.crt
    │   │   │       │   ├── node-config.yaml
    │   │   │       │   ├── node.kubeconfig
    │   │   │       │   ├── node-registration.json
    │   │   │       │   ├── server.crt
    │   │   │       │   └── server.key
    │   │   │       ├── node-nettest-node-1
    │   │   │       │   ├── ca.crt
    │   │   │       │   ├── master-client.crt
    │   │   │       │   ├── master-client.key
    │   │   │       │   ├── node-client-ca.crt
    │   │   │       │   ├── node-config.yaml
    │   │   │       │   ├── node.kubeconfig
    │   │   │       │   ├── node-registration.json
    │   │   │       │   ├── server.crt
    │   │   │       │   └── server.key
    │   │   │       └── node-nettest-node-2
    │   │   │           ├── ca.crt
    │   │   │           ├── master-client.crt
    │   │   │           ├── master-client.key
    │   │   │           ├── node-client-ca.crt
    │   │   │           ├── node-config.yaml
    │   │   │           ├── node.kubeconfig
    │   │   │           ├── node-registration.json
    │   │   │           ├── server.crt
    │   │   │           └── server.key
    │   │   ├── networkpolicy
    │   │   │   ├── nettest-master
    │   │   │   │   └── hosts
    │   │   │   ├── nettest-node-1
    │   │   │   │   └── hosts
    │   │   │   ├── nettest-node-2
    │   │   │   │   └── hosts
    │   │   │   └── openshift.local.config
    │   │   │       ├── master
    │   │   │       │   ├── admin.crt
    │   │   │       │   ├── admin.key
    │   │   │       │   ├── admin.kubeconfig
    │   │   │       │   ├── ca-bundle.crt
    │   │   │       │   ├── ca.crt
    │   │   │       │   ├── ca.key
    │   │   │       │   ├── ca.serial.txt
    │   │   │       │   ├── etcd.server.crt
    │   │   │       │   ├── etcd.server.key
    │   │   │       │   ├── frontproxy-ca.crt
    │   │   │       │   ├── frontproxy-ca.key
    │   │   │       │   ├── frontproxy-ca.serial.txt
    │   │   │       │   ├── master-config.yaml
    │   │   │       │   ├── master.etcd-client.crt
    │   │   │       │   ├── master.etcd-client.key
    │   │   │       │   ├── master.kubelet-client.crt
    │   │   │       │   ├── master.kubelet-client.key
    │   │   │       │   ├── master.proxy-client.crt
    │   │   │       │   ├── master.proxy-client.key
    │   │   │       │   ├── master.server.crt
    │   │   │       │   ├── master.server.key
    │   │   │       │   ├── openshift-aggregator.crt
    │   │   │       │   ├── openshift-aggregator.key
    │   │   │       │   ├── openshift-master.crt
    │   │   │       │   ├── openshift-master.key
    │   │   │       │   ├── openshift-master.kubeconfig
    │   │   │       │   ├── serviceaccounts.private.key
    │   │   │       │   ├── serviceaccounts.public.key
    │   │   │       │   ├── service-signer.crt
    │   │   │       │   └── service-signer.key
    │   │   │       ├── node-nettest-master-node
    │   │   │       │   ├── ca.crt
    │   │   │       │   ├── master-client.crt
    │   │   │       │   ├── master-client.key
    │   │   │       │   ├── node-client-ca.crt
    │   │   │       │   ├── node-config.yaml
    │   │   │       │   ├── node.kubeconfig
    │   │   │       │   ├── node-registration.json
    │   │   │       │   ├── server.crt
    │   │   │       │   └── server.key
    │   │   │       ├── node-nettest-node-1
    │   │   │       │   ├── ca.crt
    │   │   │       │   ├── master-client.crt
    │   │   │       │   ├── master-client.key
    │   │   │       │   ├── node-client-ca.crt
    │   │   │       │   ├── node-config.yaml
    │   │   │       │   ├── node.kubeconfig
    │   │   │       │   ├── node-registration.json
    │   │   │       │   ├── server.crt
    │   │   │       │   └── server.key
    │   │   │       └── node-nettest-node-2
    │   │   │           ├── ca.crt
    │   │   │           ├── master-client.crt
    │   │   │           ├── master-client.key
    │   │   │           ├── node-client-ca.crt
    │   │   │           ├── node-config.yaml
    │   │   │           ├── node.kubeconfig
    │   │   │           ├── node-registration.json
    │   │   │           ├── server.crt
    │   │   │           └── server.key
    │   │   └── subnet
    │   │       ├── nettest-master
    │   │       │   └── hosts
    │   │       ├── nettest-node-1
    │   │       │   └── hosts
    │   │       ├── nettest-node-2
    │   │       │   └── hosts
    │   │       └── openshift.local.config
    │   │           ├── master
    │   │           │   ├── admin.crt
    │   │           │   ├── admin.key
    │   │           │   ├── admin.kubeconfig
    │   │           │   ├── ca-bundle.crt
    │   │           │   ├── ca.crt
    │   │           │   ├── ca.key
    │   │           │   ├── ca.serial.txt
    │   │           │   ├── etcd.server.crt
    │   │           │   ├── etcd.server.key
    │   │           │   ├── frontproxy-ca.crt
    │   │           │   ├── frontproxy-ca.key
    │   │           │   ├── frontproxy-ca.serial.txt
    │   │           │   ├── master-config.yaml
    │   │           │   ├── master.etcd-client.crt
    │   │           │   ├── master.etcd-client.key
    │   │           │   ├── master.kubelet-client.crt
    │   │           │   ├── master.kubelet-client.key
    │   │           │   ├── master.proxy-client.crt
    │   │           │   ├── master.proxy-client.key
    │   │           │   ├── master.server.crt
    │   │           │   ├── master.server.key
    │   │           │   ├── openshift-aggregator.crt
    │   │           │   ├── openshift-aggregator.key
    │   │           │   ├── openshift-master.crt
    │   │           │   ├── openshift-master.key
    │   │           │   ├── openshift-master.kubeconfig
    │   │           │   ├── serviceaccounts.private.key
    │   │           │   ├── serviceaccounts.public.key
    │   │           │   ├── service-signer.crt
    │   │           │   └── service-signer.key
    │   │           ├── node-nettest-master-node
    │   │           │   ├── ca.crt
    │   │           │   ├── master-client.crt
    │   │           │   ├── master-client.key
    │   │           │   ├── node-client-ca.crt
    │   │           │   ├── node-config.yaml
    │   │           │   ├── node.kubeconfig
    │   │           │   ├── node-registration.json
    │   │           │   ├── server.crt
    │   │           │   └── server.key
    │   │           ├── node-nettest-node-1
    │   │           │   ├── ca.crt
    │   │           │   ├── master-client.crt
    │   │           │   ├── master-client.key
    │   │           │   ├── node-client-ca.crt
    │   │           │   ├── node-config.yaml
    │   │           │   ├── node.kubeconfig
    │   │           │   ├── node-registration.json
    │   │           │   ├── server.crt
    │   │           │   └── server.key
    │   │           └── node-nettest-node-2
    │   │               ├── ca.crt
    │   │               ├── master-client.crt
    │   │               ├── master-client.key
    │   │               ├── node-client-ca.crt
    │   │               ├── node-config.yaml
    │   │               ├── node.kubeconfig
    │   │               ├── node-registration.json
    │   │               ├── server.crt
    │   │               └── server.key
    │   ├── logs
    │   │   ├── multitenant
    │   │   │   ├── nettest-master
    │   │   │   │   └── systemd.log
    │   │   │   ├── nettest-node-1
    │   │   │   │   └── systemd.log
    │   │   │   ├── nettest-node-2
    │   │   │   │   └── systemd.log
    │   │   │   └── test.log
    │   │   ├── networkpolicy
    │   │   │   ├── nettest-master
    │   │   │   │   └── systemd.log
    │   │   │   ├── nettest-node-1
    │   │   │   │   └── systemd.log
    │   │   │   ├── nettest-node-2
    │   │   │   │   └── systemd.log
    │   │   │   └── test.log
    │   │   ├── sar.log
    │   │   ├── sar_stderr.log
    │   │   ├── scripts.log
    │   │   └── subnet
    │   │       ├── nettest-master
    │   │       │   └── systemd.log
    │   │       ├── nettest-node-1
    │   │       │   └── systemd.log
    │   │       ├── nettest-node-2
    │   │       │   └── systemd.log
    │   │       └── test.log
    │   └── openshift.local.home
    └── shell
        ├── artifacts
        ├── logs
        │   ├── d2a7d523c00ea425a77e56fdbd14cb44df475ff31fd5ed5c4460df5d8f840157.json
        │   ├── e895ee655860324b5bf44faf3b84364b7df293a4f01c2d4a892d92f380434462.json
        │   └── scripts.log
        └── openshift.local.home

53 directories, 201 files
+ exit 0
[workspace] $ /bin/bash /tmp/jenkins8922811758367592954.sh
########## STARTING STAGE: GENERATE ARTIFACTS FROM THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/activate ]]
+ source /var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66
++ export PATH=/var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/.config
+ trap 'exit 0' EXIT
++ pwd
+ ARTIFACT_DIR=/var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/artifacts/generated
+ rm -rf /var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/artifacts/generated
+ mkdir /var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/artifacts/generated
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo docker version && sudo docker info && sudo docker images && sudo docker ps -a 2>&1'
  WARNING: You're not using the default seccomp profile
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo cat /etc/sysconfig/docker /etc/sysconfig/docker-network /etc/sysconfig/docker-storage /etc/sysconfig/docker-storage-setup /etc/systemd/system/docker.service 2>&1'
+ true
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo find /var/lib/docker/containers -name *.log | sudo xargs tail -vn +1 2>&1'
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'oc get --raw /metrics --server=https://$( uname --nodename ):10250 --config=/etc/origin/master/admin.kubeconfig 2>&1'
+ true
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo ausearch -m AVC -m SELINUX_ERR -m USER_AVC 2>&1'
+ true
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'oc get --raw /metrics --config=/etc/origin/master/admin.kubeconfig 2>&1'
+ true
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo df -T -h && sudo pvs && sudo vgs && sudo lvs && sudo findmnt --all 2>&1'
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo yum list installed 2>&1'
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo journalctl --dmesg --no-pager --all --lines=all 2>&1'
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo journalctl _PID=1 --no-pager --all --lines=all 2>&1'
+ tree /var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/artifacts/generated
/var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/artifacts/generated
├── avc_denials.log
├── containers.log
├── dmesg.log
├── docker.config
├── docker.info
├── filesystem.info
├── installed_packages.log
├── master-metrics.log
├── node-metrics.log
└── pid1.journal

0 directories, 10 files
+ exit 0
[workspace] $ /bin/bash /tmp/jenkins7119078148556395392.sh
########## STARTING STAGE: FETCH SYSTEMD JOURNALS FROM THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/activate ]]
+ source /var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66
++ export PATH=/var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/.config
+ trap 'exit 0' EXIT
++ pwd
+ ARTIFACT_DIR=/var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/artifacts/journals
+ rm -rf /var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/artifacts/journals
+ mkdir /var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/artifacts/journals
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit docker.service --no-pager --all --lines=all
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit dnsmasq.service --no-pager --all --lines=all
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit systemd-journald.service --no-pager --all --lines=all
+ tree /var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/artifacts/journals
/var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/artifacts/journals
├── dnsmasq.service
├── docker.service
└── systemd-journald.service

0 directories, 3 files
+ exit 0
[workspace] $ /bin/bash /tmp/jenkins9135637155128123202.sh
########## STARTING STAGE: ASSEMBLE GCS OUTPUT ##########
+ [[ -s /var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/activate ]]
+ source /var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66
++ export PATH=/var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/.config
+ trap 'exit 0' EXIT
+ mkdir -p gcs/artifacts gcs/artifacts/generated gcs/artifacts/journals gcs/artifacts/gathered
++ python -c 'import json; import urllib; print json.load(urllib.urlopen('\''https://ci.openshift.redhat.com/jenkins/job/test_pull_request_origin_extended_networking/24/api/json'\''))['\''result'\'']'
+ result=SUCCESS
+ cat
++ date +%s
+ cat /var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/builds/24/log
+ cp artifacts/generated/avc_denials.log artifacts/generated/containers.log artifacts/generated/dmesg.log artifacts/generated/docker.config artifacts/generated/docker.info artifacts/generated/filesystem.info artifacts/generated/installed_packages.log artifacts/generated/master-metrics.log artifacts/generated/node-metrics.log artifacts/generated/pid1.journal gcs/artifacts/generated/
+ cp artifacts/journals/dnsmasq.service artifacts/journals/docker.service artifacts/journals/systemd-journald.service gcs/artifacts/journals/
+ cp -r artifacts/gathered/scripts gcs/artifacts/
++ pwd
+ scp -F ./.config/origin-ci-tool/inventory/.ssh_config -r /var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/gcs openshiftdevel:/data
+ scp -F ./.config/origin-ci-tool/inventory/.ssh_config /var/lib/jenkins/.config/gcloud/gcs-publisher-credentials.json openshiftdevel:/data/credentials.json
+ exit 0
[workspace] $ /bin/bash /tmp/jenkins2762871408719959114.sh
########## STARTING STAGE: PUSH THE ARTIFACTS AND METADATA ##########
+ [[ -s /var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/activate ]]
+ source /var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66
++ export PATH=/var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/.config
++ mktemp
+ script=/tmp/tmp.WuTxux314d
+ cat
+ chmod +x /tmp/tmp.WuTxux314d
+ scp -F ./.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.WuTxux314d openshiftdevel:/tmp/tmp.WuTxux314d
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 300 /tmp/tmp.WuTxux314d"'
+ cd /home/origin
+ trap 'exit 0' EXIT
+ [[ -n {"type":"presubmit","job":"test_pull_request_origin_extended_networking","buildid":"bbc76ae2-3906-11e8-a837-0a58ac100475","refs":{"org":"openshift","repo":"origin","base_ref":"master","base_sha":"6512a2b31cc35ee6b5429d69894efecf162af0dd","pulls":[{"number":19233,"author":"danwinship","sha":"73ce10f9005bc045f51b6adcdc5ad8622f060eeb"}]}} ]]
+ gcloud auth activate-service-account --key-file /data/credentials.json
/tmp/tmp.WuTxux314d: line 8: gcloud: command not found
+ exit 0
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: PUSH THE ARTIFACTS AND METADATA [00h 00m 01s] ##########
[workspace] $ /bin/bash /tmp/jenkins1524918740853528160.sh
########## STARTING STAGE: DEPROVISION CLOUD RESOURCES ##########
+ [[ -s /var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/activate ]]
+ source /var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66
++ export PATH=/var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/.config
+ oct deprovision

PLAYBOOK: main.yml *************************************************************
4 plays in /var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml

PLAY [ensure we have the parameters necessary to deprovision virtual hosts] ****

TASK [ensure all required variables are set] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:9
skipping: [localhost] => (item=origin_ci_inventory_dir)  => {
    "changed": false, 
    "generated_timestamp": "2018-04-05 16:53:08.302018", 
    "item": "origin_ci_inventory_dir", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_region)  => {
    "changed": false, 
    "generated_timestamp": "2018-04-05 16:53:08.304959", 
    "item": "origin_ci_aws_region", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}

PLAY [deprovision virtual hosts in EC2] ****************************************

TASK [Gathering Facts] *********************************************************
ok: [localhost]

TASK [deprovision a virtual EC2 host] ******************************************
task path: /var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:28
included: /var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml for localhost

TASK [update the SSH configuration to remove AWS EC2 specifics] ****************
task path: /var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:2
ok: [localhost] => {
    "changed": false, 
    "generated_timestamp": "2018-04-05 16:53:09.084438", 
    "msg": ""
}

TASK [rename EC2 instance for termination reaper] ******************************
task path: /var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:8
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2018-04-05 16:53:09.654376", 
    "msg": "Tags {'Name': 'oct-terminate'} created for resource i-04061ee3036408c7a."
}

TASK [tear down the EC2 instance] **********************************************
task path: /var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:15
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2018-04-05 16:53:10.515573", 
    "instance_ids": [
        "i-04061ee3036408c7a"
    ], 
    "instances": [
        {
            "ami_launch_index": "0", 
            "architecture": "x86_64", 
            "block_device_mapping": {
                "/dev/sda1": {
                    "delete_on_termination": true, 
                    "status": "attached", 
                    "volume_id": "vol-02b413e0f8db8a839"
                }, 
                "/dev/sdb": {
                    "delete_on_termination": true, 
                    "status": "attached", 
                    "volume_id": "vol-062890c1a389ff452"
                }
            }, 
            "dns_name": "ec2-54-242-107-75.compute-1.amazonaws.com", 
            "ebs_optimized": false, 
            "groups": {
                "sg-7e73221a": "default"
            }, 
            "hypervisor": "xen", 
            "id": "i-04061ee3036408c7a", 
            "image_id": "ami-069c0ca6cc091e8fa", 
            "instance_type": "m4.xlarge", 
            "kernel": null, 
            "key_name": "libra", 
            "launch_time": "2018-04-05T19:23:15.000Z", 
            "placement": "us-east-1d", 
            "private_dns_name": "ip-172-18-1-48.ec2.internal", 
            "private_ip": "172.18.1.48", 
            "public_dns_name": "ec2-54-242-107-75.compute-1.amazonaws.com", 
            "public_ip": "54.242.107.75", 
            "ramdisk": null, 
            "region": "us-east-1", 
            "root_device_name": "/dev/sda1", 
            "root_device_type": "ebs", 
            "state": "running", 
            "state_code": 16, 
            "tags": {
                "Name": "oct-terminate", 
                "openshift_etcd": "", 
                "openshift_master": "", 
                "openshift_node": ""
            }, 
            "tenancy": "default", 
            "virtualization_type": "hvm"
        }
    ], 
    "tagged_instances": []
}

TASK [remove the serialized host variables] ************************************
task path: /var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:22
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2018-04-05 16:53:10.753857", 
    "path": "/var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/.config/origin-ci-tool/inventory/host_vars/172.18.1.48.yml", 
    "state": "absent"
}

PLAY [deprovision virtual hosts locally managed by Vagrant] ********************

TASK [Gathering Facts] *********************************************************
ok: [localhost]

PLAY [clean up local configuration for deprovisioned instances] ****************

TASK [remove inventory configuration directory] ********************************
task path: /var/lib/jenkins/origin-ci-tool/437e1037dfc38a9b27d44a96a21bde8b638ccf66/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:61
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2018-04-05 16:53:11.189566", 
    "path": "/var/lib/jenkins/jobs/test_pull_request_origin_extended_networking/workspace/.config/origin-ci-tool/inventory", 
    "state": "absent"
}

PLAY RECAP *********************************************************************
localhost                  : ok=8    changed=4    unreachable=0    failed=0   

+ set +o xtrace
########## FINISHED STAGE: SUCCESS: DEPROVISION CLOUD RESOURCES [00h 00m 04s] ##########
Archiving artifacts
[WS-CLEANUP] Deleting project workspace...[WS-CLEANUP] done
Finished: SUCCESS