Kubernetes pods cannot access the external network


I set up a Kubernetes cluster with two servers in an Alibaba Cloud classic-network environment.
Everything else looks fine so far; the one problem is that pods inside Kubernetes cannot access the external network.
The Kubernetes version is 1.5.1.
The pod network uses flannel in vxlan mode.

[root@ngxingress01 yw-fund-backend]# kubectl version
Client Version: version.Info{Major:"1", Minor:"5", GitVersion:"v1.5.1", GitCommit:"82450d03cb057bab0950214ef122b67c83fb11df", GitTreeState:"clean", BuildDate:"2016-12-14T00:57:05Z", GoVersion:"go1.7.4", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"5", GitVersion:"v1.5.1", GitCommit:"82450d03cb057bab0950214ef122b67c83fb11df", GitTreeState:"clean", BuildDate:"2016-12-14T00:52:01Z", GoVersion:"go1.7.4", Compiler:"gc", Platform:"linux/amd64"}

Symptoms:
[root@ngxingress01 yw-fund-backend]# kubectl attach curl-2421989462-0xwqk -c curl -i -t
If you don't see a command prompt, try pressing enter.
[ root@curl-2421989462-0xwqk:/ ]$ ping 114.114.114.114
PING 114.114.114.114 (114.114.114.114): 56 data bytes
^C
--- 114.114.114.114 ping statistics ---
5 packets transmitted, 0 packets received, 100% packet loss
[ root@curl-2421989462-0xwqk:/ ]$ ping 8.8.8.8
PING 8.8.8.8 (8.8.8.8): 56 data bytes
^C
--- 8.8.8.8 ping statistics ---
5 packets transmitted, 0 packets received, 100% packet loss

iptables on the host:
[root@ngxingress01 yw-fund-backend]# iptables-save

# Generated by iptables-save v1.4.21 on Thu Apr 20 14:41:23 2017
*nat
:PREROUTING ACCEPT [0:0]
:INPUT ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
:POSTROUTING ACCEPT [0:0]
:DOCKER - [0:0]
:KUBE-MARK-DROP - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-SEP-3ABSBF2DOCMSOHT2 - [0:0]
:KUBE-SEP-5BYDP4LF2O2Q4ICD - [0:0]
:KUBE-SEP-6LCCMNIMB2MLAAZM - [0:0]
:KUBE-SEP-BST2NJ6KINXNHGWE - [0:0]
:KUBE-SEP-PHTJ7Y2L7MHNLFNC - [0:0]
:KUBE-SEP-PNFOKI7XE2XXBAST - [0:0]
:KUBE-SEP-YWXDLA4NC3XNJLSL - [0:0]
:KUBE-SERVICES - [0:0]
:KUBE-SVC-ERIFXISQEP7F7OF4 - [0:0]
:KUBE-SVC-LG4B6Z4ULCMHWGTI - [0:0]
:KUBE-SVC-NPX46M4PTMTKRN6Y - [0:0]
:KUBE-SVC-PK3XLNS3MIE4AIQZ - [0:0]
:KUBE-SVC-TCOU7JCQXEZGVUNU - [0:0]
:KUBE-SVC-XGLOHA7QRQ3V22RZ - [0:0]
-A PREROUTING -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
-A OUTPUT -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER
-A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING
-A POSTROUTING -s 192.168.0.0/20 ! -o docker0 -j MASQUERADE
-A POSTROUTING -s 172.16.0.0/16 -d 172.16.0.0/16 -j RETURN
-A POSTROUTING -s 172.16.0.0/16 ! -d 224.0.0.0/4 -j MASQUERADE
-A POSTROUTING ! -s 172.16.0.0/16 -d 172.16.0.0/16 -j MASQUERADE
-A DOCKER -i docker0 -j RETURN
-A KUBE-MARK-DROP -j MARK --set-xmark 0x8000/0x8000
-A KUBE-MARK-MASQ -j MARK --set-xmark 0x4000/0x4000
-A KUBE-NODEPORTS -p tcp -m comment --comment "default/svc-yw-fund-backend:" -m tcp --dport 30080 -j KUBE-MARK-MASQ
-A KUBE-NODEPORTS -p tcp -m comment --comment "default/svc-yw-fund-backend:" -m tcp --dport 30080 -j KUBE-SVC-PK3XLNS3MIE4AIQZ
-A KUBE-NODEPORTS -p tcp -m comment --comment "kube-system/kubernetes-dashboard:" -m tcp --dport 30177 -j KUBE-MARK-MASQ
-A KUBE-NODEPORTS -p tcp -m comment --comment "kube-system/kubernetes-dashboard:" -m tcp --dport 30177 -j KUBE-SVC-XGLOHA7QRQ3V22RZ
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -m mark --mark 0x4000/0x4000 -j MASQUERADE
-A KUBE-SEP-3ABSBF2DOCMSOHT2 -s 10.24.0.4/32 -m comment --comment "default/svc-yw-fund-backend:" -j KUBE-MARK-MASQ
-A KUBE-SEP-3ABSBF2DOCMSOHT2 -p tcp -m comment --comment "default/svc-yw-fund-backend:" -m tcp -j DNAT --to-destination 10.24.0.4:8080
-A KUBE-SEP-5BYDP4LF2O2Q4ICD -s 10.29.185.169/32 -m comment --comment "default/kubernetes:https" -j KUBE-MARK-MASQ
-A KUBE-SEP-5BYDP4LF2O2Q4ICD -p tcp -m comment --comment "default/kubernetes:https" -m recent --set --name KUBE-SEP-5BYDP4LF2O2Q4ICD --mask 255.255.255.255 --rsource -m tcp -j DNAT --to-destination 10.29.185.169:6443
-A KUBE-SEP-6LCCMNIMB2MLAAZM -s 120.55.128.6/32 -m comment --comment "default/external-mysql-yw:mysql" -j KUBE-MARK-MASQ
-A KUBE-SEP-6LCCMNIMB2MLAAZM -p tcp -m comment --comment "default/external-mysql-yw:mysql" -m tcp -j DNAT --to-destination 120.55.128.6:3306
-A KUBE-SEP-BST2NJ6KINXNHGWE -s 10.24.1.3/32 -m comment --comment "default/svc-yw-fund-backend:" -j KUBE-MARK-MASQ
-A KUBE-SEP-BST2NJ6KINXNHGWE -p tcp -m comment --comment "default/svc-yw-fund-backend:" -m tcp -j DNAT --to-destination 10.24.1.3:8080
-A KUBE-SEP-PHTJ7Y2L7MHNLFNC -s 10.24.0.3/32 -m comment --comment "kube-system/kube-dns:dns-tcp" -j KUBE-MARK-MASQ
-A KUBE-SEP-PHTJ7Y2L7MHNLFNC -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp" -m tcp -j DNAT --to-destination 10.24.0.3:53
-A KUBE-SEP-PNFOKI7XE2XXBAST -s 10.24.0.2/32 -m comment --comment "kube-system/kubernetes-dashboard:" -j KUBE-MARK-MASQ
-A KUBE-SEP-PNFOKI7XE2XXBAST -p tcp -m comment --comment "kube-system/kubernetes-dashboard:" -m tcp -j DNAT --to-destination 10.24.0.2:9090
-A KUBE-SEP-YWXDLA4NC3XNJLSL -s 10.24.0.3/32 -m comment --comment "kube-system/kube-dns:dns" -j KUBE-MARK-MASQ
-A KUBE-SEP-YWXDLA4NC3XNJLSL -p udp -m comment --comment "kube-system/kube-dns:dns" -m udp -j DNAT --to-destination 10.24.0.3:53
-A KUBE-SERVICES -d 10.107.182.61/32 -p tcp -m comment --comment "default/svc-yw-fund-backend: cluster IP" -m tcp --dport 8080 -j KUBE-SVC-PK3XLNS3MIE4AIQZ
-A KUBE-SERVICES -d 10.100.186.224/32 -p tcp -m comment --comment "default/external-mysql-yw:mysql cluster IP" -m tcp --dport 3306 -j KUBE-SVC-LG4B6Z4ULCMHWGTI
-A KUBE-SERVICES -d 10.96.0.10/32 -p udp -m comment --comment "kube-system/kube-dns:dns cluster IP" -m udp --dport 53 -j KUBE-SVC-TCOU7JCQXEZGVUNU
-A KUBE-SERVICES -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-SVC-ERIFXISQEP7F7OF4
-A KUBE-SERVICES -d 10.102.39.184/32 -p tcp -m comment --comment "kube-system/kubernetes-dashboard: cluster IP" -m tcp --dport 80 -j KUBE-SVC-XGLOHA7QRQ3V22RZ
-A KUBE-SERVICES -d 10.96.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-SVC-NPX46M4PTMTKRN6Y
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
-A KUBE-SVC-ERIFXISQEP7F7OF4 -m comment --comment "kube-system/kube-dns:dns-tcp" -j KUBE-SEP-PHTJ7Y2L7MHNLFNC
-A KUBE-SVC-LG4B6Z4ULCMHWGTI -m comment --comment "default/external-mysql-yw:mysql" -j KUBE-SEP-6LCCMNIMB2MLAAZM
-A KUBE-SVC-NPX46M4PTMTKRN6Y -m comment --comment "default/kubernetes:https" -m recent --rcheck --seconds 10800 --reap --name KUBE-SEP-5BYDP4LF2O2Q4ICD --mask 255.255.255.255 --rsource -j KUBE-SEP-5BYDP4LF2O2Q4ICD
-A KUBE-SVC-NPX46M4PTMTKRN6Y -m comment --comment "default/kubernetes:https" -j KUBE-SEP-5BYDP4LF2O2Q4ICD
-A KUBE-SVC-PK3XLNS3MIE4AIQZ -m comment --comment "default/svc-yw-fund-backend:" -m statistic --mode random --probability 0.50000000000 -j KUBE-SEP-3ABSBF2DOCMSOHT2
-A KUBE-SVC-PK3XLNS3MIE4AIQZ -m comment --comment "default/svc-yw-fund-backend:" -j KUBE-SEP-BST2NJ6KINXNHGWE
-A KUBE-SVC-TCOU7JCQXEZGVUNU -m comment --comment "kube-system/kube-dns:dns" -j KUBE-SEP-YWXDLA4NC3XNJLSL
-A KUBE-SVC-XGLOHA7QRQ3V22RZ -m comment --comment "kube-system/kubernetes-dashboard:" -j KUBE-SEP-PNFOKI7XE2XXBAST
COMMIT

# Completed on Thu Apr 20 14:41:23 2017

# Generated by iptables-save v1.4.21 on Thu Apr 20 14:41:23 2017
*filter
:INPUT ACCEPT [113:79499]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [108:84705]
:DOCKER - [0:0]
:DOCKER-ISOLATION - [0:0]
:KUBE-FIREWALL - [0:0]
:KUBE-SERVICES - [0:0]
-A INPUT -j KUBE-FIREWALL
-A FORWARD -j DOCKER-ISOLATION
-A FORWARD -o docker0 -j DOCKER
-A FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -i docker0 ! -o docker0 -j ACCEPT
-A FORWARD -i docker0 -o docker0 -j ACCEPT
-A OUTPUT -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A OUTPUT -j KUBE-FIREWALL
-A DOCKER-ISOLATION -j RETURN
-A KUBE-FIREWALL -m comment --comment "kubernetes firewall for dropping marked packets" -m mark --mark 0x8000/0x8000 -j DROP
COMMIT

# Completed on Thu Apr 20 14:41:23 2017

tonybai_cn - Focused on Go, Docker, and Kubernetes

Traffic from a pod to the external network should go out through the docker0 device on the node, not through flannel; docker0 is a bridge device. Try creating an arbitrary container on the node (via the docker command) and see whether it can reach the external network from inside. Also check the routing setup inside the container, e.g. docker exec xxx ip route, and then check the node's routes.
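
A minimal sketch of those checks (busybox is just a convenient test image; xxx stands for any running container name):

# Start a throwaway container on the node and test outbound connectivity
docker run --rm -it busybox ping -c 2 8.8.8.8
# Inspect the routing table inside an existing container
docker exec xxx ip route
# Compare with the node's own routing table
ip route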

ri0day

Thanks for the reply.
After testing, I found that a container started directly with docker has no problem at all accessing the external network.

Here is what I saw testing with busybox under docker:

[root@ngxingress01 manifests]# docker run -i -t busybox
/ # ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
10: eth0@if11: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
link/ether 02:42:c0:a8:00:02 brd ff:ff:ff:ff:ff:ff
inet 192.168.0.2/20 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::42:c0ff:fea8:2/64 scope link
valid_lft forever preferred_lft forever

/ # ping 8.8.8.8
PING 8.8.8.8 (8.8.8.8): 56 data bytes
64 bytes from 8.8.8.8: seq=0 ttl=37 time=268.214 ms
64 bytes from 8.8.8.8: seq=1 ttl=37 time=268.141 ms
^C
--- 8.8.8.8 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 268.141/268.177/268.214 ms

/ # ip route
default via 192.168.0.1 dev eth0
192.168.0.0/20 dev eth0 src 192.168.0.2
#
Here is the info from my host:
[root@ngxingress01 manifests]# ip route
default via 101.37.27.247 dev eth1
10.0.0.0/8 via 10.29.187.247 dev eth0
10.24.0.0/24 dev cni0 proto kernel scope link src 10.24.0.1
10.24.0.0/16 dev flannel.1
10.29.184.0/22 dev eth0 proto kernel scope link src 10.29.185.169
100.64.0.0/10 via 10.29.187.247 dev eth0
101.37.24.0/22 dev eth1 proto kernel scope link src 101.37.26.176
169.254.0.0/16 dev eth0 scope link metric 1002
169.254.0.0/16 dev eth1 scope link metric 1003
172.16.0.0/12 via 10.29.187.247 dev eth0
192.168.0.0/20 dev docker0 proto kernel scope link src 192.168.0.1

[root@ngxingress01 manifests]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:16:3e:12:15:42 brd ff:ff:ff:ff:ff:ff
inet 10.29.185.169/22 brd 10.29.187.255 scope global eth0
valid_lft forever preferred_lft forever
3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:16:3e:0d:dc:f0 brd ff:ff:ff:ff:ff:ff
inet 101.37.26.176/22 brd 101.37.27.255 scope global eth1
valid_lft forever preferred_lft forever
4: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN
link/ether 02:42:ff:a6:ab:2a brd ff:ff:ff:ff:ff:ff
inet 192.168.0.1/20 scope global docker0
valid_lft forever preferred_lft forever
5: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN
link/ether 9a:ad:64:87:97:0f brd ff:ff:ff:ff:ff:ff
inet 10.24.0.0/32 scope global flannel.1
valid_lft forever preferred_lft forever
6: cni0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP qlen 1000
link/ether 0a:58:0a:18:00:01 brd ff:ff:ff:ff:ff:ff
inet 10.24.0.1/24 scope global cni0
valid_lft forever preferred_lft forever
7: vethd6831f70@if3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master cni0 state UP
link/ether 0e:c1:3b:8e:2f:16 brd ff:ff:ff:ff:ff:ff link-netnsid 0
8: veth798f1932@if3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master cni0 state UP
link/ether 16:88:d2:f9:78:e1 brd ff:ff:ff:ff:ff:ff link-netnsid 1
9: veth6bf195ae@if3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master cni0 state UP
link/ether 12:76:3e:16:93:a2 brd ff:ff:ff:ff:ff:ff link-netnsid 2

tonybai_cn - Focused on Go, Docker, and Kubernetes

What does the ip route output look like inside your Kubernetes pods? Compare it with the output from the container started directly with docker.

ri0day

Routes in a container started directly with docker:
/ # ip route
default via 192.168.0.1 dev eth0
192.168.0.0/20 dev eth0 src 192.168.0.2

Routes inside a Kubernetes pod:
[ root@curl-2421989462-0xwqk:/ ]$ ip route show
default via 10.24.1.1 dev eth0
10.24.1.0/24 dev eth0 src 10.24.1.8
172.16.0.0/16 via 10.24.1.1 dev eth0

10.24.x.x is the flannel subnet:
[root@ngxingress01 ~]# cat /var/run/flannel/subnet.env
FLANNEL_NETWORK=172.16.0.0/16
FLANNEL_SUBNET=10.24.0.1/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=true

tonybai_cn - Focused on Go, Docker, and Kubernetes

The problem seems to be in the pod's routes. The default route goes via 10.24.1.1, so doesn't outbound traffic to the external network end up on the flannel device? That way it definitely can't get out.

xiphis

[root@ngxingress01 ~]# cat /var/run/flannel/subnet.env
FLANNEL_NETWORK=172.16.0.0/16
FLANNEL_SUBNET=10.24.0.1/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=true

I don't understand this part.
FLANNEL_NETWORK doesn't contain FLANNEL_SUBNET?

ri0day

Right. But the cluster has been like this since it came up; I haven't done any special configuration. The flannel startup arguments are as follows:
[ "/opt/bin/flanneld","-v=3", "--ip-masq", "--kube-subnet-mgr" ]
Is there any way within flannel to let pods reach the external network?
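
A hedged aside on the mechanism: when flanneld runs with --ip-masq, it derives its NAT rules from the "Network" value in net-conf.json, which is where the 172.16.0.0/16 rules in the iptables-save dump above come from. One way to view just those rules:

# Show the POSTROUTING rules flanneld installed for its configured network
iptables -t nat -S POSTROUTING | grep 172.16
# expected to match rules like:
# -A POSTROUTING -s 172.16.0.0/16 -d 172.16.0.0/16 -j RETURN
# -A POSTROUTING -s 172.16.0.0/16 ! -d 224.0.0.0/4 -j MASQUERADE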

xiphis

I set up the flanneld network by hand, and my pods reach the external network without issues.
kube-core1 ~ # kubectl exec -ti my-nginx-3418754612-lqch4 /bin/bash
root@my-nginx-3418754612-lqch4:/# ping -c2 www.163.com
PING 163.xdwscache.ourglb0.com (183.131.124.101): 48 data bytes
56 bytes from 183.131.124.101: icmp_seq=0 ttl=51 time=10.165 ms
56 bytes from 183.131.124.101: icmp_seq=1 ttl=51 time=10.738 ms

ri0day

@xiphis

Could you run ip addr and ip route show inside your pod and share the output?

Then use kubectl get po -o wide to find which host the pod runs on, and look at that host's iptables-save output.

On my side, I found the host my pod was running on and temporarily added one iptables rule there, which fixed the problem. 10.24.1.0/24 is the flannel subnet of that host:

/sbin/iptables -t nat -I POSTROUTING -s 10.24.1.0/24 -j MASQUERADE
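
A quick way to verify the workaround (this reuses the curl pod from earlier; kubectl exec works here as an alternative to attach):

# The inserted rule should now sit at the top of the nat POSTROUTING chain
iptables -t nat -L POSTROUTING -n --line-numbers | head
# Outbound traffic from a pod on this host should be SNATed and succeed
kubectl exec curl-2421989462-0xwqk -c curl -- ping -c 2 8.8.8.8

Note that a rule inserted this way is not persistent; it disappears after a reboot or an iptables flush.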

xiphis

flannel's vxlan encapsulation happens in the kernel, so it shouldn't show up in iptables.
Here is more information from my setup, for reference:
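
A hedged aside: even though the vxlan encapsulation doesn't appear in iptables, the kernel-side vxlan state can be inspected directly:

# Show the vxlan parameters of the flannel device (VNI, UDP port, local address)
ip -d link show flannel.1
# Show the forwarding entries flannel programs for peer nodes
bridge fdb show dev flannel.1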

----- Container info -----
kube-core1 prometheus # kubectl exec -ti my-nginx-3418754612-lqch4 /bin/bash
root@my-nginx-3418754612-lqch4:/# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
3: eth0@if7: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1450 qdisc noqueue state UP
link/ether 0a:58:0a:02:17:03 brd ff:ff:ff:ff:ff:ff
inet 10.2.23.3/24 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::50b6:b4ff:fee6:223d/64 scope link
valid_lft forever preferred_lft forever
root@my-nginx-3418754612-lqch4:/# ip route show
default via 10.2.23.1 dev eth0
10.2.0.0/16 via 10.2.23.1 dev eth0
10.2.23.0/24 dev eth0 proto kernel scope link src 10.2.23.3

--------- Node info ---------
kube-core3 ~ # cat /etc/flannel/options.env
FLANNELD_IFACE=10.212.132.27
FLANNELD_ETCD_ENDPOINTS=https://10.212.132.25:2379,https://10.212.132.26:2379,https://10.212.132.27:2379
FLANNELD_ETCD_KEYFILE=/etc/kubernetes/ssl/etcd-client-key.pem
FLANNELD_ETCD_CERTFILE=/etc/kubernetes/ssl/etcd-client.pem
FLANNELD_ETCD_CAFILE=/etc/kubernetes/ssl/ca.pem
----------------
kube-core3 ~ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 52:54:00:3f:f3:bd brd ff:ff:ff:ff:ff:ff
inet 10.212.132.27/24 brd 10.212.132.255 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::5054:ff:fe3f:f3bd/64 scope link
valid_lft forever preferred_lft forever
3: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default
link/ether ca:fd:d5:48:0f:8f brd ff:ff:ff:ff:ff:ff
inet 10.2.23.0/32 scope global flannel.1
valid_lft forever preferred_lft forever
inet6 fe80::c8fd:d5ff:fe48:f8f/64 scope link
valid_lft forever preferred_lft forever
4: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:56:91:b6:26 brd ff:ff:ff:ff:ff:ff
inet 172.17.0.1/16 scope global docker0
valid_lft forever preferred_lft forever
5: cni0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP group default qlen 1000
link/ether 0a:58:0a:02:17:01 brd ff:ff:ff:ff:ff:ff
inet 10.2.23.1/24 scope global cni0
valid_lft forever preferred_lft forever
inet6 fe80::b8a7:82ff:fecc:c762/64 scope link
valid_lft forever preferred_lft forever
6: veth8f59220f@flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master cni0 state UP group default
link/ether 1e:08:43:f0:23:51 brd ff:ff:ff:ff:ff:ff
inet6 fe80::1c08:43ff:fef0:2351/64 scope link
valid_lft forever preferred_lft forever
7: veth48a377f6@flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master cni0 state UP group default
link/ether ba:57:b6:38:7c:69 brd ff:ff:ff:ff:ff:ff
inet6 fe80::b857:b6ff:fe38:7c69/64 scope link
valid_lft forever preferred_lft forever
8: vethe445da60@flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master cni0 state UP group default
link/ether b6:2c:89:0d:88:a3 brd ff:ff:ff:ff:ff:ff
inet6 fe80::b42c:89ff:fe0d:88a3/64 scope link
valid_lft forever preferred_lft forever
---------iptables-save -------
# Generated by iptables-save v1.4.21 on Fri Apr 21 03:32:05 2017
    *filter
    :INPUT ACCEPT [1059:185525]
    :FORWARD ACCEPT [148:134927]
    :OUTPUT ACCEPT [996:105597]
    :DOCKER - [0:0]
    :DOCKER-ISOLATION - [0:0]
    :KUBE-FIREWALL - [0:0]
    :KUBE-SERVICES - [0:0]
    -A INPUT -j KUBE-FIREWALL
    -A FORWARD -j DOCKER-ISOLATION
    -A FORWARD -o docker0 -j DOCKER
    -A FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
    -A FORWARD -i docker0 ! -o docker0 -j ACCEPT
    -A FORWARD -i docker0 -o docker0 -j ACCEPT
    -A OUTPUT -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
    -A OUTPUT -j KUBE-FIREWALL
    -A DOCKER-ISOLATION -j RETURN
    -A KUBE-FIREWALL -m comment --comment "kubernetes firewall for dropping marked packets" -m mark --mark 0x8000/0x8000 -j DROP
    COMMIT
    # Completed on Fri Apr 21 03:32:05 2017
    # Generated by iptables-save v1.4.21 on Fri Apr 21 03:32:05 2017
    *nat
    :PREROUTING ACCEPT [1:229]
    :INPUT ACCEPT [1:229]
    :OUTPUT ACCEPT [11:660]
    :POSTROUTING ACCEPT [11:660]
    :DOCKER - [0:0]
    :KUBE-MARK-DROP - [0:0]
    :KUBE-MARK-MASQ - [0:0]
    :KUBE-NODEPORTS - [0:0]
    :KUBE-POSTROUTING - [0:0]
    :KUBE-SEP-3VMDXCYAHVD6UPDM - [0:0]
    :KUBE-SEP-COA4ALRT3S7H5BAH - [0:0]
    :KUBE-SEP-DZCXX3RATNCWL5VG - [0:0]
    :KUBE-SEP-FQHKQ74TGCXBM7CH - [0:0]
    :KUBE-SEP-M7E7VLF2PDB3QYAL - [0:0]
    :KUBE-SEP-SKJA3X3UWA7HYNYD - [0:0]
    :KUBE-SEP-SM7ZCMC3U75SE54Q - [0:0]
    :KUBE-SEP-SXLBLDJOG466Y5NQ - [0:0]
    :KUBE-SERVICES - [0:0]
    :KUBE-SVC-BJM46V3U5RZHCFRZ - [0:0]
    :KUBE-SVC-ERIFXISQEP7F7OF4 - [0:0]
    :KUBE-SVC-NPX46M4PTMTKRN6Y - [0:0]
    :KUBE-SVC-TCOU7JCQXEZGVUNU - [0:0]
    :KUBE-SVC-XGLOHA7QRQ3V22RZ - [0:0]
    :KUBE-SVC-XP4WJ6VSLGWALMW5 - [0:0]
    -A PREROUTING -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
    -A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
    -A OUTPUT -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
    -A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER
    -A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING
    -A POSTROUTING -s 172.17.0.0/16 ! -o docker0 -j MASQUERADE
    -A POSTROUTING -s 10.2.0.0/16 -d 10.2.0.0/16 -j RETURN
    -A POSTROUTING -s 10.2.0.0/16 ! -d 224.0.0.0/4 -j MASQUERADE
    -A POSTROUTING ! -s 10.2.0.0/16 -d 10.2.0.0/16 -j MASQUERADE
    -A DOCKER -i docker0 -j RETURN
    -A KUBE-MARK-DROP -j MARK --set-xmark 0x8000/0x8000
    -A KUBE-MARK-MASQ -j MARK --set-xmark 0x4000/0x4000
    -A KUBE-NODEPORTS -p tcp -m comment --comment "kube-system/kubernetes-dashboard:" -m tcp --dport 30785 -j KUBE-MARK-MASQ
    -A KUBE-NODEPORTS -p tcp -m comment --comment "kube-system/kubernetes-dashboard:" -m tcp --dport 30785 -j KUBE-SVC-XGLOHA7QRQ3V22RZ
    -A KUBE-NODEPORTS -p tcp -m comment --comment "kube-system/default-http-backend:http" -m tcp --dport 31586 -j KUBE-MARK-MASQ
    -A KUBE-NODEPORTS -p tcp -m comment --comment "kube-system/default-http-backend:http" -m tcp --dport 31586 -j KUBE-SVC-XP4WJ6VSLGWALMW5
    -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -m mark --mark 0x4000/0x4000 -j MASQUERADE
    -A KUBE-SEP-3VMDXCYAHVD6UPDM -s 10.2.77.4/32 -m comment --comment "kube-system/kube-dns:dns-tcp" -j KUBE-MARK-MASQ
    -A KUBE-SEP-3VMDXCYAHVD6UPDM -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp" -m tcp -j DNAT --to-destination 10.2.77.4:53
    -A KUBE-SEP-COA4ALRT3S7H5BAH -s 10.2.23.4/32 -m comment --comment "kube-system/heapster:" -j KUBE-MARK-MASQ
    -A KUBE-SEP-COA4ALRT3S7H5BAH -p tcp -m comment --comment "kube-system/heapster:" -m tcp -j DNAT --to-destination 10.2.23.4:8082
    -A KUBE-SEP-DZCXX3RATNCWL5VG -s 10.2.77.4/32 -m comment --comment "kube-system/kube-dns:dns" -j KUBE-MARK-MASQ
    -A KUBE-SEP-DZCXX3RATNCWL5VG -p udp -m comment --comment "kube-system/kube-dns:dns" -m udp -j DNAT --to-destination 10.2.77.4:53
    -A KUBE-SEP-FQHKQ74TGCXBM7CH -s 10.2.23.2/32 -m comment --comment "kube-system/kube-dns:dns-tcp" -j KUBE-MARK-MASQ
    -A KUBE-SEP-FQHKQ74TGCXBM7CH -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp" -m tcp -j DNAT --to-destination 10.2.23.2:53
    -A KUBE-SEP-M7E7VLF2PDB3QYAL -s 10.2.77.5/32 -m comment --comment "kube-system/kubernetes-dashboard:" -j KUBE-MARK-MASQ
    -A KUBE-SEP-M7E7VLF2PDB3QYAL -p tcp -m comment --comment "kube-system/kubernetes-dashboard:" -m tcp -j DNAT --to-destination 10.2.77.5:9090
    -A KUBE-SEP-SKJA3X3UWA7HYNYD -s 10.2.77.6/32 -m comment --comment "kube-system/default-http-backend:http" -j KUBE-MARK-MASQ
    -A KUBE-SEP-SKJA3X3UWA7HYNYD -p tcp -m comment --comment "kube-system/default-http-backend:http" -m tcp -j DNAT --to-destination 10.2.77.6:8080
    -A KUBE-SEP-SM7ZCMC3U75SE54Q -s 10.2.23.2/32 -m comment --comment "kube-system/kube-dns:dns" -j KUBE-MARK-MASQ
    -A KUBE-SEP-SM7ZCMC3U75SE54Q -p udp -m comment --comment "kube-system/kube-dns:dns" -m udp -j DNAT --to-destination 10.2.23.2:53
    -A KUBE-SEP-SXLBLDJOG466Y5NQ -s 10.212.132.25/32 -m comment --comment "default/kubernetes:https" -j KUBE-MARK-MASQ
    -A KUBE-SEP-SXLBLDJOG466Y5NQ -p tcp -m comment --comment "default/kubernetes:https" -m recent --set --name KUBE-SEP-SXLBLDJOG466Y5NQ --mask 255.255.255.255 --rsource -m tcp -j DNAT --to-destination 10.212.132.25:443
    -A KUBE-SERVICES ! -s 10.2.0.0/16 -d 10.3.0.10/32 -p udp -m comment --comment "kube-system/kube-dns:dns cluster IP" -m udp --dport 53 -j KUBE-MARK-MASQ
    -A KUBE-SERVICES -d 10.3.0.10/32 -p udp -m comment --comment "kube-system/kube-dns:dns cluster IP" -m udp --dport 53 -j KUBE-SVC-TCOU7JCQXEZGVUNU
    -A KUBE-SERVICES ! -s 10.2.0.0/16 -d 10.3.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-MARK-MASQ
    -A KUBE-SERVICES -d 10.3.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-SVC-ERIFXISQEP7F7OF4
    -A KUBE-SERVICES ! -s 10.2.0.0/16 -d 10.3.0.47/32 -p tcp -m comment --comment "kube-system/heapster: cluster IP" -m tcp --dport 80 -j KUBE-MARK-MASQ
    -A KUBE-SERVICES -d 10.3.0.47/32 -p tcp -m comment --comment "kube-system/heapster: cluster IP" -m tcp --dport 80 -j KUBE-SVC-BJM46V3U5RZHCFRZ
    -A KUBE-SERVICES ! -s 10.2.0.0/16 -d 10.3.0.202/32 -p tcp -m comment --comment "kube-system/kubernetes-dashboard: cluster IP" -m tcp --dport 80 -j KUBE-MARK-MASQ
    -A KUBE-SERVICES -d 10.3.0.202/32 -p tcp -m comment --comment "kube-system/kubernetes-dashboard: cluster IP" -m tcp --dport 80 -j KUBE-SVC-XGLOHA7QRQ3V22RZ
    -A KUBE-SERVICES ! -s 10.2.0.0/16 -d 10.3.0.168/32 -p tcp -m comment --comment "kube-system/default-http-backend:http cluster IP" -m tcp --dport 80 -j KUBE-MARK-MASQ
    -A KUBE-SERVICES -d 10.3.0.168/32 -p tcp -m comment --comment "kube-system/default-http-backend:http cluster IP" -m tcp --dport 80 -j KUBE-SVC-XP4WJ6VSLGWALMW5
    -A KUBE-SERVICES ! -s 10.2.0.0/16 -d 10.3.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-MARK-MASQ
    -A KUBE-SERVICES -d 10.3.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-SVC-NPX46M4PTMTKRN6Y
    -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
    -A KUBE-SVC-BJM46V3U5RZHCFRZ -m comment --comment "kube-system/heapster:" -j KUBE-SEP-COA4ALRT3S7H5BAH
    -A KUBE-SVC-ERIFXISQEP7F7OF4 -m comment --comment "kube-system/kube-dns:dns-tcp" -m statistic --mode random --probability 0.50000000000 -j KUBE-SEP-FQHKQ74TGCXBM7CH
    -A KUBE-SVC-ERIFXISQEP7F7OF4 -m comment --comment "kube-system/kube-dns:dns-tcp" -j KUBE-SEP-3VMDXCYAHVD6UPDM
    -A KUBE-SVC-NPX46M4PTMTKRN6Y -m comment --comment "default/kubernetes:https" -m recent --rcheck --seconds 10800 --reap --name KUBE-SEP-SXLBLDJOG466Y5NQ --mask 255.255.255.255 --rsource -j KUBE-SEP-SXLBLDJOG466Y5NQ
    -A KUBE-SVC-NPX46M4PTMTKRN6Y -m comment --comment "default/kubernetes:https" -j KUBE-SEP-SXLBLDJOG466Y5NQ
    -A KUBE-SVC-TCOU7JCQXEZGVUNU -m comment --comment "kube-system/kube-dns:dns" -m statistic --mode random --probability 0.50000000000 -j KUBE-SEP-SM7ZCMC3U75SE54Q
    -A KUBE-SVC-TCOU7JCQXEZGVUNU -m comment --comment "kube-system/kube-dns:dns" -j KUBE-SEP-DZCXX3RATNCWL5VG
    -A KUBE-SVC-XGLOHA7QRQ3V22RZ -m comment --comment "kube-system/kubernetes-dashboard:" -j KUBE-SEP-M7E7VLF2PDB3QYAL
    -A KUBE-SVC-XP4WJ6VSLGWALMW5 -m comment --comment "kube-system/default-http-backend:http" -j KUBE-SEP-SKJA3X3UWA7HYNYD
    COMMIT
    # Completed on Fri Apr 21 03:32:05 2017

ri0day

@xiphis
Looking at the iptables on your host, this rule seems to be what lets your pods get out.
Your pod subnet is 10.2.23.0/24:
-A POSTROUTING -s 10.2.0.0/16 ! -d 224.0.0.0/4 -j MASQUERADE


Whereas on mine the default rule is the one below, and since my container subnet is 10.24.1.0/24 it never matches:
-A POSTROUTING -s 172.16.0.0/16 ! -d 224.0.0.0/4 -j MASQUERADE

Thanks, the problem is basically pinned down now. The reason my iptables looks like this is that I downloaded the flannel vxlan backend deployment yaml from aliyun, and its ConfigMap contains this:
net-conf.json: |
  {
    "Network": "172.16.0.0/16",
    "Backend": {
      "Type": "vxlan"
    }
  }
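
A hedged sketch of the underlying fix (assuming the cluster's real pod CIDR is 10.24.0.0/16, which is what the flannel.1 route on the node suggests): the "Network" value in the ConfigMap should match the pod CIDR actually in use, so that the MASQUERADE rules generated by --ip-masq cover the pods' source addresses:

net-conf.json: |
  {
    "Network": "10.24.0.0/16",
    "Backend": {
      "Type": "vxlan"
    }
  }

After changing the ConfigMap, the flannel pods need to be restarted for the new network to take effect.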

@tonybai_cn
Thank you. Your article helped me understand that flannel traffic leaves the node via iptables, which is what pointed me toward this solution.

要回复问题请先登录注册