Docker Networking

Understanding the network (docker0)

# lo      the local loopback interface
# ens32   the host's LAN interface
# docker0 the bridge interface created by Docker
[root@localhost ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:6a:1c:48 brd ff:ff:ff:ff:ff:ff
    inet 192.168.10.140/24 brd 192.168.10.255 scope global noprefixroute dynamic ens32
       valid_lft 1424sec preferred_lft 1424sec
    inet6 fe80::cfa6:dba3:87d:262d/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever
3: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default 
    link/ether 02:42:df:a7:e4:75 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
       valid_lft forever preferred_lft forever
    inet6 fe80::42:dfff:fea7:e475/64 scope link 
       valid_lft forever preferred_lft forever
# Run Tomcat
[root@localhost ~]# docker run -d -P --name tomcat01 tomcat
f91c3224fc7f60335680bcc23b921407210fc783806136efd3e48f5225ab4141

# Check the container's internal network configuration with ip addr. When the container starts, it gets an eth0@if73 interface with an IP address assigned by Docker.
[root@localhost ~]# docker exec -it tomcat01 ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
72: eth0@if73: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default 
    link/ether 02:42:ac:11:00:03 brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet 172.17.0.3/16 brd 172.17.255.255 scope global eth0
       valid_lft forever preferred_lft forever

# Test whether the host can ping the container
[root@localhost ~]# ping 172.17.0.3
PING 172.17.0.3 (172.17.0.3) 56(84) bytes of data.
64 bytes from 172.17.0.3: icmp_seq=1 ttl=64 time=0.060 ms
64 bytes from 172.17.0.3: icmp_seq=2 ttl=64 time=0.089 ms
64 bytes from 172.17.0.3: icmp_seq=3 ttl=64 time=0.045 ms
64 bytes from 172.17.0.3: icmp_seq=4 ttl=64 time=0.093 ms
^C
--- 172.17.0.3 ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 3000ms
rtt min/avg/max/mdev = 0.045/0.071/0.093/0.022 ms
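
The host can reach 172.17.0.3 directly because installing Docker adds a route for the docker0 subnet. A quick way to confirm this on the host (a minimal check; the exact output depends on your machine):

# Typically shows something like "172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1"
ip route | grep docker0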

How it works: every time we start a Docker container, Docker assigns it an IP address. As soon as Docker is installed, the host gets a docker0 interface working in bridge mode, and the underlying technology is the veth-pair.

# Running ip addr on the host again, a new interface has appeared: 73: veth836eb56@if72, the host-side peer of the container's eth0.
[root@localhost ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:6a:1c:48 brd ff:ff:ff:ff:ff:ff
    inet 192.168.10.140/24 brd 192.168.10.255 scope global noprefixroute dynamic ens32
       valid_lft 1765sec preferred_lft 1765sec
    inet6 fe80::cfa6:dba3:87d:262d/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever
3: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default 
    link/ether 02:42:df:a7:e4:75 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
       valid_lft forever preferred_lft forever
    inet6 fe80::42:dfff:fea7:e475/64 scope link 
       valid_lft forever preferred_lft forever
73: veth836eb56@if72: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default 
    link/ether 7e:03:d3:eb:46:0c brd ff:ff:ff:ff:ff:ff link-netnsid 1
    inet6 fe80::7c03:d3ff:feeb:460c/64 scope link 
       valid_lft forever preferred_lft forever
77: veth4c1a67a@if76: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default 
    link/ether 9a:dc:0d:00:76:7a brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet6 fe80::98dc:dff:fe00:767a/64 scope link 
       valid_lft forever preferred_lft forever

The interfaces created for containers always come in pairs. A veth-pair is a pair of virtual device interfaces that always appear together: one end attaches to a protocol stack, and the two ends are connected to each other. Because of this property, a veth-pair acts as a bridge between virtual network devices: OpenStack, connections between Docker containers, and OVS connections all use the veth-pair technique.
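
To see the pairing concretely: the number after "@if" in the container's eth0 name is the interface index of the host-side veth, and vice versa. A minimal way to check this, assuming the tomcat01 container started above is still running:

# Inside the container: eth0's peer index (prints 73, the index of the host-side veth)
docker exec -it tomcat01 cat /sys/class/net/eth0/iflink
# On the host: interface 73 is veth836eb56@if72, whose peer (if72) is the container's eth0
ip addr | grep "^73:"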

Conclusion: tomcat01 and tomcat02 (started below) share the same router, docker0.

When no network is specified, every container is routed through docker0, and Docker assigns it a default, usable IP on that network.
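
To verify which containers are attached to docker0 and which IPs they were assigned, inspect the default bridge network (output omitted here; the "Containers" section lists each attached container with its address):

# Show the default bridge network, including all attached containers and their IPs
docker network inspect bridge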

--link: connecting containers

Consider a scenario: we write a microservice with database url=ip. If the database's IP changes while the project keeps running, we would like to handle this by reaching the container through its name instead of its IP.

[root@localhost ~]# docker run -d -P --name tomcat02 tomcat
eb3e14b6091cfc9901e201e59b7ba1801ba7a851c5eef76f923cdf21824c6734

# Test whether tomcat02 can ping tomcat01 by name; it cannot.
[root@localhost ~]# docker exec -it tomcat02 ping tomcat01
ping: tomcat01: No address associated with hostname

# Using --link solves the problem. Note that the link only works one way: pinging in the reverse direction still fails.
[root@localhost ~]# docker run -dP --name tomcat03 --link tomcat02 tomcat
363f35c54064179f061570a02cf139c81c83f05b00ec8b7809c9f452f6aa84f2
[root@localhost ~]# docker exec -it tomcat03 ping tomcat02 
PING tomcat02 (172.17.0.2) 56(84) bytes of data.
64 bytes from tomcat02 (172.17.0.2): icmp_seq=1 ttl=64 time=0.120 ms
64 bytes from tomcat02 (172.17.0.2): icmp_seq=2 ttl=64 time=0.057 ms
64 bytes from tomcat02 (172.17.0.2): icmp_seq=3 ttl=64 time=0.041 ms
64 bytes from tomcat02 (172.17.0.2): icmp_seq=4 ttl=64 time=0.039 ms
^C
--- tomcat02 ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 4ms
rtt min/avg/max/mdev = 0.039/0.064/0.120/0.033 ms
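
What --link actually does is write a static entry for tomcat02 into tomcat03's /etc/hosts file, which is why the name only resolves in one direction. Assuming the containers above are still running, this can be confirmed with:

# tomcat03's hosts file contains a line mapping tomcat02's IP (172.17.0.2) to the name tomcat02
docker exec -it tomcat03 cat /etc/hosts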

Custom networks

# List all Docker networks
[root@localhost ~]# docker network ls
NETWORK ID     NAME      DRIVER    SCOPE
28c2f9180a00   bridge    bridge    local
d6cef74ec597   host      host      local
5724356a108f   none      null      local

Network modes:

  • bridge: bridged via docker0 (the default).
  • none: no networking configured.
  • host: share the host's network stack (see the sketch after this list).
  • container: join another container's network namespace (rarely used; very limited).
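
A minimal sketch of host mode, using nginx purely as an illustrative image: the container shares the host's network stack, so no -p/-P port mapping is involved.

# nginx listens directly on the host's port 80; no port mapping needed
docker run -d --name nginx-host --net host nginx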

Test:

# The usual startup command implicitly includes --net bridge, and that bridge network is our docker0. (The earlier containers are assumed to have been removed so the name tomcat01 can be reused.)
[root@localhost ~]# docker run -dP --name tomcat01 --net bridge tomcat
dd4423cb2a1aa0d472091e249cbc0fedcf7bf226e30475c7a53cdd9909025e10

# docker0 characteristics: it is the default, containers on it cannot be reached by name, and --link can be used to work around that.

# We can also create a custom network.
# --driver   network driver (mode)
# --subnet   subnet
# --gateway  gateway
[root@localhost ~]# docker network create --driver bridge --subnet 192.168.11.0/16 --gateway 192.168.11.1 mynet
0489f9b5c5839bc7f876d5e7f50ff3e7311741bdce18ccbc5055463f707a680b
[root@localhost ~]# docker network ls
NETWORK ID     NAME      DRIVER    SCOPE
28c2f9180a00   bridge    bridge    local
d6cef74ec597   host      host      local
0489f9b5c583   mynet     bridge    local
5724356a108f   none      null      local

# Run a container on mynet
[root@localhost ~]# docker run -dP --name tomcat-net-01 --net mynet tomcat
48f5ce3fcebbd426fa8bd77a0b507d819c7025e60365621654931249426e093e

# Test whether the container can reach the host (192.168.10.140)
[root@localhost ~]# docker exec -it tomcat-net-01 ping 192.168.10.140
PING 192.168.10.140 (192.168.10.140) 56(84) bytes of data.
64 bytes from 192.168.10.140: icmp_seq=1 ttl=64 time=0.194 ms
64 bytes from 192.168.10.140: icmp_seq=2 ttl=64 time=0.052 ms
64 bytes from 192.168.10.140: icmp_seq=3 ttl=64 time=0.457 ms
64 bytes from 192.168.10.140: icmp_seq=4 ttl=64 time=0.055 ms
64 bytes from 192.168.10.140: icmp_seq=5 ttl=64 time=0.150 ms
^C
--- 192.168.10.140 ping statistics ---
5 packets transmitted, 5 received, 0% packet loss, time 13ms
rtt min/avg/max/mdev = 0.052/0.181/0.457/0.148 ms

For custom networks, Docker already maintains the name-to-IP mapping for us, so containers on the same custom network can reach each other by container name. Using custom networks like this is the recommended everyday approach.
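
For example, starting a second container on mynet lets it ping tomcat-net-01 by name (a minimal sketch; tomcat-net-02 is just an illustrative name):

# Containers on the same custom network resolve each other by container name
docker run -dP --name tomcat-net-02 --net mynet tomcat
docker exec -it tomcat-net-02 ping tomcat-net-01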

Connecting networks

Connect tomcat01 (running on the default bridge) to the mynet network so that it can reach tomcat-net-01.

[root@localhost ~]# docker network connect mynet tomcat01
[root@localhost ~]# docker network inspect mynet 
[
    {
        "Name": "mynet",
        "Id": "0489f9b5c5839bc7f876d5e7f50ff3e7311741bdce18ccbc5055463f707a680b",
        "Created": "2021-01-25T19:56:50.348228355+08:00",
        "Scope": "local",
        "Driver": "bridge",
        "EnableIPv6": false,
        "IPAM": {
            "Driver": "default",
            "Options": {},
            "Config": [
                {
                    "Subnet": "192.168.11.0/16",
                    "Gateway": "192.168.11.1"
                }
            ]
        },
        "Internal": false,
        "Attachable": false,
        "Ingress": false,
        "ConfigFrom": {
            "Network": ""
        },
        "ConfigOnly": false,
        "Containers": {
            "48f5ce3fcebbd426fa8bd77a0b507d819c7025e60365621654931249426e093e": {
                "Name": "tomcat-net-01",
                "EndpointID": "aeb3590cdd2cd3d4db32d36493a93f2d427664836574efb5f9919b4682379c73",
                "MacAddress": "02:42:c0:a8:00:01",
                "IPv4Address": "192.168.0.1/16",
                "IPv6Address": ""
            },
            "728e351a2086ed0830fd5779f49cb85c2532c647acfd19181f5b3258bf11c063": {
                "Name": "tomcat01",
                "EndpointID": "1d2bf3e7edbdb9256a631b642da1d0258580c040786a596d6ea7dca3f05b10d2",
                "MacAddress": "02:42:c0:a8:00:02",
                "IPv4Address": "192.168.0.2/16",
                "IPv6Address": ""
            }
        },
        "Options": {},
        "Labels": {}
    }
]
[root@localhost ~]# docker exec -it tomcat01 ping tomcat-net-01
PING tomcat-net-01 (192.168.0.1) 56(84) bytes of data.
64 bytes from tomcat-net-01.mynet (192.168.0.1): icmp_seq=1 ttl=64 time=0.053 ms
64 bytes from tomcat-net-01.mynet (192.168.0.1): icmp_seq=2 ttl=64 time=0.127 ms
64 bytes from tomcat-net-01.mynet (192.168.0.1): icmp_seq=3 ttl=64 time=0.064 ms
64 bytes from tomcat-net-01.mynet (192.168.0.1): icmp_seq=4 ttl=64 time=0.054 ms

--- tomcat-net-01 ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 10ms
rtt min/avg/max/mdev = 0.053/0.074/0.127/0.031 ms
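
After docker network connect, tomcat01 is attached to both networks at once: it keeps its IP on the default bridge and also gets an IP on mynet. This can be verified with (output omitted):

# tomcat01 now shows one interface on 172.17.0.0/16 (docker0) and one on the mynet subnet
docker exec -it tomcat01 ip addr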

Redis cluster

  • Create the network.
[root@localhost ~]# docker network create redis --subnet 192.170.0.0/16
bd798eec91084e17130ce48c710f50a51bb2de76a6de7b8d3c10b5619ac8202d
[root@localhost ~]# docker network ls
NETWORK ID     NAME      DRIVER    SCOPE
28c2f9180a00   bridge    bridge    local
d6cef74ec597   host      host      local
0489f9b5c583   mynet     bridge    local
5724356a108f   none      null      local
bd798eec9108   redis     bridge    local
  • Use a script to create six Redis configurations and start the six containers.
for port in $(seq 1 6); \
do \
mkdir -p /mydata/redis/node-${port}/conf
touch /mydata/redis/node-${port}/conf/redis.conf
cat << EOF >/mydata/redis/node-${port}/conf/redis.conf
port 6379
bind 0.0.0.0
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
cluster-announce-ip 192.170.0.1${port}
cluster-announce-port 6379
cluster-announce-bus-port 16379
appendonly yes
EOF
# Start the corresponding Redis container (one per loop iteration, six in total)
docker run -p 637${port}:6379 -p 1637${port}:16379 --name redis-${port} \
-v /mydata/redis/node-${port}/data:/data \
-v /mydata/redis/node-${port}/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 192.170.0.1${port} redis redis-server /etc/redis/redis.conf
done
  • Enter redis-1 and set up the cluster.
docker exec -it redis-1 /bin/sh

redis-cli --cluster create 192.170.0.11:6379 192.170.0.12:6379 192.170.0.13:6379 192.170.0.14:6379 192.170.0.15:6379 192.170.0.16:6379 --cluster-replicas 1
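
Once the cluster is created, it can be verified from inside the same redis-1 shell (a quick check, assuming the create command above succeeded):

# Open a cluster-aware client, then inspect the cluster state and node list
redis-cli -c
cluster info
cluster nodes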