机器网络基本配置

1、安装k8s的节点必须是大于1核心的CPU

2、安装节点的网络信息

master
BOOTPROTO=static
IPADDR=192.168.1.198
NETMASK=255.255.255.0
GATEWAY=192.168.1.1  #指定到koolshare的软路由上

node1
BOOTPROTO=static
IPADDR=192.168.1.199
NETMASK=255.255.255.0
GATEWAY=192.168.1.1

node2
BOOTPROTO=static
IPADDR=192.168.1.197
NETMASK=255.255.255.0
GATEWAY=192.168.1.1

3、koolshare 软路由的默认密码是 koolshare

系统初始化

设置系统主机名以及Host文件

# Give each machine a unique hostname (run the matching line on that host only)
hostnamectl set-hostname k8s-master01
hostnamectl set-hostname k8s-node01
hostnamectl set-hostname k8s-node02

安装依赖包

# Switch yum to the Aliyun CentOS 7 mirror, then install common dependencies
# (ipvsadm/ipset/conntrack are needed by kube-proxy in IPVS mode; ntp for time sync)
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget vim net-tools git

设置防火墙为Iptables并设置空规则

# Replace firewalld with plain iptables and flush all rules
# (Kubernetes/kube-proxy manage their own iptables rules)
systemctl stop firewalld && systemctl disable firewalld
yum -y install iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save

## 关闭 swap 分区与 SELINUX
K8s 安装时会检测 swap 分区有无关闭:如果开启,Pod 可能被放到虚拟内存中运行,大大降低工作效率。(也可以通过 kubeadm 的 --ignore-preflight-errors=Swap 参数跳过该检测)

# Disable swap now and comment out swap entries in /etc/fstab
# (kubelet refuses to start while swap is enabled)
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
# Put SELinux in permissive mode now and disable it permanently
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config

调整内核参数,对于K8s

# Kernel parameters required by Kubernetes: let bridged traffic traverse
# iptables, enable IP forwarding, disable IPv6.
# NOTE: sysctl does not strip trailing comments on key=value lines — an
# inline "# ..." after a value makes `sysctl -p` fail with "Invalid argument",
# so all comments below are kept on their own lines.
cat > kubernetes.conf << EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
# use swap only when the system is out of physical memory (OOM)
vm.swappiness=0
# do not check whether physical memory is sufficient on overcommit
vm.overcommit_memory=1
# on OOM, invoke the OOM killer instead of a kernel panic
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
sysctl -p /etc/sysctl.d/kubernetes.conf

报错1:显示/proc/sys/net/bridge/bridge-nf-call-iptables:没有这个文件或者目录

modprobe br_netfilter

报错2:显示sysctl: cannot stat /proc/sys/net/netfilter/nf_conntrack_max: 没有那个文件或目录

modprobe ip_conntrack

调整系统时区

# Set the system timezone to Asia/Shanghai
timedatectl set-timezone Asia/Shanghai
# Keep the hardware clock in UTC (write current UTC time to the RTC)
timedatectl set-local-rtc 0
# Restart services that depend on the system time
systemctl restart rsyslog
systemctl restart crond
# Stop services not needed on a Kubernetes node
systemctl stop postfix && systemctl disable postfix

修改系统内核为4.44

#CentOS 7.x 系统自带的 3.10.x 内核存在一些 Bug,导致运行的 Docker、Kubernetes 不稳定,因此通过 elrepo 仓库升级内核:

# Add the ELRepo repository, which provides long-term-support kernels
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm

# After installation, verify that the new kernel's menuentry in
# /boot/grub2/grub.cfg contains an initrd16 line; if not, install again
yum --enablerepo=elrepo-kernel install -y kernel-lt

# Boot the new kernel by default.
# NOTE: the original used typographic quotes (' '), which the shell does NOT
# treat as quoting — the menu title must be wrapped in plain single quotes.
grub2-set-default 'CentOS Linux (4.4.189-1.el7.elrepo.x86_64) 7 (Core)'

#检测:
[root@k8s-master01 ~]# uname -r
4.4.189-1.el7.elrepo.x86_64

修改DNS

# Add name resolution for all cluster nodes to /etc/hosts,
# then copy the file to each worker node
vim /etc/hosts

192.168.1.198 k8s-master01

192.168.1.199 k8s-node01

192.168.1.197 k8s-node02

scp /etc/hosts root@k8s-node01:/etc/hosts

scp /etc/hosts root@k8s-node02:/etc/hosts

kube-proxy开启ipvs的前置条件

# Load the kernel modules required by kube-proxy's IPVS mode, persisted via
# /etc/sysconfig/modules/ so they are reloaded on every boot.
# NOTE: the original used an en-dash (–) instead of "--" after modprobe (the
# modules were never loaded) and "Chmod755" instead of "chmod 755".
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules &&
bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

安装Docker软件

# Install Docker CE prerequisites, add the Aliyun mirror of the Docker CE repo,
# then install docker-ce
yum install -y yum-utils device-mapper-persistent-data lvm2

yum-config-manager \
 --add-repo \
 http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

yum update -y && yum install -y docker-ce

## Create /etc/docker (-p makes this idempotent: a re-run no longer fails)
mkdir -p /etc/docker

# Configure the Docker daemon: use the systemd cgroup driver (so kubelet and
# Docker agree on cgroup management) and cap json-file log size at 100 MB
cat > /etc/docker/daemon.json << EOF
{
	"exec-opts":["native.cgroupdriver=systemd"],
	"log-driver":"json-file",
	"log-opts":{
		"max-size":"100m"
	}
}
EOF

# Directory for systemd drop-in overrides of docker.service
mkdir -p /etc/systemd/system/docker.service.d

# Reload unit files, restart docker and enable it at boot
systemctl daemon-reload && systemctl restart docker && systemctl enable docker

安装Kubeadm(主从配置)

# Configure the Aliyun Kubernetes yum repository so kubeadm can bootstrap the cluster
cat <<EOF >/etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# Pin matching versions of kubeadm/kubectl/kubelet
yum -y install kubeadm-1.15.1 kubectl-1.15.1 kubelet-1.15.1
# kubelet talks to the container runtime directly; every other component that
# kubeadm installs runs as a pod, so kubelet must start at boot or the
# cluster cannot come up.
systemctl enable kubelet.service

初始化主节点

# Pull the control-plane images from the Aliyun registry and initialize the
# master (pod CIDR 10.244.0.0/16 matches the flannel default)
kubeadm init --image-repository=registry.aliyuncs.com/google_containers --pod-network-cidr=10.244.0.0/16 --kubernetes-version=v1.15.1

# Dump the default init configuration to a yaml file to use as a template
kubeadm config print init-defaults > kubeadm-config.yaml

vim kubeadm-config.yaml
#修改为(advertiseAddress 要改成本机 master 的 IP,本例为 192.168.1.198;kube-proxy 默认调度方式是 iptables,下面追加的配置将其改为 ipvs):
advertiseAddress: 192.168.1.198
kubernetesVersion: v1.15.1
#添加覆盖: 
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs

然后
kubeadm init --config=kubeadm-config.yaml | tee kubeadm-init.log

完成后提示
Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.1.198:6443 --token l1pc10.hsnqt6uqgo05yy2f \
    --discovery-token-ca-cert-hash sha256:1ce61cd816ef20304759553f2ee4cb9f0f63d0c932bb8f5d83a469434da941fa

# 在node机器执行,node机器需要安装好k8s和docker
kubeadm join 192.168.1.198:6443 --token l1pc10.hsnqt6uqgo05yy2f \
    --discovery-token-ca-cert-hash sha256:1ce61cd816ef20304759553f2ee4cb9f0f63d0c932bb8f5d83a469434da941fa
#即可

安装完成后,还需要进行如下设置

在当前家目录下创建.kube文件,这里会保存连接配置
kubectl和kubeapi交互,采取HTTPS协议,所以需要些缓存和认证文件都会保存到.kube文件
然后拷贝管理配置文件到.kube目录下

# kubectl talks to the API server over HTTPS using $HOME/.kube/config;
# copy the admin credentials there and fix ownership for the current user
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

部署网络flannel

kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# Keep installation artifacts organized under install-k8s/
[root@k8s-master01 ~]# mkdir install-k8s
mv kubeadm-init.log kubeadm-config.yaml install-k8s/
cd install-k8s/
mkdir core
mv * core/
mkdir plugin
cd plugin
mkdir flannel
cd flannel/
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

kubectl create -f kube-flannel.yml

查看组件运行,发现flannel已经在运行
kubectl get pod -n kube-system

发现已经ready了
kubectl get node

原理:kubectl是命令行管理工具,get获取pod状态,-n是指定名称空间为kube-system。因为所有的系统组件都被安装在kube-system
如果不加-n指定,默认是default

部署metallb负载均衡器

$ wget https://raw.githubusercontent.com/google/metallb/v0.7.3/manifests/metallb.yaml
$ kubectl apply -f metallb.yaml
[centos@k8s-master ~]$ kubectl get pod -n metallb-system  -o wide
NAME                          READY   STATUS    RESTARTS   AGE    IP              NODE         NOMINATED NODE   READINESS GATES
controller-7cc9c87cfb-n25kc   1/1     Running   1          166m   10.244.1.39     k8s-node1    <none>           <none>
speaker-cbhcg                 1/1     Running   1          166m   192.168.92.56   k8s-master   <none>           <none>
speaker-l6vv2                 1/1     Running   1          166m   192.168.92.58   k8s-node2    <none>           <none>
speaker-pxscm                 1/1     Running   1          166m   192.168.92.57   k8s-node1    <none>           <none>
[centos@k8s-master ~]$ 

[centos@k8s-master ~]$ kubectl get daemonset -n metallb-system 
NAME      DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
speaker   3         3         3       3            3           <none>          13h
[centos@k8s-master ~]$ kubectl get deployment -n metallb-system 
NAME         READY   UP-TO-DATE   AVAILABLE   AGE
controller   1/1     1            1           13h
[centos@k8s-master ~]$ 

wget https://raw.githubusercontent.com/google/metallb/v0.7.3/manifests/example-layer2-config.yaml

[centos@k8s-master ~]$ vim example-layer2-config.yaml 
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - 192.168.92.200-192.168.92.210 #主要修改这里分配ip
	  
	  
kubectl apply -f example-layer2-config.yaml

[centos@k8s-master ~]$ vim tutorial-2.yaml 
# nginx Deployment + LoadBalancer Service (MetalLB tutorial manifest).
# Reconstructed: the original was missing the "---" document separator and
# the Service ports list had lost its "-" markers and indentation.
apiVersion: apps/v1beta2
kind: Deployment
metadata:
  name: nginx
spec:
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1
        ports:
        - name: http
          containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: nginx
  type: LoadBalancer # 主要修改这里使用LoadBalancer

$ wget https://raw.githubusercontent.com/google/metallb/master/manifests/tutorial-2.yaml
$ kubectl apply -f tutorial-2.yaml

查看yaml文件配置,包含了一个deployment和一个LoadBalancer类型的service,默认即可。

查看service分配的EXTERNAL-IP
[centos@k8s-master ~]$ kubectl get service 
NAME         TYPE           CLUSTER-IP     EXTERNAL-IP      PORT(S)        AGE
kubernetes   ClusterIP      10.96.0.1      <none>           443/TCP        3d15h
nginx        LoadBalancer   10.101.112.1   192.168.92.201   80:31274/TCP   123m
[centos@k8s-master ~]$
集群内访问该IP地址
[centos@k8s-master ~]$ curl 192.168.92.201
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
......
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
[centos@k8s-master ~]$ 

PS:参考

2020最新最详细K8s安装教程

kuberntes部署metallb LoadBalancer

Kubernetes(K8S)集群部署搭建图文教程(最全)