Environment preparation:
Three CentOS 7 virtual machines, each with 2 CPU cores and 2 GB of RAM
Note: all of the initialization steps below must be run on the master node and on every node (worker) node
1. Disable the firewall and SELinux
systemctl stop firewalld
setenforce 0
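Optional, not part of the original steps: the two commands above only take effect until the next reboot. If you are fine with keeping firewalld and SELinux off permanently, a common way to make the change persistent is:
# keep firewalld from starting on boot
systemctl disable firewalld
# keep SELinux disabled after reboot
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config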
2. Time synchronization
[root@localhost ~]# yum -y install ntpdate
[root@localhost ~]# ntpdate pool.ntp.org
16 Nov 18:22:38 ntpdate[1985]: adjust time server 185.255.55.20 offset 0.003975 sec
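Optionally (my addition, not in the original), you can keep the clocks in sync by running ntpdate periodically from cron, for example every 30 minutes:
# append a cron entry for root; crond picks it up automatically
echo "*/30 * * * * /usr/sbin/ntpdate pool.ntp.org" >> /var/spool/cron/root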
3. Set the hostnames (one master node, two worker nodes)
[root@localhost ~]# hostnamectl set-hostname k8s-master
[root@localhost ~]# hostnamectl set-hostname k8s-node1
[root@localhost ~]# hostnamectl set-hostname k8s-node2
Switch to the new hostname by starting a fresh login shell with su -:
[root@localhost ~]# su -
[root@k8s-master ~]#
[root@k8s-node1 ~]#
[root@k8s-node2 ~]#
4. Edit the hosts file (on all three nodes)
vim /etc/hosts
192.168.232.135 k8s-master
192.168.232.136 k8s-node1
192.168.232.137 k8s-node2
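A quick sanity check (my addition, using the hostnames defined above) is to ping each node by name from the other nodes:
[root@k8s-master ~]# ping -c 2 k8s-node1
[root@k8s-master ~]# ping -c 2 k8s-node2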
5. Install a specific version of Docker
Run this on the master node and on all worker nodes.
[root@k8s-master ~]# yum -y install wget
[root@k8s-master ~]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
[root@k8s-master ~]# yum -y install docker-ce-18.06.3.ce-3.el7
6. Enable and start Docker
[root@k8s-master ~]# systemctl enable docker
[root@k8s-master ~]# systemctl start docker
7. Configure Docker registry mirrors and daemon options
[root@k8s-master ~]# cd /etc/docker/
[root@k8s-master docker]# vim daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "registry-mirrors": [
    "https://kfwkfulq.mirror.aliyuncs.com",
    "https://2lqq34jg.mirror.aliyuncs.com",
    "https://pee6w651.mirror.aliyuncs.com",
    "http://hub-mirror.c.163.com",
    "https://docker.mirrors.ustc.edu.cn",
    "https://registry.docker-cn.com"
  ]
}
7.1. Restart Docker
[root@k8s-master docker]# systemctl restart docker
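To confirm the new settings took effect (an optional check, not in the original steps), verify that Docker now reports the systemd cgroup driver; the output should contain "Cgroup Driver: systemd":
[root@k8s-master docker]# docker info | grep -i "cgroup driver"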
8. Upload the Kubernetes yum repository file and install kubelet, kubeadm, and kubectl
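The repo file itself is not shown in the original post. If you do not have an offline copy, a commonly used Aliyun-mirror definition looks roughly like this (a sketch; verify the URLs before relying on them):
[root@k8s-master ~]# cat > /etc/yum.repos.d/kubernetes.repo << 'EOF'
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF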

[root@k8s-master yum.repos.d]# yum -y install kubelet-1.17.0 kubeadm-1.17.0 kubectl-1.17.0
9. Enable kubelet to start on boot
[root@k8s-master yum.repos.d]# systemctl enable kubelet
Do not start it yet!! (kubeadm will generate the kubelet configuration and start it during init/join.)
10. Initialize the cluster
Run on the master node only:
[root@k8s-master yum.repos.d]# kubeadm init --apiserver-advertise-address=192.168.232.135 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.17.0 --service-cidr=10.1.0.0/16 --pod-network-cidr=10.244.0.0/16
Handling the pre-flight check errors:
[ERROR FileContent--proc-sys-net-bridge-bridge-nf-call-iptables]: /proc/sys/net/bridge/bridge-nf-call-iptables contents are not set to 1
[root@k8s-master yum.repos.d]# echo "1" > /proc/sys/net/bridge/bridge-nf-call-iptables
[root@k8s-master yum.repos.d]# swapoff -a    (run on all three nodes)
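Both fixes above are lost on reboot. A common way to make them permanent (my addition; adjust to your environment) is:
# load the bridge netfilter settings on every boot
cat > /etc/sysctl.d/k8s.conf << 'EOF'
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
sysctl --system
# comment out the swap entry so swap stays off after reboot
sed -ri 's/.*swap.*/#&/' /etc/fstab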
Run the init command again:
[root@k8s-master yum.repos.d]# kubeadm init --apiserver-advertise-address=192.168.232.135 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.17.0 --service-cidr=10.1.0.0/16 --pod-network-cidr=10.244.0.0/16
Then, on the master, set up kubectl access as instructed by the kubeadm init output:
[root@k8s-master yum.repos.d]# mkdir -p $HOME/.kube
[root@k8s-master yum.repos.d]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master yum.repos.d]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
11. Join the worker nodes to the cluster (run on both worker nodes, using the kubeadm join command printed by kubeadm init):
[root@k8s-node1 yum.repos.d]# kubeadm join 192.168.232.135:6443 --token flbxjj.1krdrzqi90cj2cdx --discovery-token-ca-cert-hash sha256:169c1533a5d4da1d5c20423a063e409dd65340ae8bd864b18779e54d5dfb7677
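If the join command is lost or the token has expired (tokens are valid for 24 hours by default), a new one can be printed on the master with:
[root@k8s-master ~]# kubeadm token create --print-join-command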

11.1. Verify (on the master):
[root@k8s-master yum.repos.d]# kubectl get nodes
NAME         STATUS     ROLES    AGE   VERSION
k8s-master   NotReady   master   60m   v1.17.0
k8s-node1    NotReady   <none>   51m   v1.17.0
k8s-node2    NotReady   <none>   50m   v1.17.0
12. Install the flannel network add-on
Upload the network add-on package to the master:
[root@k8s-master ~]# ls
anaconda-ks.cfg k8s-v1.17.0.zip
[root@k8s-master ~]# yum -y install unzip
[root@k8s-master ~]# unzip k8s-v1.17.0.zip
[root@k8s-master ~]# cd k8s-v1.17.0
[root@k8s-master k8s-v1.17.0]# ls
images kube-flannel.yml
[root@k8s-master k8s-v1.17.0]# kubectl apply -f kube-flannel.yml
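If you do not have the offline package, the same manifest could alternatively be applied straight from the flannel project (requires internet access from the master; this URL was the one documented by flannel at the time, so verify it still works):
[root@k8s-master ~]# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml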
13. Check the pod status
[root@k8s-master k8s-v1.17.0]# kubectl get pod -A
Wait about a minute and the pods will all change to Running.
14. Check the node status again
[root@k8s-master k8s-v1.17.0]# kubectl get nodes

The nodes have changed from NotReady to Ready!!
At this point the Kubernetes installation is complete. Next, we write the YAML file for the dashboard.
15. Write the dash_board.yaml file
[root@k8s-master ~]# vim dash_board.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
---
# extra ServiceAccount used for admin login (bound to cluster-admin below)
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dashboard-admin
  namespace: kubernetes-dashboard
---
# dashboard Service exposed outside the cluster via NodePort 30002
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30002
  selector:
    k8s-app: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # allow the dashboard to manage its own secrets
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
  # allow the dashboard to get and update its settings config map
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
  # allow the dashboard to proxy to the metrics scraper
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # allow the metrics scraper to read metrics from the metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard
---
# give the dashboard-admin ServiceAccount full cluster-admin rights for login
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dashboard-admin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: dashboard-admin
    namespace: kubernetes-dashboard
---
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.0.0-rc7
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "beta.kubernetes.io/os": linux
      # allow scheduling on the master as well
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.4
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
            - mountPath: /tmp
              name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "beta.kubernetes.io/os": linux
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}
16. Create the resources
[root@k8s-master ~]# kubectl apply -f dash_board.yaml
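Before moving on, you can check that the two dashboard pods come up (an optional verification step, not in the original):
[root@k8s-master ~]# kubectl get pods -n kubernetes-dashboard -o wide
Both the kubernetes-dashboard and dashboard-metrics-scraper pods should reach the Running state.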

17. Get the token used to log in to the dashboard
[root@k8s-master ~]# kubectl get secret -n kubernetes-dashboard |grep dashboard-admin
dashboard-admin-token-g9phc kubernetes.io/service-account-token 3 2m55s
[root@k8s-master ~]# kubectl describe secret dashboard-admin-token-g9phc -n kubernetes-dashboard
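The token is the long string in the "token:" field of the describe output. If you prefer a one-liner (my addition), the secret name can be looked up and described in one step:
[root@k8s-master ~]# kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | awk '/dashboard-admin/{print $1}')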

18. Check which node the dashboard pod was scheduled on
[root@k8s-master ~]# kubectl get pod -A -o wide
You can see it was created on the k8s-node1 node.
19. Check the exposed port
[root@k8s-master ~]# kubectl get svc -A -o wide
Port: 30002
20. Log in to the dashboard
In a browser, open https://<node-IP>:30002, for example https://192.168.232.136:30002 (any node IP works, since the service is a NodePort), accept the self-signed certificate warning, choose "Token", and paste the token obtained in step 17.
The dashboard setup is complete!