\r\n[root@localhost ~]# rm -rfv \/etc\/yum.repos.d\/*\r\n[root@localhost ~]# curl -o \/etc\/yum.repos.d\/CentOS-Base.repo http:\/\/mirrors.aliyun.com\/repo\/Centos-8.repo\r\n<\/pre>\n\u914d\u7f6e\u4e3b\u673a\u540d<\/p>\n
\r\n[root@master01 ~]# cat \/etc\/hosts\r\n127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4\r\n::1 localhost localhost.localdomain localhost6 localhost6.localdomain6\r\n192.168.122.21 master01.paas.com master01\r\n<\/pre>\n\u5173\u95edswap\uff0c\u6ce8\u91caswap\u5206\u533a<\/p>\n
\r\n[root@master01 ~]# swapoff -a\r\n[root@master01 ~]# cat \/etc\/fstab\r\n\r\n#\r\n# \/etc\/fstab\r\n# Created by anaconda on Tue Mar 31 22:44:34 2020\r\n#\r\n# Accessible filesystems, by reference, are maintained under '\/dev\/disk\/'.\r\n# See man pages fstab(5), findfs(8), mount(8) and\/or blkid(8) for more info.\r\n#\r\n# After editing this file, run 'systemctl daemon-reload' to update systemd\r\n# units generated from this file.\r\n#\r\n\/dev\/mapper\/cl-root \/ xfs defaults 0 0\r\nUUID=5fecb240-379b-4331-ba04-f41338e81a6e \/boot ext4 defaults 1 2\r\n\/dev\/mapper\/cl-home \/home xfs defaults 0 0\r\n#\/dev\/mapper\/cl-swap swap swap defaults 0 0\r\n<\/pre>\n\u914d\u7f6e\u5185\u6838\u53c2\u6570\uff0c\u5c06\u6865\u63a5\u7684IPv4\u6d41\u91cf\u4f20\u9012\u5230iptables\u7684\u94fe<\/p>\n
\r\n[root@master01 ~]# vim \/etc\/sysctl.d\/k8s.conf\r\nnet.bridge.bridge-nf-call-ip6tables = 1\r\nnet.bridge.bridge-nf-call-iptables = 1\r\n<\/pre>\n\r\nsysctl --system\r\n<\/pre>\n\u5b89\u88c5\u5e38\u7528\u5305<\/strong><\/span><\/div>\n\r\n[root@master01 ~]# yum install vim bash-completion net-tools gcc -y\r\n<\/pre>\n\u4f7f\u7528aliyun\u6e90\u5b89\u88c5docker-ce<\/strong><\/span><\/div>\n\r\n[root@master01 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2\r\n[root@master01 ~]# yum-config-manager --add-repo https:\/\/mirrors.aliyun.com\/docker-ce\/linux\/centos\/docker-ce.repo\r\n[root@master01 ~]# yum -y install docker-ce\r\n<\/pre>\n\u5b89\u88c5docker-ce\u5982\u679c\u51fa\u73b0\u4ee5\u4e0b\u9519<\/p>\n
\r\n[root@master01 ~]# yum -y install docker-ce\r\nCentOS-8 - Base - mirrors.aliyun.com 14 kB\/s | 3.8 kB 00:00\r\nCentOS-8 - Extras - mirrors.aliyun.com 6.4 kB\/s | 1.5 kB 00:00\r\nCentOS-8 - AppStream - mirrors.aliyun.com 16 kB\/s | 4.3 kB 00:00\r\nDocker CE Stable - x86_64 40 kB\/s | 22 kB 00:00\r\nError:\r\n Problem: package docker-ce-3:19.03.8-3.el7.x86_64 requires containerd.io >= 1.2.2-3, but none of the providers can be installed\r\n - cannot install the best candidate for the job\r\n - package containerd.io-1.2.10-3.2.el7.x86_64 is excluded\r\n - package containerd.io-1.2.13-3.1.el7.x86_64 is excluded\r\n - package containerd.io-1.2.2-3.3.el7.x86_64 is excluded\r\n - package containerd.io-1.2.2-3.el7.x86_64 is excluded\r\n - package containerd.io-1.2.4-3.1.el7.x86_64 is excluded\r\n - package containerd.io-1.2.5-3.1.el7.x86_64 is excluded\r\n - package containerd.io-1.2.6-3.3.el7.x86_64 is excluded\r\n(try to add '--skip-broken' to skip uninstallable packages or '--nobest' to use not only best candidate packages)\r\n<\/pre>\n\u89e3\u51b3\u65b9\u6cd5<\/p>\n
\r\n[root@master01 ~]# wget https:\/\/download.docker.com\/linux\/centos\/7\/x86_64\/edge\/Packages\/containerd.io-1.2.6-3.3.el7.x86_64.rpm\r\n[root@master01 ~]# yum install containerd.io-1.2.6-3.3.el7.x86_64.rpm\r\n<\/pre>\n\u7136\u540e\u518d\u5b89\u88c5docker-ce\u5373\u53ef\u6210\u529f<\/p>\n
\u6dfb\u52a0aliyundocker\u4ed3\u5e93\u52a0\u901f\u5668<\/p>\n
\r\n[root@master01 ~]# mkdir -p \/etc\/docker\r\n[root@master01 ~]# tee \/etc\/docker\/daemon.json <<-'EOF'\r\n{\r\n \"registry-mirrors\": [\"https:\/\/fl791z1h.mirror.aliyuncs.com\"]\r\n}\r\nEOF\r\n[root@master01 ~]# systemctl daemon-reload\r\n[root@master01 ~]# systemctl restart docker\r\n<\/pre>\n\u5b89\u88c5kubectl\u3001kubelet\u3001kubeadm<\/strong><\/span><\/div>\n\u6dfb\u52a0\u963f\u91cckubernetes\u6e90<\/p>\n
\r\n[root@master01 ~]# cat <<EOF > \/etc\/yum.repos.d\/kubernetes.repo\r\n[kubernetes]\r\nname=Kubernetes\r\nbaseurl=https:\/\/mirrors.aliyun.com\/kubernetes\/yum\/repos\/kubernetes-el7-x86_64\/\r\nenabled=1\r\ngpgcheck=1\r\nrepo_gpgcheck=1\r\ngpgkey=https:\/\/mirrors.aliyun.com\/kubernetes\/yum\/doc\/yum-key.gpg https:\/\/mirrors.aliyun.com\/kubernetes\/yum\/doc\/rpm-package-key.gpg\r\nEOF\r\n<\/pre>\n\u5b89\u88c5<\/p>\n
\r\n[root@master01 ~]# yum install kubectl kubelet kubeadm\r\n[root@master01 ~]# systemctl enable kubelet\r\n<\/pre>\n\u521d\u59cb\u5316k8s\u96c6\u7fa4<\/strong><\/span><\/div>\n\r\n[root@master01 ~]# kubeadm init --kubernetes-version=1.18.0 \\\r\n--apiserver-advertise-address=192.168.122.21 \\\r\n--image-repository registry.aliyuncs.com\/google_containers \\\r\n--service-cidr=10.10.0.0\/16 --pod-network-cidr=10.122.0.0\/16\r\n<\/pre>\nPOD\u7684\u7f51\u6bb5\u4e3a: 10.122.0.0\/16\uff0c api server\u5730\u5740\u5c31\u662fmaster\u672c\u673aIP\u3002<\/p>\n
\u8fd9\u4e00\u6b65\u5f88\u5173\u952e\uff0c\u7531\u4e8ekubeadm \u9ed8\u8ba4\u4ece\u5b98\u7f51k8s.gcr.io\u4e0b\u8f7d\u6240\u9700\u955c\u50cf\uff0c\u56fd\u5185\u65e0\u6cd5\u8bbf\u95ee\uff0c\u56e0\u6b64\u9700\u8981\u901a\u8fc7--image-repository\u6307\u5b9a\u963f\u91cc\u4e91\u955c\u50cf\u4ed3\u5e93\u5730\u5740\u3002<\/p>\n
\u96c6\u7fa4\u521d\u59cb\u5316\u6210\u529f\u540e\u8fd4\u56de\u5982\u4e0b\u4fe1\u606f\uff1a<\/p>\n
\r\nW0408 09:36:36.121603 14098 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]\r\n[init] Using Kubernetes version: v1.18.0\r\n[preflight] Running pre-flight checks\r\n [WARNING FileExisting-tc]: tc not found in system path\r\n[preflight] Pulling images required for setting up a Kubernetes cluster\r\n[preflight] This might take a minute or two, depending on the speed of your internet connection\r\n[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'\r\n[kubelet-start] Writing kubelet environment file with flags to file \"\/var\/lib\/kubelet\/kubeadm-flags.env\"\r\n[kubelet-start] Writing kubelet configuration to file \"\/var\/lib\/kubelet\/config.yaml\"\r\n[kubelet-start] Starting the kubelet\r\n[certs] Using certificateDir folder \"\/etc\/kubernetes\/pki\"\r\n[certs] Generating \"ca\" certificate and key\r\n[certs] Generating \"apiserver\" certificate and key\r\n[certs] apiserver serving cert is signed for DNS names [master01.paas.com kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.10.0.1 192.168.122.21]\r\n[certs] Generating \"apiserver-kubelet-client\" certificate and key\r\n[certs] Generating \"front-proxy-ca\" certificate and key\r\n[certs] Generating \"front-proxy-client\" certificate and key\r\n[certs] Generating \"etcd\/ca\" certificate and key\r\n[certs] Generating \"etcd\/server\" certificate and key\r\n[certs] etcd\/server serving cert is signed for DNS names [master01.paas.com localhost] and IPs [192.168.122.21 127.0.0.1 ::1]\r\n[certs] Generating \"etcd\/peer\" certificate and key\r\n[certs] etcd\/peer serving cert is signed for DNS names [master01.paas.com localhost] and IPs [192.168.122.21 127.0.0.1 ::1]\r\n[certs] Generating \"etcd\/healthcheck-client\" certificate and key\r\n[certs] Generating \"apiserver-etcd-client\" certificate and key\r\n[certs] Generating \"sa\" key 
and public key\r\n[kubeconfig] Using kubeconfig folder \"\/etc\/kubernetes\"\r\n[kubeconfig] Writing \"admin.conf\" kubeconfig file\r\n[kubeconfig] Writing \"kubelet.conf\" kubeconfig file\r\n[kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\r\n[kubeconfig] Writing \"scheduler.conf\" kubeconfig file\r\n[control-plane] Using manifest folder \"\/etc\/kubernetes\/manifests\"\r\n[control-plane] Creating static Pod manifest for \"kube-apiserver\"\r\n[control-plane] Creating static Pod manifest for \"kube-controller-manager\"\r\nW0408 09:36:43.343191 14098 manifests.go:225] the default kube-apiserver authorization-mode is \"Node,RBAC\"; using \"Node,RBAC\"\r\n[control-plane] Creating static Pod manifest for \"kube-scheduler\"\r\nW0408 09:36:43.344303 14098 manifests.go:225] the default kube-apiserver authorization-mode is \"Node,RBAC\"; using \"Node,RBAC\"\r\n[etcd] Creating static Pod manifest for local etcd in \"\/etc\/kubernetes\/manifests\"\r\n[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"\/etc\/kubernetes\/manifests\". This can take up to 4m0s\r\n[apiclient] All control plane components are healthy after 23.002541 seconds\r\n[upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\r\n[kubelet] Creating a ConfigMap \"kubelet-config-1.18\" in namespace kube-system with the configuration for the kubelets in the cluster\r\n[upload-certs] Skipping phase. 
Please see --upload-certs\r\n[mark-control-plane] Marking the node master01.paas.com as control-plane by adding the label \"node-role.kubernetes.io\/master=''\"\r\n[mark-control-plane] Marking the node master01.paas.com as control-plane by adding the taints [node-role.kubernetes.io\/master:NoSchedule]\r\n[bootstrap-token] Using token: v2r5a4.veazy2xhzetpktfz\r\n[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\r\n[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes\r\n[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\r\n[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\r\n[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\r\n[bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\r\n[kubelet-finalize] Updating \"\/etc\/kubernetes\/kubelet.conf\" to point to a rotatable kubelet client certificate and key\r\n[addons] Applied essential addon: CoreDNS\r\n[addons] Applied essential addon: kube-proxy\r\n\r\nYour Kubernetes control-plane has initialized successfully!\r\n\r\nTo start using your cluster, you need to run the following as a regular user:\r\n\r\n mkdir -p $HOME\/.kube\r\n sudo cp -i \/etc\/kubernetes\/admin.conf $HOME\/.kube\/config\r\n sudo chown $(id -u):$(id -g) $HOME\/.kube\/config\r\n\r\nYou should now deploy a pod network to the cluster.\r\nRun \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\r\n https:\/\/kubernetes.io\/docs\/concepts\/cluster-administration\/addons\/\r\n\r\nThen you can join any number of worker nodes by running the following on each as root:\r\n\r\nkubeadm join 192.168.122.21:6443 --token v2r5a4.veazy2xhzetpktfz \\\r\n --discovery-token-ca-cert-hash 
sha256:daded8514c8350f7c238204979039ff9884d5b595ca950ba8bbce80724fd65d4\r\n[root@master01 ~]#\r\n<\/pre>\n\u8bb0\u5f55\u751f\u6210\u7684\u6700\u540e\u90e8\u5206\u5185\u5bb9\uff0c\u6b64\u5185\u5bb9\u9700\u8981\u5728\u5176\u5b83\u8282\u70b9\u52a0\u5165Kubernetes\u96c6\u7fa4\u65f6\u6267\u884c\u3002<\/p>\n
\u6839\u636e\u63d0\u793a\u521b\u5efakubectl<\/p>\n
\r\n[root@master01 ~]# mkdir -p $HOME\/.kube\r\n[root@master01 ~]# sudo cp -i \/etc\/kubernetes\/admin.conf $HOME\/.kube\/config\r\n[root@master01 ~]# sudo chown $(id -u):$(id -g) $HOME\/.kube\/config\r\n<\/pre>\n\u6267\u884c\u4e0b\u9762\u547d\u4ee4\uff0c\u4f7fkubectl\u53ef\u4ee5\u81ea\u52a8\u8865\u5145<\/p>\n
\r\n[root@master01 ~]# source <(kubectl completion bash)\r\n<\/pre>\n\u67e5\u770b\u8282\u70b9\uff0cpod<\/p>\n
\r\n[root@master01 ~]# kubectl get node\r\nNAME STATUS ROLES AGE VERSION\r\nmaster01.paas.com NotReady master 2m29s v1.18.0\r\n[root@master01 ~]# kubectl get pod --all-namespaces\r\nNAMESPACE NAME READY STATUS RESTARTS AGE\r\nkube-system coredns-7ff77c879f-fsj9l 0\/1 Pending 0 2m12s\r\nkube-system coredns-7ff77c879f-q5ll2 0\/1 Pending 0 2m12s\r\nkube-system etcd-master01.paas.com 1\/1 Running 0 2m22s\r\nkube-system kube-apiserver-master01.paas.com 1\/1 Running 0 2m22s\r\nkube-system kube-controller-manager-master01.paas.com 1\/1 Running 0 2m22s\r\nkube-system kube-proxy-th472 1\/1 Running 0 2m12s\r\nkube-system kube-scheduler-master01.paas.com 1\/1 Running 0 2m22s\r\n[root@master01 ~]#\r\n<\/pre>\nnode\u8282\u70b9\u4e3aNotReady\uff0c\u56e0\u4e3acorednspod\u6ca1\u6709\u542f\u52a8\uff0c\u7f3a\u5c11\u7f51\u7edcpod<\/p>\n
\u5b89\u88c5calico\u7f51\u7edc<\/strong><\/span><\/div>\n\r\n[root@master01 ~]# kubectl apply -f https:\/\/docs.projectcalico.org\/manifests\/calico.yaml\r\nconfigmap\/calico-config created\r\ncustomresourcedefinition.apiextensions.k8s.io\/bgpconfigurations.crd.projectcalico.org created\r\ncustomresourcedefinition.apiextensions.k8s.io\/bgppeers.crd.projectcalico.org created\r\ncustomresourcedefinition.apiextensions.k8s.io\/blockaffinities.crd.projectcalico.org created\r\ncustomresourcedefinition.apiextensions.k8s.io\/clusterinformations.crd.projectcalico.org created\r\ncustomresourcedefinition.apiextensions.k8s.io\/felixconfigurations.crd.projectcalico.org created\r\ncustomresourcedefinition.apiextensions.k8s.io\/globalnetworkpolicies.crd.projectcalico.org created\r\ncustomresourcedefinition.apiextensions.k8s.io\/globalnetworksets.crd.projectcalico.org created\r\ncustomresourcedefinition.apiextensions.k8s.io\/hostendpoints.crd.projectcalico.org created\r\ncustomresourcedefinition.apiextensions.k8s.io\/ipamblocks.crd.projectcalico.org created\r\ncustomresourcedefinition.apiextensions.k8s.io\/ipamconfigs.crd.projectcalico.org created\r\ncustomresourcedefinition.apiextensions.k8s.io\/ipamhandles.crd.projectcalico.org created\r\ncustomresourcedefinition.apiextensions.k8s.io\/ippools.crd.projectcalico.org created\r\ncustomresourcedefinition.apiextensions.k8s.io\/networkpolicies.crd.projectcalico.org created\r\ncustomresourcedefinition.apiextensions.k8s.io\/networksets.crd.projectcalico.org created\r\nclusterrole.rbac.authorization.k8s.io\/calico-kube-controllers created\r\nclusterrolebinding.rbac.authorization.k8s.io\/calico-kube-controllers created\r\nclusterrole.rbac.authorization.k8s.io\/calico-node created\r\nclusterrolebinding.rbac.authorization.k8s.io\/calico-node created\r\ndaemonset.apps\/calico-node created\r\nserviceaccount\/calico-node created\r\ndeployment.apps\/calico-kube-controllers created\r\nserviceaccount\/calico-kube-controllers 
created\r\n<\/pre>\n\u67e5\u770bpod\u548cnode<\/p>\n
\r\n[root@master01 ~]# kubectl get pod --all-namespaces\r\nNAMESPACE NAME READY STATUS RESTARTS AGE\r\nkube-system calico-kube-controllers-555fc8cc5c-k8rbk 1\/1 Running 0 36s\r\nkube-system calico-node-5km27 1\/1 Running 0 36s\r\nkube-system coredns-7ff77c879f-fsj9l 1\/1 Running 0 5m22s\r\nkube-system coredns-7ff77c879f-q5ll2 1\/1 Running 0 5m22s\r\nkube-system etcd-master01.paas.com 1\/1 Running 0 5m32s\r\nkube-system kube-apiserver-master01.paas.com 1\/1 Running 0 5m32s\r\nkube-system kube-controller-manager-master01.paas.com 1\/1 Running 0 5m32s\r\nkube-system kube-proxy-th472 1\/1 Running 0 5m22s\r\nkube-system kube-scheduler-master01.paas.com 1\/1 Running 0 5m32s\r\n[root@master01 ~]# kubectl get node\r\nNAME STATUS ROLES AGE VERSION\r\nmaster01.paas.com Ready master 5m47s v1.18.0\r\n[root@master01 ~]#\r\n<\/pre>\n\u6b64\u65f6\u96c6\u7fa4\u72b6\u6001\u6b63\u5e38<\/p>\n
\u5b89\u88c5kubernetes-dashboard<\/strong><\/span><\/div>\n\u5b98\u65b9\u90e8\u7f72dashboard\u7684\u670d\u52a1\u6ca1\u4f7f\u7528nodeport\uff0c\u5c06yaml\u6587\u4ef6\u4e0b\u8f7d\u5230\u672c\u5730\uff0c\u5728service\u91cc\u6dfb\u52a0nodeport<\/p>\n
\r\n[root@master01 ~]# wget https:\/\/raw.githubusercontent.com\/kubernetes\/dashboard\/v2.0.0-rc7\/aio\/deploy\/recommended.yaml\r\n[root@master01 ~]# vim recommended.yaml\r\nkind: Service\r\napiVersion: v1\r\nmetadata:\r\n labels:\r\n k8s-app: kubernetes-dashboard\r\n name: kubernetes-dashboard\r\n namespace: kubernetes-dashboard\r\nspec:\r\n type: NodePort\r\n ports:\r\n - port: 443\r\n targetPort: 8443\r\n nodePort: 30000\r\n selector:\r\n k8s-app: kubernetes-dashboard\r\n\r\n[root@master01 ~]# kubectl create -f recommended.yaml\r\nnamespace\/kubernetes-dashboard created\r\nserviceaccount\/kubernetes-dashboard created\r\nservice\/kubernetes-dashboard created\r\nsecret\/kubernetes-dashboard-certs created\r\nsecret\/kubernetes-dashboard-csrf created\r\nsecret\/kubernetes-dashboard-key-holder created\r\nconfigmap\/kubernetes-dashboard-settings created\r\nrole.rbac.authorization.k8s.io\/kubernetes-dashboard created\r\nclusterrole.rbac.authorization.k8s.io\/kubernetes-dashboard created\r\nrolebinding.rbac.authorization.k8s.io\/kubernetes-dashboard created\r\nclusterrolebinding.rbac.authorization.k8s.io\/kubernetes-dashboard created\r\ndeployment.apps\/kubernetes-dashboard created\r\nservice\/dashboard-metrics-scraper created\r\ndeployment.apps\/dashboard-metrics-scraper created\r\n<\/pre>\n\u67e5\u770bpod\uff0cservice<\/p>\n
\r\nNAME READY STATUS RESTARTS AGE\r\ndashboard-metrics-scraper-dc6947fbf-869kf 1\/1 Running 0 37s\r\nkubernetes-dashboard-5d4dc8b976-sdxxt 1\/1 Running 0 37s\r\n[root@master01 ~]# kubectl get svc -n kubernetes-dashboard\r\nNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\r\ndashboard-metrics-scraper ClusterIP 10.10.58.93 8000\/TCP 44s\r\nkubernetes-dashboard NodePort 10.10.132.66 <\/none> 443:30000\/TCP 44s\r\n[root@master01 ~]#\r\n<\/none><\/pre>\n\u4f7f\u7528token\u8fdb\u884c\u767b\u5f55\uff0c\u6267\u884c\u4e0b\u9762\u547d\u4ee4\u83b7\u53d6token<\/p>\n
\r\n[root@master01 ~]# kubectl describe secrets -n kubernetes-dashboard kubernetes-dashboard-token-t4hxz | grep token | awk 'NR==3{print $2}'\r\neyJhbGciOiJSUzI1NiIsImtpZCI6IlhJaDgyTWEzZ3FtWE9hTnJqUHN1akdHZU1pRHN3QWM2RUlQbUVOT0g0Qm8ifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi10NGh4eiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6ImUxOWYwMGI5LTI3MWItNDY5OS1hMjI3LTAzZWEyZTllMDE4YiIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.Mcw9zYSbTfhaYV38vlEaI0CSomYLtb05F2AIGpyT_PjIN8xmRdnIQhWGANBDuuDjdxScSXHOytHAKdj3pzBFVw_lfU5PseBg6hmdv_EPFPh2GvRd9XCs0TE5CVX8qfHkAGKc-DltA7jPwt5VqIFjnolLLGXB-exhiU73YMG_Xy9dZE-u0KKCvSq7XZDR87P_X30JYCAZXDlxcv8iOsuI4I-wlacm6LRF6HgyJqctJNVyE7seVVIgLqetAtt9LicTo6BBozbefHeK6zqRYeITU8AHhe-PLS4xo2fey5up77v4vyPHy_SEnKOtZcBzje1XKNPolGfiXItLYF7u95m9_A\r\n<\/pre>\n\u901a\u8fc7\u67e5\u770bdashboard\u65e5\u5fd7\uff0c\u5f97\u5230\u5982\u4e0b \u4fe1\u606f<\/p>\n
\r\n[root@master01 ~]# kubectl logs -f -n kubernetes-dashboard kubernetes-dashboard-5d4dc8b976-sdxxt\r\n2020\/04\/08 01:54:31 Non-critical error occurred during resource retrieval: namespaces is forbidden: User \"system:serviceaccount:kubernetes-dashboard:kubernetes-dashboard\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope\r\n2020\/04\/08 01:54:31 [2020-04-08T01:54:31Z] Outcoming response to 192.168.122.21:7788 with 200 status code\r\n2020\/04\/08 01:54:31 [2020-04-08T01:54:31Z] Incoming HTTP\/2.0 GET \/api\/v1\/cronjob\/default?itemsPerPage=10&page=1&sortBy=d,creationTimestamp request from 192.168.122.21:7788:\r\n2020\/04\/08 01:54:31 Getting list of all cron jobs in the cluster\r\n2020\/04\/08 01:54:31 Non-critical error occurred during resource retrieval: cronjobs.batch is forbidden: User \"system:serviceaccount:kubernetes-dashboard:kubernetes-dashboard\" cannot list resource \"cronjobs\" in API group \"batch\" in the namespace \"default\"\r\n2020\/04\/08 01:54:31 [2020-04-08T01:54:31Z] Outcoming response to 192.168.122.21:7788 with 200 status code\r\n2020\/04\/08 01:54:31 [2020-04-08T01:54:31Z] Incoming HTTP\/2.0 POST \/api\/v1\/token\/refresh request from 192.168.122.21:7788: { contents hidden }\r\n2020\/04\/08 01:54:31 [2020-04-08T01:54:31Z] Outcoming response to 192.168.122.21:7788 with 200 status code\r\n2020\/04\/08 01:54:31 [2020-04-08T01:54:31Z] Incoming HTTP\/2.0 GET \/api\/v1\/daemonset\/default?itemsPerPage=10&page=1&sortBy=d,creationTimestamp request from 192.168.122.21:7788:\r\n2020\/04\/08 01:54:31 [2020-04-08T01:54:31Z] Incoming HTTP\/2.0 GET \/api\/v1\/deployment\/default?itemsPerPage=10&page=1&sortBy=d,creationTimestamp request from 192.168.122.21:7788:\r\n2020\/04\/08 01:54:31 Non-critical error occurred during resource retrieval: daemonsets.apps is forbidden: User \"system:serviceaccount:kubernetes-dashboard:kubernetes-dashboard\" cannot list resource \"daemonsets\" in API group \"apps\" in the namespace 
\"default\"\r\n2020\/04\/08 01:54:31 Non-critical error occurred during resource retrieval: pods is forbidden: User \"system:serviceaccount:kubernetes-dashboard:kubernetes-dashboard\" cannot list resource \"pods\" in API group \"\" in the namespace \"default\"\r\n2020\/04\/08 01:54:31 Non-critical error occurred during resource retrieval: events is forbidden: User \"system:serviceaccount:kubernetes-dashboard:kubernetes-dashboard\" cannot list resource \"events\" in API group \"\" in the namespace \"default\"\r\n2020\/04\/08 01:54:31 [2020-04-08T01:54:31Z] Outcoming response to 192.168.122.21:7788 with 200 status code\r\n2020\/04\/08 01:54:31 [2020-04-08T01:54:31Z] Incoming HTTP\/2.0 GET \/api\/v1\/csrftoken\/token request from 192.168.122.21:7788:\r\n2020\/04\/08 01:54:31 [2020-04-08T01:54:31Z] Outcoming response to 192.168.122.21:7788 with 200 status code\r\n2020\/04\/08 01:54:31 Getting list of all deployments in the cluster\r\n2020\/04\/08 01:54:31 Non-critical error occurred during resource retrieval: deployments.apps is forbidden: User \"system:serviceaccount:kubernetes-dashboard:kubernetes-dashboard\" cannot list resource \"deployments\" in API group \"apps\" in the namespace \"default\"\r\n2020\/04\/08 01:54:31 Non-critical error occurred during resource retrieval: pods is forbidden: User \"system:serviceaccount:kubernetes-dashboard:kubernetes-dashboard\" cannot list resource \"pods\" in API group \"\" in the namespace \"default\"\r\n2020\/04\/08 01:54:31 Non-critical error occurred during resource retrieval: events is forbidden: User \"system:serviceaccount:kubernetes-dashboard:kubernetes-dashboard\" cannot list resource \"events\" in API group \"\" in the namespace \"default\"\r\n2020\/04\/08 01:54:31 Non-critical error occurred during resource retrieval: replicasets.apps is forbidden: User \"system:serviceaccount:kubernetes-dashboard:kubernetes-dashboard\" cannot list resource \"replicasets\" in API group \"apps\" in the namespace 
\"default\"\r\n2020\/04\/08 01:54:31 [2020-04-08T01:54:31Z] Outcoming response to 192.168.122.21:7788 with 200 status code\r\n2020\/04\/08 01:54:31 [2020-04-08T01:54:31Z] Incoming HTTP\/2.0 GET \/api\/v1\/job\/default?itemsPerPage=10&page=1&sortBy=d,creationTimestamp request from 192.168.122.21:7788:\r\n2020\/04\/08 01:54:31 [2020-04-08T01:54:31Z] Incoming HTTP\/2.0 GET \/api\/v1\/pod\/default?itemsPerPage=10&page=1&sortBy=d,creationTimestamp request from 192.168.122.21:7788:\r\n2020\/04\/08 01:54:31 Getting list of all jobs in the cluster\r\n2020\/04\/08 01:54:31 Non-critical error occurred during resource retrieval: jobs.batch is forbidden: User \"system:serviceaccount:kubernetes-dashboard:kubernetes-dashboard\" cannot list resource \"jobs\" in API group \"batch\" in the namespace \"default\"\r\n2020\/04\/08 01:54:31 Non-critical error occurred during resource retrieval: pods is forbidden: User \"system:serviceaccount:kubernetes-dashboard:kubernetes-dashboard\" cannot list resource \"pods\" in API group \"\" in the namespace \"default\"\r\n2020\/04\/08 01:54:31 Non-critical error occurred during resource retrieval: events is forbidden: User \"system:serviceaccount:kubernetes-dashboard:kubernetes-dashboard\" cannot list resource \"events\" in API group \"\" in the namespace \"default\"\r\n2020\/04\/08 01:54:31 [2020-04-08T01:54:31Z] Outcoming response to 192.168.122.21:7788 with 200 status code\r\n2020\/04\/08 01:54:31 Getting list of all pods in the cluster\r\n2020\/04\/08 01:54:31 Non-critical error occurred during resource retrieval: pods is forbidden: User \"system:serviceaccount:kubernetes-dashboard:kubernetes-dashboard\" cannot list resource \"pods\" in API group \"\" in the namespace \"default\"\r\n2020\/04\/08 01:54:31 Non-critical error occurred during resource retrieval: events is forbidden: User \"system:serviceaccount:kubernetes-dashboard:kubernetes-dashboard\" cannot list resource \"events\" in API group \"\" in the namespace 
\"default\"\r\n2020\/04\/08 01:54:31 Getting pod metrics\r\n2020\/04\/08 01:54:31 [2020-04-08T01:54:31Z] Outcoming response to 192.168.122.21:7788 with 200 status code\r\n2020\/04\/08 01:54:31 [2020-04-08T01:54:31Z] Incoming HTTP\/2.0 GET \/api\/v1\/replicaset\/default?itemsPerPage=10&page=1&sortBy=d,creationTimestamp request from 192.168.122.21:7788:\r\n2020\/04\/08 01:54:31 Getting list of all replica sets in the cluster\r\n2020\/04\/08 01:54:31 [2020-04-08T01:54:31Z] Incoming HTTP\/2.0 GET \/api\/v1\/replicationcontroller\/default?itemsPerPage=10&page=1&sortBy=d,creationTimestamp request from 192.168.122.21:7788:\r\n2020\/04\/08 01:54:31 Non-critical error occurred during resource retrieval: replicasets.apps is forbidden: User \"system:serviceaccount:kubernetes-dashboard:kubernetes-dashboard\" cannot list resource \"replicasets\" in API group \"apps\" in the namespace \"default\"\r\n2020\/04\/08 01:54:31 Non-critical error occurred during resource retrieval: pods is forbidden: User \"system:serviceaccount:kubernetes-dashboard:kubernetes-dashboard\" cannot list resource \"pods\" in API group \"\" in the namespace \"default\"\r\n2020\/04\/08 01:54:31 Non-critical error occurred during resource retrieval: events is forbidden: User \"system:serviceaccount:kubernetes-dashboard:kubernetes-dashboard\" cannot list resource \"events\" in API group \"\" in the namespace \"default\"\r\n2020\/04\/08 01:54:31 [2020-04-08T01:54:31Z] Outcoming response to 192.168.122.21:7788 with 200 status code\r\n2020\/04\/08 01:54:31 Getting list of all replication controllers in the cluster\r\n2020\/04\/08 01:54:31 Non-critical error occurred during resource retrieval: replicationcontrollers is forbidden: User \"system:serviceaccount:kubernetes-dashboard:kubernetes-dashboard\" cannot list resource \"replicationcontrollers\" in API group \"\" in the namespace \"default\"\r\n<\/pre>\n\u89e3\u51b3\u65b9\u6cd5<\/p>\n
\r\n[root@master01 ~]# kubectl create clusterrolebinding serviceaccount-cluster-admin --clusterrole=cluster-admin --group=system:serviceaccounts \r\nclusterrolebinding.rbac.authorization.k8s.io\/serviceaccount-cluster-admin created\r\n<\/pre>\n\u67e5\u770bdashboard\u65e5\u5fd7<\/p>\n
\r\n[root@master01 ~]# kubectl logs -f -n kubernetes-dashboard kubernetes-dashboard-5d4dc8b976-sdxx\r\n2020\/04\/08 02:07:03 Getting list of namespaces\r\n2020\/04\/08 02:07:03 [2020-04-08T02:07:03Z] Outcoming response to 192.168.122.21:7788 with 200 status code\r\n2020\/04\/08 02:07:08 [2020-04-08T02:07:08Z] Incoming HTTP\/2.0 GET \/api\/v1\/node?itemsPerPage=10&page=1&sortBy=d,creationTimestamp request from 192.168.122.21:7788:\r\n2020\/04\/08 02:07:08 [2020-04-08T02:07:08Z] Outcoming response to 192.168.122.21:7788 with 200 status code\r\n2020\/04\/08 02:07:08 [2020-04-08T02:07:08Z] Incoming HTTP\/2.0 GET \/api\/v1\/namespace request from 192.168.122.21:7788:\r\n2020\/04\/08 02:07:08 Getting list of namespaces\r\n2020\/04\/08 02:07:08 [2020-04-08T02:07:08Z] Outcoming response to 192.168.122.21:7788 with 200 status code\r\n2020\/04\/08 02:07:13 [2020-04-08T02:07:13Z] Incoming HTTP\/2.0 GET \/api\/v1\/node?itemsPerPage=10&page=1&sortBy=d,creationTimestamp request from 192.168.122.21:7788:\r\n2020\/04\/08 02:07:13 [2020-04-08T02:07:13Z] Outcoming response to 192.168.122.21:7788 with 200 status code\r\n2020\/04\/08 02:07:13 [2020-04-08T02:07:13Z] Incoming HTTP\/2.0 GET \/api\/v1\/namespace request from 192.168.122.21:7788:\r\n2020\/04\/08 02:07:13 Getting list of namespaces\r\n2020\/04\/08 02:07:13 [2020-04-08T02:07:13Z] Outcoming response to 192.168.122.21:7788 with 200 status code\r\n2020\/04\/08 02:07:18 [2020-04-08T02:07:18Z] Incoming HTTP\/2.0 GET \/api\/v1\/node?itemsPerPage=10&page=1&sortBy=d,creationTimestamp request from 192.168.122.21:7788:\r\n2020\/04\/08 02:07:18 [2020-04-08T02:07:18Z] Outcoming response to 192.168.122.21:7788 with 200 status code\r\n2020\/04\/08 02:07:18 [2020-04-08T02:07:18Z] Incoming HTTP\/2.0 GET \/api\/v1\/namespace request from 192.168.122.21:7788:\r\n2020\/04\/08 02:07:18 Getting list of namespaces\r\n2020\/04\/08 02:07:18 [2020-04-08T02:07:18Z] Outcoming response to 192.168.122.21:7788 with 200 status code\r\n2020\/04\/08 
02:07:23 [2020-04-08T02:07:23Z] Incoming HTTP\/2.0 GET \/api\/v1\/node?itemsPerPage=10&page=1&sortBy=d,creationTimestamp request from 192.168.122.21:7788:\r\n2020\/04\/08 02:07:23 [2020-04-08T02:07:23Z] Outcoming response to 192.168.122.21:7788 with 200 status code\r\n2020\/04\/08 02:07:23 [2020-04-08T02:07:23Z] Incoming HTTP\/2.0 GET \/api\/v1\/namespace request from 192.168.122.21:7788:\r\n2020\/04\/08 02:07:23 Getting list of namespaces\r\n2020\/04\/08 02:07:23 [2020-04-08T02:07:23Z] Outcoming response to 192.168.122.21:7788 with 200 status code\r\n2020\/04\/08 02:07:28 [2020-04-08T02:07:28Z] Incoming HTTP\/2.0 GET \/api\/v1\/node?itemsPerPage=10&page=1&sortBy=d,creationTimestamp request from 192.168.122.21:7788:\r\n2020\/04\/08 02:07:28 [2020-04-08T02:07:28Z] Outcoming response to 192.168.122.21:7788 with 200 status code\r\n2020\/04\/08 02:07:28 [2020-04-08T02:07:28Z] Incoming HTTP\/2.0 GET \/api\/v1\/namespace request from 192.168.122.21:7788:\r\n2020\/04\/08 02:07:28 Getting list of namespaces\r\n2020\/04\/08 02:07:28 [2020-04-08T02:07:28Z] Outcoming response to 192.168.122.21:7788 with 200 status code\r\n2020\/04\/08 02:07:33 [2020-04-08T02:07:33Z] Incoming HTTP\/2.0 GET \/api\/v1\/node?itemsPerPage=10&page=1&sortBy=d,creationTimestamp request from 192.168.122.21:7788:\r\n2020\/04\/08 02:07:33 [2020-04-08T02:07:33Z] Outcoming response to 192.168.122.21:7788 with 200 status code\r\n<\/pre>\n\u6b64\u65f6\u518d\u67e5\u770bdashboard\uff0c\u5373\u53ef\u770b\u5230\u6709\u8d44\u6e90\u5c55\u793a<\/p>\n
<\/p>\n
\u6309\u7167\u4ee5\u4e0a\u6b65\u9aa4\u53ef\u5728CentOS8\u6210\u529f\u5b89\u88c5k8s1.18\u7248\u672c\u3002<\/p>\n","protected":false},"excerpt":{"rendered":"
\u67e5\u770b\u7cfb\u7edf\u7248\u672c [root@localhost]# cat \/etc\/centos-release CentOS […]<\/p>\n","protected":false},"author":1479,"featured_media":200232,"comment_status":"closed","ping_status":"closed","sticky":false,"template":"","format":"standard","meta":{"_acf_changed":false,"footnotes":""},"categories":[55],"tags":[],"class_list":["post-205517","post","type-post","status-publish","format-standard","has-post-thumbnail","hentry","category-thread"],"acf":[],"_links":{"self":[{"href":"https:\/\/lrxjmw.cn\/wp-json\/wp\/v2\/posts\/205517","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/lrxjmw.cn\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/lrxjmw.cn\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/lrxjmw.cn\/wp-json\/wp\/v2\/users\/1479"}],"replies":[{"embeddable":true,"href":"https:\/\/lrxjmw.cn\/wp-json\/wp\/v2\/comments?post=205517"}],"version-history":[{"count":3,"href":"https:\/\/lrxjmw.cn\/wp-json\/wp\/v2\/posts\/205517\/revisions"}],"predecessor-version":[{"id":205522,"href":"https:\/\/lrxjmw.cn\/wp-json\/wp\/v2\/posts\/205517\/revisions\/205522"}],"wp:featuredmedia":[{"embeddable":true,"href":"https:\/\/lrxjmw.cn\/wp-json\/wp\/v2\/media\/200232"}],"wp:attachment":[{"href":"https:\/\/lrxjmw.cn\/wp-json\/wp\/v2\/media?parent=205517"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/lrxjmw.cn\/wp-json\/wp\/v2\/categories?post=205517"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/lrxjmw.cn\/wp-json\/wp\/v2\/tags?post=205517"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}