


Kubernetes Cluster Installation

1. Cluster Planning
Hostname   IP Address         Cluster Role   CPU (>=)   Memory (>=)
servera    172.16.220.10/24   Master         2C         2G
serverb    172.16.220.20/24   Worker         2C         2G
serverc    172.16.220.30/24   Worker         2C         2G
2. Required Preparation
  • Steps to run on all nodes
# 1. Disable the firewall
systemctl stop firewalld
systemctl disable firewalld

# 2. Sync the time. Note: whether a VM was shut down or suspended,
# re-sync the time each time you resume working with it.
yum install ntpdate -y
ntpdate time.windows.com

# 3. Set the hostname (run the matching command on its own node)
hostnamectl set-hostname servera
hostnamectl set-hostname serverb
hostnamectl set-hostname serverc

# 4. Configure /etc/hosts (the IPs must match the cluster plan above)
cat >> /etc/hosts << EOF
172.16.220.10 servera
172.16.220.20 serverb
172.16.220.30 serverc
EOF
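A quick optional check that name resolution matches the cluster plan on every node:

# Each hostname should resolve to its IP from the planning table
getent hosts servera serverb serverc
ping -c 1 serverb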

  • Revert all VMs and verify their settings: every machine must have 2 CPUs and 2 GB of RAM. These minimums must be met; adjust the CPU count if needed.

  • Disable the swap partition, set kernel parameters, and enable command-line auto-completion on every machine (servera, serverb, serverc); a note on disabling swap immediately follows the fstab edit below

# Disable the swap partition: edit /etc/fstab and comment out the swap entry
$ vim /etc/fstab 
#/dev/mapper/centos-swap swap     swap    defaults      0 0
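Commenting out the fstab entry only takes effect at the next boot. To turn swap off immediately as well (a standard extra step, not in the original notes):

# Disable swap right away; the fstab edit keeps it off after reboot
$ swapoff -a
# Verify: the Swap row should show 0 total/used
$ free -m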

# Adjust kernel parameters
$ vim  /etc/sysctl.conf 
net.bridge.bridge-nf-call-iptables = 1

# Load the bridge module, then apply the sysctl.conf kernel parameters
$ modprobe br_netfilter
$ sysctl  -p

# sysctl -p reports an error here if br_netfilter has not been loaded yet:
[root@serverb ~]# sysctl -p
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
[root@serverb ~]# modprobe br_netfilter
[root@serverb ~]# sysctl -p
net.bridge.bridge-nf-call-iptables = 1
[root@servera ~]# ls /proc/sys/net/bridge
bridge-nf-call-arptables  bridge-nf-call-iptables        bridge-nf-filter-vlan-tagged
bridge-nf-call-ip6tables  bridge-nf-filter-pppoe-tagged  bridge-nf-pass-vlan-input-dev
[root@servera ~]#
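Note that modprobe does not persist across reboots. One way to load br_netfilter automatically at boot (a sketch assuming the systemd-based CentOS 7 used here):

# Have systemd-modules-load pull in br_netfilter on every boot
$ cat > /etc/modules-load.d/br_netfilter.conf << EOF
br_netfilter
EOF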

# Install the bash command-line completion tool
$ yum install bash-completion -y
$ reboot 

# Adjust environment variables: append the following two lines to the end of /root/.bash_profile
$ vim /root/.bash_profile
. <(kubeadm completion bash)
. <(kubectl completion bash)

# Install the required base packages
$ yum install -y yum-utils device-mapper-persistent-data lvm2

# Check whether any Docker packages are already installed; if so, remove them
$ rpm -qa |grep docker
docker-ce-rootless-extras-20.10.17-3.el7.x86_64
docker-scan-plugin-0.17.0-3.el7.x86_64
docker-ce-20.10.17-3.el7.x86_64
docker-ce-cli-20.10.17-3.el7.x86_64

$ rpm -e --nodeps <package-name>
rpm -e --nodeps docker-ce-rootless-extras-20.10.17-3.el7.x86_64
rpm -e --nodeps docker-scan-plugin-0.17.0-3.el7.x86_64
rpm -e --nodeps docker-ce-20.10.17-3.el7.x86_64
rpm -e --nodeps docker-ce-cli-20.10.17-3.el7.x86_64
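If there are many Docker packages, the same removal can be scripted in one line (equivalent to the individual commands above):

# Remove every installed docker* package in one pass
$ rpm -qa | grep docker | xargs -r rpm -e --nodeps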

3. Upload the Offline Packages to servera, serverb, and serverc
  • Download the offline installation package for the cluster from Baidu Netdisk

# Download address
Link: https://pan.baidu.com/s/1TD6G2JKUoZsJ18NBLeQMRA  Password: c3cr
  • Use a file-transfer tool to upload the files to the /tmp directory on servera, serverb, and serverc; every machine needs the upload, and everything goes into /tmp
  • Extract the archives; the image-k8s.tar file does not need to be extracted
# Copy commands run from Windows
scp -r D:\01-R-training\07-kuberbets-20210316\k8s-offline-software\* root@172.16.220.10:/tmp

scp -r D:\01-R-training\07-kuberbets-20210316\k8s-offline-software\* root@172.16.220.20:/tmp

scp -r D:\01-R-training\07-kuberbets-20210316\k8s-offline-software\* root@172.16.220.30:/tmp

$  cd /tmp
$  tar xvf rpm-k8s.tar.bz2
$  tar xvf rpm-docker.tar.bz2
4. Install docker-ce on All Three Machines
  • Install docker-ce (servera, serverb, serverc). It must be installed with the yum localinstall command; installing straight from the internet repos would pull Docker 20.10 instead of the bundled version.
$  yum localinstall -y /tmp/rpm-docker/*.rpm
  • Start the Docker service (servera, serverb, serverc); a version check follows below
$  systemctl enable --now docker 

# Equivalent to running start first and then enable
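To confirm that the offline Docker build was installed rather than 20.10 (the node info later in this document reports docker://18.9.9), a quick check:

# Should print the version bundled in rpm-docker, e.g. 18.09.x
$ docker version --format '{{.Server.Version}}'
$ systemctl is-active docker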
5. Install the Kubernetes Packages
  • Install the k8s packages (servera, serverb, serverc)
$ yum localinstall -y /tmp/rpm-k8s/*.rpm
  • Enable the kubelet service (servera, serverb, serverc)
$ systemctl enable --now docker kubelet

# Equivalent to running start first and then enable
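kubelet will restart in a loop until kubeadm init runs; that is expected at this stage. A quick sanity check of the installed versions (expected v1.16.12 here, depending on the offline bundle):

# Verify the versions installed from rpm-k8s
$ kubeadm version -o short
$ kubelet --version
$ kubectl version --client --short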
6. Load the Required Kubernetes Images
  • Load the Docker images (servera, serverb, serverc)
$ docker load -i /tmp/image-k8s.tar

# Verify with the docker images command
$  docker images 
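To cross-check the loaded images against what kubeadm needs (the exact repository names depend on how the offline bundle was built):

# Core components that must be present for an offline install
$ docker images | grep -E 'kube-apiserver|kube-controller-manager|kube-scheduler|kube-proxy|etcd|coredns|pause|flannel'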
7. Set the Docker Daemon Parameters
  • Create the Docker daemon config file (servera, serverb, serverc)
$ cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF
  • Reload the systemd configuration and restart the service (servera, serverb, serverc)
$ systemctl daemon-reload
$ systemctl restart docker
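After the restart, Docker should report the systemd cgroup driver so that it matches the kubelet; verify with:

# Must print "Cgroup Driver: systemd"
$ docker info 2>/dev/null | grep -i 'cgroup driver'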
8. Create kubeadm-config.yaml and Install the Cluster
  • Create the kubeadm-config.yaml file (servera)
$ cat > /tmp/kubeadm-config.yaml <<EOF
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
kind: ClusterConfiguration
kubernetesVersion: v1.16.12
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs

EOF
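The config above puts kube-proxy in IPVS mode, which needs the ip_vs kernel modules; they are not loaded anywhere in the steps above. A sketch for loading them now and on every boot (module names match the CentOS 7 3.10 kernel used here; without them kube-proxy falls back to iptables mode):

# Load the IPVS modules kube-proxy needs and persist them across reboots
$ cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF
$ for m in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4; do modprobe $m; done
$ lsmod | grep -e ip_vs -e nf_conntrack_ipv4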
  • Initialize the cluster with kubeadm (servera)
$ kubeadm init --upload-certs --config /tmp/kubeadm-config.yaml

systemctl status kubelet
# If initialization fails, clean up the old installation before retrying:
1) Run the reset command: kubeadm reset -f

2) Delete all related data
rm -rf /etc/cni /etc/kubernetes /var/lib/dockershim /var/lib/etcd /var/lib/kubelet /var/run/kubernetes ~/.kube/*

3) Flush all firewall (iptables) rules
iptables -F && iptables -X
iptables -t nat -F && iptables -t nat -X
iptables -t raw -F && iptables -t raw -X
iptables -t mangle -F && iptables -t mangle -X

4) Restart the Docker service
systemctl restart docker
kubeadm reset
systemctl disable kubelet
rm -rf /etc/systemd/system/kubelet.service  
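Since kube-proxy runs in IPVS mode in this setup, leftover IPVS rules can be flushed during the reset too (assumes the ipvsadm tool is installed):

# Flush any IPVS virtual-server rules left behind by kube-proxy
ipvsadm --clear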

## Copy the commands from the generated output below
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.192.129:6443 --token 8ow2fk.7qfwswis5od7c389 \
    --discovery-token-ca-cert-hash sha256:7d5b404aa17ca7aeef938ee4ccc42bde7386241d9d0c2e5a6e74c3fe77848098 
[root@servera tmp]# 


9. Configure the kubectl Config File
  • Set up the API connection config for kubectl (servera)
$ mkdir -p $HOME/.kube
$ cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
$  chown $(id -u):$(id -g) $HOME/.kube/config
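With the config in place, kubectl should now reach the API server; a quick verification:

# Both commands should answer without certificate or connection errors
$ kubectl cluster-info
$ kubectl get nodes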
10. Deploy the Kubernetes Network
  • Create the flannel network manifest
$  vim  kube-flannel.yml 
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.13.1-rc1
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.13.1-rc1
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
  • Apply the manifest (servera)
$ kubectl apply -f  kube-flannel.yml 

With an explicit path: kubectl apply -f /tmp/kube-flannel.yml 
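To wait until the flannel DaemonSet has rolled out on all nodes (a convenience check, not part of the original steps):

# Blocks until every node is running a ready kube-flannel pod
$ kubectl -n kube-system rollout status ds/kube-flannel-ds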

  • Check that the flannel pod exists and is running (servera)
# Right after applying, some pods may still be in Pending state
$ kubectl  get pods -n kube-system

# kube-system is the namespace that holds the system components
[root@servera ~]# kubectl get pods -n kube-system
NAME                                                   READY   STATUS     RESTARTS   AGE
coredns-5644d7b6d9-k9d28                               0/1     Pending    0          3m30s
coredns-5644d7b6d9-z7564                               0/1     Pending    0          3m30s
etcd-servera.training.example.com                      1/1     Running    0          2m37s
kube-apiserver-servera.training.example.com            1/1     Running    0          2m40s
kube-controller-manager-servera.training.example.com   1/1     Running    0          2m27s
kube-flannel-ds-nb6ns                                  0/1     Init:0/1   0          53s
kube-proxy-cz7ff                                       1/1     Running    0          3m29s
kube-scheduler-servera.training.example.com            1/1     Running    0          2m42s
[root@servera ~]# 

11. Join the Other Nodes to the Cluster
  • Join the cluster (serverb, serverc)
# Run the join command printed by kubeadm init on servera
$  kubeadm join 172.16.220.10:6443 --token tss6it.kn7j83kseixt0v8u     --discovery-token-ca-cert-hash sha256:707e3776b1d4377a331523a8b51b062d3cb86305b05083be8f3be72017d58059
  • Scaling the cluster: to add more nodes later, regenerate the join command
$ kubeadm  token  create --print-join-command
# My output looks like this
[root@servera ~]# kubeadm  token  create --print-join-command
kubeadm join 172.16.220.10:6443 --token 9q7jq5.em3ozm547y3znjin     --discovery-token-ca-cert-hash sha256:f268772578ce0e1ab65639fe6d03f65a6a0667bfbc0701c6d4245af13c510b73 
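Bootstrap tokens expire after 24 hours by default, so check what is still valid before reusing one:

# List the bootstrap tokens with their TTLs and usages
$ kubeadm token list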
 

12. Check That the Cluster Nodes Are Healthy
  • Get cluster node status (servera)
$ kubectl  get nodes 
[root@servera ~]# kubectl  get nodes 
NAME                           STATUS     ROLES    AGE   VERSION
servera.training.example.com   Ready     master   20m   v1.16.12
serverb.training.example.com   NotReady  <none>   19s   v1.16.12
serverc.training.example.com   NotReady  <none>   18s   v1.16.12

# A few minutes later
[root@servera ~]# kubectl  get nodes 
NAME                           STATUS   ROLES    AGE   VERSION
servera.training.example.com   Ready    master   20m   v1.16.12
serverb.training.example.com   Ready    <none>   35s   v1.16.12
serverc.training.example.com   Ready    <none>   34s   v1.16.12
13. Post-install Test
# Adjust environment variables: append the following two lines to the end of the file; run on all three machines
$ vim /root/.bash_profile
. <(kubeadm completion bash)
. <(kubectl completion bash)

$ source /root/.bash_profile

# Start an app (any app will do)
# -l attaches a label to the created resources
$ kubectl  create  namespace  training
$ kubectl  run  first-app  -l app=first-app  --image=nginx:1.16 -n training
$ kubectl get pods -n training
$ kubectl describe pods first-app-xxxxxx-yyyyy -n training
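To confirm the nginx pod actually serves traffic over the flannel network, one way is to curl its pod IP from any cluster node (the <pod-ip> below is a placeholder; use whatever kubectl reports):

# Find the pod IP, then fetch the nginx welcome page from a node
$ kubectl get pods -n training -o wide
$ curl -s http://<pod-ip> | head -n 4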

13.1 Worker (slave) nodes cannot run cluster kubectl commands
  • 13.1.1 Copy /etc/kubernetes/admin.conf from the master to the worker node
# 1. Copy it to the worker node first
[root@servera ~]# scp -r /etc/kubernetes/admin.conf root@serverc:/etc/kubernetes/admin.conf
admin.conf                                                                                    100% 5453     1.8MB/s   00:00    
[root@servera ~]# 


  • 13.1.2 Configure the environment variable on the worker node
# 2. Configure the environment variable
[root@serverc kubernetes]# echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
[root@serverc kubernetes]# source ~/.bash_profile 

# Run kubectl get nodes on the worker; output like the following means it worked
[root@serverc kubernetes]# kubectl get nodes
NAME                           STATUS   ROLES    AGE   VERSION
servera.training.example.com   Ready    master   46m   v1.16.12
serverc.training.example.com   Ready    <none>   39m   v1.16.12
[root@serverc kubernetes]# 
13.2 Contents of /etc/kubernetes/admin.conf

[root@servera ~]# cat /etc/kubernetes/admin.conf 
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeE1ETXhOakV4TURnek5Wb1hEVE14TURNeE5ERXhNRGd6TlZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHJtClVtSmVvRk9POXN1SzRJeGVseUIrZGZXVVEzLzRqcFZuRnNnaDdZcC90SjBoem9pTmtlYTB2WjV1QjhTQzNLNFcKK0dQdHEzTnRsTWJXdjVIdERVMHp4NTQvb3JiVUFBc3A1Ui9ZTnFGSXMzdjRYZlFRWmdBVldpdkR6R0hRWnVBdQpQRnhTUTJCM3B2aGJPb0l6V3o3NFJYM1hBRkNXY2NUNXptd1djeEtRdEpiZDNBZEc5TVJNS3R3SUE3VHlsQTdBCkFKeU44Q1pEMEhRWWtNQjJuRkVZVEtYSmxGL1A5L3N4Rk9HN2dYUXBJK0pWK3oxejZtR2RNcDJ3bU1pdXEvdkEKazYzNHovWVRzMjhSZmxjT1o5bll6RGU0QXNONEZJR1RnRmY5V3BZbkcyZDNENDgrZVVaZzAxeDNOVVlKc21seAo3V2NTQnM1b1lnNVVTeURNVlA4Q0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFKalNEMU9kOVpVSDZoamc3SmxPY2N5d014ekUKL0xTcHZnN3hlZUF6KytpSC9TME1udjkxRDZnL0JIRUVJK29XS2JBZklGemV0YkZ5QVNFV0I2RmZWb0pMbjI3Vgo1QU9DejZBUlRqZHJjNjJIQTl4eG1RNFBBZmlxU2V6UWZnTnFPamVnNzlXTTF1aHpjdFVsZ0N6MmUxRGowenIxCndRUWM5NHo2Kzh0NndjdTN1MkhDN2VrNWx1emRqNnY3KzlDTmFGcHVaOFBsVGEwVlNldU1lNkMwWjBxNUV0UU4KTUVNT1Y0b3FKK0w4dSsyM0wzK2wwZnVvL0FIT3FPTkNFa1BZejVpbDBnOENvUWNhN2tHM1NBQmQweUtGYkt3RgpmTUxscVFrdWxaaGt4YlE4MXk5MlI0bTZVRHhJYjhlQkJDaE5VZ0RMVURrdUt1bFphTkVsanE3bTV3QT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
    server: https://172.16.220.10:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
  user:
    client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM4akNDQWRxZ0F3SUJBZ0lJSHNyWENuZnYwVE13RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TVRBek1UWXhNVEE0TXpWYUZ3MHlNakF6TVRZeE1UQTRNemRhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQW4wNy9EUGd1MURacTZwRTMKRWQ5dVdLc0tHV0VQVVJnbW5wMkF6SFVHc2FiMGlNWHB4UUNuZG1JK2J0czh3Qk83OUNCSVU5YThxN29GMXlPdgpSV29vam0wR1BpVVVhMi9XMmFtOWE0VlNZaXIxdUdHQkp5cmg1Vmo3Nkt5UExncDdReCs2L0lFTzFRL1o2bVJjCnNwYy9xRFk5dFdIRm1VK1RnRHJzclJacnloQWFlaWgxN2pNVHd0a1JheGZBc1ZUZXl2U3d0bjJKQ1FXQlhyOW0KV3Jnd2tBTjN5ZE5haTJ1c29hR3Rub2c5NzRWbXk4NUJZdUQ5MHBNaVZzZVhjRVFkYmZyRTBkK3hjcFZSYVdBaQpVWTVZMkZnMHZvMDNiOHJ5YkJ6TVNRWnRDZC91WGJIRUEzZHdvVU1vR1ZUaFduR3AzbFpKRkovdTlpVUdzQ25tCm5McEgyUUlEQVFBQm95Y3dKVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFDdGozd1NoOGtPMHhtU2JxWk4yNHhLVHJseWltQUVhWGVTYQo0a1ZtRi8vdHNpYkpTakFIZzdEVy9JbG0zeU84bmdIdWd2MzNRSzhOdEZzeWd6Q3F6SHowZGliVm9LRXp0VzF4CjNFY2dySmhVZmFHcXRnQ2dSeEdCRFdsVGdOd1lheWs2Vm9ESVVacXNwSjRDd2tWelJyS1hmV2kza1o3RFgrQ2EKZWxOOXA4WWRXaTQ2LzkybUZHV2t4K1A0NDI3RGlPVXFFY0xQd1pYMzFxRjIwUUgvUStzdHZPeXlSUEUvS0Y3Ugp6Q0ZQWDZ4TGN6cVdyUTFqWEhteWMxbzJWelhSbHYvSUhJWmR4aG03b295cnVGOGljbUNFTzJGbmN0eTlQOGlUCitWenVvTmVTZXE4RFljUHZ5anMrTDZjL3VUQ0xYYUhMNlpQNThEN1R6bVBHeUpZS2Y1WT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
    client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcGdJQkFBS0NBUUVBbjA3L0RQZ3UxRFpxNnBFM0VkOXVXS3NLR1dFUFVSZ21ucDJBekhVR3NhYjBpTVhwCnhRQ25kbUkrYnRzOHdCTzc5Q0JJVTlhOHE3b0YxeU92Uldvb2ptMEdQaVVVYTIvVzJhbTlhNFZTWWlyMXVHR0IKSnlyaDVWajc2S3lQTGdwN1F4KzYvSUVPMVEvWjZtUmNzcGMvcURZOXRXSEZtVStUZ0Ryc3JSWnJ5aEFhZWloMQo3ak1Ud3RrUmF4ZkFzVlRleXZTd3RuMkpDUVdCWHI5bVdyZ3drQU4zeWROYWkydXNvYUd0bm9nOTc0Vm15ODVCCll1RDkwcE1pVnNlWGNFUWRiZnJFMGQreGNwVlJhV0FpVVk1WTJGZzB2bzAzYjhyeWJCek1TUVp0Q2QvdVhiSEUKQTNkd29VTW9HVlRoV25HcDNsWkpGSi91OWlVR3NDbm1uTHBIMlFJREFRQUJBb0lCQVFDSjlXMG1IQjdobzZaLwpML1pTd2Z6YlNITmNleE02bFFHTXhyVHRXa0xMd2R0dVBvWXRGbDN2UlREdjYyZjJwcUJUejBrVFlaak4xakNNCm1OaUY4bUxneEVWdHQ4cXhKNDhWdThyUHZBc1YvRUlDeEYyR1A3cVVWTFVDVDcxR2o4a2kzZTJlRVYxNDRWdEQKZ2xwTVJuTFlCdkQxakpBRUxrZDcrb3RQY1dVeSs5Vkx2RkdEU2s1VGt1VWtaVHFvdC9UM2o4M0wweTdKWlZRSwpia2hLWERTeEh0a1BCNTdkNFhkY0hlZ3N5VlZ1cEcybG9TYy80cmlSRG1PcW1FYVFSZi9jVUltNkdFUCtBSVpQCkl3QWFKK3grQXNQZ0VkY3hKbHNRTWJWdktCREswblhyUU5zdFBTbzR6SnJ1ZnVSQVo0NXZNTWs3ZmVEelFzcmUKWkNyYVFYa2hBb0dCQU1sUHZXelR4TjV0L0NoaUdOL0k1Z0R2SnMrR2VZYmdocGR1QVBGSUcvcFBUMk9qcDlvcgpaNkJ0WFZxemRnM2Z5STlyZjRtSnFrcFBSVDlRZmF3NUY0VEQrcjJ6ZTlhdFhYcXVJN1JhUWdTdmJ1ekI0Y2V6CnpnSk9JQVVwZ3ptcjZEeW9YeXlZeEZNNmRVcWV4VitlR2ZqbTJ1ekhYL3JXYzQ0ZHQybFhacXJsQW9HQkFNcVcKSno1Q1VKeGlKVnVjS081M29zYnlTb05XOXpIKytwS1VEb2NtRHQ1NlI2K1dsUmpwTml5SG1jemR4RWNzcEpBKwpCY2tIa05LbjMyWUpyQW9NVkQ2bi90MGwraUhydndPUGFjcmozLzg1UlRmOUFUY1ZwUWhXUzZLdWlqWGptSVBoCmc5RzdDdW5ZTUxhM3g5Q2ZOcDExaUVKZEw0Ty9pMStCV3lVNFdqWGxBb0dCQUxvY0NoZU9RRGNTbjBnR1NOQzYKdmtpbXo4NlRsODVseklnNm5veHlDY3VEZnZXeTJ2OExtYVhxOVo5Z3dnZFpsK1Zma0d6cTVKQUdjUkp6cTN1SQozSUZWWFgzN2JNV3ZTUGlkL1V4REZUMlhvb2tuUlVDR3JqSGJZQ2JsbGZlL1FkaXlJQ3J1b0xJQkhXWjdHaVduCjJJTVBpZDNWQkpxMW4rY0crZ1ZkVkFFeEFvR0JBTCtSejloQzgyRitPZDl3UXhDRmx5clVNWHdUZjd3S1pyNjMKUFNtMTVuZWhZQ3A5bGM0eHNpWFhzS3VXeGlXYjVKMzEySmlxQzl2NEhQdXJoOEVVdVpGcVlVcVV2L3pWcGhKaQp5bUt1cThINFVTbEptWjJTWFRZT0RJWHcwUWQyaW1Ham5uaDBqRFJuV0dGZ1ZwWkVDR3hMbVNkUVlUOFBvV2NWCnRxL3lvdk8xQW9HQkFKYkxINHRwbllERlpUU2JOVEcvMUtqUlh1NWg5TXNaY1VXSWdtQ0U0L1BnV1hTUk4wdGUKWkNXYmRuMk9QOVgzTzVVWWk0Tm53ZWhpakNTUDFqTTRSc2RzeHRoVUNITTNTYTQ0QWlhNVorMU1QSXNmZWE5ZQo1Ymc3OHRJRTNWSmNPMmRSQi9KYUR0VVBHNzllRmh2RUYyUG15WDZUekpxR21YTW5xeXVNZmxFbwotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
[root@servera ~]# 
14. Recorded kubectl Commands
14.1 kubectl create namespace
[root@servera ~]# kubectl create namespace training 
namespace/training created
[root@servera ~]# kubectl get ns
NAME              STATUS   AGE
default           Active   8m15s
kube-node-lease   Active   8m16s
kube-public       Active   8m16s
kube-system       Active   8m17s
training          Active   9s
[root@servera ~]# 

14.2 kubectl run (any node)
[root@servera ~]# kubectl run first-app -l app=first-app --image=nginx:1.16 -n training
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/first-app created

# Run on a worker node
kubectl  run  app02  -l app=app02  --image=nginx:1.16 -n training

kubectl  run  app03  -l app=app03  --image=nginx:1.16 -n training

14.3 kubectl get pods
  1. Get pods in all namespaces: kubectl get pods --all-namespaces
  2. Get pods in one namespace: kubectl get pods -n training
[root@servera ~]# kubectl get pods -n training
NAME                        READY   STATUS              RESTARTS   AGE
first-app-b5fd44d75-nxl4q   0/1     ContainerCreating   0          61s
[root@servera ~]# kubectl get pods -n training
NAME                        READY   STATUS    RESTARTS   AGE
first-app-b5fd44d75-nxl4q   1/1     Running   0          114s


14.4 kubectl describe
  • The namespace must be given, otherwise kubectl searches the default namespace and finds nothing
  • kubectl describe pods <pod-name> -n training
[root@servera ~]# kubectl describe first-app-b5fd44d75-nxl4q
error: the server doesn't have a resource type "first-app-b5fd44d75-nxl4q"
[root@servera ~]# kubectl describe pods first-app-b5fd44d75-nxl4q
Error from server (NotFound): pods "first-app-b5fd44d75-nxl4q" not found
[root@servera ~]# kubectl describe pods first-app-b5fd44d75-nxl4q -n training
Name:         first-app-b5fd44d75-nxl4q
Namespace:    training
Priority:     0
Node:         serverc.training.example.com/172.16.220.30
Start Time:   Tue, 16 Mar 2021 19:20:22 +0800
Labels:       app=first-app
              pod-template-hash=b5fd44d75
Annotations:  <none>
Status:       Running
IP:           10.244.1.2
IPs:
  IP:           10.244.1.2
Controlled By:  ReplicaSet/first-app-b5fd44d75
Containers:
  first-app:
    Container ID:   docker://4915829e420365bab5ec728de823e29541d162ea598afc2ab526ba881edd12ee
    Image:          nginx:1.16
    Image ID:       docker-pullable://nginx@sha256:d20aa6d1cae56fd17cd458f4807e0de462caf2336f0b70b5eeb69fcaaf30dd9c
    Port:           <none>
    Host Port:      <none>
    State:          Running
      Started:      Tue, 16 Mar 2021 19:22:12 +0800
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-26jqn (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  default-token-26jqn:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-26jqn
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type    Reason     Age    From                                   Message
  ----    ------     ----   ----                                   -------
  Normal  Scheduled  3m47s  default-scheduler                      Successfully assigned training/first-app-b5fd44d75-nxl4q to serverc.training.example.com
  Normal  Pulling    3m45s  kubelet, serverc.training.example.com  Pulling image "nginx:1.16"
  Normal  Pulled     2m     kubelet, serverc.training.example.com  Successfully pulled image "nginx:1.16"
  Normal  Created    117s   kubelet, serverc.training.example.com  Created container first-app
  Normal  Started    117s   kubelet, serverc.training.example.com  Started container first-app
[root@servera ~]# 
14.5 kubectl get nodes
[root@servera ~]# kubectl get nodes
NAME                           STATUS   ROLES    AGE   VERSION
servera.training.example.com   Ready    master   37m   v1.16.12
serverc.training.example.com   Ready    <none>   30m   v1.16.12
[root@servera ~]# 

14.6 systemctl stop kubelet
  • Stop the kubelet: systemctl stop kubelet
  • Check the cluster nodes: kubectl get nodes

## 1. Check the node status
[root@servera ~]# kubectl get nodes
NAME                           STATUS   ROLES    AGE   VERSION
servera.training.example.com   Ready    master   72m   v1.16.12
serverc.training.example.com   Ready    <none>   65m   v1.16.12


## 2. Stop the kubelet
[root@servera ~]# systemctl stop kubelet

## 3. Check the node status again
[root@servera ~]# kubectl get nodes
NAME                           STATUS     ROLES    AGE   VERSION
servera.training.example.com   NotReady   master   73m   v1.16.12
serverc.training.example.com   Ready      <none>   66m   v1.16.12
[root@servera ~]# 




14.7 kubectl describe node
  • View node details: kubectl describe node
[root@servera ~]# kubectl describe node
Name:               servera.training.example.com
Roles:              master
Labels:             beta.kubernetes.io/arch=amd64
                    beta.kubernetes.io/os=linux
                    kubernetes.io/arch=amd64
                    kubernetes.io/hostname=servera.training.example.com
                    kubernetes.io/os=linux
                    node-role.kubernetes.io/master=
Annotations:        flannel.alpha.coreos.com/backend-data: {"VNI":1,"VtepMAC":"4a:3b:6b:29:5a:12"}
                    flannel.alpha.coreos.com/backend-type: vxlan
                    flannel.alpha.coreos.com/kube-subnet-manager: true
                    flannel.alpha.coreos.com/public-ip: 172.16.220.10
                    kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock
                    node.alpha.kubernetes.io/ttl: 0
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Tue, 16 Mar 2021 19:09:00 +0800
Taints:             node.kubernetes.io/unreachable:NoExecute
                    node-role.kubernetes.io/master:NoSchedule
                    node.kubernetes.io/unreachable:NoSchedule
Unschedulable:      false
Conditions:
  Type                 Status    LastHeartbeatTime                 LastTransitionTime                Reason              Message
  ----                 ------    -----------------                 ------------------                ------              -------
  NetworkUnavailable   False     Tue, 16 Mar 2021 19:12:57 +0800   Tue, 16 Mar 2021 19:12:57 +0800   FlannelIsUp         Flannel is running on this node
  MemoryPressure       Unknown   Tue, 16 Mar 2021 20:21:23 +0800   Tue, 16 Mar 2021 20:22:04 +0800   NodeStatusUnknown   Kubelet stopped posting node status.
  DiskPressure         Unknown   Tue, 16 Mar 2021 20:21:23 +0800   Tue, 16 Mar 2021 20:22:04 +0800   NodeStatusUnknown   Kubelet stopped posting node status.
  PIDPressure          Unknown   Tue, 16 Mar 2021 20:21:23 +0800   Tue, 16 Mar 2021 20:22:04 +0800   NodeStatusUnknown   Kubelet stopped posting node status.
  Ready                Unknown   Tue, 16 Mar 2021 20:21:23 +0800   Tue, 16 Mar 2021 20:22:04 +0800   NodeStatusUnknown   Kubelet stopped posting node status.
Addresses:
  InternalIP:  172.16.220.10
  Hostname:    servera.training.example.com
Capacity:
 cpu:                2
 ephemeral-storage:  38770180Ki
 hugepages-1Gi:      0
 hugepages-2Mi:      0
 memory:             2027968Ki
 pods:               110
Allocatable:
 cpu:                2
 ephemeral-storage:  35730597829
 hugepages-1Gi:      0
 hugepages-2Mi:      0
 memory:             1925568Ki
 pods:               110
System Info:
 Machine ID:                 4f6a0370e9cc4a319530254664f67a53
 System UUID:                D57E4D56-1F03-F2B3-DF03-6F7F9AD5DAF2
 Boot ID:                    9920ec06-dcac-408f-a08f-7e5014291134
 Kernel Version:             3.10.0-1062.el7.x86_64
 OS Image:                   CentOS Linux 7 (Core)
 Operating System:           linux
 Architecture:               amd64
 Container Runtime Version:  docker://18.9.9
 Kubelet Version:            v1.16.12
 Kube-Proxy Version:         v1.16.12
PodCIDR:                     10.244.0.0/24
PodCIDRs:                    10.244.0.0/24
Non-terminated Pods:         (8 in total)
  Namespace                  Name                                                    CPU Requests  CPU Limits  Memory Requests  Memory Limits  AGE
  ---------                  ----                                                    ------------  ----------  ---------------  -------------  ---
  kube-system                coredns-5644d7b6d9-k9d28                                100m (5%)     0 (0%)      70Mi (3%)        170Mi (9%)     82m
  kube-system                coredns-5644d7b6d9-z7564                                100m (5%)     0 (0%)      70Mi (3%)        170Mi (9%)     82m
  kube-system                etcd-servera.training.example.com                       0 (0%)        0 (0%)      0 (0%)           0 (0%)         81m
  kube-system                kube-apiserver-servera.training.example.com             250m (12%)    0 (0%)      0 (0%)           0 (0%)         81m
  kube-system                kube-controller-manager-servera.training.example.com    200m (10%)    0 (0%)      0 (0%)           0 (0%)         81m
  kube-system                kube-flannel-ds-nb6ns                                   100m (5%)     100m (5%)   50Mi (2%)        50Mi (2%)      79m
  kube-system                kube-proxy-cz7ff                                        0 (0%)        0 (0%)      0 (0%)           0 (0%)         82m
  kube-system                kube-scheduler-servera.training.example.com             100m (5%)     0 (0%)      0 (0%)           0 (0%)         81m
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource           Requests     Limits
  --------           --------     ------
  cpu                850m (42%)   100m (5%)
  memory             190Mi (10%)  390Mi (20%)
  ephemeral-storage  0 (0%)       0 (0%)
Events:              <none>


Name:               serverc.training.example.com
Roles:              <none>
Labels:             beta.kubernetes.io/arch=amd64
                    beta.kubernetes.io/os=linux
                    kubernetes.io/arch=amd64
                    kubernetes.io/hostname=serverc.training.example.com
                    kubernetes.io/os=linux
Annotations:        flannel.alpha.coreos.com/backend-data: {"VNI":1,"VtepMAC":"aa:d1:fe:dc:8b:8f"}
                    flannel.alpha.coreos.com/backend-type: vxlan
                    flannel.alpha.coreos.com/kube-subnet-manager: true
                    flannel.alpha.coreos.com/public-ip: 172.16.220.30
                    kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock
                    node.alpha.kubernetes.io/ttl: 0
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Tue, 16 Mar 2021 19:15:46 +0800
Taints:             <none>
Unschedulable:      false
Conditions:
  Type                 Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
  ----                 ------  -----------------                 ------------------                ------                       -------
  NetworkUnavailable   False   Tue, 16 Mar 2021 19:16:33 +0800   Tue, 16 Mar 2021 19:16:33 +0800   FlannelIsUp                  Flannel is running on this node
  MemoryPressure       False   Tue, 16 Mar 2021 20:31:00 +0800   Tue, 16 Mar 2021 20:20:50 +0800   KubeletHasSufficientMemory   kubelet has sufficient memory available
  DiskPressure         False   Tue, 16 Mar 2021 20:31:00 +0800   Tue, 16 Mar 2021 20:20:50 +0800   KubeletHasNoDiskPressure     kubelet has no disk pressure
  PIDPressure          False   Tue, 16 Mar 2021 20:31:00 +0800   Tue, 16 Mar 2021 20:20:50 +0800   KubeletHasSufficientPID      kubelet has sufficient PID available
  Ready                True    Tue, 16 Mar 2021 20:31:00 +0800   Tue, 16 Mar 2021 20:20:50 +0800   KubeletReady                 kubelet is posting ready status
Addresses:
  InternalIP:  172.16.220.30
  Hostname:    serverc.training.example.com
Capacity:
 cpu:                2
 ephemeral-storage:  37722116Ki
 hugepages-1Gi:      0
 hugepages-2Mi:      0
 memory:             2027968Ki
 pods:               110
Allocatable:
 cpu:                2
 ephemeral-storage:  34764702049
 hugepages-1Gi:      0
 hugepages-2Mi:      0
 memory:             1925568Ki
 pods:               110
System Info:
 Machine ID:                 b7122b397ee04469a8575c982f89104f
 System UUID:                962C4D56-EE94-5D7E-23E1-8B06A9E19810
 Boot ID:                    d7a8c90c-40fc-4b6a-a023-d0fe0fbd0197
 Kernel Version:             3.10.0-1062.el7.x86_64
 OS Image:                   CentOS Linux 7 (Core)
 Operating System:           linux
 Architecture:               amd64
 Container Runtime Version:  docker://18.9.9
 Kubelet Version:            v1.16.12
 Kube-Proxy Version:         v1.16.12
PodCIDR:                     10.244.1.0/24
PodCIDRs:                    10.244.1.0/24
Non-terminated Pods:         (7 in total)
  Namespace                  Name                         CPU Requests  CPU Limits  Memory Requests  Memory Limits  AGE
  ---------                  ----                         ------------  ----------  ---------------  -------------  ---
  kube-system                coredns-5644d7b6d9-44tp6     100m (5%)     0 (0%)      70Mi (3%)        170Mi (9%)     4m24s
  kube-system                coredns-5644d7b6d9-4h7r6     100m (5%)     0 (0%)      70Mi (3%)        170Mi (9%)     4m24s
  kube-system                kube-flannel-ds-dqg79        100m (5%)     100m (5%)   50Mi (2%)        50Mi (2%)      75m
  kube-system                kube-proxy-hvj9m             0 (0%)        0 (0%)      0 (0%)           0 (0%)         75m
  training                   app02-8dc96bddf-hgz6h        0 (0%)        0 (0%)      0 (0%)           0 (0%)         50m
  training                   app03-7bb69d77f-8nl56        0 (0%)        0 (0%)      0 (0%)           0 (0%)         21m
  training                   first-app-b5fd44d75-nxl4q    0 (0%)        0 (0%)      0 (0%)           0 (0%)         71m
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource           Requests     Limits
  --------           --------     ------
  cpu                300m (15%)   100m (5%)
  memory             190Mi (10%)  390Mi (20%)
  ephemeral-storage  0 (0%)       0 (0%)
Events:
  Type    Reason                   Age                From                                   Message
  ----    ------                   ----               ----                                   -------
  Normal  Starting                 10m                kubelet, serverc.training.example.com  Starting kubelet.
  Normal  NodeHasSufficientMemory  10m (x2 over 10m)  kubelet, serverc.training.example.com  Node serverc.training.example.com status is now: NodeHasSufficientMemory
  Normal  NodeHasNoDiskPressure    10m (x2 over 10m)  kubelet, serverc.training.example.com  Node serverc.training.example.com status is now: NodeHasNoDiskPressure
  Normal  NodeHasSufficientPID     10m (x2 over 10m)  kubelet, serverc.training.example.com  Node serverc.training.example.com status is now: NodeHasSufficientPID
  Normal  NodeNotReady             10m                kubelet, serverc.training.example.com  Node serverc.training.example.com status is now: NodeNotReady
  Normal  NodeAllocatableEnforced  10m                kubelet, serverc.training.example.com  Updated Node Allocatable limit across pods
  Normal  NodeReady                10m                kubelet, serverc.training.example.com  Node serverc.training.example.com status is now: NodeReady
[root@servera ~]# 


Basic Kubernetes Cluster Commands

1. Namespaces
  • By default, commands operate in the default namespace
# Check the cluster node status
$ kubectl  get nodes
NAME                           STATUS     ROLES    AGE     VERSION
servera.training.example.com   NotReady   master   4m36s   v1.16.12


$  kubectl get namespace
$  kubectl get ns
NAME              STATUS   AGE
default           Active   13h
kube-node-lease   Active   13h
kube-public       Active   13h
kube-system       Active   13h


# Create a namespace
$  kubectl create ns hefei
namespace/hefei created

$  kubectl  get ns  
NAME              STATUS   AGE
default           Active   13h
hefei             Active   4s
kube-node-lease   Active   13h
kube-public       Active   13h
kube-system       Active   13h
training          Active   13h
2. Viewing Resources
#pod
$ kubectl  get pods -n kube-system
$ kubectl  get pods -n training

#service
$ kubectl  get service -n kube-system 
NAME       TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE
kube-dns   ClusterIP   10.96.0.10   <none>        53/UDP,53/TCP,9153/TCP   13h

#deployment
$ kubectl  get deployment -n kube-system 
NAME      READY   UP-TO-DATE   AVAILABLE   AGE
coredns   2/2     2            2           13h

# rs = ReplicaSet
$ kubectl  get rs -n kube-system 
NAME                 DESIRED   CURRENT   READY   AGE
coredns-5644d7b6d9   2         2         2       13h

# Get all objects in a namespace
$ kubectl  get all -n hefei

# Delete an object
$ kubectl  delete  pod/demo-nginx-76bf78b864-zqjsf 
# Delete by label
$ kubectl  delete  all -l run=demo-nginx

$ kubectl  run  demo-nginx --image=nginx:1.16 --port=80  --replicas=5  --save-config=true -l app=demo-nginx -n hefei
# Creates a Deployment, a ReplicaSet, and Pods
$ kubectl  expose deployment  demo-nginx --port=80  --target-port=80 -n hefei
# Creates a Service

# Get all endpoints; a scaling example follows below
$ kubectl  get endpoints  -n hefei
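Scaling the Deployment created above is a one-liner (an extra example against the same hefei namespace):

# Change the replica count; the ReplicaSet adds or removes pods to match
$ kubectl scale deployment demo-nginx --replicas=3 -n hefei
$ kubectl get pods -n hefei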

3. Detailed Resource Information
$ kubectl  describe  pods coredns-5644d7b6d9-bzlvr -n kube-system 
Name:                 coredns-5644d7b6d9-bzlvr
Namespace:            kube-system
Priority:             2000000000
Priority Class Name:  system-cluster-critical
Node:                 serverc.training.example.com/172.16.220.30
Start Time:           Mon, 15 Mar 2021 21:28:25 +0800
Labels:               k8s-app=kube-dns
                      pod-template-hash=5644d7b6d9
Annotations:          <none>
Status:               Running
IP:                   10.244.1.16
IPs:
  IP:           10.244.1.16
Controlled By:  ReplicaSet/coredns-5644d7b6d9
Containers:
... ....


$ kubectl  describe rs coredns-5644d7b6d9  -n kube-system 
Name:           coredns-5644d7b6d9
Namespace:      kube-system
Selector:       k8s-app=kube-dns,pod-template-hash=5644d7b6d9
Labels:         k8s-app=kube-dns
                pod-template-hash=5644d7b6d9
Annotations:    deployment.kubernetes.io/desired-replicas: 2
                deployment.kubernetes.io/max-replicas: 3
                deployment.kubernetes.io/revision: 1
Controlled By:  Deployment/coredns
Replicas:       2 current / 2 desired
Pods Status:    2 Running / 0 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:           k8s-app=kube-dns
                    pod-template-hash=5644d7b6d9
  Service Account:  coredns
  Containers:
   coredns:
    Image:       k8s.gcr.io/coredns:1.6.2
    Ports:       53/UDP, 53/TCP, 9153/TCP
    Host Ports:  0/UDP, 0/TCP, 0/TCP
    Args:
      -conf
      /etc/coredns/Corefile
    Limits:
      memory:  170Mi
    Requests:
      cpu:        100m
      memory:     70Mi
... ...

$ kubectl  describe  svc kube-dns  -n kube-system 
Name:              kube-dns
Namespace:         kube-system
Labels:            k8s-app=kube-dns
                   kubernetes.io/cluster-service=true
                   kubernetes.io/name=KubeDNS
Annotations:       prometheus.io/port: 9153
                   prometheus.io/scrape: true
Selector:          k8s-app=kube-dns
Type:              ClusterIP
IP:                10.96.0.10
Port:              dns  53/UDP
TargetPort:        53/UDP
Endpoints:         10.244.1.16:53,10.244.1.20:53
Port:              dns-tcp  53/TCP
TargetPort:        53/TCP
Endpoints:         10.244.1.16:53,10.244.1.20:53
Port:              metrics  9153/TCP
TargetPort:        9153/TCP
Endpoints:         10.244.1.16:9153,10.244.1.20:9153
Session Affinity:  None
Events:            <none>
4. Modifying Resources (use with caution)
$ kubectl  edit  deployment coredns -n kube-system
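A safer pattern than editing live objects is to export the YAML, review the change, and re-apply it; a sketch:

# Export, edit offline, then apply the reviewed change
$ kubectl get deployment coredns -n kube-system -o yaml > /tmp/coredns.yaml
$ vim /tmp/coredns.yaml
$ kubectl apply -f /tmp/coredns.yaml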
