## Service rule chains

Trace a Service's NAT chains from OUTPUT through KUBE-SERVICES down to the per-service (KUBE-SVC-*) and per-endpoint (KUBE-SEP-*) chains:

```bash
iptables -t nat -nvL OUTPUT
iptables -t nat -nvL KUBE-SERVICES
iptables -t nat -nvL KUBE-SVC-NKMXGPDRPTPBYCFR
iptables -t nat -nvL KUBE-SEP-BEZQY5746EOCOLRD
```
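The KUBE-SVC-*/KUBE-SEP-* chain names are hashed, so they differ per cluster and per Service. A minimal sketch for finding the chain that belongs to a given Service (the `default/kubernetes` name here is only an example):

```bash
# kube-proxy records the Service's namespace/name in a rule comment,
# so grepping the saved nat table for it reveals the hashed chain name.
SVC="default/kubernetes"            # example Service; substitute your own
iptables-save -t nat | grep "$SVC"
# then dump the chain printed above, e.g.:
# iptables -t nat -nvL KUBE-SVC-XXXXXXXXXXXXXXXX
```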
## Calico network installation

Reference: https://yq.aliyun.com/articles/668151

Clean up any previous kubeadm/CNI state first:

```bash
kubeadm reset
systemctl stop kubelet
systemctl stop docker
rm -rf /var/lib/cni/
rm -rf /var/lib/kubelet/*
rm -rf /etc/cni/
ifconfig cni0 down
ifconfig flannel.1 down
ifconfig docker0 down
ip link delete cni0
ip link delete flannel.1
```

Inject the etcd TLS material into calico.yaml (the TLS_ETCD_* placeholders are replaced with base64-encoded file contents), then apply the manifests:

```bash
TLS_ETCD_CA=$(cat /etc/etcd/ssl/ca.pem | base64 | tr -d "\n")
TLS_ETCD_KEY=$(cat /etc/etcd/ssl/etcd-key.pem | base64 | tr -d "\n")
TLS_ETCD_CERT=$(cat /etc/etcd/ssl/etcd.pem | base64 | tr -d "\n")
sed -i "s#TLS_ETCD_KEY#$TLS_ETCD_KEY#g" calico.yaml
sed -i "s#TLS_ETCD_CERT#$TLS_ETCD_CERT#g" calico.yaml
sed -i "s#TLS_ETCD_CA#$TLS_ETCD_CA#g" calico.yaml
kubectl apply -f calico.yaml
kubectl apply -f rbac.yaml
```
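To sanity-check that the sed substitution produced valid base64 (an optional verification step, not part of the original procedure), the applied Secret can be decoded and compared against the source PEM:

```bash
# Decode the etcd CA out of calico-etcd-secrets and diff it against the
# original file; empty diff output means the substitution worked.
kubectl -n kube-system get secret calico-etcd-secrets \
  -o jsonpath='{.data.etcd-ca}' | base64 -d | diff - /etc/etcd/ssl/ca.pem
```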
The calico.yaml file is as follows (it still contains the TLS_ETCD_* placeholders that the sed commands above replace):

```yaml
# Calico Version v3.2.3
# https://docs.projectcalico.org/v3.2/releases#v3.2.3
# This manifest includes the following component versions:
#   calico/node:v3.2.3
#   calico/cni:v3.2.3
#   calico/kube-controllers:v3.2.3

# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # Configure this with the location of your etcd cluster.
  etcd_endpoints: "https://ETCD_LVS_HOST:2379"

  # If you're using TLS enabled etcd uncomment the following.
  # You must also populate the Secret below with these files.
  etcd_ca: "/calico-secrets/etcd-ca"
  etcd_cert: "/calico-secrets/etcd-cert"
  etcd_key: "/calico-secrets/etcd-key"

  # Configure the Calico backend to use.
  calico_backend: "bird"

  # Configure the MTU to use
  veth_mtu: "1440"

  # The CNI network configuration to install on each node. The special
  # values in this config will be automatically populated.
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.0",
      "plugins": [
        {
          "type": "calico",
          "log_level": "info",
          "etcd_endpoints": "__ETCD_ENDPOINTS__",
          "etcd_key_file": "__ETCD_KEY_FILE__",
          "etcd_cert_file": "__ETCD_CERT_FILE__",
          "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",
          "mtu": __CNI_MTU__,
          "ipam": {
            "type": "calico-ipam"
          },
          "policy": {
            "type": "k8s"
          },
          "kubernetes": {
            "kubeconfig": "__KUBECONFIG_FILEPATH__"
          }
        },
        {
          "type": "portmap",
          "snat": true,
          "capabilities": {"portMappings": true}
        }
      ]
    }

---

# The following contains k8s Secrets for use with a TLS enabled etcd cluster.
# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: calico-etcd-secrets
  namespace: kube-system
data:
  # Populate the following files with etcd TLS configuration if desired, but leave blank if
  # not using TLS for etcd.
  # This self-hosted install expects three files with the following names. The values
  # should be base64 encoded strings of the entire contents of each file.
  etcd-key: TLS_ETCD_KEY
  etcd-cert: TLS_ETCD_CERT
  etcd-ca: TLS_ETCD_CA

---

# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: calico-node
      annotations:
        # This, along with the CriticalAddonsOnly toleration below,
        # marks the pod as a critical add-on, ensuring it gets
        # priority scheduling and that its resources are reserved
        # if it ever gets evicted.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      nodeSelector:
        beta.kubernetes.io/os: linux
      hostNetwork: true
      tolerations:
        # Make sure calico-node gets scheduled on all nodes.
        - effect: NoSchedule
          operator: Exists
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoExecute
          operator: Exists
      serviceAccountName: calico-node
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
      containers:
        # Runs calico/node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: hub.yun.paic.com.cn/calico/node:v3.2.3
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Location of the CA certificate for etcd.
            - name: ETCD_CA_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_ca
            # Location of the client key for etcd.
            - name: ETCD_KEY_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_key
            # Location of the client certificate for etcd.
            - name: ETCD_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_cert
            # Set noderef for node controller.
            - name: CALICO_K8S_NODE_REF
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # Choose the backend to use.
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
            # Cluster type to identify the deployment type
            - name: CLUSTER_TYPE
              value: "k8s,bgp"
            # Auto-detect the BGP IP address.
            - name: IP
              value: "autodetect"
            # Enable IPIP
            - name: CALICO_IPV4POOL_IPIP
              value: "CrossSubnet"
            # Set MTU for tunnel device used if ipip is enabled
            - name: FELIX_IPINIPMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # The default IPv4 pool to create on startup if none exists. Pod IPs will be
            # chosen from this range. Changing this value after installation will have
            # no effect. This should fall within `--cluster-cidr`.
            - name: CALICO_IPV4POOL_CIDR
              value: "172.30.0.0/16"
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set Felix endpoint to host default action to ACCEPT.
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            # Disable IPv6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "false"
            # Set Felix logging to "info"
            - name: FELIX_LOGSEVERITYSCREEN
              value: "info"
            - name: FELIX_HEALTHENABLED
              value: "true"
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: 250m
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9099
              host: localhost
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
          readinessProbe:
            exec:
              command:
                - /bin/calico-node
                - -bird-ready
                - -felix-ready
            periodSeconds: 10
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
            - mountPath: /var/lib/calico
              name: var-lib-calico
              readOnly: false
            - mountPath: /calico-secrets
              name: etcd-certs
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: hub.yun.paic.com.cn/calico/cni:v3.2.3
          command: ["/install-cni.sh"]
          env:
            # Name of the CNI config file to create.
            - name: CNI_CONF_NAME
              value: "10-calico.conflist"
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
            # CNI MTU Config variable
            - name: CNI_MTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
            - mountPath: /calico-secrets
              name: etcd-certs
      volumes:
        # Used by calico/node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        - name: var-lib-calico
          hostPath:
            path: /var/lib/calico
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
        # Mount in the etcd TLS secrets with mode 400.
        # See https://kubernetes.io/docs/concepts/configuration/secret/
        - name: etcd-certs
          secret:
            secretName: calico-etcd-secrets
            defaultMode: 0400

---

apiVersion: v1
kind: ServiceAccount
imagePullSecrets:
  - name: default
metadata:
  name: calico-node
  namespace: kube-system

---

# This manifest deploys the Calico Kubernetes controllers.
# See https://github.com/projectcalico/kube-controllers
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: calico-kube-controllers
  namespace: kube-system
  labels:
    k8s-app: calico-kube-controllers
  annotations:
    scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
  # The controllers can only have a single active instance.
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-kube-controllers
      namespace: kube-system
      labels:
        k8s-app: calico-kube-controllers
    spec:
      nodeSelector:
        beta.kubernetes.io/os: linux
      # The controllers must run in the host network namespace so that
      # it isn't governed by policy that would prevent it from working.
      hostNetwork: true
      tolerations:
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      serviceAccountName: calico-kube-controllers
      containers:
        - name: calico-kube-controllers
          image: hub.yun.paic.com.cn/calico/kube-controllers:v3.2.3
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Location of the CA certificate for etcd.
            - name: ETCD_CA_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_ca
            # Location of the client key for etcd.
            - name: ETCD_KEY_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_key
            # Location of the client certificate for etcd.
            - name: ETCD_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_cert
            # Choose which controllers to run.
            - name: ENABLED_CONTROLLERS
              value: policy,profile,workloadendpoint,node
          volumeMounts:
            # Mount in the etcd TLS secrets.
            - mountPath: /calico-secrets
              name: etcd-certs
          readinessProbe:
            exec:
              command:
                - /usr/bin/check-status
                - -r
      volumes:
        # Mount in the etcd TLS secrets with mode 400.
        # See https://kubernetes.io/docs/concepts/configuration/secret/
        - name: etcd-certs
          secret:
            secretName: calico-etcd-secrets
            defaultMode: 0400

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-kube-controllers
  namespace: kube-system
```

The rbac.yaml file is as follows:

```yaml
# Calico Version v3.2.3
# https://docs.projectcalico.org/v3.2/releases#v3.2.3
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-kube-controllers
rules:
  - apiGroups:
      - ""
      - extensions
    resources:
      - pods
      - namespaces
      - networkpolicies
      - nodes
      - serviceaccounts
    verbs:
      - watch
      - list
  - apiGroups:
      - networking.k8s.io
    resources:
      - networkpolicies
    verbs:
      - watch
      - list
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-kube-controllers
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-kube-controllers
subjects:
  - kind: ServiceAccount
    name: calico-kube-controllers
    namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-node
rules:
  - apiGroups: [""]
    resources:
      - pods
      - nodes
    verbs:
      - get
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: calico-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-node
subjects:
  - kind: ServiceAccount
    name: calico-node
    namespace: kube-system
```
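After applying both manifests, a quick way to confirm the DaemonSet and controllers came up (labels as defined in the manifests above):

```bash
# Every node should run one calico-node pod; the controllers run a single replica.
kubectl -n kube-system get daemonset calico-node
kubectl -n kube-system get pods -l k8s-app=calico-node -o wide
kubectl -n kube-system get pods -l k8s-app=calico-kube-controllers
```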
## Allowing the master node to run workloads

In a cluster initialized with kubeadm, Pods are not scheduled onto the master node for security reasons; that is, the master does not take part in the workload. Since this is a test environment, the following commands let the master node accept workloads:

```bash
kubectl taint nodes docker10 node-role.kubernetes.io/master-
```

Output:

```
node "docker10" untainted
```

```bash
kubectl taint nodes --all node-role.kubernetes.io/master-
```

Output:

```
node "k8s" untainted
```

If this prints `error: taint "node-role.kubernetes.io/master:" not found`, the error can be ignored.

To stop pods from being scheduled on the master again:

```bash
kubectl taint nodes k8s node-role.kubernetes.io/master=true:NoSchedule
```
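To see what the taint commands actually left on the nodes (a verification sketch; the node names follow the examples above):

```bash
# Show the current taints on a single node and across the cluster.
kubectl describe node docker10 | grep -i taints
kubectl get nodes -o custom-columns=NAME:.metadata.name,TAINTS:.spec.taints
```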
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: https://kubernetes.io/docs/concepts/cluster-administration/addons/ You can now join any number of machines by running the following on each node as root: master1: kubeadm join 10.0.54.31:6443 --token 023601.eun138w5gqh1gq5x --discovery-token-ca-cert-hash sha256:235506061156d14433c970b8deee1d5f966eb848a973d7604476d841f7dc655e master2: kubeadm join 10.0.54.31:6443 --token go98sd.a3z0zbdsnnz1joqu --discovery-token-ca-cert-hash sha256:235506061156d14433c970b8deee1d5f966eb848a973d7604476d841f7dc655e master3: kubeadm join 10.0.54.31:6443 --token qmaafa.bqh0af3soxo56dnx --discovery-token-ca-cert-hash sha256:235506061156d14433c970b8deee1d5f966eb848a973d7604476d841f7dc655e ———————————————————————————— [root@master1 /mnt/nfs/v1.13.0]#cat config.yaml apiVersion: kubeadm.k8s.io/v1alpha3 kind: ClusterConfiguration kubernetesVersion: 1.13.0 controlPlaneEndpoint: "10.0.54.31:6443" apiServerExtraArgs: authorization-mode: "Node,RBAC" controllerManagerExtraArgs: node-cidr-mask-size: "23" networking: serviceSubnet: 10.6.64.0/20 podSubnet: 10.6.0.0/18 dnsDomain: bigtree.idc apiServerCertSANs: - master1 - master2 - master3 - 10.0.54.24 - 10.0.54.25 - 10.0.54.26 - 10.0.54.31 - kubernetes.bigtree.idc etcd: external: endpoints: - https://10.0.54.24:2379 - https://10.0.54.25:2379 - https://10.0.54.26:2379 caFile: /etc/etcd/ssl/ca.pem certFile: /etc/etcd/ssl/etcd.pem keyFile: /etc/etcd/ssl/etcd-key.pem dataDir: /var/lib/etcd token: b99a00.a144ef80536d4344 tokenTTL: "0" imageRepository: "k8s.gcr.io" ———————————————————————————— [init] Using Kubernetes version: v1.13.0 [preflight] Running pre-flight checks [WARNING HTTPProxyCIDR]: connection to "10.6.64.0/20" uses proxy "http://proxy.xcodes.com.cn:443". This may lead to malfunctional cluster setup. Make sure that Pod and Services IP ranges specified correctly as exceptions in proxy configuration [WARNING HTTPProxyCIDR]: connection to "10.6.0.0/18" uses proxy "http://proxy.xcodes.com.cn:443". This may lead to malfunctional cluster setup. 
## kubeadm init configuration

```
[root@master1 /mnt/nfs/v1.13.0]# cat config.yaml
```

```yaml
apiVersion: kubeadm.k8s.io/v1alpha3
kind: ClusterConfiguration
kubernetesVersion: 1.13.0
controlPlaneEndpoint: "10.0.54.31:6443"
apiServerExtraArgs:
  authorization-mode: "Node,RBAC"
controllerManagerExtraArgs:
  node-cidr-mask-size: "23"
networking:
  serviceSubnet: 10.6.64.0/20
  podSubnet: 10.6.0.0/18
  dnsDomain: bigtree.idc
apiServerCertSANs:
  - master1
  - master2
  - master3
  - 10.0.54.24
  - 10.0.54.25
  - 10.0.54.26
  - 10.0.54.31
  - kubernetes.bigtree.idc
etcd:
  external:
    endpoints:
      - https://10.0.54.24:2379
      - https://10.0.54.25:2379
      - https://10.0.54.26:2379
    caFile: /etc/etcd/ssl/ca.pem
    certFile: /etc/etcd/ssl/etcd.pem
    keyFile: /etc/etcd/ssl/etcd-key.pem
    dataDir: /var/lib/etcd
token: b99a00.a144ef80536d4344
tokenTTL: "0"
imageRepository: "k8s.gcr.io"
```
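Presumably the config is consumed in the usual way for kubeadm v1.13; pre-pulling the images is optional but avoids the preflight pauses visible in the log below:

```bash
# Pre-pull the control-plane images referenced by the config, then initialize.
kubeadm config images pull --config config.yaml
kubeadm init --config config.yaml
```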
Full `kubeadm init` log for this configuration:

```
[init] Using Kubernetes version: v1.13.0
[preflight] Running pre-flight checks
        [WARNING HTTPProxyCIDR]: connection to "10.6.64.0/20" uses proxy "http://proxy.xcodes.com.cn:443". This may lead to malfunctional cluster setup. Make sure that Pod and Services IP ranges specified correctly as exceptions in proxy configuration
        [WARNING HTTPProxyCIDR]: connection to "10.6.0.0/18" uses proxy "http://proxy.xcodes.com.cn:443". This may lead to malfunctional cluster setup. Make sure that Pod and Services IP ranges specified correctly as exceptions in proxy configuration
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver-kubelet-client certificate and key on disk
[certs] Using existing apiserver certificate and key on disk
[certs] Using existing front-proxy-ca certificate authority
[certs] Using existing front-proxy-client certificate and key on disk
[certs] External etcd mode: Skipping etcd/ca certificate authority generation
[certs] External etcd mode: Skipping etcd/server certificate authority generation
[certs] External etcd mode: Skipping etcd/healthcheck-client certificate authority generation
[certs] External etcd mode: Skipping apiserver-etcd-client certificate authority generation
[certs] External etcd mode: Skipping etcd/peer certificate authority generation
[certs] Using the existing "sa" key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Using existing up-to-date kubeconfig file: "/etc/kubernetes/admin.conf"
[kubeconfig] Using existing up-to-date kubeconfig file: "/etc/kubernetes/kubelet.conf"
[kubeconfig] Using existing up-to-date kubeconfig file: "/etc/kubernetes/controller-manager.conf"
[kubeconfig] Using existing up-to-date kubeconfig file: "/etc/kubernetes/scheduler.conf"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 16.003165 seconds
[uploadconfig] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.13" in namespace kube-system with the configuration for the kubelets in the cluster
[patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "master1" as an annotation
[mark-control-plane] Marking the node master1 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node master1 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: cymgqa.n7edy4ul6shor7ge
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstraptoken] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstraptoken] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstraptoken] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstraptoken] creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes master has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of machines by running the following on each node as root:

master1:
  kubeadm join 10.0.54.31:6443 --token o47c11.2a1eozucj1jvpx4q --discovery-token-ca-cert-hash sha256:f80bfe1bbb0f9663112010effccb164b0142e6f4dd5a9013e167646a657557e1
master2:
  kubeadm join 10.0.54.31:6443 --token he3jpl.zdr9e4wi43lqup4l --discovery-token-ca-cert-hash sha256:f80bfe1bbb0f9663112010effccb164b0142e6f4dd5a9013e167646a657557e1
master3:
  kubeadm join 10.0.54.31:6443 --token f5u2fi.snf8chwrbav4u4im --discovery-token-ca-cert-hash sha256:f80bfe1bbb0f9663112010effccb164b0142e6f4dd5a9013e167646a657557e1
```

## Changing the Docker cgroup driver

1. Change the cgroup-driver startup argument:

```bash
vim /usr/lib/systemd/system/docker.service
# ExecStart=/usr/bin/dockerd
ExecStart=/usr/bin/dockerd --exec-opt native.cgroupdriver=systemd
```

2. Restart Docker:

```bash
systemctl daemon-reload
systemctl restart docker
```
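After the restart, Docker and the kubelet should agree on the cgroup driver; a quick check under the systemd setting configured above:

```bash
# Docker's side of the setting.
docker info 2>/dev/null | grep -i 'cgroup driver'
# kubelet's side (the kubeadm-generated files must match).
grep -ri cgroup /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml 2>/dev/null
```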
description: "kubeadm bootstrap token" ttl: "24h" - token: "783bde.3f89s0fje9f38fhf" description: "another bootstrap token" usages: - signing groups: - system:anonymous nodeRegistration: name: "ec2-10-100-0-1" criSocket: "/var/run/dockershim.sock" taints: - key: "kubeadmNode" value: "master" effect: "NoSchedule" kubeletExtraArgs: cgroupDriver: "cgroupfs" apiEndpoint: advertiseAddress: "10.100.0.1" bindPort: 6443 --- apiVersion: kubeadm.k8s.io/v1alpha3 kind: ClusterConfiguration etcd: # one of local or external local: image: "k8s.gcr.io/etcd-amd64:3.2.18" dataDir: "/var/lib/etcd" extraArgs: listen-client-urls: "http://10.100.0.1:2379" serverCertSANs: - "ec2-10-100-0-1.compute-1.amazonaws.com" peerCertSANs: - "10.100.0.1" external: endpoints: - "10.100.0.1:2379" - "10.100.0.2:2379" caFile: "/etcd/kubernetes/pki/etcd/etcd-ca.crt" certFile: "/etcd/kubernetes/pki/etcd/etcd.crt" certKey: "/etcd/kubernetes/pki/etcd/etcd.key" networking: serviceSubnet: "10.96.0.0/12" podSubnet: "10.100.0.1/24" dnsDomain: "cluster.local" kubernetesVersion: "v1.12.0" controlPlaneEndpoint: "10.100.0.1:6443" apiServerExtraArgs: authorization-mode: "Node,RBAC" controllerManagerExtraArgs: node-cidr-mask-size: 20 schedulerExtraArgs: address: "10.100.0.1" apiServerExtraVolumes: - name: "some-volume" hostPath: "/etc/some-path" mountPath: "/etc/some-pod-path" writable: true pathType: File controllerManagerExtraVolumes: - name: "some-volume" hostPath: "/etc/some-path" mountPath: "/etc/some-pod-path" writable: true pathType: File schedulerExtraVolumes: - name: "some-volume" hostPath: "/etc/some-path" mountPath: "/etc/some-pod-path" writable: true pathType: File apiServerCertSANs: - "10.100.1.1" - "ec2-10-100-0-1.compute-1.amazonaws.com" certificatesDir: "/etc/kubernetes/pki" imageRepository: "k8s.gcr.io" unifiedControlPlaneImage: "k8s.gcr.io/controlplane:v1.12.0" auditPolicy: # https://kubernetes.io/docs/tasks/debug-application-cluster/audit/#audit-policy path: "/var/log/audit/audit.json" logDir: "/var/log/audit" logMaxAge: 7 # in days featureGates: selfhosting: false clusterName: "example-cluster" —————————————————————————————— keepalived VIP 配置 global_defs { router_id LVS_k8s } vrrp_script CheckK8sMaster { script "curl -k https://10.0.54.31:6443" interval 3 timeout 9 fall 2 rise 2 } vrrp_instance VI_1 { state MASTER interface eth0 virtual_router_id 61 # 主节点权重最高 依次减少 priority 120 advert_int 1 #修改为本地IP mcast_src_ip 10.0.54.24 nopreempt authentication { auth_type PASS auth_pass sqP05dQgMSlzrxHj } unicast_peer { #注释掉本地IP #10.0.54.24 10.0.54.25 10.0.54.26 } virtual_ipaddress { 10.0.54.31/24 } track_script { CheckK8sMaster } } —————————————————————————————— k8s.gcr.io/kube-apiserver:v1.13.0 k8s.gcr.io/kube-controller-manager:v1.13.0 k8s.gcr.io/kube-scheduler:v1.13.0 k8s.gcr.io/kube-proxy:v1.13.0 k8s.gcr.io/pause:3.1 k8s.gcr.io/etcd:3.2.24 k8s.gcr.io/coredns:1.2.6 ——————————————————————————————— [preflight] You can also perform this action in beforehand using 'kubeadm config images pull' error execution phase preflight: [preflight] Some fatal errors occurred: [ERROR ImagePull]: failed to pull image k8s.gcr.io/kube-apiserver:v1.13.0: output: Error response from daemon: Get https://k8s.gcr.io/v2/: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers) , error: exit status 1 [ERROR ImagePull]: failed to pull image k8s.gcr.io/kube-controller-manager:v1.13.0: output: Error response from daemon: Get https://k8s.gcr.io/v2/: net/http: request canceled while waiting for connection 
## Required images

```
k8s.gcr.io/kube-apiserver:v1.13.0
k8s.gcr.io/kube-controller-manager:v1.13.0
k8s.gcr.io/kube-scheduler:v1.13.0
k8s.gcr.io/kube-proxy:v1.13.0
k8s.gcr.io/pause:3.1
k8s.gcr.io/etcd:3.2.24
k8s.gcr.io/coredns:1.2.6
```

If these images cannot be pulled, the `kubeadm init` preflight fails like this:

```
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
error execution phase preflight: [preflight] Some fatal errors occurred:
        [ERROR ImagePull]: failed to pull image k8s.gcr.io/kube-apiserver:v1.13.0: output: Error response from daemon: Get https://k8s.gcr.io/v2/: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
, error: exit status 1
        [ERROR ImagePull]: failed to pull image k8s.gcr.io/kube-controller-manager:v1.13.0: output: Error response from daemon: Get https://k8s.gcr.io/v2/: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
, error: exit status 1
        [ERROR ImagePull]: failed to pull image k8s.gcr.io/kube-scheduler:v1.13.0: output: Error response from daemon: Get https://k8s.gcr.io/v2/: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
, error: exit status 1
        [ERROR ImagePull]: failed to pull image k8s.gcr.io/kube-proxy:v1.13.0: output: Error response from daemon: Get https://k8s.gcr.io/v2/: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
, error: exit status 1
        [ERROR ImagePull]: failed to pull image k8s.gcr.io/pause:3.1: output: Error response from daemon: Get https://k8s.gcr.io/v2/: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
, error: exit status 1
        [ERROR ImagePull]: failed to pull image k8s.gcr.io/etcd:3.2.24: output: Error response from daemon: Get https://k8s.gcr.io/v2/: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
, error: exit status 1
        [ERROR ImagePull]: failed to pull image k8s.gcr.io/coredns:1.2.6: output: Error response from daemon: Get https://k8s.gcr.io/v2/: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
, error: exit status 1
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
```
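The pulls fail because k8s.gcr.io is unreachable from behind the proxy. A common workaround, sketched here with an assumed mirror repository (substitute whatever registry is actually reachable, e.g. a private Harbor), is to pull the images from the mirror and retag them to the names kubeadm expects:

```bash
#!/bin/bash
# Pull the v1.13.0 images from a reachable mirror and retag them as
# k8s.gcr.io/* so kubeadm's preflight finds them in the local cache.
MIRROR=registry.cn-hangzhou.aliyuncs.com/google_containers   # assumed mirror
images=(
  kube-apiserver:v1.13.0
  kube-controller-manager:v1.13.0
  kube-scheduler:v1.13.0
  kube-proxy:v1.13.0
  pause:3.1
  etcd:3.2.24
  coredns:1.2.6
)
for img in "${images[@]}"; do
  docker pull "$MIRROR/$img"
  docker tag  "$MIRROR/$img" "k8s.gcr.io/$img"
  docker rmi  "$MIRROR/$img"
done
```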