Last login: Thu Mar 1 00:45:37 2018 from 192.168.1.248
[root@clusterserver1 ~]# uname -a
Linux clusterserver1.rmohan.com 3.10.0-693.17.1.el7.x86_64 #1 SMP Thu Jan 25 20:13:58 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
[root@clusterserver1 ~]#
Master : 192.168.1.20
Worker1 : 192.168.1.21
Worker2 : 192.168.1.22
Worker3 : 192.168.1.23
hostnamectl set-hostname 'clusterserver1.rmohan.com'
exec bash
setenforce 0
sed -i --follow-symlinks 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux
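To confirm the change, getenforce should now report Permissive (and Disabled after a reboot):
getenforce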
[root@clusterserver1 ~]# cat /etc/redhat-release
CentOS Linux release 7.4.1708 (Core)
[root@clusterserver1 ~]#
[root@clusterserver1 ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.20 clusterserver1.rmohan.com clusterserver1 master
192.168.1.21 clusterserver2.rmohan.com clusterserver2 worker1
192.168.1.22 clusterserver3.rmohan.com clusterserver3 worker2
192.168.1.23 clusterserver4.rmohan.com clusterserver4 worker3
[root@clusterserver1 ~]#
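Before going further it is worth confirming that every alias resolves; a quick loop such as this (assuming the /etc/hosts entries above) will do:
for h in master worker1 worker2 worker3; do ping -c 1 $h; done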
[root@clusterserver1 ~]# systemctl stop firewalld && systemctl disable firewalld
[root@clusterserver1 ~]# yum update -y
[root@clusterserver1 ~]# modprobe br_netfilter
[root@clusterserver1 ~]# sysctl net.bridge.bridge-nf-call-iptables=1
[root@clusterserver1 ~]# sysctl net.bridge.bridge-nf-call-ip6tables=1
[root@clusterserver1 ~]# cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
[root@clusterserver1 ~]# sysctl --system
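You can verify that both values are now set:
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables
Both should report = 1.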
cat <<EOF > /etc/yum.repos.d/virt7-dockerrelease.repo
[virt7-docker-common-release]
name=virt7-docker-common-release
baseurl=http://cbs.centos.org/repos/virt7-docker-common-release/x86_64/os/
gpgcheck=0
EOF
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
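As a quick sanity check that both new repositories are visible to yum (output will vary):
yum repolist | grep -E 'kubernetes|virt7'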
[root@clusterserver1 ~]# yum install ebtables ethtool
[root@clusterserver1 ~]# yum install -y kubelet kubeadm kubectl docker
# systemctl enable kubelet && systemctl start kubelet
After this, check that the kubelet is running:
[root@clusterserver1 ~]# systemctl status kubelet -l
● kubelet.service - kubelet: The Kubernetes Node Agent
Loaded: loaded (/etc/systemd/system/kubelet.service; enabled; vendor preset: disabled)
Drop-In: /etc/systemd/system/kubelet.service.d
└─10-kubeadm.conf
Active: activating (auto-restart) (Result: exit-code) since Thu 2018-03-01 16:16:40 +08; 8s ago
Docs: http://kubernetes.io/docs/
Process: 2504 ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_SYSTEM_PODS_ARGS $KUBELET_NETWORK_ARGS $KUBELET_DNS_ARGS $KUBELET_AUTHZ_ARGS $KUBELET_CADVISOR_ARGS $KUBELET_CGROUP_ARGS $KUBELET_CERTIFICATE_ARGS $KUBELET_EXTRA_ARGS (code=exited, status=1/FAILURE)
Main PID: 2504 (code=exited, status=1/FAILURE)
Mar 01 16:16:40 clusterserver1.rmohan.com systemd[1]: kubelet.service: main process exited, code=exited, status=1/FAILURE
Mar 01 16:16:40 clusterserver1.rmohan.com systemd[1]: Unit kubelet.service entered failed state.
Mar 01 16:16:40 clusterserver1.rmohan.com systemd[1]: kubelet.service failed.
You can verify the errors in /var/log/messages:
[root@clusterserver1 ~]# tail -f /var/log/messages
Mar 1 16:17:11 clusterserver1 systemd: kubelet.service holdoff time over, scheduling restart.
Mar 1 16:17:11 clusterserver1 systemd: Started kubelet: The Kubernetes Node Agent.
Mar 1 16:17:11 clusterserver1 systemd: Starting kubelet: The Kubernetes Node Agent…
Mar 1 16:17:11 clusterserver1 kubelet: I0301 16:17:11.580912 2523 feature_gate.go:226] feature gates: &{{} map[]}
Mar 1 16:17:11 clusterserver1 kubelet: I0301 16:17:11.581022 2523 controller.go:114] kubelet config controller: starting controller
Mar 1 16:17:11 clusterserver1 kubelet: I0301 16:17:11.581030 2523 controller.go:118] kubelet config controller: validating combination of defaults and flags
Mar 1 16:17:11 clusterserver1 kubelet: error: unable to load client CA file /etc/kubernetes/pki/ca.crt: open /etc/kubernetes/pki/ca.crt: no such file or directory
Mar 1 16:17:11 clusterserver1 systemd: kubelet.service: main process exited, code=exited, status=1/FAILURE
Mar 1 16:17:11 clusterserver1 systemd: Unit kubelet.service entered failed state.
Mar 1 16:17:11 clusterserver1 systemd: kubelet.service failed.
Mar 1 16:17:21 clusterserver1 systemd: kubelet.service holdoff time over, scheduling restart.
Mar 1 16:17:21 clusterserver1 systemd: Started kubelet: The Kubernetes Node Agent.
Mar 1 16:17:21 clusterserver1 systemd: Starting kubelet: The Kubernetes Node Agent…
Mar 1 16:17:21 clusterserver1 kubelet: I0301 16:17:21.830092 2535 feature_gate.go:226] feature gates: &{{} map[]}
Mar 1 16:17:21 clusterserver1 kubelet: I0301 16:17:21.830201 2535 controller.go:114] kubelet config controller: starting controller
Mar 1 16:17:21 clusterserver1 kubelet: I0301 16:17:21.830209 2535 controller.go:118] kubelet config controller: validating combination of defaults and flags
Mar 1 16:17:21 clusterserver1 kubelet: error: unable to load client CA file /etc/kubernetes/pki/ca.crt: open /etc/kubernetes/pki/ca.crt: no such file or directory
Mar 1 16:17:21 clusterserver1 systemd: kubelet.service: main process exited, code=exited, status=1/FAILURE
Mar 1 16:17:21 clusterserver1 systemd: Unit kubelet.service entered failed state.
Mar 1 16:17:21 clusterserver1 systemd: kubelet.service failed.
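The same errors can also be read straight from the kubelet unit log:
journalctl -u kubelet --no-pager | tail -n 20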
The issue here is "error: unable to load client CA file /etc/kubernetes/pki/ca.crt": the CA certificate does not exist yet because kubeadm init has not been run.
If you see this on a worker node and kubeadm init has already been run on the master, you can copy the certificate files over from the master.
If this is your master node, start the master setup as follows.
If Kubernetes was ever tried or installed on this machine before, reset kubeadm first.
Run this command on the master node:
[root@clusterserver1 ~]# kubeadm reset
[preflight] Running pre-flight checks.
[reset] Stopping the kubelet service.
[reset] Unmounting mounted directories in "/var/lib/kubelet"
[reset] Removing kubernetes-managed containers.
[reset] Docker doesn't seem to be running. Skipping the removal of running Kubernetes containers.
[reset] No etcd manifest found in "/etc/kubernetes/manifests/etcd.yaml". Assuming external etcd.
[reset] Deleting contents of stateful directories: [/var/lib/kubelet /etc/cni/net.d /var/lib/dockershim /var/run/kubernetes]
[reset] Deleting contents of config directories: [/etc/kubernetes/manifests /etc/kubernetes/pki]
[reset] Deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf]
[root@clusterserver1 ~]#
[root@clusterserver1 ~]# kubeadm init
[init] Using Kubernetes version: v1.9.3
[init] Using Authorization modes: [Node RBAC]
[preflight] Running pre-flight checks.
[preflight] The system verification failed. Printing the output from the verification:
KERNEL_VERSION: 3.10.0-693.17.1.el7.x86_64
CONFIG_NAMESPACES: enabled
CONFIG_NET_NS: enabled
CONFIG_PID_NS: enabled
CONFIG_IPC_NS: enabled
CONFIG_UTS_NS: enabled
CONFIG_CGROUPS: enabled
CONFIG_CGROUP_CPUACCT: enabled
CONFIG_CGROUP_DEVICE: enabled
CONFIG_CGROUP_FREEZER: enabled
CONFIG_CGROUP_SCHED: enabled
CONFIG_CPUSETS: enabled
CONFIG_MEMCG: enabled
CONFIG_INET: enabled
CONFIG_EXT4_FS: enabled (as module)
CONFIG_PROC_FS: enabled
CONFIG_NETFILTER_XT_TARGET_REDIRECT: enabled (as module)
CONFIG_NETFILTER_XT_MATCH_COMMENT: enabled (as module)
CONFIG_OVERLAY_FS: enabled (as module)
CONFIG_AUFS_FS: not set - Required for aufs.
CONFIG_BLK_DEV_DM: enabled (as module)
OS: Linux
CGROUPS_CPU: enabled
CGROUPS_CPUACCT: enabled
CGROUPS_CPUSET: enabled
CGROUPS_DEVICES: enabled
CGROUPS_FREEZER: enabled
CGROUPS_MEMORY: enabled
[WARNING FileExisting-crictl]: crictl not found in system path
[WARNING Service-Docker]: docker service is not enabled, please run 'systemctl enable docker.service'
[preflight] Some fatal errors occurred:
[ERROR SystemVerification]: failed to get docker info: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?
[ERROR Swap]: running with swap on is not supported. Please disable swap
[ERROR Service-Docker]: docker service is not active, please run 'systemctl start docker.service'
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
Disable swap:
swapoff -a
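Note that swapoff -a only disables swap until the next reboot; to keep it off permanently, a common approach is to also comment out the swap entry in /etc/fstab, for example:
sed -i '/ swap / s/^/#/' /etc/fstab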
Then enable and start the docker service, as the preflight errors indicate:
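systemctl enable docker.service && systemctl start docker.service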
[root@clusterserver1 ~]# kubeadm init
[init] Using Kubernetes version: v1.9.3
[init] Using Authorization modes: [Node RBAC]
[preflight] Running pre-flight checks.
[WARNING FileExisting-crictl]: crictl not found in system path
[preflight] Starting the kubelet service
[certificates] Generated ca certificate and key.
[certificates] Generated apiserver certificate and key.
[certificates] apiserver serving cert is signed for DNS names [clusterserver1.rmohan.com kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.1.20]
[certificates] Generated apiserver-kubelet-client certificate and key.
[certificates] Generated sa key and public key.
[certificates] Generated front-proxy-ca certificate and key.
[certificates] Generated front-proxy-client certificate and key.
[certificates] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[kubeconfig] Wrote KubeConfig file to disk: "admin.conf"
[kubeconfig] Wrote KubeConfig file to disk: "kubelet.conf"
[kubeconfig] Wrote KubeConfig file to disk: "controller-manager.conf"
[kubeconfig] Wrote KubeConfig file to disk: "scheduler.conf"
[controlplane] Wrote Static Pod manifest for component kube-apiserver to "/etc/kubernetes/manifests/kube-apiserver.yaml"
[controlplane] Wrote Static Pod manifest for component kube-controller-manager to "/etc/kubernetes/manifests/kube-controller-manager.yaml"
[controlplane] Wrote Static Pod manifest for component kube-scheduler to "/etc/kubernetes/manifests/kube-scheduler.yaml"
[etcd] Wrote Static Pod manifest for a local etcd instance to "/etc/kubernetes/manifests/etcd.yaml"
[init] Waiting for the kubelet to boot up the control plane as Static Pods from directory "/etc/kubernetes/manifests".
[init] This might take a minute or longer if the control plane images have to be pulled.
[apiclient] All control plane components are healthy after 85.002972 seconds
[uploadconfig] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[markmaster] Will mark node clusterserver1.rmohan.com as master by adding a label and a taint
[markmaster] Master clusterserver1.rmohan.com tainted and labelled with key/value: node-role.kubernetes.io/master=""
[bootstraptoken] Using token: 8c45ed.70033b8135e5439a
[bootstraptoken] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstraptoken] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstraptoken] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstraptoken] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: kube-dns
[addons] Applied essential addon: kube-proxy
Your Kubernetes master has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of machines by running the following on each node
as root:
kubeadm join --token 8c45ed.70033b8135e5439a 192.168.1.20:6443 --discovery-token-ca-cert-hash sha256:1df9c0250f28e5a4d137f29307b787954948fc417f2fa9a06a195d65f41b959d
[root@clusterserver1 ~]# mkdir -p $HOME/.kube
[root@clusterserver1 ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@clusterserver1 ~]# chown $(id -u):$(id -g) $HOME/.kube/config
[root@clusterserver1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
clusterserver1.rmohan.com NotReady master 2m v1.9.3
[root@clusterserver1 ~]#
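The master shows NotReady because no pod network add-on has been deployed yet. Any CNI add-on from the page linked in the kubeadm output will work; as one example (flannel, manifest URL as published at the time of writing, so treat it as an assumption):
[root@clusterserver1 ~]# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
Once the network pods are up, the node should move to Ready.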
Install the kubeadm and docker packages on all three worker nodes
[root@clusterserver2 ~]# yum install kubeadm docker -y
[root@clusterserver3 ~]# yum install kubeadm docker -y
[root@clusterserver4 ~]# yum install kubeadm docker -y
Start and enable the docker service on each worker node
[root@clusterserver2 ~]# systemctl restart docker && systemctl enable docker
[root@clusterserver3 ~]# systemctl restart docker && systemctl enable docker
[root@clusterserver4 ~]# systemctl restart docker && systemctl enable docker
Step 4: Join the worker nodes to the master
To join worker nodes to the master, a token is required. When the Kubernetes master is initialized, the output includes a kubeadm join command carrying that token. Copy that command and run it on each worker node.
[root@clusterserver2 ~]# kubeadm join --token 8c45ed.70033b8135e5439a 192.168.1.20:6443 --discovery-token-ca-cert-hash sha256:1df9c0250f28e5a4d137f29307b787954948fc417f2fa9a06a195d65f41b959d
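If the original join command has been lost, a fresh one can be generated on the master (kubeadm 1.9+):
[root@clusterserver1 ~]# kubeadm token create --print-join-command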