February 8, 2018

How to Install Multi Master Kubernetes 1.9 on RHEL 7

Notes on deploying multi-master k8s to RHEL 7 (CentOS 7 shouldn't be much different). The basic plan is below.

The main component versions are as follows.

name            version
kube-apiserver  1.9.2
flannel         0.9.1
etcd            3.1.11
docker          1.12.6

The machine layout for this build is as follows.

host                      ip         role
test-kube1.example.com    10.1.1.11  master
test-kube2.example.com    10.1.1.12  master
test-kube3.example.com    10.1.1.13  master
test-kube4.example.com    10.1.1.14  worker
test-kube5.example.com    10.1.1.15  worker
test-kube-lb.example.com  10.1.1.20  LB (no docker)

Three masters, two workers, and one LB as the cluster endpoint. The LB is a quick-and-dirty Nginx L4 load balancer (a minimal sketch follows); if you are thinking about production, plan for LB redundancy and the like separately.
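
For reference, a minimal sketch of the Nginx L4 config on test-kube-lb (assumes the stream module is available; the upstream name is mine):

# /etc/nginx/nginx.conf -- stream block goes at top level, outside http {}
stream {
    upstream kube_apiservers {
        server 10.1.1.11:6443;
        server 10.1.1.12:6443;
        server 10.1.1.13:6443;
    }
    server {
        listen 6443;
        proxy_pass kube_apiservers;
    }
}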

Prepare

  • Docker

    • This time I used the docker package (from rhel-7-server-extras-rpms)
    • docker-latest or docker-ee would also be fine
    • Stay at 17.06.x or earlier; always check the k8s release notes for the currently compatible Docker versions
  • SELinux disabled

    • set SELINUX=disabled in /etc/sysconfig/selinux
  • firewalld off

    • systemctl disable firewalld.service
  • yum repo

    • rhel-7-server-rpms
    • rhel-7-server-optional-rpms
    • rhel-7-server-extras-rpms
  • yum install vim bash-completion lsof screen -y && yum update -y

  • Swap off; do not forget this (see the sketch below)
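
A minimal prep sketch covering the SELinux, firewalld, and swap items above (assumes the usual RHEL 7 file locations):

# disable SELinux immediately and across reboots
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/sysconfig/selinux
# stop and disable firewalld
systemctl stop firewalld.service
systemctl disable firewalld.service
# turn swap off now and comment out swap entries so it stays off
swapoff -a
sed -i '/\sswap\s/ s/^/#/' /etc/fstab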

Installing kubeadm

all nodes

yum install -y docker

Choose a storage driver to suit your workload; here I go with overlay2.

cat << EOF > /etc/docker/daemon.json
{
  "storage-driver": "overlay2"
}
EOF
cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
systemctl enable docker && systemctl start docker
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
setenforce 0
yum install -y kubelet kubeadm kubectl
systemctl enable kubelet && systemctl start kubelet
reboot
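
After the reboot, a quick sanity check (a sketch; kubelet is expected to crash-loop until kubeadm init/join runs):

systemctl is-active docker
docker info | grep 'Storage Driver'          # expect overlay2
sysctl net.bridge.bridge-nf-call-iptables    # expect 1
rpm -q kubelet kubeadm kubectl
free -m | grep -i '^swap'                    # all zeros if swap is off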

Set up etcd

node 1,2,3

Adjust ens192 to match your environment, of course.

curl -o /usr/local/bin/cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
curl -o /usr/local/bin/cfssljson https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
chmod +x /usr/local/bin/cfssl*

export PEER_NAME=$(hostname)
export PRIVATE_IP=$(ip addr show ens192 | grep -Po 'inet \K[\d.]+')

node 1

mkdir -p /etc/kubernetes/pki/etcd
cd /etc/kubernetes/pki/etcd
cat >ca-config.json <<EOL
{
    "signing": {
        "default": {
            "expiry": "43800h"
        },
        "profiles": {
            "server": {
                "expiry": "43800h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            },
            "client": {
                "expiry": "43800h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "client auth"
                ]
            },
            "peer": {
                "expiry": "43800h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            }
        }
    }
}
EOL
cat >ca-csr.json <<EOL
{
    "CN": "etcd",
    "key": {
        "algo": "rsa",
        "size": 2048
    }
}
EOL
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
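
Optionally, sanity-check the generated CA before moving on:

openssl x509 -in ca.pem -noout -subject -dates    # expect subject CN=etcd
ls ca.pem ca-key.pem ca.csr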

node 1

cat >client.json <<EOL
{
    "CN": "client",
    "key": {
        "algo": "ecdsa",
        "size": 256
    }
}
EOL
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client client.json | cfssljson -bare client

node 1, 2, 3

ssh-keygen -t rsa -b 4096
ssh-copy-id root@10.1.1.11 # node 1
ssh-copy-id root@10.1.1.12 # node 2
ssh-copy-id root@10.1.1.13 # node 3

node 2,3

mkdir -p /etc/kubernetes/pki/etcd
cd /etc/kubernetes/pki/etcd
scp root@10.1.1.11:/etc/kubernetes/pki/etcd/* .

node 1,2,3

cfssl print-defaults csr > config.json
sed -i '0,/CN/{s/example\.net/'"$PEER_NAME"'/}' config.json
sed -i 's/www\.example\.net/'"$PRIVATE_IP"'/' config.json
sed -i 's/example\.net/'"${PUBLIC_IP:-$PRIVATE_IP}"'/' config.json # PUBLIC_IP is never exported above; fall back to PRIVATE_IP on these single-IP hosts

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server config.json | cfssljson -bare server
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=peer config.json | cfssljson -bare peer
export ETCD_VERSION=v3.1.11
curl -sSL https://github.com/coreos/etcd/releases/download/${ETCD_VERSION}/etcd-${ETCD_VERSION}-linux-amd64.tar.gz | tar -xzv --strip-components=1 -C /usr/local/bin/
rm -rf etcd-$ETCD_VERSION-linux-amd64*

touch /etc/etcd.env
echo "PEER_NAME=$PEER_NAME" >> /etc/etcd.env
echo "PRIVATE_IP=$PRIVATE_IP" >> /etc/etcd.env

# set env
export PEER_HOST1IP=10.1.1.11
export PEER_HOST2IP=10.1.1.12
export PEER_HOST3IP=10.1.1.13
export PEER_HOST1=test-kube1.example.com
export PEER_HOST2=test-kube2.example.com
export PEER_HOST3=test-kube3.example.com

cat >/etc/systemd/system/etcd.service <<EOL
[Unit]
Description=etcd
Documentation=https://github.com/coreos/etcd
Conflicts=etcd.service
Conflicts=etcd2.service

[Service]
EnvironmentFile=/etc/etcd.env
Type=notify
Restart=always
RestartSec=5s
LimitNOFILE=40000
TimeoutStartSec=0

ExecStart=/usr/local/bin/etcd --name ${PEER_NAME} \
    --data-dir /var/lib/etcd \
    --listen-client-urls https://${PRIVATE_IP}:2379 \
    --advertise-client-urls https://${PRIVATE_IP}:2379 \
    --listen-peer-urls https://${PRIVATE_IP}:2380 \
    --initial-advertise-peer-urls https://${PRIVATE_IP}:2380 \
    --cert-file=/etc/kubernetes/pki/etcd/server.pem \
    --key-file=/etc/kubernetes/pki/etcd/server-key.pem \
    --client-cert-auth \
    --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.pem \
    --peer-cert-file=/etc/kubernetes/pki/etcd/peer.pem \
    --peer-key-file=/etc/kubernetes/pki/etcd/peer-key.pem \
    --peer-client-cert-auth \
    --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.pem \
    --initial-cluster ${PEER_HOST1}=https://${PEER_HOST1IP}:2380,${PEER_HOST2}=https://${PEER_HOST2IP}:2380,${PEER_HOST3}=https://${PEER_HOST3IP}:2380 \
    --initial-cluster-token my-etcd-token \
    --initial-cluster-state new

[Install]
WantedBy=multi-user.target
EOL
systemctl daemon-reload
systemctl start etcd
systemctl enable etcd
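
Once all three members are up, verify cluster health from any node (a sketch; the client cert generated on node 1 was copied to nodes 2 and 3 in the scp step above):

ETCDCTL_API=3 etcdctl \
    --endpoints=https://10.1.1.11:2379,https://10.1.1.12:2379,https://10.1.1.13:2379 \
    --cacert=/etc/kubernetes/pki/etcd/ca.pem \
    --cert=/etc/kubernetes/pki/etcd/client.pem \
    --key=/etc/kubernetes/pki/etcd/client-key.pem \
    endpoint health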

Set up k8s master(s)

master 1, 2, 3

mkdir ~/kube-ha
cd ~/kube-ha
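
# Note: the heredoc below expands ${PEER_HOST1IP} and friends at write time;
# if this is a fresh shell since the etcd step, re-export them first
# (same values as before):
export PEER_HOST1IP=10.1.1.11
export PEER_HOST2IP=10.1.1.12
export PEER_HOST3IP=10.1.1.13
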
cat >config.yaml <<EOL
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
api:
  advertiseAddress: 10.1.1.20
etcd:
  endpoints:
  - https://${PEER_HOST1IP}:2379
  - https://${PEER_HOST2IP}:2379
  - https://${PEER_HOST3IP}:2379
  caFile: /etc/kubernetes/pki/etcd/ca.pem
  certFile: /etc/kubernetes/pki/etcd/client.pem
  keyFile: /etc/kubernetes/pki/etcd/client-key.pem
networking:
  podSubnet: 10.244.0.0/16
apiServerCertSANs:
- 10.1.1.20
apiServerExtraArgs:
  apiserver-count: "3"
EOL

master 1

  • podSubnet: 10.244.0.0/16 in config.yaml (kubeadm's --pod-network-cidr equivalent) is required for flannel
[root@test-kube1 ~]# kubeadm init --config=config.yaml

...

Your Kubernetes master has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of machines by running the following on each node
as root:

  kubeadm join --token a58ed6.212019bca545e227 10.1.1.20:6443 --discovery-token-ca-cert-hash sha256:728e974399c279cc57698457673b94cf8f14c2025c54478cec639e52dcb62b90

master 1 bashrc

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bashrc

echo "source <(kubectl completion bash)" >> ~/.bashrc

master 2,3

cd /etc/kubernetes/pki/
scp root@${PEER_HOST1IP}:/etc/kubernetes/pki/* /etc/kubernetes/pki
cd ~/kube-ha
kubeadm init --config=config.yaml

master 1

kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/v0.9.1/Documentation/kube-flannel.yml

kubectl taint nodes test-kube1.example.com node-role.kubernetes.io/master="":NoSchedule
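
Give flannel and kube-dns a minute to settle, then verify (pod names are generated per cluster, hence the loose grep):

kubectl -n kube-system get pods -o wide | grep -E 'flannel|dns'
kubectl get nodes    # masters should report Ready once the pod network is up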

Set up k8s worker(s)

node 4, 5

kubeadm join --token a58ed6.212019bca545e227 10.1.1.20:6443 --discovery-token-ca-cert-hash sha256:728e974399c279cc57698457673b94cf8f14c2025c54478cec639e52dcb62b90

Confirm

kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-0               Healthy   {"health": "true"}
etcd-1               Healthy   {"health": "true"}
etcd-2               Healthy   {"health": "true"}
kubectl get node
NAME                     STATUS    ROLES     AGE       VERSION
test-kube1.example.com   Ready     master    xxx       v1.9.2
test-kube2.example.com   Ready     master    xxx       v1.9.2
test-kube3.example.com   Ready     master    xxx       v1.9.2
test-kube4.example.com   Ready     <none>    xxx       v1.9.2
test-kube5.example.com   Ready     <none>    xxx       v1.9.2

Dashboard

master 1

kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml
cat > dashboard-admin.yml <<EOL
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
EOL

kubectl create -f dashboard-admin.yml
kubectl -n kube-system get secret |grep token
kubectl -n kube-system describe secret kubernetes-dashboard-token-944tt
kubectl proxy --address=0.0.0.0 -p 8001
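
The token secret's name suffix (944tt above) is generated per cluster; to avoid copying it by hand, something like this works:

kubectl -n kube-system describe secret \
    $(kubectl -n kube-system get secret | awk '/kubernetes-dashboard-token/ {print $1}')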

local PC

ssh -L 8001:localhost:8001 root@10.1.1.11
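
With the tunnel up, the dashboard should be reachable in a local browser at the standard kubectl proxy path (adjust if your dashboard service name differs):

http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/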
