[CentOS7] kubernetes install


Note: this document is a work in progress and its contents have not been fully organized.

Please use it for reference only.

Reference sites:

https://www.howtoforge.com/tutorial/centos-kubernetes-docker-cluster/

https://kubernetes.io/docs/setup/cri/

https://juejin.im/post/5caea3ffe51d456e79545c32

https://cloud.tencent.com/developer/article/1409419

https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/

 

 

  • Perform the following steps on all nodes
  • Configure the hosts file
[root@k8s-all-node ~]# vi /etc/hosts
10.10.10.27     k8s-master
10.10.10.28     k8s-node01
10.10.10.29     k8s-node02

 

  • Disable SELinux
[root@k8s-all-node ~]# vi /etc/selinux/config

# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
#     enforcing - SELinux security policy is enforced.
#     permissive - SELinux prints warnings instead of enforcing.
#     disabled - No SELinux policy is loaded.
SELINUX=disabled
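
Editing /etc/selinux/config only takes effect after a reboot. To relax SELinux in the running session right away as well (an optional extra step, not shown in the original transcript), setenforce can be used:

[root@k8s-all-node ~]# setenforce 0
[root@k8s-all-node ~]# sestatus | grep -i mode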

 

  • Disable firewalld (the empty iptables chains below confirm that no firewall rules are active)
[root@k8s-all-node ~]# iptables -L
Chain INPUT (policy ACCEPT)
target     prot opt source               destination         

Chain FORWARD (policy ACCEPT)
target     prot opt source               destination         

Chain OUTPUT (policy ACCEPT)
target     prot opt source               destination         
[root@k8s-all-node ~]#
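
The transcript above only verifies that no firewall rules are loaded. Assuming firewalld is the active firewall service, it can be stopped and disabled (the commands themselves are not shown in the original) with:

[root@k8s-all-node ~]# systemctl stop firewalld
[root@k8s-all-node ~]# systemctl disable firewalld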

 

  • Configure kernel parameters (sysctl)
[root@k8s-all-node ~]# cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
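
The bridge netfilter sysctls above require the br_netfilter kernel module. If it is not already loaded (an assumption; check with lsmod), it can be loaded and made persistent across reboots like this:

[root@k8s-all-node ~]# modprobe br_netfilter
[root@k8s-all-node ~]# echo br_netfilter > /etc/modules-load.d/br_netfilter.conf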


[root@k8s-all-node ~]# sysctl --system
* Applying /usr/lib/sysctl.d/00-system.conf ...
* Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
kernel.yama.ptrace_scope = 0
* Applying /usr/lib/sysctl.d/50-default.conf ...
kernel.sysrq = 16
kernel.core_uses_pid = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.promote_secondaries = 1
net.ipv4.conf.all.promote_secondaries = 1
fs.protected_hardlinks = 1
fs.protected_symlinks = 1
* Applying /etc/sysctl.d/99-sysctl.conf ...
* Applying /etc/sysctl.d/k8s.conf ...
* Applying /etc/sysctl.conf ...
[root@k8s-all-node ~]#

 

  • Turn off swap (and comment out the swap entry in /etc/fstab so it stays disabled after a reboot)
[root@k8s-all-node ~]# swapoff -a

[root@k8s-all-node ~]# vi /etc/fstab
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
UUID=d7bb5d3b-5b37-47e0-8c26-fe40f7311597 /                       xfs     defaults        0 0
UUID=43ec35ea-2e35-46f1-864c-b13603a8acac /boot                   xfs     defaults        0 0
#UUID=2de336ec-4a33-36r1-8w2s-asdf2342ccgg swap                   swap     defaults        0 0
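
Instead of editing /etc/fstab by hand, the swap entry can also be commented out with a one-liner (a sketch; it assumes the swap entry is the only line containing " swap " and keeps a .bak backup):

[root@k8s-all-node ~]# sed -i.bak '/ swap / s/^/#/' /etc/fstab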

 

  • Install Docker
[root@k8s-all-node ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
[root@k8s-all-node ~]# yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
[root@k8s-all-node ~]# yum list docker-ce --showduplicates | sort -r
[root@k8s-all-node ~]# yum install -y docker-ce-18.06.3.ce

[root@k8s-all-node ~]# mkdir /etc/docker

[root@k8s-all-node ~]# cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF


[root@k8s-all-node ~]# mkdir -p /etc/systemd/system/docker.service.d
[root@k8s-all-node ~]# systemctl daemon-reload
[root@k8s-all-node ~]# systemctl restart docker
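
As an optional check (not part of the original transcript), confirm that Docker picked up the systemd cgroup driver from daemon.json:

[root@k8s-all-node ~]# docker info | grep -i cgroup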

 

  • Install the Kubernetes packages and reboot
[root@k8s-all-node ~]# vi /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
        https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg

[root@k8s-all-node ~]# yum install -y kubelet kubeadm kubectl
[root@k8s-all-node ~]# init 6

[root@k8s-all-node ~]# systemctl start docker ; systemctl enable docker
[root@k8s-all-node ~]# systemctl start kubelet ; systemctl enable kubelet
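
Note that until kubeadm init (or kubeadm join) has run, kubelet typically keeps restarting because its configuration does not exist yet; an optional status check:

[root@k8s-all-node ~]# systemctl status kubelet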

 

  • k8s-master only
  • The coredns pods will change to Running only after the network add-on is installed.
[root@k8s-master ~]# kubeadm init --apiserver-advertise-address=10.10.10.27 --pod-network-cidr=20.20.0.0/16
~ (output omitted)

[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 10.10.10.27:6443 --token syojz8.svxybs8x0f3iy28a \
    --discovery-token-ca-cert-hash sha256:b28c6474e92e2bc87e8f7b470119e506df36ae6ae08a8f50dd070f5d714a28e1
[root@k8s-master ~]#

[root@k8s-master ~]# mkdir -p $HOME/.kube
[root@k8s-master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

[root@k8s-master ~]# kubectl get nodes
NAME         STATUS   ROLES    AGE     VERSION
k8s-master   Ready    master   2m22s   v1.14.1
[root@k8s-master ~]# kubectl get pods --all-namespaces
NAMESPACE     NAME                                 READY   STATUS    RESTARTS   AGE
kube-system   coredns-fb8b8dccf-c9hvh              0/1     Pending   0          78s
kube-system   coredns-fb8b8dccf-hmt6w              0/1     Pending   0          78s
kube-system   etcd-k8s-master                      1/1     Running   0          41s
kube-system   kube-apiserver-k8s-master            1/1     Running   0          42s
kube-system   kube-controller-manager-k8s-master   1/1     Running   0          31s
kube-system   kube-proxy-92c9h                     1/1     Running   0          78s
kube-system   kube-scheduler-k8s-master            1/1     Running   0          16s
[root@k8s-master ~]#

 

[root@k8s-master ~]# kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml
[root@k8s-master ~]# kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml

[root@k8s-master ~]# kubectl get pods --all-namespaces
NAMESPACE     NAME                                 READY   STATUS    RESTARTS   AGE
kube-system   calico-node-r72sb                    2/2     Running   0          38s
kube-system   coredns-fb8b8dccf-c9hvh              0/1     Running   0          4m15s
kube-system   coredns-fb8b8dccf-hmt6w              0/1     Running   0          4m15s
kube-system   etcd-k8s-master                      1/1     Running   0          3m38s
kube-system   kube-apiserver-k8s-master            1/1     Running   0          3m39s
kube-system   kube-controller-manager-k8s-master   1/1     Running   0          3m28s
kube-system   kube-proxy-92c9h                     1/1     Running   0          4m15s
kube-system   kube-scheduler-k8s-master            1/1     Running   0          3m13s
[root@k8s-master ~]#

 

  • On k8s-master, generate the join command
[root@k8s-master ~]# kubeadm token create --print-join-command
kubeadm join 10.10.10.27:6443 --token eq8odd.rxcfznxvepos1pg8     --discovery-token-ca-cert-hash sha256:aa3949ebeec315e5d303a18fc049c33a89a9110d8bdec0a93f3c065dcb78c689 
[root@k8s-master ~]#

 

  • Run on k8s-node01 / k8s-node02
[root@k8s-node01 ~]# kubeadm join 10.10.10.27:6443 --token \
 eq8odd.rxcfznxvepos1pg8     --discovery-token-ca-cert-hash sha256:aa3949ebeec315e5d303a18fc049c33a89a9110d8bdec0a93f3c065dcb78c689

[root@k8s-node02 ~]# kubeadm join 10.10.10.27:6443 --token \
eq8odd.rxcfznxvepos1pg8     --discovery-token-ca-cert-hash sha256:aa3949ebeec315e5d303a18fc049c33a89a9110d8bdec0a93f3c065dcb78c689

 

  • Verify on k8s-master
[root@k8s-master ~]# kubectl get nodes
NAME         STATUS   ROLES    AGE     VERSION
k8s-master   Ready    master   8m43s   v1.14.3
k8s-node01   Ready    <none>   73s     v1.14.3
k8s-node02   Ready    <none>   62s     v1.14.3
[root@k8s-master ~]#

 

  • Generate a self-signed certificate for the dashboard installation.
[root@k8s-master ~]# mkdir /root/certs
[root@k8s-master ~]# cd /root/certs
[root@k8s-master certs]# openssl genrsa -des3 -passout pass:x -out dashboard.pass.key 2048
[root@k8s-master certs]# openssl rsa -passin pass:x -in dashboard.pass.key -out dashboard.key
[root@k8s-master certs]# openssl req -new -key dashboard.key -out dashboard.csr
[root@k8s-master certs]# openssl x509 -req -sha256 -days 365 -in dashboard.csr -signkey dashboard.key -out dashboard.crt
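
The openssl req step prompts interactively for the certificate subject fields. As an optional check (not in the original transcript), the generated certificate can be inspected with:

[root@k8s-master certs]# openssl x509 -in dashboard.crt -noout -text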

 

  • Install the dashboard.
[root@k8s-master ~]# kubectl create secret generic kubernetes-dashboard-certs --from-file=/root/certs -n kube-system
[root@k8s-master ~]# kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml

 

  • Change the dashboard service settings.
[root@k8s-master ~]# kubectl edit service kubernetes-dashboard -n kube-system
#   type: ClusterIP    <--  change this value to NodePort

# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
#
apiVersion: v1
kind: Service
metadata:
  annotations:
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"v1","kind":"Service","metadata":{"annotations":{},"labels":{"k8s-app":"kubernetes-dashboard"},"name":"kubernetes-dashboard","namespace":"kube-system"},"spec":{"ports":[{"port":443,"targetPort":8443}],"selector":{"k8s-app":"kubernetes-dashboard"}}}
  creationTimestamp: "2019-06-12T07:41:01Z"
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
  resourceVersion: "2224"
  selfLink: /api/v1/namespaces/kube-system/services/kubernetes-dashboard
  uid: 6cb7d772-8ce5-11e9-ad2b-525400fce674
spec:
  clusterIP: 10.108.72.190
  ports:
  - port: 443
    protocol: TCP
    targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
  sessionAffinity: None
#  type: ClusterIP    <--  before the change
  type: NodePort      <--  after the change
status:
  loadBalancer: {}
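
As a non-interactive alternative to kubectl edit (a sketch, not part of the original procedure), the same change can be applied with kubectl patch:

[root@k8s-master ~]# kubectl patch service kubernetes-dashboard -n kube-system -p '{"spec":{"type":"NodePort"}}'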

 

  • Check the dashboard status and connection information
  • In this example the dashboard service is mapped to 443:30906/TCP.
[root@k8s-master ~]#  kubectl get pods -n kube-system
NAME                                    READY   STATUS    RESTARTS   AGE
calico-node-8mvl8                       2/2     Running   0          15m
calico-node-br9sw                       2/2     Running   0          15m
calico-node-r72sb                       2/2     Running   0          18m
coredns-fb8b8dccf-c9hvh                 1/1     Running   0          22m
coredns-fb8b8dccf-hmt6w                 1/1     Running   0          22m
etcd-k8s-master                         1/1     Running   0          21m
kube-apiserver-k8s-master               1/1     Running   0          21m
kube-controller-manager-k8s-master      1/1     Running   0          21m
kube-proxy-6t9vw                        1/1     Running   0          15m
kube-proxy-8vw5v                        1/1     Running   0          15m
kube-proxy-92c9h                        1/1     Running   0          22m
kube-scheduler-k8s-master               1/1     Running   0          21m
kubernetes-dashboard-5f7b999d65-t88x2   1/1     Running   0          3m56s
[root@k8s-master ~]# 

[root@k8s-master ~]# kubectl get service -n kube-system
NAME                   TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                  AGE
calico-typha           ClusterIP   10.101.41.222   <none>        5473/TCP                 20m
kube-dns               ClusterIP   10.96.0.10      <none>        53/UDP,53/TCP,9153/TCP   23m
kubernetes-dashboard   NodePort    10.108.72.190   <none>        443:30906/TCP            5m5s
[root@k8s-master ~]#

 

  • Create a dashboard service account
[root@k8s-master ~]# kubectl create serviceaccount cluster-admin-dashboard-sa
[root@k8s-master ~]# kubectl create clusterrolebinding cluster-admin-dashboard-sa --clusterrole=cluster-admin --serviceaccount=default:cluster-admin-dashboard-sa
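
Optionally (not in the original transcript), verify that the service account and cluster role binding were created:

[root@k8s-master ~]# kubectl get serviceaccount cluster-admin-dashboard-sa
[root@k8s-master ~]# kubectl describe clusterrolebinding cluster-admin-dashboard-sa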

 

  • Retrieve the dashboard login token
[root@k8s-master ~]# kubectl get secret $(kubectl get serviceaccount cluster-admin-dashboard-sa -o jsonpath="{.secrets[0].name}") -o jsonpath="{.data.token}" | base64 --decode
eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6ImNsdXN0ZXItYWRtaW4tZGFzaGJvYXJkLXNhLXRva2VuLWNzZ3A4Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImNsdXN0ZXItYWRtaW4tZGFzaGJvYXJkLXNhIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiNGFkYjU3Y2QtOGNlNi0xMWU5LWFkMmItNTI1NDAwZmNlNjc0Iiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50OmRlZmF1bHQ6Y2x1c3Rlci1hZG1pbi1kYXNoYm9hcmQtc2EifQ.E_T09ftzrV_68Ie0nuthJ1yjFeNByeok87x3F653dB9Pt0a7n6hWGOZsiCUaU0mevm56kl2QUgzV5J-waNvr5Fv4IZ5NMmId_XfIGWlsul2P6y4wag96DuG65K1T2DwoGix4GO8a1p7HISOQ0knxr0OVMOjXRLcOXUov3h3Mv87T-O1gjVIUHAMvB70aZK1ScBaULegqzQbHwjpRc7FFOKUQB4HANJ6gw1asMF4yw0M_dF3GK16GaCxxKEW6rQWGrdN_TNB2nIXKgKqfqHS_35o02yYd2_cU3TDZ14xGl7F2zSVJxzB99ftyC6pwquPF3y3qhXeUFNU0tyCyxKUrWQ
[root@k8s-master ~]#

 

  • Access the dashboard ( https://10.10.10.27:30906/#!/login )
  • Click "Proceed anyway (unsafe)" to bypass the browser's self-signed certificate warning.

 

  • Enter the token.

 

  • Dashboard screen

 

  • k8s testing (work in progress)
[root@k8s-master ~]# kubectl create deployment nginx --image=nginx
[root@k8s-master ~]# kubectl describe deployment nginx
Name:                   nginx
Namespace:              default
CreationTimestamp:      Fri, 03 May 2019 00:28:11 +0900
Labels:                 app=nginx
Annotations:            deployment.kubernetes.io/revision: 1
Selector:               app=nginx
Replicas:               1 desired | 1 updated | 1 total | 0 available | 1 unavailable
StrategyType:           RollingUpdate
MinReadySeconds:        0
RollingUpdateStrategy:  25% max unavailable, 25% max surge
Pod Template:
  Labels:  app=nginx
  Containers:
   nginx:
    Image:        nginx
    Port:         <none>
    Host Port:    <none>
    Environment:  <none>
    Mounts:       <none>
  Volumes:        <none>
Conditions:
  Type           Status  Reason
  ----           ------  ------
  Available      False   MinimumReplicasUnavailable
  Progressing    True    ReplicaSetUpdated
OldReplicaSets:  <none>
NewReplicaSet:   nginx-65f88748fd (1/1 replicas created)
Events:
  Type    Reason             Age   From                   Message
  ----    ------             ----  ----                   -------
  Normal  ScalingReplicaSet  18s   deployment-controller  Scaled up replica set nginx-65f88748fd to 1
[root@k8s-master ~]#


[root@k8s-master ~]# kubectl create service nodeport nginx --tcp=80:80


[root@k8s-master ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP        6m33s
nginx        NodePort    10.102.109.228   <none>        80:30187/TCP   21s
[root@k8s-master ~]#

 

  • Verify nginx
[root@k8s-master ~]#  curl k8s-node01:30187
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>
[root@k8s-master ~]#

 

  • Pod scaling
Initially, a single pod is created.

[root@k8s-master ~]# kubectl get pods
NAME                     READY   STATUS    RESTARTS   AGE
nginx-65f88748fd-8lqrb   1/1     Running   0          5m12s
[root@k8s-master ~]#

Scale the deployment up to 5 pods.
[root@k8s-master ~]# kubectl scale --replicas=5 deployment/nginx
deployment.extensions/nginx scaled


[root@k8s-master ~]# kubectl get pods
NAME                     READY   STATUS              RESTARTS   AGE
nginx-65f88748fd-6v7n5   1/1     Running             0          13s
nginx-65f88748fd-86svl   0/1     ContainerCreating   0          13s
nginx-65f88748fd-8lqrb   1/1     Running             0          12m
nginx-65f88748fd-pq8p8   0/1     ContainerCreating   0          13s
nginx-65f88748fd-w4tq8   0/1     ContainerCreating   0          13s
[root@k8s-master ~]#
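
To see which node each replica was scheduled on (an optional check, not in the original transcript), the wide output can be used:

[root@k8s-master ~]# kubectl get pods -o wide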

 

  • Delete the pods
[root@k8s-master ~]# kubectl delete deployment/nginx

Verify the deletion:
[root@k8s-master ~]# kubectl get pods -o wide
No resources found.
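
Deleting the deployment does not remove the nginx NodePort service created earlier; if it is no longer needed, it can be deleted as well (an optional cleanup step, not in the original transcript):

[root@k8s-master ~]# kubectl delete service nginx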

 

 
