• Install dependency packages
[root@centos76 ~]# yum install git curl zip unzip

 

  • Download Composer
[root@centos76 ~]# curl -sS https://getcomposer.org/installer | php
All settings correct for using Composer
Downloading...

Composer (version 1.9.0) successfully installed to: /root/composer.phar
Use it: php composer.phar
[root@centos76 ~]#

 

  • Move composer to /usr/bin/composer and install Laravel
[root@centos76 ~]# mv composer.phar /usr/bin/composer
[root@centos76 ~]# composer create-project laravel/laravel /var/www/html/laravel
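To sanity-check the toolchain, you can print the versions (exact output depends on the releases installed):
[root@centos76 ~]# composer --version
[root@centos76 ~]# php /var/www/html/laravel/artisan --version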

 

  • Edit httpd.conf and fix Laravel permissions
[root@centos76 ~]# vi /etc/httpd/conf/httpd.conf
DocumentRoot "/var/www/html/laravel/public"

[root@centos76 ~]# systemctl restart httpd
[root@centos76 ~]# chown -R apache:apache /var/www/html/laravel
[root@centos76 ~]# chmod -R 755 /var/www/html/laravel/storage
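Laravel also expects bootstrap/cache to be writable by the web server; if permission errors appear, the same treatment there is a reasonable fix (a suggestion, not part of the original steps):
[root@centos76 ~]# chmod -R 755 /var/www/html/laravel/bootstrap/cache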

 

  • Verify the web site

This document is a work in progress. It shows a simple way to test WordPress using nginx-proxy and a shared directory.

Prepare three CentOS 7 VMs in a KVM test environment.

One VM runs nginx-proxy and the other two run the LEMP stack.

The domain test.com is used as a temporary test domain.

The layout is nginx-proxy + LEMP stack + GlusterFS, with WordPress (WP) as the application on top.

  • Nginx-proxy layout

nginx-proxy

Install the epel-release package and nginx

[root@nginx-proxy ~]# yum install epel-release -y

[root@nginx-proxy ~]# vi /etc/yum.repos.d/nginx.repo
[nginx]
name=nginx
baseurl=http://nginx.org/packages/centos/7/$basearch/
gpgcheck=0
enabled=1
[root@nginx-proxy ~]# yum install -y nginx

 

VM nginx-proxy

nginx configuration

[root@nginx-proxy ~]# cd /etc/nginx/conf.d/
[root@nginx-proxy conf.d]# cp default.conf default.conf.org


[root@nginx-proxy conf.d]# cat default.conf
server {
    listen       80;
    server_name  test.com;
    location / {
        rewrite ^/(/.*)$ $1 break; 
        proxy_pass http://test.com;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_redirect off;
    }
}
upstream test.com {
    server 10.10.10.11:80;
    server 10.10.10.22:80;
}

[root@nginx-proxy conf.d]# systemctl enable nginx ; systemctl start nginx
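Assuming test.com resolves to the proxy (for example via a hosts entry on the client), a quick round-robin check could look like this, with requests alternating between www1 and www2:
[root@nginx-proxy conf.d]# for i in 1 2 3 4; do curl -s -o /dev/null -w "%{http_code}\n" http://test.com/; done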

 

Work on VMs nginx-www1 / nginx-www2

Install NGINX and PHP 7.1.

[root@nginx-www1 ~]# vi /etc/yum.repos.d/nginx.repo
[nginx]
name=nginx
baseurl=http://nginx.org/packages/centos/7/$basearch/
gpgcheck=0
enabled=1

[root@nginx-www1 ~]# yum install -y nginx
[root@nginx-www1 ~]# yum install -y epel-release yum-utils
[root@nginx-www1 ~]# rpm -Uvh http://ftp.riken.jp/Linux/remi/enterprise/remi-release-7.rpm
[root@nginx-www1 ~]# yum clean all && yum list
[root@nginx-www1 ~]# yum-config-manager --enable remi-php71
[root@nginx-www1 ~]# yum -y install php php-mysql php-fpm php-opcache php-gd php-ldap \
php-odbc php-pear php-xml php-xmlrpc php-mbstring php-soap curl curl-devel

 

nginx and php-fpm configuration

The test domain test.com was created on an internal DNS server.

The test.com domain's IP points at the nginx-proxy.

[root@nginx-www1 ~]# mkdir /etc/nginx/sites-enabled
[root@nginx-www1 ~]# vi /etc/nginx/nginx.conf
user  nginx;
worker_processes  1;

error_log  /var/log/nginx/error.log warn;
pid        /var/run/nginx.pid;


events {
    worker_connections  1024;
}


http {
    include       /etc/nginx/mime.types;
    default_type  application/octet-stream;

    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    access_log  /var/log/nginx/access.log  main;

    sendfile        on;
    #tcp_nopush     on;

    keepalive_timeout  65;

    #gzip  on;

    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*.conf;
}


[root@nginx-www1 ~]# vi /etc/php-fpm.d/www.conf

user = nginx
group = nginx

listen.owner = nginx
listen.group = nginx
listen.mode = 0660
[root@nginx-www1 ~]# systemctl enable nginx ; systemctl start nginx
[root@nginx-www1 ~]# systemctl enable php-fpm ; systemctl start php-fpm
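With the stock www.conf, php-fpm listens on 127.0.0.1:9000, which is what the fastcgi_pass directive in the vhost below expects; the listener can be verified with:
[root@nginx-www1 ~]# netstat -antp | grep 9000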

 

Install GlusterFS

Work on VMs nginx-www1 / nginx-www2

Edit the /etc/hosts file

[root@nginx-www1 ~]# vi /etc/hosts
10.10.10.11     www1
10.10.10.22     www2
10.10.10.33     db01

 

Install GlusterFS

[root@nginx-www1 ~]# yum install centos-release-gluster -y
[root@nginx-www1 ~]# yum install glusterfs-server -y
[root@nginx-www1 ~]# systemctl enable glusterd ; systemctl start glusterd

 

Create the gluster shared directory

[root@nginx-www1 ~]# gluster peer probe www2
peer probe: success.
[root@nginx-www2 ~]# gluster peer probe www1



[root@nginx-www1 ~]# mkdir /gluster-storage
[root@nginx-www2 ~]# mkdir /gluster-storage


[root@nginx-www1 ~]# gluster volume create volume01 replica 2 transport tcp www1:/gluster-storage www2:/gluster-storage force
volume create: volume01: success: please start the volume to access data

[root@nginx-www1 ~]# gluster volume start volume01
volume start: volume01: success
[root@nginx-www1 ~]# gluster volume info

Volume Name: volume01
Type: Replicate
Volume ID: b24c3e2b-f458-4733-9bc0-38d9bd441bb6
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: www1:/gluster-storage
Brick2: www2:/gluster-storage
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off
[root@nginx-www1 ~]#

[root@nginx-www1 ~]# mkdir -p /var/www/html/test.com/{public_html,logs}

[root@nginx-www1 ~]# vi /etc/fstab

... (existing entries omitted)
www1:/volume01  /var/www/html/test.com/public_html glusterfs defaults,_netdev,x-systemd.automount 0 0
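After editing fstab, mount the volume (the df output below already reflects this):
[root@nginx-www1 ~]# mount -a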

[root@nginx-www1 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda3        18G  1.6G   17G   9% /
devtmpfs        1.9G     0  1.9G   0% /dev
tmpfs           1.9G     0  1.9G   0% /dev/shm
tmpfs           1.9G  8.6M  1.9G   1% /run
tmpfs           1.9G     0  1.9G   0% /sys/fs/cgroup
/dev/vda1      1014M  215M  800M  22% /boot
tmpfs           379M     0  379M   0% /run/user/0
www1:/volume01   18G  1.8G   17G  10% /var/www/html/test.com/public_html
[root@nginx-www1 ~]#

 

test.com nginx configuration

[root@nginx-www1 ~]# vi /etc/nginx/sites-enabled/test_com.conf
server {
    listen       80;
    server_name  www.test.com test.com;
    root   /var/www/html/test.com/public_html;
    index  index.php index.html index.htm;
    location / {
        try_files $uri $uri/ /index.php?$query_string;
        autoindex on;
    }

    access_log  /var/www/html/test.com/logs/access.log;
    error_log  /var/www/html/test.com/logs/error.log warn;

    error_page   500 502 503 504  /50x.html;
    location = /50x.html {
    }

    location ~ \.php$ {
        try_files $uri =404;
        fastcgi_pass 127.0.0.1:9000;
        fastcgi_split_path_info ^(.+\.php)(/.+)$;
        fastcgi_index index.php;
        fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
        include fastcgi_params;
    }
}
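Before restarting nginx, it is worth validating the configuration:
[root@nginx-www1 ~]# nginx -t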

 

Restart the nginx daemon and check phpinfo

[root@nginx-www1 ~]# systemctl restart nginx
[root@nginx-www1 ~]# vi /var/www/html/test.com/public_html/info.php
<?php phpinfo(); ?>

 

On the www2 system, info.php is visible in the test.com public_html directory, confirming that replication works.

[root@nginx-www2 ~]# ls -al /var/www/html/test.com/public_html/
total 1
drwxr-xr-x 3 nginx nginx 40 Jul 26 14:37 .
drwxr-xr-x 4 nginx nginx 37 Jul 26 14:33 ..
-rw-r--r-- 1 root  root  20 Jul 26 14:37 info.php
[root@nginx-www2 ~]#

 

Check phpinfo

 

Install MariaDB 10.1

Install the DB on a separate VM.

[root@nginx-mariadb01 ~]# vi /etc/yum.repos.d/mariadb.repo
[mariadb]
name = MariaDB
baseurl = http://yum.mariadb.org/10.1/centos7-amd64
gpgkey=https://yum.mariadb.org/RPM-GPG-KEY-MariaDB
gpgcheck=1


[root@nginx-mariadb01 ~]# yum install -y mariadb mariadb-server
[root@nginx-mariadb01 ~]# systemctl start mariadb ; systemctl enable mariadb

 

Run mysql_secure_installation.

[root@nginx-mariadb01 ~]# /usr/bin/mysql_secure_installation

NOTE: RUNNING ALL PARTS OF THIS SCRIPT IS RECOMMENDED FOR ALL MariaDB
      SERVERS IN PRODUCTION USE!  PLEASE READ EACH STEP CAREFULLY!

In order to log into MariaDB to secure it, we'll need the current
password for the root user.  If you've just installed MariaDB, and
you haven't set the root password yet, the password will be blank,
so you should just press enter here.

Enter current password for root (enter for none):
OK, successfully used password, moving on...

Setting the root password ensures that nobody can log into the MariaDB
root user without the proper authorisation.

Set root password? [Y/n] y
New password:
Re-enter new password:
Password updated successfully!
Reloading privilege tables..
 ... Success!


By default, a MariaDB installation has an anonymous user, allowing anyone
to log into MariaDB without having to have a user account created for
them.  This is intended only for testing, and to make the installation
go a bit smoother.  You should remove them before moving into a
production environment.

Remove anonymous users? [Y/n] y
 ... Success!

Normally, root should only be allowed to connect from 'localhost'.  This
ensures that someone cannot guess at the root password from the network.

Disallow root login remotely? [Y/n] y
 ... Success!

By default, MariaDB comes with a database named 'test' that anyone can
access.  This is also intended only for testing, and should be removed
before moving into a production environment.

Remove test database and access to it? [Y/n] y
 - Dropping test database...
 ... Success!
 - Removing privileges on test database...
 ... Success!

Reloading the privilege tables will ensure that all changes made so far
will take effect immediately.

Reload privilege tables now? [Y/n] y
 ... Success!

Cleaning up...

All done!  If you've completed all of the above steps, your MariaDB
installation should now be secure.

Thanks for using MariaDB!
[root@nginx-mariadb01 ~]#

 

Configure the character set

[root@nginx-mariadb01 ~]# vi /etc/my.cnf.d/server.cnf
[mysqld]
character-set-server = utf8mb4
collation-server = utf8mb4_unicode_ci

[root@nginx-mariadb01 ~]# vi /etc/my.cnf.d/client.cnf
[client]
default-character-set = utf8mb4

 

Verify the character set

[root@nginx-mariadb01 ~]#  systemctl restart mariadb
[root@nginx-mariadb01 ~]# mysql -uroot -p
Enter password:
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 2
Server version: 10.1.40-MariaDB MariaDB Server

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> status;
--------------
mysql  Ver 15.1 Distrib 10.1.40-MariaDB, for Linux (x86_64) using readline 5.1

Connection id:          2
Current database:
Current user:           root@localhost
SSL:                    Not in use
Current pager:          stdout
Using outfile:          ''
Using delimiter:        ;
Server:                 MariaDB
Server version:         10.1.40-MariaDB MariaDB Server
Protocol version:       10
Connection:             Localhost via UNIX socket
Server characterset:    utf8mb4
Db     characterset:    utf8mb4
Client characterset:    utf8mb4
Conn.  characterset:    utf8mb4
UNIX socket:            /var/lib/mysql/mysql.sock
Uptime:                 21 sec

Threads: 1  Questions: 4  Slow queries: 0  Opens: 17  Flush tables: 1  Open tables: 11  Queries per second avg: 0.190
--------------

MariaDB [(none)]> quit;
Bye
[root@nginx-mariadb01 ~]#

 

Create the database WordPress will use.

[root@nginx-mariadb01 ~]# mysql -uroot -p
Enter password:
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 3
Server version: 10.1.40-MariaDB MariaDB Server

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> create database wp;
Query OK, 1 row affected (0.00 sec)

MariaDB [(none)]> GRANT ALL ON wp.* TO 'wp'@'%' IDENTIFIED BY 'password';
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> flush privileges;
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> quit;
Bye
[root@nginx-mariadb01 ~]#
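Remote access can be verified from one of the web nodes (assuming the MariaDB client is installed there):
[root@nginx-www1 ~]# mysql -h db01 -uwp -p wp -e 'select 1;'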

 

Edit the hosts file on www1 / www2.

The WordPress installation files are set up on www1 only.

[root@nginx-www1 ~]# vi /etc/hosts

10.10.10.11     www1
10.10.10.22     www2
10.10.10.33     db01

 

Install WordPress

[root@nginx-www1 ~]# cd /var/www/html/test.com/public_html/
[root@nginx-www1 public_html]# wget https://wordpress.org/latest.tar.gz
[root@nginx-www1 public_html]# tar xvf latest.tar.gz
[root@nginx-www1 public_html]# cd wordpress/
[root@nginx-www1 wordpress]# mv * ../
[root@nginx-www1 wordpress]# cd ..
[root@nginx-www1 public_html]# rm -rf wordpress/
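Since nginx and php-fpm run as the nginx user, giving that user ownership of the document root avoids permission problems during the web installer (a suggested step; adjust to your own policy):
[root@nginx-www1 public_html]# chown -R nginx:nginx /var/www/html/test.com/public_html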

 

Browse to the test.com site and install WordPress.

 

Enter the database information.

 

Run the installation.

 

Enter the test.com site information.

 

The WordPress installation is complete.

 

Requests to test.com pass through the nginx-proxy to www1 / www2.

 

Checking the logs shows access entries alternating between www1 and www2.

[CentOS7] Installing Redis and integrating it with PHP

For the LEMP stack, please refer to this link: http://docs.crois.net/linux/linux/#lemp-stack

You can start from the step after the LEMP stack.

Redis can also be installed from source, but this describes the simpler yum-based installation.

A detailed write-up of Redis will follow later.

 

  • Install Redis
[root@centos-nginx ~]# yum install -y redis php71-php-pecl-redis php71-php-phpiredis php-redis
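The redis service itself still needs to be started; a quick ping confirms it is up (these steps are assumed, they are not in the original transcript):
[root@centos-nginx ~]# systemctl enable redis ; systemctl start redis
[root@centos-nginx ~]# redis-cli ping
PONG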

 

  • Verify the installation
[root@centos-nginx ~]# php -i |grep redis
/etc/php.d/50-redis.ini
redis
redis.arrays.algorithm => no value => no value
redis.arrays.auth => no value => no value
redis.arrays.autorehash => 0 => 0
redis.arrays.connecttimeout => 0 => 0
redis.arrays.consistent => 0 => 0
redis.arrays.distributor => no value => no value
redis.arrays.functions => no value => no value
redis.arrays.hosts => no value => no value
redis.arrays.index => 0 => 0
redis.arrays.lazyconnect => 0 => 0
redis.arrays.names => no value => no value
redis.arrays.pconnect => 0 => 0
redis.arrays.previous => no value => no value
redis.arrays.readtimeout => 0 => 0
redis.arrays.retryinterval => 0 => 0
redis.clusters.auth => no value => no value
redis.clusters.cache_slots => 0 => 0
redis.clusters.persistent => 0 => 0
redis.clusters.read_timeout => 0 => 0
redis.clusters.seeds => no value => no value
redis.clusters.timeout => 0 => 0
redis.pconnect.connection_limit => 0 => 0
redis.pconnect.pooling_enabled => 1 => 1
redis.session.lock_expire => 0 => 0
redis.session.lock_retries => 10 => 10
redis.session.lock_wait_time => 2000 => 2000
redis.session.locking_enabled => 0 => 0
Registered save handlers => files user redis rediscluster
This program is free software; you can redistribute it and/or modify
[root@centos-nginx ~]#

 

  • Restart the php-fpm daemon
[root@centos-nginx ~]# systemctl restart php-fpm
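A minimal phpredis smoke test, assuming Redis is listening on localhost:6379 (the file name and key are arbitrary):
[root@centos-nginx ~]# cat > /root/redis-test.php <<'EOF'
<?php
// connect to the local Redis server and round-trip a key
$redis = new Redis();
$redis->connect('127.0.0.1', 6379);
$redis->set('hello', 'world');
echo $redis->get('hello') . "\n";
EOF
[root@centos-nginx ~]# php /root/redis-test.php
world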

 

  • Check the phpinfo page

 

I run a personal FTP server whose disks are tied together with LVM as one storage pool.

It breaks roughly every 3-4 years, and each time I end up doing LVM recovery work.

This time I plan to use Linux software RAID, building md1 + md2 and creating the LVM on top.

The test environment is a CentOS 7 VM with four extra 1 GB disks, configured as sdb + sdc = md1 and sdd + sde = md2.

Caution: recovery testing with the dd command does not work, because it corrupts the RAID volume itself. 🙂

 

 

  • Disk information (four 1 GB disks are attached: sdb / sdc / sdd / sde)
[root@centos7 ~]# lsblk
NAME   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda      8:0    0   20G  0 disk
├─sda1   8:1    0    1G  0 part /boot
├─sda2   8:2    0    1G  0 part [SWAP]
└─sda3   8:3    0   18G  0 part /
sdb      8:16   0    1G  0 disk
sdc      8:32   0    1G  0 disk
sdd      8:48   0    1G  0 disk
sde      8:64   0    1G  0 disk
[root@centos7 ~]#

 

  • Install mdadm
[root@centos7 ~]# yum install -y mdadm

 

  • Check RAID devices
[root@centos7 ~]# cat /proc/mdstat
Personalities :
unused devices: <none>
[root@centos7 ~]#

 

  • fdisk work (sdb / sdc / sdd / sde) — a scripted alternative is sketched after the transcript
[root@centos7 ~]# fdisk /dev/sdb
Welcome to fdisk (util-linux 2.23.2).

Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.

Device does not contain a recognized partition table
Building a new DOS disklabel with disk identifier 0x051d4fae.

Command (m for help): n
Partition type:
   p   primary (0 primary, 0 extended, 4 free)
   e   extended
Select (default p): p
Partition number (1-4, default 1): 1
First sector (2048-2097151, default 2048):
Using default value 2048
Last sector, +sectors or +size{K,M,G} (2048-2097151, default 2097151):
Using default value 2097151
Partition 1 of type Linux and of size 1023 MiB is set

Command (m for help): t
Selected partition 1
Hex code (type L to list all codes): fd
Changed type of partition 'Linux' to 'Linux raid autodetect'

Command (m for help): wq
The partition table has been altered!

Calling ioctl() to re-read partition table.
Syncing disks.
[root@centos7 ~]#

... (the same fdisk dialog is repeated for /dev/sdc, /dev/sdd, /dev/sde)

[root@centos7 ~]# partprobe
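As a scripted alternative to repeating the fdisk dialog, the remaining disks can be partitioned with sfdisk (hedged: assumes the one-line ',,fd' input form, i.e. one full-size partition of type fd, is accepted by this util-linux version):
[root@centos7 ~]# for d in sdc sdd sde; do echo ',,fd' | sfdisk /dev/$d; done
[root@centos7 ~]# partprobe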

 

  • Build RAID 1 array md1 from the /dev/sdb1 and /dev/sdc1 disks.
[root@centos7 ~]# mdadm --create /dev/md1 --level=1 --raid-device=2 /dev/sdb1 /dev/sdc1
mdadm: Note: this array has metadata at the start and
    may not be suitable as a boot device.  If you plan to
    store '/boot' on this device please ensure that
    your boot-loader understands md/v1.x metadata, or use
    --metadata=0.90
Continue creating array? y
mdadm: Fail create md1 when using /sys/module/md_mod/parameters/new_array
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md1 started.
[root@centos7 ~]#

 

  • Build RAID 1 array md2 from the /dev/sdd1 and /dev/sde1 disks.
[root@centos7 ~]# mdadm --create /dev/md2 --level=1 --raid-device=2 /dev/sdd1 /dev/sde1
mdadm: Note: this array has metadata at the start and
    may not be suitable as a boot device.  If you plan to
    store '/boot' on this device please ensure that
    your boot-loader understands md/v1.x metadata, or use
    --metadata=0.90
Continue creating array? y
mdadm: Fail create md2 when using /sys/module/md_mod/parameters/new_array
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md2 started.
[root@centos7 ~]#

 

  • Check RAID status
[root@centos7 ~]# cat /proc/mdstat
Personalities : [raid1]
md2 : active raid1 sde1[1] sdd1[0]
      1046528 blocks super 1.2 [2/2] [UU]

md1 : active raid1 sdc1[1] sdb1[0]
      1046528 blocks super 1.2 [2/2] [UU]

unused devices: <none>
[root@centos7 ~]#

 

  • Create /etc/mdadm.conf.
[root@centos7 ~]# mdadm --detail --scan > /etc/mdadm.conf
[root@centos7 ~]# cat /etc/mdadm.conf
ARRAY /dev/md1 metadata=1.2 name=centos7:1 UUID=33827f12:f44165a0:19e7a7c9:18f88f40
ARRAY /dev/md2 metadata=1.2 name=centos7:2 UUID=b17e111b:e22dcf62:51b929f6:819a5c2c
[root@centos7 ~]#

 

  • Install the lvm2 package.
[root@centos7 ~]# yum install lvm2 -y

 

  • Create the LVM volume and format it with the XFS filesystem.
[root@centos7 ~]# pvcreate /dev/md1 /dev/md2
  Physical volume "/dev/md1" successfully created.
  Physical volume "/dev/md2" successfully created.
[root@centos7 ~]# vgcreate vg00 /dev/md1 /dev/md2
  Volume group "vg00" successfully created
[root@centos7 ~]# lvcreate -l 100%free -n data01 vg00
  Logical volume "data01" created.
[root@centos7 ~]#

[root@centos7 ~]# mkfs.xfs /dev/vg00/data01
meta-data=/dev/vg00/data01       isize=512    agcount=4, agsize=130560 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0, sparse=0
data     =                       bsize=4096   blocks=522240, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
[root@centos7 ~]#

 

  • Add the mount to /etc/fstab, reboot the system, and verify the mount.
[root@centos7 ~]# mkdir /data
[root@centos7 ~]# vi /etc/fstab

/dev/vg00/data01                          /data                   xfs     defaults        0 0

[root@centos7 ~]# mount -a
[root@centos7 ~]# df -h
Filesystem               Size  Used Avail Use% Mounted on
/dev/sda3                 18G  1.4G   17G   8% /
devtmpfs                 903M     0  903M   0% /dev
tmpfs                    912M     0  912M   0% /dev/shm
tmpfs                    912M  8.7M  903M   1% /run
tmpfs                    912M     0  912M   0% /sys/fs/cgroup
/dev/sda1               1014M  135M  880M  14% /boot
tmpfs                    183M     0  183M   0% /run/user/0
/dev/mapper/vg00-data01  2.0G   33M  2.0G   2% /data
[root@centos7 ~]# init 6 
[root@centos7 ~]# df -h |grep -i data
/dev/mapper/vg00-data01  2.0G   33M  2.0G   2% /data
[root@centos7 ~]#

 

  • Check the RAID details with the mdadm command.
[root@centos7 ~]# mdadm --detail /dev/md1
/dev/md1:
           Version : 1.2
     Creation Time : Thu Jul 18 00:29:08 2019
        Raid Level : raid1
        Array Size : 1046528 (1022.00 MiB 1071.64 MB)
     Used Dev Size : 1046528 (1022.00 MiB 1071.64 MB)
      Raid Devices : 2
     Total Devices : 2
       Persistence : Superblock is persistent

       Update Time : Thu Jul 18 00:34:49 2019
             State : clean
    Active Devices : 2
   Working Devices : 2
    Failed Devices : 0
     Spare Devices : 0

Consistency Policy : resync

              Name : centos7:1  (local to host centos7)
              UUID : 33827f12:f44165a0:19e7a7c9:18f88f40
            Events : 17

    Number   Major   Minor   RaidDevice State
       0       8       17        0      active sync   /dev/sdb1
       1       8       33        1      active sync   /dev/sdc1
[root@centos7 ~]# mdadm --detail /dev/md2
/dev/md2:
           Version : 1.2
     Creation Time : Thu Jul 18 00:30:30 2019
        Raid Level : raid1
        Array Size : 1046528 (1022.00 MiB 1071.64 MB)
     Used Dev Size : 1046528 (1022.00 MiB 1071.64 MB)
      Raid Devices : 2
     Total Devices : 2
       Persistence : Superblock is persistent

       Update Time : Thu Jul 18 00:35:22 2019
             State : clean
    Active Devices : 2
   Working Devices : 2
    Failed Devices : 0
     Spare Devices : 0

Consistency Policy : resync

              Name : centos7:2  (local to host centos7)
              UUID : b17e111b:e22dcf62:51b929f6:819a5c2c
            Events : 17

    Number   Major   Minor   RaidDevice State
       0       8       49        0      active sync   /dev/sdd1
       1       8       65        1      active sync   /dev/sde1
[root@centos7 ~]#

 

  • Use mdadm's -f option to simulate a failure of the sdc1 disk in /dev/md1.
[root@centos7 ~]# mdadm /dev/md1 -f /dev/sdc1
mdadm: set /dev/sdc1 faulty in /dev/md1
[root@centos7 ~]# cat /proc/mdstat
Personalities : [raid1]
md2 : active raid1 sde1[1] sdd1[0]
      1046528 blocks super 1.2 [2/2] [UU]

md1 : active raid1 sdc1[1](F) sdb1[0]
      1046528 blocks super 1.2 [2/1] [U_] <---- changed from UU to U_

unused devices: <none>
[root@centos7 ~]#


The mount is still healthy and creating empty files with touch still works.

[root@centos7 ~]# df -h
Filesystem               Size  Used Avail Use% Mounted on
/dev/sda3                 18G  1.4G   17G   8% /
devtmpfs                 903M     0  903M   0% /dev
tmpfs                    912M     0  912M   0% /dev/shm
tmpfs                    912M  8.7M  904M   1% /run
tmpfs                    912M     0  912M   0% /sys/fs/cgroup
/dev/sda1               1014M  135M  880M  14% /boot
/dev/mapper/vg00-data01  2.0G   33M  2.0G   2% /data
tmpfs                    183M     0  183M   0% /run/user/0
[root@centos7 ~]# touch /data/0
[root@centos7 ~]# touch /data/1
[root@centos7 ~]# touch /data/2

 

  • To remove a disk, run mdadm $mdX_name --remove $device_name.
[root@centos7 ~]# mdadm /dev/md1 --remove /dev/sdc1
mdadm: hot removed /dev/sdc1 from /dev/md1
[root@centos7 ~]# cat /proc/mdstat
Personalities : [raid1]
md2 : active raid1 sde1[1] sdd1[0]
      1046528 blocks super 1.2 [2/2] [UU]

md1 : active raid1 sdb1[0]                <--- the sdc1 disk has been removed
      1046528 blocks super 1.2 [2/1] [U_]

unused devices: <none>
[root@centos7 ~]#

 

  • Shut down the system and add a disk.
[root@centos7 ~]# init 0

[root@centos7 ~]# lsblk
NAME              MAJ:MIN RM  SIZE RO TYPE  MOUNTPOINT
sda                 8:0    0   20G  0 disk
├─sda1              8:1    0    1G  0 part  /boot
├─sda2              8:2    0    1G  0 part  [SWAP]
└─sda3              8:3    0   18G  0 part  /
sdb                 8:16   0    1G  0 disk
└─sdb1              8:17   0 1023M  0 part
  └─md1             9:1    0 1022M  0 raid1
    └─vg00-data01 253:0    0    2G  0 lvm   /data
sdc                 8:32   0    1G  0 disk
└─sdc1              8:33   0 1023M  0 part
sdd                 8:48   0    1G  0 disk
└─sdd1              8:49   0 1023M  0 part
  └─md2             9:2    0 1022M  0 raid1
    └─vg00-data01 253:0    0    2G  0 lvm   /data
sde                 8:64   0    1G  0 disk
└─sde1              8:65   0 1023M  0 part
  └─md2             9:2    0 1022M  0 raid1
    └─vg00-data01 253:0    0    2G  0 lvm   /data
sdf                 8:80   0    1G  0 disk
sr0                11:0    1 1024M  0 rom
[root@centos7 ~]# fdisk /dev/sdf
Welcome to fdisk (util-linux 2.23.2).

Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.

Device does not contain a recognized partition table
Building a new DOS disklabel with disk identifier 0xb67cc56a.

Command (m for help): n
Partition type:
   p   primary (0 primary, 0 extended, 4 free)
   e   extended
Select (default p): p
Partition number (1-4, default 1):
First sector (2048-2097151, default 2048):
Using default value 2048
Last sector, +sectors or +size{K,M,G} (2048-2097151, default 2097151):
Using default value 2097151
Partition 1 of type Linux and of size 1023 MiB is set

Command (m for help): t
Selected partition 1
Hex code (type L to list all codes): fd
Changed type of partition 'Linux' to 'Linux raid autodetect'

Command (m for help): wq
The partition table has been altered!

Calling ioctl() to re-read partition table.
Syncing disks.
[root@centos7 ~]# partprobe

 

  • Add the disk to the RAID array
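First add the new partition into the array (the mdadm --add invocation is assumed here; it was not captured in the transcript):
[root@centos7 ~]# mdadm /dev/md1 --add /dev/sdf1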
[root@centos7 ~]# cat /proc/mdstat
Personalities : [raid1]
md2 : active raid1 sde1[1] sdd1[0]
      1046528 blocks super 1.2 [2/2] [UU]

md1 : active raid1 sdf1[2] sdb1[0]
      1046528 blocks super 1.2 [2/1] [U_]
      [===============>.....]  recovery = 76.4% (800384/1046528) finish=0.0min speed=200096K/sec

unused devices: <none>
[root@centos7 ~]#

 

  • Simulate a failure of the sdb1 disk in /dev/md1.
[root@centos7 ~]# mdadm /dev/md1 -f /dev/sdb1
mdadm: set /dev/sdb1 faulty in /dev/md1
[root@centos7 ~]# cat /proc/mdstat
Personalities : [raid1]
md2 : active raid1 sde1[1] sdd1[0]
      1046528 blocks super 1.2 [2/2] [UU]

md1 : active raid1 sdf1[2] sdb1[0](F)
      1046528 blocks super 1.2 [2/1] [_U]

unused devices: <none>
[root@centos7 ~]#

 

  • Check the mount status and try creating empty files with touch.
[root@centos7 ~]# df -h
Filesystem               Size  Used Avail Use% Mounted on
/dev/sda3                 18G  1.4G   17G   8% /
devtmpfs                 903M     0  903M   0% /dev
tmpfs                    912M     0  912M   0% /dev/shm
tmpfs                    912M  8.7M  904M   1% /run
tmpfs                    912M     0  912M   0% /sys/fs/cgroup
/dev/sda1               1014M  135M  880M  14% /boot
/dev/mapper/vg00-data01  2.0G   33M  2.0G   2% /data      <--- still mounted normally
tmpfs                    183M     0  183M   0% /run/user/0
[root@centos7 ~]# touch /data/20

[root@centos7 ~]# touch /data/30
[root@centos7 ~]# touch /data/40

 

  • Remove the sdb1 disk from /dev/md1.
[root@centos7 ~]# mdadm /dev/md1 --remove /dev/sdb1
mdadm: hot removed /dev/sdb1 from /dev/md1
[root@centos7 ~]# cat /proc/mdstat
Personalities : [raid1]
md2 : active raid1 sde1[1] sdd1[0]
      1046528 blocks super 1.2 [2/2] [UU]

md1 : active raid1 sdf1[2]
      1046528 blocks super 1.2 [2/1] [_U]

unused devices: <none>
[root@centos7 ~]#

 

  • Shut down the system, add a disk, and rebuild /dev/md1.
[root@centos7 ~]# init 0
[root@centos7 ~]# lsblk
NAME              MAJ:MIN RM  SIZE RO TYPE  MOUNTPOINT
sda                 8:0    0   20G  0 disk
├─sda1              8:1    0    1G  0 part  /boot
├─sda2              8:2    0    1G  0 part  [SWAP]
└─sda3              8:3    0   18G  0 part  /
sdb                 8:16   0    1G  0 disk
└─sdb1              8:17   0 1023M  0 part
sdc                 8:32   0    1G  0 disk
└─sdc1              8:33   0 1023M  0 part
sdd                 8:48   0    1G  0 disk
└─sdd1              8:49   0 1023M  0 part
  └─md2             9:2    0 1022M  0 raid1
    └─vg00-data01 253:0    0    2G  0 lvm   /data
sde                 8:64   0    1G  0 disk
└─sde1              8:65   0 1023M  0 part
  └─md2             9:2    0 1022M  0 raid1
    └─vg00-data01 253:0    0    2G  0 lvm   /data
sdf                 8:80   0    1G  0 disk
└─sdf1              8:81   0 1023M  0 part
  └─md1             9:1    0 1022M  0 raid1
    └─vg00-data01 253:0    0    2G  0 lvm   /data
sdg                 8:96   0    1G  0 disk       <--- newly added disk
[root@centos7 ~]#

The fdisk step is omitted (partition /dev/sdg the same way as above).
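The new partition then goes back into the array (an assumed command, consistent with sdg1[3] appearing in the output below):
[root@centos7 ~]# mdadm /dev/md1 --add /dev/sdg1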


[root@centos7 ~]# cat /proc/mdstat
Personalities : [raid1]
md2 : active raid1 sdd1[0] sde1[1]
      1046528 blocks super 1.2 [2/2] [UU]

md1 : active raid1 sdg1[3] sdf1[2]
      1046528 blocks super 1.2 [2/1] [_U]
      [=======>.............]  recovery = 38.2% (401280/1046528) finish=0.0min speed=200640K/sec

unused devices: <none>
[root@centos7 ~]#

 

  • Test after RAID recovery
[root@centos7 ~]# cat /proc/mdstat
Personalities : [raid1]
md2 : active raid1 sdd1[0] sde1[1]
      1046528 blocks super 1.2 [2/2] [UU]

md1 : active raid1 sdg1[3] sdf1[2]
      1046528 blocks super 1.2 [2/2] [UU]

unused devices: <none>
[root@centos7 ~]# df -h
Filesystem               Size  Used Avail Use% Mounted on
/dev/sda3                 18G  1.4G   17G   8% /
devtmpfs                 903M     0  903M   0% /dev
tmpfs                    912M     0  912M   0% /dev/shm
tmpfs                    912M  8.7M  904M   1% /run
tmpfs                    912M     0  912M   0% /sys/fs/cgroup
/dev/sda1               1014M  135M  880M  14% /boot
/dev/mapper/vg00-data01  2.0G   33M  2.0G   2% /data
tmpfs                    183M     0  183M   0% /run/user/0
[root@centos7 ~]# cp /tmp/* /data/
cp: omitting directory ‘/tmp/systemd-private-f539feae748d4cae9178d03dceab0233-vgauthd.service-gTNVZW’
cp: omitting directory ‘/tmp/systemd-private-f539feae748d4cae9178d03dceab0233-vmtoolsd.service-ISwWEL’
[root@centos7 ~]# ls -al /data/
total 4
drwxr-xr-x   2 root root 142 Jul 18 00:54 .
dr-xr-xr-x. 18 root root 256 Jul 18 00:33 ..
-rw-r--r--   1 root root   0 Jul 18 00:41 0
-rw-r--r--   1 root root   0 Jul 18 00:41 1
-rw-r--r--   1 root root   0 Jul 18 00:48 10
-rw-r--r--   1 root root   0 Jul 18 00:50 11
-rw-r--r--   1 root root   0 Jul 18 00:50 12
-rw-r--r--   1 root root   0 Jul 18 00:50 13
-rw-r--r--   1 root root   0 Jul 18 00:41 2
-rw-r--r--   1 root root   0 Jul 18 00:49 20
-rw-r--r--   1 root root   0 Jul 18 00:49 30
-rw-r--r--   1 root root   0 Jul 18 00:49 40
-rwx------   1 root root 836 Jul 18 00:54 ks-script-r7n_b9
-rw-------   1 root root   0 Jul 18 00:54 yum.log
[root@centos7 ~]#

 

GlusterFS is a scalable network-distributed file system.

This document configures 2 nodes for HA-Proxy testing and is intended for simple testing only.

For more detail, see the site below.

Site: https://docs.gluster.org/en/latest/

 

 

1. Create the yum repository

[root@gluster01 ~]# yum install centos-release-gluster -y
[root@gluster02 ~]# yum install centos-release-gluster -y

 

2. Add the gluster nodes to the /etc/hosts file.

[root@gluster01 ~]# vi /etc/hosts

10.10.10.34 gluster01
10.10.10.35 gluster02

 

3. Install glusterfs-server.

[root@gluster01 ~]# yum install glusterfs-server -y
[root@gluster02 ~]# yum install glusterfs-server -y

 

4. Enable and start the gluster service.

[root@gluster01 ~]# systemctl enable glusterd ; systemctl start glusterd
[root@gluster02 ~]# systemctl enable glusterd ; systemctl start glusterd

 

5. Configure the GlusterFS replicated volume

[root@gluster01 ~]# gluster peer probe gluster02
peer probe: success.
[root@gluster02 ~]# gluster peer probe gluster01

[root@gluster01 ~]# mkdir /gluster-data
[root@gluster02 ~]# mkdir /gluster-data


[root@gluster01 ~]# gluster volume create volume01 replica 2 transport tcp gluster01:/gluster-data gluster02:/gluster-data force
volume create: volume01: success: please start the volume to access data
[root@gluster01 ~]#

[root@gluster01 ~]# gluster volume start volume01
volume start: volume01: success
[root@gluster01 ~]#

[root@gluster01 ~]# gluster volume info

Volume Name: volume01
Type: Replicate
Volume ID: d078cd71-c767-4af5-b96c-e91d37451894
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: gluster01:/gluster-data
Brick2: gluster02:/gluster-data
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off
[root@gluster01 ~]#

 

6. Configure /etc/fstab

[root@gluster01 ~]# mkdir /www-data
[root@gluster02 ~]# mkdir /www-data


[root@gluster01 ~]# vi /etc/fstab
[root@gluster02 ~]# vi /etc/fstab
gluster01:/volume01 /www-data glusterfs defaults,_netdev,x-systemd.automount 0 0
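Both fstab entries point at gluster01, which makes that node a single point of failure at mount time. The glusterfs mount option backupvolfile-server can name a fallback server (shown here as a suggestion; on gluster02 the roles would be reversed):
gluster01:/volume01 /www-data glusterfs defaults,_netdev,backupvolfile-server=gluster02,x-systemd.automount 0 0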

 

7. Test the share

[root@gluster01 ~]# mount -a
[root@gluster02 ~]# mount -a

[root@gluster02 ~]# cd /www-data/
[root@gluster02 www-data]# touch 0


[root@gluster01 ~]# ls -al /www-data/
total 0
drwxr-xr-x 3 root root 33 Jun 28 13:07 .
dr-xr-xr-x. 19 root root 262 Jun 28 13:05 ..
-rw-r--r-- 1 root root 0 Jun 28 13:07 0
[root@gluster01 ~]#

 

As mentioned in an earlier post, for users running Lightsail on a budget, Let's Encrypt is a better fit than the AWS load balancer + SSL certificate: on top of the base $3.50, using the load balancer adds $18 per month.

The workflow is: register DNS in Route 53 -> create a Lightsail instance -> create a static IP + DNS -> create a load balancer -> add Route 53 settings -> validate the SSL certificate -> change the WP settings.

AWS Lightsail setup: https://lightsail.aws.amazon.com/ls/docs/ko_kr/articles/create-lightsail-load-balancer-and-attach-lightsail-instances
bitnami setup: https://docs.bitnami.com/aws/apps/wordpress/administration/
WP SSL setup: https://www.wpbeginner.com/wp-tutorials/how-to-fix-common-ssl-issues-in-wordpress-beginners-guide/

 

  • Create an instance in Lightsail.

  • Proceed with the default plan.

 

  • Apart from the free tier, a usage fee of $3.50 applies.

 

  • Change the instance name and click Create.

 

 

  • Click Networking.

 

  • Click Create static IP.
  • A static IP is required in order to map a DNS domain to the instance.

 

  • Check the static IP location and choose the instance under Attach to an instance.

 

  • In the static IP panel, click Create.

 

 

  • The static IP has been created.

 

 

  • Create the DNS zone that Lightsail will use.
  • Click Create DNS zone.

  • Enter the domain name.

 

  • Click Create DNS zone.

  • Set the name servers at your domain registrar.
  • Usually you can change name servers wherever you bought the domain.
  • e.g., Gabia, cafe24, etc.

 

  • Change the name servers.

  • Route 53 setup
  • Click DNS management.

 

 

  • Click Create hosted zone.

 

  • Click Create hosted zone.

 

  • Enter the domain and click Create.

 

  • Screen after creation

 

  • In Lightsail, go to Networking -> DNS zone and click Manage.

 

  • Click Add record.

 

  • Add an @ record for the domain and set the IP field to the static IP.
  • You can pick the static IP with a mouse click.

 

  • Add a www record.

 

  • Go to Networking -> Create load balancer.

 

 

  • If the instance is in the Seoul region, pick the same region for the load balancer.
  • HTTPS requires a certificate, which you create after the load balancer is created.

 

  • Name the load balancer and click Create load balancer.
  • The load balancer costs $18 per month.

 

 

  • Choose the instance the load balancer will use.

 

  • Under Load balancer -> Inbound traffic, click Create certificate.

 

  • Specify the domain and secondary domain, then click Create.

 

  • After creating the certificate, a TXT code for domain validation appears. Drag it and paste it into a notepad.
  • Also drag the load balancer entry at the top of Inbound traffic and paste it into the notepad.

 

  • You need to configure the domain in both Lightsail and Route 53.
  • In Lightsail, go to Networking -> DNS zone and click Manage.

 

  • Change the existing DNS A record from the static IP to the load balancer.
  • Add a CNAME record.

 

  • Click Add record and add the CNAME record.
  • From the values pasted into the notepad, put the name part in the subdomain field and the value part in the maps-to field.
  • For _8c9b8fde94178cd4ea044d3e1dab3ee0.www.example.com., paste only up to www, i.e.
  • paste only _8c9b8fde94178cd4ea044d3e1dab3ee0.www.

 

  • Route 53 setup
  • Register the Lightsail load balancer in Route 53.
  • For the Lightsail load balancer, set the name to www, type to A - IPv4 address, Alias to Yes, and paste the load balancer into the alias target.
  • Register the load balancer for both the domain (e.g., example.com) and the secondary domain (www.example.com).

 

  • To use the SSL certificate, add the values copied into the notepad as CNAME records.

 

 

 

  • Example of completed Route 53 domain setup

 

  • Go to Lightsail -> Load balancer -> Inbound traffic.
  • If nothing was misconfigured, the certificate status will change to Valid.
  • Select the created certificate.

 

 

  • Site access test
  • Try connecting over http and https.

 

  • HTTPS access test
  • HTTPS does not work properly yet.
  • A separate WP plugin is needed.

 

 

  • Connect to Lightsail via SSH.
$ ssh -i LightsailDefaultKey-ap-northeast-2.pem bitnami@192.168.0.22
The authenticity of host '192.168.0.22 (192.168.0.22)' can't be established.
ECDSA key fingerprint is SHA256:AHwlOFx9ql8AZjjqz6Sfb8PYsQJ/FgRakbaUQVfQ69Y.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '15.164.65.11' (ECDSA) to the list of known hosts.
Welcome to Ubuntu 16.04.6 LTS (GNU/Linux 4.4.0-1081-aws x86_64)
*** System restart required ***
       ___ _ _                   _
      | _ |_) |_ _ _  __ _ _ __ (_)
      | _ \ |  _| ' \/ _` | '  \| |
      |___/_|\__|_|_|\__,_|_|_|_|_|

  *** Welcome to the Bitnami WordPress 5.1.1-2 ***
  *** Documentation:  https://docs.bitnami.com/aws/apps/wordpress/ ***
  ***                 https://docs.bitnami.com/aws/ ***
  *** Bitnami Forums: https://community.bitnami.com/ ***
To run a command as administrator (user "root"), use "sudo <command>".
See "man sudo_root" for details.


bitnami@ip-172-26-9-64:~$ sudo vi /opt/bitnami/apps/wordpress/htdocs/wp-config.php
define( 'DB_COLLATE', '' );

// Add from here on
define('FORCE_SSL_ADMIN', true);
// in some setups HTTP_X_FORWARDED_PROTO might contain
// a comma-separated list e.g. http,https
// so check for https existence
if (strpos($_SERVER['HTTP_X_FORWARDED_PROTO'], 'https') !== false)
       $_SERVER['HTTPS']='on';


Restart the services after the configuration is complete
bitnami@ip-172-26-9-64:~$ sudo /opt/bitnami/ctlscript.sh restart

 

  • Verify HTTPS access

  • Install the Simple SSL WP plugin.

 

  • If WP was set up by migrating data from a previously run site, install the Simple SSL WP plugin and then add the following to wp-config.php.
// Add at the very bottom
$server_opts = array(
    "HTTP_CLOUDFRONT_FORWARDED_PROTO" => "https",
    "HTTP_CF_VISITOR" => "https",
    "HTTP_X_FORWARDED_PROTO" => "https",
    "HTTP_X_FORWARDED_SSL" => "on",
    "HTTP_X_PROTO" => "SSL",
    "HTTP_X_FORWARDED_SSL" => "1"
);
foreach ( $server_opts as $option => $value ) {
    if ( (isset($_ENV["HTTPS"]) && ( "on" == $_ENV["HTTPS"] )) || (isset( $_SERVER[ $option ] ) && ( strpos( $_SERVER[ $option ], $value ) !== false )) ) {
        $_SERVER["HTTPS"] = "on";
        break;
    }
}
//END Really Simple SSL

// Change wp-config.php permissions from 640 to 666.
bitnami@ip-172-26-9-64:~$ cd /opt/bitnami/apps/wordpress/htdocs/
bitnami@ip-172-26-9-64:/opt/bitnami/apps/wordpress/htdocs$ ll
total 216
drwxrwxr-x  5 bitnami daemon  4096 Jun 25 08:04 ./
drwxr-xr-x  7 root    root    4096 Apr 30 10:50 ../
-rw-rw-r--  1 bitnami daemon   420 Nov 30  2017 index.php
-rw-rw-r--  1 bitnami daemon 19935 Jun 25 07:51 license.txt
-rw-rw-r--  1 bitnami daemon  7447 Jun 25 07:51 readme.html
-rw-rw-r--  1 bitnami daemon  6919 Jan 12 06:41 wp-activate.php
drwxrwxr-x  9 bitnami daemon  4096 Jun 25 07:51 wp-admin/
-rw-rw-r--  1 bitnami daemon   369 Nov 30  2017 wp-blog-header.php
-rw-rw-r--  1 bitnami daemon  2283 Jan 21 01:34 wp-comments-post.php
-rw-r-----  1 bitnami daemon  4997 Jun 25 08:04 wp-config.php
-rw-rw-r--  1 daemon  daemon  2898 Jun 25 07:51 wp-config-sample.php
drwxrwxr-x  7 bitnami daemon  4096 Jun 25 07:49 wp-content/
-rw-rw-r--  1 bitnami daemon  3847 Jan  9 08:37 wp-cron.php
drwxrwxr-x 20 bitnami daemon 12288 Jun 25 07:51 wp-includes/
-rw-rw-r--  1 bitnami daemon  2502 Jan 16 05:29 wp-links-opml.php
-rw-rw-r--  1 bitnami daemon  3306 Nov 30  2017 wp-load.php
-rw-rw-r--  1 bitnami daemon 39551 Jun 25 07:51 wp-login.php
-rw-rw-r--  1 bitnami daemon  8403 Nov 30  2017 wp-mail.php
-rw-rw-r--  1 bitnami daemon 18962 Jun 25 07:51 wp-settings.php
-rw-rw-r--  1 bitnami daemon 31085 Jan 16 16:51 wp-signup.php
-rw-rw-r--  1 bitnami daemon  4764 Nov 30  2017 wp-trackback.php
-rw-rw-r--  1 bitnami daemon  3068 Aug 17  2018 xmlrpc.php
bitnami@ip-172-26-9-64:/opt/bitnami/apps/wordpress/htdocs$ chmod 666 wp-config.php

 

 

  • Run the Simple SSL plugin.

 

  • No separate http -> https redirect configuration is needed.

 

Lately I have been doing Lightsail-related work fairly often.

Here is the most convenient way I know to connect to AWS.

On Linux, use a terminal; on Windows, connect using Git Bash.

Git Bash can be downloaded from the following link:

https://git-scm.com/download/win

 

  • Download the SSH key file.
  • Click Account at the top.

 

  • Download the Lightsail SSH key file.
  • Download the key file for the region where the instance lives.

 

  • Run Git Bash.
  • Use the ssh -i option to point at the downloaded key file. With Bitnami images, log in as the bitnami user.

  • When using a load balancer, connect by IP.

When running WordPress on Lightsail, you can easily use a Let's Encrypt certificate.

The textbook approach is to attach a Lightsail load balancer, but it costs a lot, so it is not recommended.

 

  • The command format is as follows.
  • Put your domain in place of example.com and add secondary domains with the -d option, as shown below.
  • /opt/bitnami/letsencrypt/scripts/generate-certificate.sh -m test-user@gmail.com -d example.com -d www.example.com
  • The Bitnami Apache service is restarted automatically, so no extra steps are needed.
  • The pre-installed script
root@ip-172-26-14-40:~# /opt/bitnami/letsencrypt/scripts/generate-certificate.sh -m test-user@gmail.com -d example.com -d www.example.com

This tool will now stop the web server and configure the required SSL certificate. It will also start it again once finished.

When supplying multiple domains, Lego creates a SAN (Subject Alternate Names) certificate which results in only one certificate
under the email "test-user@gmail.com" valid for all domains you entered ("example.com www.example.com").

The first domain in your list ("example.com") will be added as the "CommonName" of the certificate and the rest will be added
as "DNSNames" to the SAN extension  within the certificate

Do you want to continue? [y/n]: y
Unmonitored apache
Syntax OK
/opt/bitnami/apache2/scripts/ctl.sh : httpd stopped
2019/06/24 07:36:11 No key found for account test-user@gmail.com. Generating a P384 key.
2019/06/24 07:36:11 Saved key to /opt/bitnami/letsencrypt/accounts/acme-v02.api.letsencrypt.org/test-user@gmail.com/keys/test-user@gmail.com.key
2019/06/24 07:36:12 Please review the TOS at https://letsencrypt.org/documents/LE-SA-v1.2-November-15-2017.pdf
Do you accept the TOS? Y/n
y
2019/06/24 07:36:21 [INFO] acme: Registering account for test-user@gmail.com
!!!! HEADS UP !!!!

                Your account credentials have been saved in your Let's Encrypt
                configuration directory at "/opt/bitnami/letsencrypt/accounts".
                You should make a secure backup of this folder now. This
                configuration directory will also contain certificates and
                private keys obtained from Let's Encrypt so making regular
                backups of this folder is ideal.2019/06/24 07:36:21 [INFO] [example.com, www.example.com] acme: Obtaining bundled SAN certificate
2019/06/24 07:36:22 [INFO] [example.com] AuthURL: https://acme-v02.api.letsencrypt.org/acme/authz/iDs2het-u8P9UW7tnlyReFHM8VmVhx5NNy0zqBCW5cQ
2019/06/24 07:36:22 [INFO] [www.example.com] AuthURL: https://acme-v02.api.letsencrypt.org/acme/authz/E_aGWBPUekKDlXKKhZr7fZCWcv5M9938J35vgsdKXRE
2019/06/24 07:36:22 [INFO] [example.com] acme: use tls-alpn-01 solver
2019/06/24 07:36:22 [INFO] [www.example.com] acme: use tls-alpn-01 solver
2019/06/24 07:36:22 [INFO] [example.com] acme: Trying to solve TLS-ALPN-01
2019/06/24 07:36:28 [INFO] [example.com] The server validated our request
2019/06/24 07:36:28 [INFO] [www.example.com] acme: Trying to solve TLS-ALPN-01
2019/06/24 07:36:35 [INFO] [www.example.com] The server validated our request
2019/06/24 07:36:35 [INFO] [example.com, www.example.com] acme: Validations succeeded; requesting certificates
2019/06/24 07:36:36 [INFO] [example.com] Server responded with a certificate.
Syntax OK
/opt/bitnami/apache2/scripts/ctl.sh : httpd started at port 80
Monitored apache

Congratulations, the generation and configuration of your SSL certificate finished properly.

You can now configure a cronjob to renew it every month.

Do you want to proceed? [y/n]: y
root@ip-172-26-14-40:~#

root@ip-172-26-14-40:~$ crontab -l
0 0 1 * * sudo /opt/bitnami/letsencrypt/lego --path="/opt/bitnami/letsencrypt" --tls --email="test-user@gmail.com"  --domains=example.com --domains=www.example.com renew && sudo /opt/bitnami/apache2/bin/httpd -f /opt/bitnami/apache2/conf/httpd.conf -k graceful
root@ip-172-26-14-40:~$

 

  • Verify the web site

Kimchi is an HTML5 based management tool for KVM. It is designed to make it as easy as possible to get started with KVM and create your first guest.

kimchi-project site: https://github.com/kimchi-project

KVM install: http://dev.crois.net/2017/11/26/system-centos7-kvm-install/

KVM bridge: http://dev.crois.net/2019/05/30/centos-bridge-%EC%84%A4%EC%A0%95/

  • Install packages and start the daemon
[root@kvm-server01 ~]# wget https://github.com/kimchi-project/kimchi/releases/download/2.5.0/wok-2.5.0-0.el7.centos.noarch.rpm
[root@kvm-server01 ~]# wget https://github.com/kimchi-project/kimchi/releases/download/2.5.0/kimchi-2.5.0-0.el7.centos.noarch.rpm
[root@kvm-server01 ~]# yum install wok-2.5.0-0.el7.centos.noarch.rpm
[root@kvm-server01 ~]# yum install -y kimchi-2.5.0-0.el7.centos.noarch.rpm
[root@kvm-server01 ~]# sed -i 's/^#session_timeout = .*/session_timeout = 1440/g' /etc/wok/wok.conf
[root@kvm-server01 ~]# systemctl enable wokd
[root@kvm-server01 ~]# systemctl start wokd
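If firewalld is active on the host, port 8001 also has to be opened before the UI is reachable (an assumed extra step, not shown in the original):
[root@kvm-server01 ~]# firewall-cmd --permanent --add-port=8001/tcp
[root@kvm-server01 ~]# firewall-cmd --reload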

 

  • Verify port 8001 is open
[root@kvm-server01 ~]# netstat -antp |grep 8001
tcp        0      0 0.0.0.0:8001            0.0.0.0:*               LISTEN      28498/nginx: master
[root@kvm-server01 ~]#

 

  • Web access (https://kvm-server01:8001)
  • Log in as the root user.

 

  • KVM management
  • Go to the Virtualization tab.

 

kubernetes install

Caution!!! This document is a work in progress and the content has not been fully polished.

Please treat it as a rough reference only.

Reference sites:

https://www.howtoforge.com/tutorial/centos-kubernetes-docker-cluster/

https://kubernetes.io/docs/setup/cri/

https://juejin.im/post/5caea3ffe51d456e79545c32

https://cloud.tencent.com/developer/article/1409419

https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/

 

 

  • Work on all nodes
  • Configure the hosts file
[root@k8s-all-node ~]# vi /etc/hosts
10.10.10.27     k8s-master
10.10.10.28     k8s-node01
10.10.10.29     k8s-node02

 

  • Disable SELinux
[root@k8s-all-node ~]# vi /etc/selinux/config

# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
#     enforcing - SELinux security policy is enforced.
#     permissive - SELinux prints warnings instead of enforcing.
#     disabled - No SELinux policy is loaded.
SELINUX=disabled

 

  • Disable firewalld
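The commands that produce the empty ruleset below (assuming firewalld was installed and running) would be:
[root@k8s-all-node ~]# systemctl stop firewalld ; systemctl disable firewalld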
[root@k8s-all-node ~]# iptables -L
Chain INPUT (policy ACCEPT)
target     prot opt source               destination         

Chain FORWARD (policy ACCEPT)
target     prot opt source               destination         

Chain OUTPUT (policy ACCEPT)
target     prot opt source               destination         
[root@k8s-all-node ~]#

 

  • Configure sysctl.conf
[root@k8s-all-node ~]# cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF


[root@k8s-all-node ~]# sysctl --system
* Applying /usr/lib/sysctl.d/00-system.conf ...
* Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
kernel.yama.ptrace_scope = 0
* Applying /usr/lib/sysctl.d/50-default.conf ...
kernel.sysrq = 16
kernel.core_uses_pid = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.promote_secondaries = 1
net.ipv4.conf.all.promote_secondaries = 1
fs.protected_hardlinks = 1
fs.protected_symlinks = 1
* Applying /etc/sysctl.d/99-sysctl.conf ...
* Applying /etc/sysctl.d/k8s.conf ...
* Applying /etc/sysctl.conf ...
[root@k8s-all-node ~]#

 

  • swap off
[root@k8s-all-node ~]# swapoff -a

[root@k8s-all-node ~]# vi /etc/fstab
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
UUID=d7bb5d3b-5b37-47e0-8c26-fe40f7311597 /                       xfs     defaults        0 0
UUID=43ec35ea-2e35-46f1-864c-b13603a8acac /boot                   xfs     defaults        0 0
#UUID=2de336ec-4a33-36r1-8w2s-asdf2342ccgg swap                   swap     defaults        0 0

 

[root@k8s-all-node ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
[root@k8s-all-node ~]# yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
[root@k8s-all-node ~]# yum list docker-ce --showduplicates | sort -r
[root@k8s-all-node ~]# yum install -y docker-ce-18.06.3.ce

[root@k8s-all-node ~]# mkdir /etc/docker

[root@k8s-all-node ~]# cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF


[root@k8s-all-node ~]# mkdir -p /etc/systemd/system/docker.service.d
[root@k8s-all-node ~]# systemctl daemon-reload
[root@k8s-all-node ~]# systemctl restart docker

 

  • Install Kubernetes and reboot the system
[root@k8s-all-node ~]# vi /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
        https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg

[root@k8s-all-node ~]# yum install -y kubelet kubeadm kubectl
[root@k8s-all-node ~]# init 6

[root@k8s-all-node ~]# systemctl start docker ; systemctl enable docker
[root@k8s-all-node ~]# systemctl start kubelet ; systemctl enable kubelet

 

  • k8s-master only
  • The coredns pods change to Running after the network add-on is installed.
[root@k8s-master ~]# kubeadm init --apiserver-advertise-address=10.10.10.27 --pod-network-cidr=20.20.0.0/16
... (output omitted)

[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 10.10.10.27:6443 --token syojz8.svxybs8x0f3iy28a \
    --discovery-token-ca-cert-hash sha256:b28c6474e92e2bc87e8f7b470119e506df36ae6ae08a8f50dd070f5d714a28e1
[root@k8s-master ~]#

[root@k8s-master ~]# mkdir -p $HOME/.kube
[root@k8s-master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

[root@k8s-master ~]# kubectl get nodes
NAME         STATUS   ROLES    AGE     VERSION
k8s-master   Ready    master   2m22s   v1.14.1
[root@k8s-master ~]# kubectl get pods --all-namespaces
NAMESPACE     NAME                                 READY   STATUS    RESTARTS   AGE
kube-system   coredns-fb8b8dccf-c9hvh              0/1     Pending   0          78s
kube-system   coredns-fb8b8dccf-hmt6w              0/1     Pending   0          78s
kube-system   etcd-k8s-master                      1/1     Running   0          41s
kube-system   kube-apiserver-k8s-master            1/1     Running   0          42s
kube-system   kube-controller-manager-k8s-master   1/1     Running   0          31s
kube-system   kube-proxy-92c9h                     1/1     Running   0          78s
kube-system   kube-scheduler-k8s-master            1/1     Running   0          16s
[root@k8s-master ~]#

 

[root@k8s-master ~]# kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml
[root@k8s-master ~]# kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml

[root@k8s-master ~]# kubectl get pods --all-namespaces
NAMESPACE     NAME                                 READY   STATUS    RESTARTS   AGE
kube-system   calico-node-r72sb                    2/2     Running   0          38s
kube-system   coredns-fb8b8dccf-c9hvh              0/1     Running   0          4m15s
kube-system   coredns-fb8b8dccf-hmt6w              0/1     Running   0          4m15s
kube-system   etcd-k8s-master                      1/1     Running   0          3m38s
kube-system   kube-apiserver-k8s-master            1/1     Running   0          3m39s
kube-system   kube-controller-manager-k8s-master   1/1     Running   0          3m28s
kube-system   kube-proxy-92c9h                     1/1     Running   0          4m15s
kube-system   kube-scheduler-k8s-master            1/1     Running   0          3m13s
[root@k8s-master ~]#

 

  • Check on k8s-master
[root@k8s-master ~]# kubeadm token create --print-join-command
kubeadm join 10.10.10.27:6443 --token eq8odd.rxcfznxvepos1pg8     --discovery-token-ca-cert-hash sha256:aa3949ebeec315e5d303a18fc049c33a89a9110d8bdec0a93f3c065dcb78c689 
[root@k8s-master ~]#

 

  • Work on k8s-node01 / k8s-node02
[root@k8s-node01 ~]# kubeadm join 10.10.10.27:6443 --token \
 eq8odd.rxcfznxvepos1pg8     --discovery-token-ca-cert-hash sha256:aa3949ebeec315e5d303a18fc049c33a89a9110d8bdec0a93f3c065dcb78c689

[root@k8s-node02 ~]# kubeadm join 10.10.10.27:6443 --token \
eq8odd.rxcfznxvepos1pg8     --discovery-token-ca-cert-hash sha256:aa3949ebeec315e5d303a18fc049c33a89a9110d8bdec0a93f3c065dcb78c689

 

  • Check on k8s-master
[root@k8s-master ~]# kubectl get nodes
NAME         STATUS   ROLES    AGE     VERSION
k8s-master   Ready    master   8m43s   v1.14.3
k8s-node01   Ready    <none>   73s     v1.14.3
k8s-node02   Ready    <none>   62s     v1.14.3
[root@k8s-master ~]#

 

  • Create a certificate for the dashboard installation.
[root@k8s-master ~]# mkdir /root/certs
[root@k8s-master ~]# cd /root/certs
[root@k8s-master certs]# openssl genrsa -des3 -passout pass:x -out dashboard.pass.key 2048
[root@k8s-master certs]# openssl rsa -passin pass:x -in dashboard.pass.key -out dashboard.key
[root@k8s-master certs]# openssl req -new -key dashboard.key -out dashboard.csr
[root@k8s-master certs]# openssl x509 -req -sha256 -days 365 -in dashboard.csr -signkey dashboard.key -out dashboard.crt

 

  • Install the dashboard.
[root@k8s-master ~]# kubectl create secret generic kubernetes-dashboard-certs --from-file=/root/certs -n kube-system
[root@k8s-master ~]# kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml

 

  • Change the dashboard configuration.
[root@k8s-master ~]# kubectl edit service kubernetes-dashboard -n kube-system
#   type: ClusterIP    <--  change this line to NodePort

# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
#
apiVersion: v1
kind: Service
metadata:
  annotations:
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"v1","kind":"Service","metadata":{"annotations":{},"labels":{"k8s-app":"kubernetes-dashboard"},"name":"kubernetes-dashboard","namespace":"kube-system"},"spec":{"ports":[{"port":443,"targetPort":8443}],"selector":{"k8s-app":"kubernetes-dashboard"}}}
  creationTimestamp: "2019-06-12T07:41:01Z"
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
  resourceVersion: "2224"
  selfLink: /api/v1/namespaces/kube-system/services/kubernetes-dashboard
  uid: 6cb7d772-8ce5-11e9-ad2b-525400fce674
spec:
  clusterIP: 10.108.72.190
  ports:
  - port: 443
    protocol: TCP
    targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
  sessionAffinity: None
#  type: ClusterIP    <--  before
  type: NodePort      <--  after
status:
  loadBalancer: {}

 

  • Check the dashboard status and connection info
  • The service is mapped as 443:30906/TCP.
[root@k8s-master ~]#  kubectl get pods -n kube-system
NAME                                    READY   STATUS    RESTARTS   AGE
calico-node-8mvl8                       2/2     Running   0          15m
calico-node-br9sw                       2/2     Running   0          15m
calico-node-r72sb                       2/2     Running   0          18m
coredns-fb8b8dccf-c9hvh                 1/1     Running   0          22m
coredns-fb8b8dccf-hmt6w                 1/1     Running   0          22m
etcd-k8s-master                         1/1     Running   0          21m
kube-apiserver-k8s-master               1/1     Running   0          21m
kube-controller-manager-k8s-master      1/1     Running   0          21m
kube-proxy-6t9vw                        1/1     Running   0          15m
kube-proxy-8vw5v                        1/1     Running   0          15m
kube-proxy-92c9h                        1/1     Running   0          22m
kube-scheduler-k8s-master               1/1     Running   0          21m
kubernetes-dashboard-5f7b999d65-t88x2   1/1     Running   0          3m56s
[root@k8s-master ~]# 

[root@k8s-master ~]# kubectl get service -n kube-system
NAME                   TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                  AGE
calico-typha           ClusterIP   10.101.41.222   <none>        5473/TCP                 20m
kube-dns               ClusterIP   10.96.0.10      <none>        53/UDP,53/TCP,9153/TCP   23m
kubernetes-dashboard   NodePort    10.108.72.190   <none>        443:30906/TCP            5m5s
[root@k8s-master ~]#

 

  • Create a dashboard account
[root@k8s-master ~]# kubectl create serviceaccount cluster-admin-dashboard-sa
[root@k8s-master ~]# kubectl create clusterrolebinding cluster-admin-dashboard-sa --clusterrole=cluster-admin --serviceaccount=default:cluster-admin-dashboard-sa

 

  • Generate the dashboard token
[root@k8s-master ~]# kubectl get secret $(kubectl get serviceaccount cluster-admin-dashboard-sa -o jsonpath="{.secrets[0].name}") -o jsonpath="{.data.token}" | base64 --decode
eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6ImNsdXN0ZXItYWRtaW4tZGFzaGJvYXJkLXNhLXRva2VuLWNzZ3A4Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImNsdXN0ZXItYWRtaW4tZGFzaGJvYXJkLXNhIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiNGFkYjU3Y2QtOGNlNi0xMWU5LWFkMmItNTI1NDAwZmNlNjc0Iiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50OmRlZmF1bHQ6Y2x1c3Rlci1hZG1pbi1kYXNoYm9hcmQtc2EifQ.E_T09ftzrV_68Ie0nuthJ1yjFeNByeok87x3F653dB9Pt0a7n6hWGOZsiCUaU0mevm56kl2QUgzV5J-waNvr5Fv4IZ5NMmId_XfIGWlsul2P6y4wag96DuG65K1T2DwoGix4GO8a1p7HISOQ0knxr0OVMOjXRLcOXUov3h3Mv87T-O1gjVIUHAMvB70aZK1ScBaULegqzQbHwjpRc7FFOKUQB4HANJ6gw1asMF4yw0M_dF3GK16GaCxxKEW6rQWGrdN_TNB2nIXKgKqfqHS_35o02yYd2_cU3TDZ14xGl7F2zSVJxzB99ftyC6pwquPF3y3qhXeUFNU0tyCyxKUrWQ
[root@k8s-master ~]#

 

  • Access the dashboard ( https://10.10.10.27:30906/#!/login )
  • Click Proceed to unsafe past the self-signed certificate warning.

 

  • Enter the token information.

 

  • Dashboard screen

 

  • k8s testing — in progress
[root@k8s-master ~]# kubectl create deployment nginx --image=nginx
[root@k8s-master ~]# kubectl describe deployment nginx
Name:                   nginx
Namespace:              default
CreationTimestamp:      Fri, 03 May 2019 00:28:11 +0900
Labels:                 app=nginx
Annotations:            deployment.kubernetes.io/revision: 1
Selector:               app=nginx
Replicas:               1 desired | 1 updated | 1 total | 0 available | 1 unavailable
StrategyType:           RollingUpdate
MinReadySeconds:        0
RollingUpdateStrategy:  25% max unavailable, 25% max surge
Pod Template:
  Labels:  app=nginx
  Containers:
   nginx:
    Image:        nginx
    Port:         <none>
    Host Port:    <none>
    Environment:  <none>
    Mounts:       <none>
  Volumes:        <none>
Conditions:
  Type           Status  Reason
  ----           ------  ------
  Available      False   MinimumReplicasUnavailable
  Progressing    True    ReplicaSetUpdated
OldReplicaSets:  <none>
NewReplicaSet:   nginx-65f88748fd (1/1 replicas created)
Events:
  Type    Reason             Age   From                   Message
  ----    ------             ----  ----                   -------
  Normal  ScalingReplicaSet  18s   deployment-controller  Scaled up replica set nginx-65f88748fd to 1
[root@k8s-master ~]#


[root@k8s-master ~]# kubectl create service nodeport nginx --tcp=80:80


[root@k8s-master ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP        6m33s
nginx        NodePort    10.102.109.228   <none>        80:30187/TCP   21s
[root@k8s-master ~]#

 

  • nginx 확인
[root@k8s-master ~]#  curl k8s-node01:30187
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>
[root@k8s-master ~]#

 

  • Scaling pods
Initially there is one pod.

[root@k8s-master ~]# kubectl get pods
NAME                     READY   STATUS    RESTARTS   AGE
nginx-65f88748fd-8lqrb   1/1     Running   0          5m12s
[root@k8s-master ~]#

Scale the pods out to 5.
[root@k8s-master ~]# kubectl scale --replicas=5 deployment/nginx
deployment.extensions/nginx scaled


[root@k8s-master ~]# kubectl get pods
NAME                     READY   STATUS              RESTARTS   AGE
nginx-65f88748fd-6v7n5   1/1     Running             0          13s
nginx-65f88748fd-86svl   0/1     ContainerCreating   0          13s
nginx-65f88748fd-8lqrb   1/1     Running             0          12m
nginx-65f88748fd-pq8p8   0/1     ContainerCreating   0          13s
nginx-65f88748fd-w4tq8   0/1     ContainerCreating   0          13s
[root@k8s-master ~]#

 

  • Delete the pod
[root@k8s-master ~]# kubectl delete deployment/nginx

Confirm the deletion
[root@k8s-master ~]# kubectl get pods -o wide
No resources found.
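Note that deleting the deployment leaves the NodePort service created earlier in place; it can be cleaned up separately:
[root@k8s-master ~]# kubectl delete service/nginx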