본문서는 작성중인 문서 입니다.  간단하게 nginx-proxy 와 공유디렉토리를 이용하여 wordpress 를 테스트 할수 있습니다.

Test 환경 kvm 에서 Centos7 Version vm 을 3대 준비 합니다.

Nginx-proxy 1대 / LEMP Stack 2대를 구성 합니다.

도메인은 임시로 test.com 도메인을 준비 하였습니다.

구성은 nginx-proxy + LEMP Stack + glusterfs 이며 app 는 WP 가 올라 갑니다.

  • Nginx-proxy 구성도

nginx-proxy

epel-release 패키지 설치 및 nginx 설치

[root@nginx-proxy ~]# yum install epel-release -y

[root@nginx-proxy ~]# vi /etc/yum.repos.d/nginx.repo
[nginx]
name=nginx
baseurl=http://nginx.org/packages/centos/7/$basearch/
gpgcheck=0
enabled=1
[root@nginx-proxy ~]# yum install -y nginx

 

VM nginx-proxy

nginx 설정

[root@nginx-proxy ~]# cd /etc/nginx/conf.d/
[root@nginx-proxy conf.d]# cp default.conf default.conf.org


[root@nginx-proxy conf.d]# cat default.conf
server {
    listen       80;
    server_name  test.com;
    location / {
        rewrite ^/(.*)$ /$1 break; 
        proxy_pass http://test.com;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_redirect off;
    }
}
upstream test.com {
    server 10.10.10.11:80;
    server 10.10.10.22:80;
}

[root@nginx-proxy conf.d]# systemctl enable nginx ; systemctl start nginx

 

VM nginx-www1 / nginx-www2 에서 작업

NGINX , PHP 7.1 을 설치 합니다.

[root@nginx-www1 ~]# vi /etc/yum.repos.d/nginx.repo
[nginx]
name=nginx
baseurl=http://nginx.org/packages/centos/7/$basearch/
gpgcheck=0
enabled=1

[root@nginx-www1 ~]# yum install -y nginx
[root@nginx-www1 ~]# yum install -y epel-release yum-utils
[root@nginx-www1 ~]# rpm -Uvh http://ftp.riken.jp/Linux/remi/enterprise/remi-release-7.rpm
[root@nginx-www1 ~]# yum clean all && yum list
[root@nginx-www1 ~]# yum-config-manager --enable remi-php71
[root@nginx-www1 ~]# yum -y install php php-mysql php-fpm php-opcache php-gd php-ldap \
php-odbc php-pear php-xml php-xmlrpc php-mbstring php-soap curl curl-devel

 

nginx 설정 및 php-fpm 설정

테스트 도메인은 내부 dns 를 구성하여 test.com 으로 생성 하였습니다.

test.com 은 도메인 아이피를 nginx-proxy 로 설정 합니다.

[root@nginx-www1 ~]# mkdir /etc/nginx/sites-enabled
[root@nginx-www1 ~]# vi /etc/nginx/nginx.conf
user  nginx;
worker_processes  1;

error_log  /var/log/nginx/error.log warn;
pid        /var/run/nginx.pid;


events {
    worker_connections  1024;
}


http {
    include       /etc/nginx/mime.types;
    default_type  application/octet-stream;

    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    access_log  /var/log/nginx/access.log  main;

    sendfile        on;
    #tcp_nopush     on;

    keepalive_timeout  65;

    #gzip  on;

    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*.conf;
}


[root@nginx-www1 ~]# vi /etc/php-fpm.d/www.conf

user = nginx
group = nginx

listen.owner = nginx
listen.group = nginx
listen.mode = 0660
[root@nginx-www1 ~]# systemctl enable nginx ; systemctl start nginx
[root@nginx-www1 ~]# systemctl enable php-fpm ; systemctl start php-fpm

 

glusterfs 설치

VM nginx-www1 / nginx-www2 에서 작업

/etc/hosts 파일 수정

[root@nginx-www1 ~]# vi /etc/hosts
10.10.10.11     www1
10.10.10.22     www2
10.10.10.33     db01

 

glusterfs 설치

[root@nginx-www1 ~]# yum install centos-release-gluster -y
[root@nginx-www1 ~]# yum install glusterfs-server -y
[root@nginx-www1 ~]# systemctl enable glusterd ; systemctl start glusterd

 

gluster 공유 디렉토리 생성

[root@nginx-www1 ~]# gluster peer probe www2
peer probe: success.
[root@nginx-www2 ~]# gluster peer probe www1



[root@nginx-www1 ~]# mkdir /gluster-storage
[root@nginx-www2 ~]# mkdir /gluster-storage


[root@nginx-www1 ~]# gluster volume create volume01 replica 2 transport tcp www1:/gluster-storage www2:/gluster-storage force
volume create: volume01: success: please start the volume to access data

[root@nginx-www1 ~]# gluster volume start volume01
volume start: volume01: success
[root@nginx-www1 ~]# gluster volume info

Volume Name: volume01
Type: Replicate
Volume ID: b24c3e2b-f458-4733-9bc0-38d9bd441bb6
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: www1:/gluster-storage
Brick2: www2:/gluster-storage
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off
[root@nginx-www1 ~]#

[root@nginx-www1 ~]# mkdir -p /var/www/html/test.com/{public_html,logs}

[root@nginx-www1 ~]# vi /etc/fstab

~중략
www1:/volume01  /var/www/html/test.com/public_html glusterfs defaults,_netdev,x-systemd.automount 0 0

[root@nginx-www1 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda3        18G  1.6G   17G   9% /
devtmpfs        1.9G     0  1.9G   0% /dev
tmpfs           1.9G     0  1.9G   0% /dev/shm
tmpfs           1.9G  8.6M  1.9G   1% /run
tmpfs           1.9G     0  1.9G   0% /sys/fs/cgroup
/dev/vda1      1014M  215M  800M  22% /boot
tmpfs           379M     0  379M   0% /run/user/0
www1:/volume01   18G  1.8G   17G  10% /var/www/html/test.com/public_html
[root@nginx-www1 ~]#

 

test.com nginx 설정

[root@nginx-www1 ~]# vi /etc/nginx/sites-enabled/test_com.conf
server {
    listen       80;
    server_name  www.test.com test.com;
    root   /var/www/html/test.com/public_html;
    index  index.php index.html index.htm;
    location / {
        try_files $uri $uri/ /index.php?$query_string;
        autoindex on;
    }

    access_log  /var/www/html/test.com/logs/access.log;
    error_log  /var/www/html/test.com/logs/error.log warn;

    error_page   500 502 503 504  /50x.html;
    location = /50x.html {
    }

    location ~ \.php$ {
        try_files $uri =404;
        fastcgi_pass 127.0.0.1:9000;
        fastcgi_split_path_info ^(.+\.php)(/.+)$;
        fastcgi_index index.php;
        fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
        include fastcgi_params;
    }
}

 

nginx 데몬 재시작 및 phpinfo 확인

[root@nginx-www1 ~]# systemctl restart nginx
[root@nginx-www1 ~]# vi /var/www/html/test.com/public_html/info.php
<?php phpinfo(); ?>

 

www2 시스템 에서 test.com public_html 디렉토리 확인시 정상적으로 info.php 를 확인 할수 있습니다.

[root@nginx-www2 ~]# ls -al /var/www/html/test.com/public_html/
total 1
drwxr-xr-x 3 nginx nginx 40 Jul 26 14:37 .
drwxr-xr-x 4 nginx nginx 37 Jul 26 14:33 ..
-rw-r--r-- 1 root  root  20 Jul 26 14:37 info.php
[root@nginx-www2 ~]#

 

phpinfo 확인

 

Mariadb 10.1 설치

별도의 vm 에 db 를 설치 합니다.

[root@nginx-mariadb01 ~]# vi /etc/yum.repos.d/mariadb.repo
[mariadb]
name = MariaDB
baseurl = http://yum.mariadb.org/10.1/centos7-amd64
gpgkey=https://yum.mariadb.org/RPM-GPG-KEY-MariaDB
gpgcheck=1


[root@nginx-mariadb01 ~]# yum install -y mariadb mariadb-server
[root@nginx-mariadb01 ~]# systemctl start mariadb ; systemctl enable mariadb

 

mysql_secure_installation 을 실행 합니다.

[root@nginx-mariadb01 ~]# /usr/bin/mysql_secure_installation

NOTE: RUNNING ALL PARTS OF THIS SCRIPT IS RECOMMENDED FOR ALL MariaDB
      SERVERS IN PRODUCTION USE!  PLEASE READ EACH STEP CAREFULLY!

In order to log into MariaDB to secure it, we'll need the current
password for the root user.  If you've just installed MariaDB, and
you haven't set the root password yet, the password will be blank,
so you should just press enter here.

Enter current password for root (enter for none):
OK, successfully used password, moving on...

Setting the root password ensures that nobody can log into the MariaDB
root user without the proper authorisation.

Set root password? [Y/n] y
New password:
Re-enter new password:
Password updated successfully!
Reloading privilege tables..
 ... Success!


By default, a MariaDB installation has an anonymous user, allowing anyone
to log into MariaDB without having to have a user account created for
them.  This is intended only for testing, and to make the installation
go a bit smoother.  You should remove them before moving into a
production environment.

Remove anonymous users? [Y/n] y
 ... Success!

Normally, root should only be allowed to connect from 'localhost'.  This
ensures that someone cannot guess at the root password from the network.

Disallow root login remotely? [Y/n] y
 ... Success!

By default, MariaDB comes with a database named 'test' that anyone can
access.  This is also intended only for testing, and should be removed
before moving into a production environment.

Remove test database and access to it? [Y/n] y
 - Dropping test database...
 ... Success!
 - Removing privileges on test database...
 ... Success!

Reloading the privilege tables will ensure that all changes made so far
will take effect immediately.

Reload privilege tables now? [Y/n] y
 ... Success!

Cleaning up...

All done!  If you've completed all of the above steps, your MariaDB
installation should now be secure.

Thanks for using MariaDB!
[root@nginx-mariadb01 ~]#

 

character set 설정

[root@nginx-mariadb01 ~]# vi /etc/my.cnf.d/server.cnf
[mysqld]
character-set-server = utf8mb4
collation-server = utf8mb4_unicode_ci

[root@nginx-mariadb01 ~]# vi /etc/my.cnf.d/client.cnf
[client]
default-character-set = utf8mb4

 

character set 확인

[root@nginx-mariadb01 ~]#  systemctl restart mariadb
[root@nginx-mariadb01 ~]# mysql -uroot -p
Enter password:
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 2
Server version: 10.1.40-MariaDB MariaDB Server

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> status;
--------------
mysql  Ver 15.1 Distrib 10.1.40-MariaDB, for Linux (x86_64) using readline 5.1

Connection id:          2
Current database:
Current user:           root@localhost
SSL:                    Not in use
Current pager:          stdout
Using outfile:          ''
Using delimiter:        ;
Server:                 MariaDB
Server version:         10.1.40-MariaDB MariaDB Server
Protocol version:       10
Connection:             Localhost via UNIX socket
Server characterset:    utf8mb4
Db     characterset:    utf8mb4
Client characterset:    utf8mb4
Conn.  characterset:    utf8mb4
UNIX socket:            /var/lib/mysql/mysql.sock
Uptime:                 21 sec

Threads: 1  Questions: 4  Slow queries: 0  Opens: 17  Flush tables: 1  Open tables: 11  Queries per second avg: 0.190
--------------

MariaDB [(none)]> quit;
Bye
[root@nginx-mariadb01 ~]#

 

WordPress  에서 사용할 DB 를 생성 합니다.

[root@nginx-mariadb01 ~]# mysql -uroot -p
Enter password:
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 3
Server version: 10.1.40-MariaDB MariaDB Server

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> create database wp;
Query OK, 1 row affected (0.00 sec)

MariaDB [(none)]> GRANT ALL ON wp.* TO 'wp'@'%' IDENTIFIED BY 'password';
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> flush privileges;
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> quit;
Bye
[root@nginx-mariadb01 ~]#

 

www1 / www2 에 hosts 파일을 수정 합니다.

wordpress 설치 파일은 www1 에서만 작업 합니다.

[root@nginx-www1 ~]# vi /etc/hosts

10.10.10.11     www1
10.10.10.22     www2
10.10.10.33     db01

 

WordPress 설치

[root@nginx-www1 ~]# cd /var/www/html/test.com/public_html/
[root@nginx-www1 public_html]# wget https://wordpress.org/latest.tar.gz
[root@nginx-www1 public_html]# tar xvf latest.tar.gz
[root@nginx-www1 public_html]# cd wordpress/
[root@nginx-www1 wordpress]# mv * ../
[root@nginx-www1 public_html]# rm -rf wordpress/

 

test.com 사이트에 접속하여 워드프레스를 설치 합니다.

 

database 정보 입력

 

Run installation

 

test.com site 정보 입력

 

워드프레스 설치가 완료 되었습니다.

 

test.com 으로 접속시 nginx-proxy 를 통하여 www1 / www2 로 접속을 합니다.

 

로그 확인시 www1 / www2 에 한번씩 접속 로그가 생성 됩니다.

[CentOS7] Redis 설치 및 php연동

LEMP Stack 의 경우 링크 사이트를 참고해 주세요. http://docs.crois.net/linux/linux/#lemp-stack   

LEMP Stack 이후 부터 설치 하시면 됩니다.

Source 로 설치 하는 방법도 있지만 간단하게 yum 으로 설치 하는 방법을 기술 합니다.

Redis 의 자세한 내용은 차후 정리 하도록 하겠습니다.

 

  • Redis 설치
[root@centos-nginx ~]# yum install -y redis php71-php-pecl-redis php71-php-phpiredis php-redis

 

  • 설치확인
[root@centos-nginx ~]# php -i |grep redis
/etc/php.d/50-redis.ini
redis
redis.arrays.algorithm => no value => no value
redis.arrays.auth => no value => no value
redis.arrays.autorehash => 0 => 0
redis.arrays.connecttimeout => 0 => 0
redis.arrays.consistent => 0 => 0
redis.arrays.distributor => no value => no value
redis.arrays.functions => no value => no value
redis.arrays.hosts => no value => no value
redis.arrays.index => 0 => 0
redis.arrays.lazyconnect => 0 => 0
redis.arrays.names => no value => no value
redis.arrays.pconnect => 0 => 0
redis.arrays.previous => no value => no value
redis.arrays.readtimeout => 0 => 0
redis.arrays.retryinterval => 0 => 0
redis.clusters.auth => no value => no value
redis.clusters.cache_slots => 0 => 0
redis.clusters.persistent => 0 => 0
redis.clusters.read_timeout => 0 => 0
redis.clusters.seeds => no value => no value
redis.clusters.timeout => 0 => 0
redis.pconnect.connection_limit => 0 => 0
redis.pconnect.pooling_enabled => 1 => 1
redis.session.lock_expire => 0 => 0
redis.session.lock_retries => 10 => 10
redis.session.lock_wait_time => 2000 => 2000
redis.session.locking_enabled => 0 => 0
Registered save handlers => files user redis rediscluster
This program is free software; you can redistribute it and/or modify
[root@centos-nginx ~]#

 

  • php-fpm 데몬 재시작
[root@centos-nginx ~]# systemctl restart php-fpm

 

  • phpinfo 페이지 확인

 

개인적으로 운영하는 ftp 를 lvm 으로 묶어 Storage 형태로 만들어 사용 하고 있습니다.

한 3~4년 주기로 깨지다 보니 그때마다 lvm 복구 작업을 종종 합니다.

Linux Soft Raid 를 이용하여 md1 + md2 를 구성하여 lvm 을 만들어 사용할까 합니다.

Test 환경의 경우 Centos 7 Version 에 1G disk 4개를 추가 하여 sdb + sdc = md1 , sdd + sde = md2 로 구성 하였습니다.

주의: dd 명령으로 복구 테스트하였을 경우 raid 볼륨 자체가 깨지는 문제로 테스트가 되지 않습니다. 🙂 

 

 

  • Disk 정보 (sdb / sdc /sdd /sde 총 4장의 1G Disk 가 장착되어 있습니다.)
[root@centos7 ~]# lsblk
NAME   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda      8:0    0   20G  0 disk
├─sda1   8:1    0    1G  0 part /boot
├─sda2   8:2    0    1G  0 part [SWAP]
└─sda3   8:3    0   18G  0 part /
sdb      8:16   0    1G  0 disk
sdc      8:32   0    1G  0 disk
sdd      8:48   0    1G  0 disk
sde      8:64   0    1G  0 disk
[root@centos7 ~]#

 

  • mdadm 설치 
[root@centos7 ~]# yum install -y mdadm

 

  • raid 장치 확인 
[root@centos7 ~]# cat /proc/mdstat
Personalities :
unused devices: <none>
[root@centos7 ~]#

 

  • fdisk 작업 (sdb / sdc /sdd /sde )
[root@centos7 ~]# fdisk /dev/sdb
Welcome to fdisk (util-linux 2.23.2).

Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.

Device does not contain a recognized partition table
Building a new DOS disklabel with disk identifier 0x051d4fae.

Command (m for help): n
Partition type:
   p   primary (0 primary, 0 extended, 4 free)
   e   extended
Select (default p): p
Partition number (1-4, default 1): 1
First sector (2048-2097151, default 2048):
Using default value 2048
Last sector, +sectors or +size{K,M,G} (2048-2097151, default 2097151):
Using default value 2097151
Partition 1 of type Linux and of size 1023 MiB is set

Command (m for help): t
Selected partition 1
Hex code (type L to list all codes): fd
Changed type of partition 'Linux' to 'Linux raid autodetect'

Command (m for help): wq
The partition table has been altered!

Calling ioctl() to re-read partition table.
Syncing disks.
[root@centos7 ~]#

~중략

[root@centos7 ~]# partprobe

 

  • /dev/sdb1 , /dev/sdc1 디스크를 이용하여 raid 1 md1 를 구성 합니다.
[root@centos7 ~]# mdadm --create /dev/md1 --level=1 --raid-devices=2 /dev/sdb1 /dev/sdc1
mdadm: Note: this array has metadata at the start and
    may not be suitable as a boot device.  If you plan to
    store '/boot' on this device please ensure that
    your boot-loader understands md/v1.x metadata, or use
    --metadata=0.90
Continue creating array? y
mdadm: Fail create md1 when using /sys/module/md_mod/parameters/new_array
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md1 started.
[root@centos7 ~]#

 

  • /dev/sdd1 , /dev/sde1 디스크를 이용하여 raid 1 md2 를 구성 합니다. 
[root@centos7 ~]# mdadm --create /dev/md2 --level=1 --raid-devices=2 /dev/sdd1 /dev/sde1
mdadm: Note: this array has metadata at the start and
    may not be suitable as a boot device.  If you plan to
    store '/boot' on this device please ensure that
    your boot-loader understands md/v1.x metadata, or use
    --metadata=0.90
Continue creating array? y
mdadm: Fail create md2 when using /sys/module/md_mod/parameters/new_array
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md2 started.
[root@centos7 ~]#

 

  • raid 상태 확인 
[root@centos7 ~]# cat /proc/mdstat
Personalities : [raid1]
md2 : active raid1 sde1[1] sdd1[0]
      1046528 blocks super 1.2 [2/2] [UU]

md1 : active raid1 sdc1[1] sdb1[0]
      1046528 blocks super 1.2 [2/2] [UU]

unused devices: <none>
[root@centos7 ~]#

 

  • /etc/mdadm.conf 를 생성합니다.
[root@centos7 ~]# mdadm --detail --scan > /etc/mdadm.conf
[root@centos7 ~]# cat /etc/mdadm.conf
ARRAY /dev/md1 metadata=1.2 name=centos7:1 UUID=33827f12:f44165a0:19e7a7c9:18f88f40
ARRAY /dev/md2 metadata=1.2 name=centos7:2 UUID=b17e111b:e22dcf62:51b929f6:819a5c2c
[root@centos7 ~]#

 

  • lvm2 패키지를 설치 합니다.
[root@centos7 ~]# yum install lvm2 -y

 

  • lvm 생성후 xfs 파일시스템으로 포맷을 합니다.
[root@centos7 ~]# pvcreate /dev/md1 /dev/md2
  Physical volume "/dev/md1" successfully created.
  Physical volume "/dev/md2" successfully created.
[root@centos7 ~]# vgcreate vg00 /dev/md1 /dev/md2
  Volume group "vg00" successfully created
[root@centos7 ~]# lvcreate -l 100%free -n data01 vg00
  Logical volume "data01" created.
[root@centos7 ~]#

[root@centos7 ~]# mkfs.xfs /dev/vg00/data01
meta-data=/dev/vg00/data01       isize=512    agcount=4, agsize=130560 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0, sparse=0
data     =                       bsize=4096   blocks=522240, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
[root@centos7 ~]#

 

  • /etc/fstab 에 마운트 후 시스템 리부팅후 마운트 상태를 확인 합니다. 
[root@centos7 ~]# mkdir /data
[root@centos7 ~]# vi /etc/fstab

/dev/vg00/data01                          /data                   xfs     defaults        0 0

[root@centos7 ~]# mount -a
[root@centos7 ~]# df -h
Filesystem               Size  Used Avail Use% Mounted on
/dev/sda3                 18G  1.4G   17G   8% /
devtmpfs                 903M     0  903M   0% /dev
tmpfs                    912M     0  912M   0% /dev/shm
tmpfs                    912M  8.7M  903M   1% /run
tmpfs                    912M     0  912M   0% /sys/fs/cgroup
/dev/sda1               1014M  135M  880M  14% /boot
tmpfs                    183M     0  183M   0% /run/user/0
/dev/mapper/vg00-data01  2.0G   33M  2.0G   2% /data
[root@centos7 ~]# init 6 
[root@centos7 ~]# df -h |grep -i data
/dev/mapper/vg00-data01  2.0G   33M  2.0G   2% /data
[root@centos7 ~]#

 

  • mdadm 명령어를 이용하여 raid 정보를 확인 합니다. 
[root@centos7 ~]# mdadm --detail /dev/md1
/dev/md1:
           Version : 1.2
     Creation Time : Thu Jul 18 00:29:08 2019
        Raid Level : raid1
        Array Size : 1046528 (1022.00 MiB 1071.64 MB)
     Used Dev Size : 1046528 (1022.00 MiB 1071.64 MB)
      Raid Devices : 2
     Total Devices : 2
       Persistence : Superblock is persistent

       Update Time : Thu Jul 18 00:34:49 2019
             State : clean
    Active Devices : 2
   Working Devices : 2
    Failed Devices : 0
     Spare Devices : 0

Consistency Policy : resync

              Name : centos7:1  (local to host centos7)
              UUID : 33827f12:f44165a0:19e7a7c9:18f88f40
            Events : 17

    Number   Major   Minor   RaidDevice State
       0       8       17        0      active sync   /dev/sdb1
       1       8       33        1      active sync   /dev/sdc1
[root@centos7 ~]# mdadm --detail /dev/md2
/dev/md2:
           Version : 1.2
     Creation Time : Thu Jul 18 00:30:30 2019
        Raid Level : raid1
        Array Size : 1046528 (1022.00 MiB 1071.64 MB)
     Used Dev Size : 1046528 (1022.00 MiB 1071.64 MB)
      Raid Devices : 2
     Total Devices : 2
       Persistence : Superblock is persistent

       Update Time : Thu Jul 18 00:35:22 2019
             State : clean
    Active Devices : 2
   Working Devices : 2
    Failed Devices : 0
     Spare Devices : 0

Consistency Policy : resync

              Name : centos7:2  (local to host centos7)
              UUID : b17e111b:e22dcf62:51b929f6:819a5c2c
            Events : 17

    Number   Major   Minor   RaidDevice State
       0       8       49        0      active sync   /dev/sdd1
       1       8       65        1      active sync   /dev/sde1
[root@centos7 ~]#

 

  • mdadm -f 옵션을 이용하여 /dev/md1 에 sdc1 디스크 장애 상황을 만듭니다. 
[root@centos7 ~]# mdadm /dev/md1 -f /dev/sdc1
mdadm: set /dev/sdc1 faulty in /dev/md1
[root@centos7 ~]# cat /proc/mdstat
Personalities : [raid1]
md2 : active raid1 sde1[1] sdd1[0]
      1046528 blocks super 1.2 [2/2] [UU]

md1 : active raid1 sdc1[1](F) sdb1[0]
      1046528 blocks super 1.2 [2/1] [U_] <---- UU 에서 U_ 로 변경 되었습니다.

unused devices: <none>
[root@centos7 ~]#


마운트는 정상적으로 되어 있고 touch 로 빈파일생성도 정상적으로 됩니다.

[root@centos7 ~]# df -h
Filesystem               Size  Used Avail Use% Mounted on
/dev/sda3                 18G  1.4G   17G   8% /
devtmpfs                 903M     0  903M   0% /dev
tmpfs                    912M     0  912M   0% /dev/shm
tmpfs                    912M  8.7M  904M   1% /run
tmpfs                    912M     0  912M   0% /sys/fs/cgroup
/dev/sda1               1014M  135M  880M  14% /boot
/dev/mapper/vg00-data01  2.0G   33M  2.0G   2% /data
tmpfs                    183M     0  183M   0% /run/user/0
[root@centos7 ~]# touch /data/0
[root@centos7 ~]# touch /data/1
[root@centos7 ~]# touch /data/2

 

  • 디스크 remove 의 경우 mdadm $mdX_name --remove $device_name 으로 디스크를 제거 하시면 됩니다.
[root@centos7 ~]# mdadm /dev/md1 --remove /dev/sdc1
mdadm: hot removed /dev/sdc1 from /dev/md1
[root@centos7 ~]# cat /proc/mdstat
Personalities : [raid1]
md2 : active raid1 sde1[1] sdd1[0]
      1046528 blocks super 1.2 [2/2] [UU]

md1 : active raid1 sdb1[0]                <--- sdc1 Disk 가 제거된것을 확인할수 있습니다.
      1046528 blocks super 1.2 [2/1] [U_]

unused devices: <none>
[root@centos7 ~]#

 

  • 시스템 shutdown 후 Disk 를 추가 합니다. 
[root@centos7 ~]# init 0

[root@centos7 ~]# lsblk
NAME              MAJ:MIN RM  SIZE RO TYPE  MOUNTPOINT
sda                 8:0    0   20G  0 disk
├─sda1              8:1    0    1G  0 part  /boot
├─sda2              8:2    0    1G  0 part  [SWAP]
└─sda3              8:3    0   18G  0 part  /
sdb                 8:16   0    1G  0 disk
└─sdb1              8:17   0 1023M  0 part
  └─md1             9:1    0 1022M  0 raid1
    └─vg00-data01 253:0    0    2G  0 lvm   /data
sdc                 8:32   0    1G  0 disk
└─sdc1              8:33   0 1023M  0 part
sdd                 8:48   0    1G  0 disk
└─sdd1              8:49   0 1023M  0 part
  └─md2             9:2    0 1022M  0 raid1
    └─vg00-data01 253:0    0    2G  0 lvm   /data
sde                 8:64   0    1G  0 disk
└─sde1              8:65   0 1023M  0 part
  └─md2             9:2    0 1022M  0 raid1
    └─vg00-data01 253:0    0    2G  0 lvm   /data
sdf                 8:80   0    1G  0 disk
sr0                11:0    1 1024M  0 rom
[root@centos7 ~]# fdisk /dev/sdf
Welcome to fdisk (util-linux 2.23.2).

Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.

Device does not contain a recognized partition table
Building a new DOS disklabel with disk identifier 0xb67cc56a.

Command (m for help): n
Partition type:
   p   primary (0 primary, 0 extended, 4 free)
   e   extended
Select (default p): p
Partition number (1-4, default 1):
First sector (2048-2097151, default 2048):
Using default value 2048
Last sector, +sectors or +size{K,M,G} (2048-2097151, default 2097151):
Using default value 2097151
Partition 1 of type Linux and of size 1023 MiB is set

Command (m for help): t
Selected partition 1
Hex code (type L to list all codes): fd
Changed type of partition 'Linux' to 'Linux raid autodetect'

Command (m for help): wq
The partition table has been altered!

Calling ioctl() to re-read partition table.
Syncing disks.
[root@centos7 ~]# partprobe

 

  • raid 에 disk 추가 
[root@centos7 ~]# cat /proc/mdstat
Personalities : [raid1]
md2 : active raid1 sde1[1] sdd1[0]
      1046528 blocks super 1.2 [2/2] [UU]

md1 : active raid1 sdf1[2] sdb1[0]
      1046528 blocks super 1.2 [2/1] [U_]
      [===============>.....]  recovery = 76.4% (800384/1046528) finish=0.0min speed=200096K/sec

unused devices: <none>
[root@centos7 ~]#

 

  • /dev/md1 에서 sdb1 Disk 에 장애상황을 만듭니다. 
[root@centos7 ~]# mdadm /dev/md1 -f /dev/sdb1
mdadm: set /dev/sdb1 faulty in /dev/md1
[root@centos7 ~]# cat /proc/mdstat
Personalities : [raid1]
md2 : active raid1 sde1[1] sdd1[0]
      1046528 blocks super 1.2 [2/2] [UU]

md1 : active raid1 sdf1[2] sdb1[0](F)
      1046528 blocks super 1.2 [2/1] [_U]

unused devices: <none>
[root@centos7 ~]#

 

  • mount 상태 확인 및 touch 로 빈파일을 생성해 봅니다. 
[root@centos7 ~]# df -h
Filesystem               Size  Used Avail Use% Mounted on
/dev/sda3                 18G  1.4G   17G   8% /
devtmpfs                 903M     0  903M   0% /dev
tmpfs                    912M     0  912M   0% /dev/shm
tmpfs                    912M  8.7M  904M   1% /run
tmpfs                    912M     0  912M   0% /sys/fs/cgroup
/dev/sda1               1014M  135M  880M  14% /boot
/dev/mapper/vg00-data01  2.0G   33M  2.0G   2% /data      <--- 정상적으로 마운트 되어 있습니다.
tmpfs                    183M     0  183M   0% /run/user/0
[root@centos7 ~]# touch /data/20

[root@centos7 ~]# touch /data/30
[root@centos7 ~]# touch /data/40

 

  • /dev/md1 에서 sdb1 디스크를 제거 합니다. 
[root@centos7 ~]# mdadm /dev/md1 --remove /dev/sdb1
mdadm: hot removed /dev/sdb1 from /dev/md1
[root@centos7 ~]# cat /proc/mdstat
Personalities : [raid1]
md2 : active raid1 sde1[1] sdd1[0]
      1046528 blocks super 1.2 [2/2] [UU]

md1 : active raid1 sdf1[2]
      1046528 blocks super 1.2 [2/1] [_U]

unused devices: <none>
[root@centos7 ~]#

 

  • 시스템 종료후 disk 를 추가 하여 /dev/md1 을 복구 합니다. 
[root@centos7 ~]# init 0
[root@centos7 ~]# lsblk
NAME              MAJ:MIN RM  SIZE RO TYPE  MOUNTPOINT
sda                 8:0    0   20G  0 disk
├─sda1              8:1    0    1G  0 part  /boot
├─sda2              8:2    0    1G  0 part  [SWAP]
└─sda3              8:3    0   18G  0 part  /
sdb                 8:16   0    1G  0 disk
└─sdb1              8:17   0 1023M  0 part
sdc                 8:32   0    1G  0 disk
└─sdc1              8:33   0 1023M  0 part
sdd                 8:48   0    1G  0 disk
└─sdd1              8:49   0 1023M  0 part
  └─md2             9:2    0 1022M  0 raid1
    └─vg00-data01 253:0    0    2G  0 lvm   /data
sde                 8:64   0    1G  0 disk
└─sde1              8:65   0 1023M  0 part
  └─md2             9:2    0 1022M  0 raid1
    └─vg00-data01 253:0    0    2G  0 lvm   /data
sdf                 8:80   0    1G  0 disk
└─sdf1              8:81   0 1023M  0 part
  └─md1             9:1    0 1022M  0 raid1
    └─vg00-data01 253:0    0    2G  0 lvm   /data
sdg                 8:96   0    1G  0 disk       <--- 추가된 Disk 
[root@centos7 ~]#

fdisk 부분은 생략 합니다. 


[root@centos7 ~]# cat /proc/mdstat
Personalities : [raid1]
md2 : active raid1 sdd1[0] sde1[1]
      1046528 blocks super 1.2 [2/2] [UU]

md1 : active raid1 sdg1[3] sdf1[2]
      1046528 blocks super 1.2 [2/1] [_U]
      [=======>.............]  recovery = 38.2% (401280/1046528) finish=0.0min speed=200640K/sec

unused devices: <none>
[root@centos7 ~]#

 

  • raid 복구후 테스트 
[root@centos7 ~]# cat /proc/mdstat
Personalities : [raid1]
md2 : active raid1 sdd1[0] sde1[1]
      1046528 blocks super 1.2 [2/2] [UU]

md1 : active raid1 sdg1[3] sdf1[2]
      1046528 blocks super 1.2 [2/2] [UU]

unused devices: <none>
[root@centos7 ~]# df -h
Filesystem               Size  Used Avail Use% Mounted on
/dev/sda3                 18G  1.4G   17G   8% /
devtmpfs                 903M     0  903M   0% /dev
tmpfs                    912M     0  912M   0% /dev/shm
tmpfs                    912M  8.7M  904M   1% /run
tmpfs                    912M     0  912M   0% /sys/fs/cgroup
/dev/sda1               1014M  135M  880M  14% /boot
/dev/mapper/vg00-data01  2.0G   33M  2.0G   2% /data
tmpfs                    183M     0  183M   0% /run/user/0
[root@centos7 ~]# cp /tmp/* /data/
cp: omitting directory ‘/tmp/systemd-private-f539feae748d4cae9178d03dceab0233-vgauthd.service-gTNVZW’
cp: omitting directory ‘/tmp/systemd-private-f539feae748d4cae9178d03dceab0233-vmtoolsd.service-ISwWEL’
[root@centos7 ~]# ls -al /data/
total 4
drwxr-xr-x   2 root root 142 Jul 18 00:54 .
dr-xr-xr-x. 18 root root 256 Jul 18 00:33 ..
-rw-r--r--   1 root root   0 Jul 18 00:41 0
-rw-r--r--   1 root root   0 Jul 18 00:41 1
-rw-r--r--   1 root root   0 Jul 18 00:48 10
-rw-r--r--   1 root root   0 Jul 18 00:50 11
-rw-r--r--   1 root root   0 Jul 18 00:50 12
-rw-r--r--   1 root root   0 Jul 18 00:50 13
-rw-r--r--   1 root root   0 Jul 18 00:41 2
-rw-r--r--   1 root root   0 Jul 18 00:49 20
-rw-r--r--   1 root root   0 Jul 18 00:49 30
-rw-r--r--   1 root root   0 Jul 18 00:49 40
-rwx------   1 root root 836 Jul 18 00:54 ks-script-r7n_b9
-rw-------   1 root root   0 Jul 18 00:54 yum.log
[root@centos7 ~]#

 

Glusterfs 는 확장이 가능한 네트워크 분산 파일 시스템 입니다.

본문서는 HA-Proxy 테스트를 위하여 2 node 를 구성 하였으며, 단순 테스트용 입니다.

자세한 자료는 아래 사이트를 확인해 보세요.

Site: https://docs.gluster.org/en/latest/

 

 

1. yum repository 생성

[root@gluster01 ~]# yum install centos-release-gluster -y
[root@gluster02 ~]# yum install centos-release-gluster -y

 

2. /etc/hosts 파일에 gluster node 를 추가 합니다.

[root@gluster01 ~]# vi /etc/hosts

10.10.10.34 gluster01
10.10.10.35 gluster02

 

3. glusterfs-server 를 설치 합니다.

[root@gluster01 ~]# yum install glusterfs-server -y
[root@gluster02 ~]# yum install glusterfs-server -y

 

4. gluster service 를 활성화 하고 실행 합니다.

[root@gluster01 ~]# systemctl enable glusterd ; systemctl start glusterd
[root@gluster02 ~]# systemctl enable glusterd ; systemctl start glusterd

 

5. glusterfs 복제 볼륨 설정

[root@gluster01 ~]# gluster peer probe gluster02
peer probe: success.
[root@gluster02 ~]# gluster peer probe gluster01

[root@gluster01 ~]# mkdir /gluster-data
[root@gluster02 ~]# mkdir /gluster-data


[root@gluster01 ~]# gluster volume create volume01 replica 2 transport tcp gluster01:/gluster-data gluster02:/gluster-data force
volume create: volume01: success: please start the volume to access data
[root@gluster01 ~]#

[root@gluster01 ~]# gluster volume start volume01
volume start: volume01: success
[root@gluster01 ~]#

[root@gluster01 ~]# gluster volume info

Volume Name: volume01
Type: Replicate
Volume ID: d078cd71-c767-4af5-b96c-e91d37451894
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: gluster01:/gluster-data
Brick2: gluster02:/gluster-data
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off
[root@gluster01 ~]#

 

6. /etc/fstab 설정

[root@gluster01 ~]# mkdir /www-data
[root@gluster02 ~]# mkdir /www-data


[root@gluster01 ~]# vi /etc/fstab
[root@gluster02 ~]# vi /etc/fstab
gluster01:/volume01 /www-data glusterfs defaults,_netdev,x-systemd.automount 0 0

 

7. 공유 테스트

[root@gluster01 ~]# mount -a
[root@gluster02 ~]# mount -a

[root@gluster02 ~]# cd /www-data/
[root@gluster02 www-data]# touch 0


[root@gluster01 ~]# ls -al /www-data/
total 0
drwxr-xr-x 3 root root 33 Jun 28 13:07 .
dr-xr-xr-x. 19 root root 262 Jun 28 13:05 ..
-rw-r--r-- 1 root root 0 Jun 28 13:07 0
[root@gluster01 ~]#