Hostname | IP | Services installed
master | 192.168.30.130 | mfsmaster, mfsmetalogger
node-1 | 192.168.30.131 | chunkserver
node-2 | 192.168.30.132 | mfsclient
node-3 | 192.168.30.133 | chunkserver
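The hostnames above are used throughout the transcripts below. A minimal sketch (assumption: name resolution is not already handled by DNS) that makes them resolvable on every node:
cat >> /etc/hosts <<'EOF'
192.168.30.130 master
192.168.30.131 node-1
192.168.30.132 node-2
192.168.30.133 node-3
EOF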
Installation on master
[root@master ~]# yum install -y rpm-build gcc gcc-c++ fuse-devel zlib-devel
Upload the software package
[root@master ~]# rz
Add the mfs service user
[root@master ~]# useradd -s /sbin/nologin mfs
[root@master ~]# tar -xf mfs-1.6.-.tar.gz -C /usr/local/src/
[root@master ~]# cd /usr/local/src/mfs-1.6./
[root@master mfs-1.6.]# ./configure --prefix=/usr/local/mfs \
> --with-default-user=mfs \
> --with-default-group=mfs
[root@master mfs-1.6.]# make -j && make install
[root@master mfs-1.6.]# echo $?
Overview of the installed directories
[root@master mfs-1.6.]# cd /usr/local/mfs/
[root@master mfs]# ll
total
drwxr-xr-x root root Jun : bin #client tools
drwxr-xr-x root root Jun : etc #server configuration files
drwxr-xr-x root root Jun : sbin #server-side startup binaries
drwxr-xr-x root root Jun : share #documentation (man pages)
drwxr-xr-x root root Jun : var #metadata directory; can be pointed at another location in the configuration files
Create the configuration files
[root@master mfs]# cd /usr/local/mfs/etc/mfs/
[root@master mfs]# ll
total
-rw-r--r-- root root Jun : mfschunkserver.cfg.dist
-rw-r--r-- root root Jun : mfsexports.cfg.dist
-rw-r--r-- root root Jun : mfshdd.cfg.dist
-rw-r--r-- root root Jun : mfsmaster.cfg.dist
-rw-r--r-- root root Jun : mfsmetalogger.cfg.dist
-rw-r--r-- root root Jun : mfsmount.cfg.dist
-rw-r--r-- root root Jun : mfstopology.cfg.dist
[root@master mfs]# cp mfsmaster.cfg.dist mfsmaster.cfg #master configuration file
[root@master mfs]# cp mfsexports.cfg.dist mfsexports.cfg #exports (shared directories) configuration file
[root@master mfs]# cp mfsmetalogger.cfg.dist mfsmetalogger.cfg #metadata logger configuration file
[root@master mfs]# cp metadata.mfs.empty metadata.mfs #a fresh master install ships an empty metadata file named metadata.mfs.empty; the MooseFS master needs a metadata.mfs file before it can run
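The master loads metadata.mfs from its data directory (the working directory /usr/local/mfs/var/mfs shown in the startup output below). A defensive sketch, assuming that in some 1.6.x builds metadata.mfs.empty is installed under var/mfs rather than etc/mfs:
# copy the empty template into place only if metadata.mfs does not exist yet
[ -f /usr/local/mfs/var/mfs/metadata.mfs ] || \
    cp /usr/local/mfs/var/mfs/metadata.mfs.empty /usr/local/mfs/var/mfs/metadata.mfs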
Start the service
[root@master mfs]# chown -R mfs:mfs /usr/local/mfs/
[root@master mfs]# /usr/local/mfs/sbin/mfsmaster start
working directory: /usr/local/mfs/var/mfs
lockfile created and locked
initializing mfsmaster modules ...
loading sessions ... file not found
if it is not fresh installation then you have to restart all active mounts !!!
exports file has been loaded
mfstopology configuration file (/usr/local/mfs/etc/mfstopology.cfg) not found - using defaults
loading metadata ...
create new empty filesystem
metadata file has been loaded
no charts data file - initializing empty charts
master <-> metaloggers module: listen on *:
master <-> chunkservers module: listen on *:
main master server module: listen on *:
mfsmaster daemon initialized properly
[root@master mfs]# /usr/local/mfs/sbin/mfsmaster start
working directory: /usr/local/mfs/var/mfs
lockfile created and locked
initializing mfsmaster modules ...
loading sessions ... ok
sessions file has been loaded
exports file has been loaded
mfstopology configuration file (/usr/local/mfs/etc/mfstopology.cfg) not found - using defaults
loading metadata ...
loading objects (files,directories,etc.) ... ok
loading names ... ok
loading deletion timestamps ... ok
loading chunks data ... ok
checking filesystem consistency ... ok
connecting files and chunks ... ok
all inodes:
directory inodes:
file inodes:
chunks:
metadata file has been loaded
stats file has been loaded
master <-> metaloggers module: listen on *:
master <-> chunkservers module: listen on *:
main master server module: listen on *:
mfsmaster daemon initialized properly
Check that the service ports are listening
[root@master ~]# netstat -antup | grep *
tcp 0.0.0.0: 0.0.0.0:* LISTEN /mfsmaster
tcp 0.0.0.0: 0.0.0.0:* LISTEN /mfsmaster
tcp 0.0.0.0: 0.0.0.0:* LISTEN /mfsmaster
tcp 0.0.0.0: 0.0.0.0:* LISTEN /tgtd
tcp 192.168.30.130: 192.168.30.131: ESTABLISHED /tgtd
tcp ::: :::* LISTEN /tgtd
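To check only the MFS listeners, a quick sketch (assumption: the master uses the stock default ports, 9419 for metaloggers, 9420 for chunkservers, 9421 for clients):
netstat -lntp | grep -E ':(9419|9420|9421) '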
Configure start at boot, and how to stop the service
[root@master ~]# echo "/usr/local/mfs/sbin/mfsmaster start" >> /etc/rc.local
[root@master ~]# chmod +x /etc/rc.local
[root@master ~]# /usr/local/mfs/sbin/mfsmaster stop
sending SIGTERM to lock owner (pid:)
waiting for termination ... terminated
Check the metadata files
[root@master ~]# ll /usr/local/mfs/var/mfs/
total
-rw-r----- mfs mfs Jun : metadata.mfs
-rw-r----- mfs mfs Jun : metadata.mfs.back.
-rw-r--r-- mfs mfs Jun : metadata.mfs.empty
-rw-r----- mfs mfs Jun : sessions.mfs
-rw-r----- mfs mfs Jun : stats.mfs
Define the export permissions
[root@master ~]# cd /usr/local/mfs/etc/mfs/
[root@master mfs]# vim mfsexports.cfg
# Allow "meta".
* . rw          # add the following two lines below this one
192.168.30.131 / rw,alldirs,maproot=
192.168.30.132 / rw,alldirs,maproot=
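Each mfsexports.cfg entry follows the pattern ADDRESS DIRECTORY OPTIONS. A hedged example of an additional entry (hypothetical, not part of the configuration above), granting a whole subnet read-only access:
# 192.168.40.0/24   /   ro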
Start the service
[root@master mfs]# sh /etc/rc.local
working directory: /usr/local/mfs/var/mfs
lockfile created and locked
initializing mfsmaster modules ...
loading sessions ... ok
sessions file has been loaded
exports file has been loaded
mfstopology configuration file (/usr/local/mfs/etc/mfstopology.cfg) not found - using defaults
loading metadata ...
loading objects (files,directories,etc.) ... ok
loading names ... ok
loading deletion timestamps ... ok
loading chunks data ... ok
checking filesystem consistency ... ok
connecting files and chunks ... ok
all inodes:
directory inodes:
file inodes:
chunks:
metadata file has been loaded
stats file has been loaded
master <-> metaloggers module: listen on *:
master <-> chunkservers module: listen on *:
main master server module: listen on *:
mfsmaster daemon initialized properly
Configure the metadata log server (metalogger) on master
[root@master src]# rm -rf mfs-1.6./
[root@master src]# ls /root/
anaconda-ks.cfg install.log.syslog Pictures
Desktop ip_forward~ Public
Documents ip_forwarz~ Templates
Downloads mfs-1.6.-.tar.gz Videos
install.log Music
[root@master src]# tar -xf /root/mfs-1.6.-.tar.gz -C /usr/local/src/
[root@master src]# cd mfs-1.6./
[root@master mfs-1.6.]# ./configure \
> --prefix=/usr/local/mfsmeta \
> --with-default-user=mfs \
> --with-default-group=mfs
[root@master mfs-1.6.]# make -j && make install && echo $? && cd /usr/local/mfsmeta && ls
..... bin etc sbin share var
[root@master mfsmeta]# pwd
/usr/local/mfsmeta
[root@master mfsmeta]# cd etc/mfs/
[root@master mfs]# cp mfsmetalogger.cfg.dist mfsmetalogger.cfg
[root@master mfs]# vim mfsmetalogger.cfg
..........
MASTER_HOST = 192.168.30.130
.......
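Besides MASTER_HOST, the template also carries options that control how the metalogger pulls data from the master. A sketch of commonly tuned entries (assumption: option names and defaults as in the stock 1.6.x mfsmetalogger.cfg.dist; verify against your own template):
# MASTER_PORT = 9419        # port the metalogger uses to reach the master (stock default)
# BACK_LOGS = 50            # number of changelog files to keep
# META_DOWNLOAD_FREQ = 24   # how often, in hours, to download a full metadata copy from the master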
Starting and stopping the service
[root@master mfs]# chown -R mfs:mfs /usr/local/mfsmeta/
[root@master mfs]# /usr/local/mfsmeta/sbin/mfsmetalogger start
working directory: /usr/local/mfsmeta/var/mfs
lockfile created and locked
initializing mfsmetalogger modules ...
mfsmetalogger daemon initialized properly
[root@master mfs]# echo "/usr/local/mfsmeta/sbin/mfsmetalogger start " >> /etc/rc.local #设置开机启动
[root@master mfs]# /usr/local/mfsmeta/sbin/mfsmetalogger stop
sending SIGTERM to lock owner (pid:)
waiting for termination ... terminated
[root@master mfs]# /usr/local/mfsmeta/sbin/mfsmetalogger start
working directory: /usr/local/mfsmeta/var/mfs
lockfile created and locked
initializing mfsmetalogger modules ...
mfsmetalogger daemon initialized properly
#check the listening ports
[root@master mfs]# lsof -i :
COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
mfsmaster mfs 8u IPv4 0t0 TCP *: (LISTEN)
mfsmaster mfs 11u IPv4 0t0 TCP master:->master: (ESTABLISHED)
mfsmetalo mfs 8u IPv4 0t0 TCP master:->master: (ESTABLISHED)
Install the data server (chunkserver) on node-1. A chunkserver stores data as chunk (fragment) files on top of an ordinary local filesystem, so complete files are never visible on the chunkserver itself.
[root@node- ~]# tar -xf mfs-1.6.-.tar.gz -C /usr/local/src/
[root@node- ~]# cd /usr/local/src/mfs-1.6./
[root@node- mfs-1.6.]# useradd -s /sbin/nologin mfs
[root@node- mfs-1.6.]# ./configure --prefix=/usr/local/mfschunk --with-default-user=mfs --with-default-group=mfs
[root@node- mfs-1.6.]# make -j && make install && echo $? && cd /usr/local/mfschunk
............ [root@node- mfschunk]# pwd
/usr/local/mfschunk
[root@node- mfschunk]# ls
etc sbin share var
[root@node- mfschunk]# cd etc/mfs/
Create and edit the configuration files
[root@node- mfs]# cp mfschunkserver.cfg.dist mfschunkserver.cfg
[root@node- mfs]# cp mfshdd.cfg.dist mfshdd.cfg
[root@node- mfs]# vim mfschunkserver.cfg
# WORKING_USER = mfs                        # user the daemon runs as
# WORKING_GROUP = mfs                       # group the daemon runs as
# SYSLOG_IDENT = mfschunkserver             # identifier used in syslog to tell this service's log entries apart
# LOCK_MEMORY = 0                           # whether to call mlockall() to keep the process from being swapped out (default 0)
# NICE_LEVEL = -19                          # scheduling priority (default -19)
# DATA_PATH = /usr/local/mfschunk/var/mfs   # data path; holds three kinds of files (changelog, sessions, stats)
# MASTER_RECONNECTION_DELAY = 5
# BIND_HOST = *
MASTER_HOST = 192.168.30.130                # name or IP address of the metadata (master) server; hostname or IP both work
MASTER_PORT =                               # defaults to 9420; can be uncommented and changed
# MASTER_TIMEOUT =
# CSSERV_LISTEN_HOST = *
# CSSERV_LISTEN_PORT =                      # port used for connections from other chunkservers, usually for data replication
# HDD_CONF_FILENAME = /usr/local/mfschunk/etc/mfs/mfshdd.cfg   # location of the file listing the disk space MFS may use
# HDD_TEST_FREQ =
# deprecated, to be removed in MooseFS 1.7
# LOCK_FILE = /var/run/mfs/mfschunkserver.lock
# BACK_LOGS =
# CSSERV_TIMEOUT =
Set up the MFS storage path. Here the /tmp directory is used as an example; in production this is normally a dedicated disk (see the sketch after the configuration listing below).
[root@node- mfs]# vim mfshdd.cfg
# mount points of HDD drives
#
#/mnt/hd1
#/mnt/hd2
#etc.
/tmp
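A minimal sketch of the production-style variant with a dedicated disk (assumptions: a spare disk exists as /dev/sdb and /mfsdata is used as the mount point; both names are hypothetical):
mkfs.ext4 /dev/sdb                                        # format the spare disk
mkdir -p /mfsdata                                         # mount point for chunk storage
echo "/dev/sdb /mfsdata ext4 defaults 0 0" >> /etc/fstab
mount -a
chown -R mfs:mfs /mfsdata                                 # the chunkserver runs as mfs and must own the path
echo "/mfsdata" >> /usr/local/mfschunk/etc/mfs/mfshdd.cfg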
Start the service
[root@node- mfs]# chown -R mfs:mfs /usr/local/mfschunk/
[root@node- mfs]# /usr/local/mfschunk/sbin/mfschunkserver start
working directory: /usr/local/mfschunk/var/mfs
lockfile created and locked
initializing mfschunkserver modules ...
hdd space manager: path to scan: /tmp/
hdd space manager: start background hdd scanning (searching for available chunks)
main server module: listen on *:
no charts data file - initializing empty charts
mfschunkserver daemon initialized properly
[root@node- mfs]# echo "/usr/local/mfschunk/sbin/mfschunkserver start " >> /etc/rc.local
[root@node- mfs]# chmod +x /etc/rc.local
[root@node- mfs]# ls /tmp/ #these are all chunk storage entries and cannot be read by hand
00 01 02 ... FD FE FF    (hex-named chunk subdirectories created by the chunkserver)
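A quick sanity check on the storage layout (assumption: the chunkserver pre-creates 256 hex-named subdirectories, 00 through FF, and stores each chunk inside them as a chunk_<id>_<version>.mfs file, as seen later in this article):
ls /tmp | grep -E '^[0-9A-F]{2}$' | wc -l    # expect 256
find /tmp -maxdepth 2 -name 'chunk_*.mfs'    # list any chunk files already stored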
How to stop the service
[root@node- mfs]# /usr/local/mfschunk/sbin/mfschunkserver stop
sending SIGTERM to lock owner (pid:)
waiting for termination ... terminated
Configure the client on node-2
[root@node- ~]# yum install -y rpm-build gcc gcc-c++ fuse-devel zlib-devel lrzsz
[root@node- ~]# useradd -s /sbin/nologin mfs
[root@node- ~]# rz
[root@node- ~]# tar -xf mfs-1.6.-.tar.gz -C /usr/local/src/
[root@node- ~]# cd /usr/local/src/mfs-1.6./
[root@node- mfs-1.6.]# ./configure --prefix=/usr/local/mfsclient \
> --with-default-user=mfs \
> --with-default-group=mfs \
> --enable-mfsmount
[root@node- mfs-1.6.]# make -j && make install && echo $? && cd /usr/local/mfsclient
...... [root@node- mfsclient]# pwd
/usr/local/mfsclient
[root@node- mfsclient]# cd bin/
[root@node- bin]# ls
mfsappendchunks mfsfileinfo mfsgettrashtime mfsrgettrashtime mfssetgoal
mfscheckfile mfsfilerepair mfsmakesnapshot mfsrsetgoal mfssettrashtime
mfsdeleattr mfsgeteattr mfsmount mfsrsettrashtime mfssnapshot
mfsdirinfo mfsgetgoal mfsrgetgoal mfsseteattr mfstools
[root@node- bin]# mkdir /mfs
[root@node- bin]# lsmod | grep fuse
fuse
#if the fuse module is not listed here, load it manually with the following command
[root@node- ~]# modprobe fuse
Create the mfsmount command
[root@node- bin]# ln -s /usr/local/mfsclient/bin/mfsmount /usr/bin/mfsmount
Mount and test
[root@node- ~]# mfsmount /mfs/ -H 192.168.30.130 -p
MFS Password:
mfsmaster accepted connection with parameters: read-write,restricted_ip ; root mapped to root:root
[root@node- ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/vg_master-LogVol00 18G .2G 13G % /
tmpfs .0G 72K .0G % /dev/shm
/dev/sda1 194M 35M 150M % /boot
/dev/sr0 .6G .6G % /media/cdrom
192.168.30.130: 13G 13G % /mfs
[root@node- ~]# echo "modprobe fuse">>/etc/rc.local
[root@node- ~]# echo "/usr/local/mfsclient/bin/mfsmount /mfs -H 192.168.30.130 -p">> /etc/rc.local
[root@node- ~]# chmod +x /etc/rc.local
Check the storage status on node-1
[root@node- ~]# tree /tmp/
/tmp/
├──
├──
├──
├──
├──
├──
├──
├──
├──
├──
├── 0A
..............
directories, files
On node-2
[root@node- ~]# cp /etc/passwd /mfs/
Then check on node-1
[root@node- ~]# tree /tmp/
/tmp/
├──
├──
│ └── chunk_0000000000000001_00000001.mfs
├──
├──
├──
├──
├──
├──
├──
├──
.........
directories, files
On node-2 the file is visible as a normal file
[root@node- ~]# ls /mfs/
passwd
Start the web monitor on master. It is mainly used to monitor the status of every MFS node, and it can be deployed on any machine.
[root@master mfs]# /usr/local/mfs/sbin/mfscgiserv start
lockfile created and locked
starting simple cgi server (host: any , port: , rootpath: /usr/local/mfs/share/mfscgi)
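The monitoring pages are served over HTTP; a sketch for opening them (assumption: mfscgiserv listens on its stock default port 9425 and serves mfs.cgi):
curl -s http://192.168.30.130:9425/mfs.cgi | head    # or open the URL in a browser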
Check the permissions
Configure a password, add another chunkserver, and set the number of replicas
On master
[root@master mfs]# cd /usr/local/mfs/etc/mfs
[root@master mfs]# vim mfsexports.cfg
# Allow everything but "meta".
* / rw,alldirs,maproot=
# Allow "meta".
* . rw
192.168.30.131 / rw,alldirs,maproot=
192.168.30.132 / rw,alldirs,maproot=,password=
192.168.30.0/ / rw,alldirs,maproot=
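A fully spelled-out version of the node-2 entry above, as a hedged example (hypothetical values: maproot=0, and the password 123456 that is typed at mount time later in this article). After editing mfsexports.cfg the master has to be restarted for the change to take effect, which is done below:
# 192.168.30.132  /  rw,alldirs,maproot=0,password=123456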
Add a chunkserver on node-3
[root@node- ~]# yum install -y rpm-build gcc gcc-c++ fuse-devel zlib-devel lrzsz
[root@node- ~]# useradd -s /sbin/nologin mfs
[root@node- mfs-1.6.]# ./configure --prefix=/usr/local/mfs --with-default-user=mfs --with-default-group=mfs --enable-mfsmount && echo $? && sleep && make -j && sleep && make install && cd /usr/local/mfs
[root@node- mfs]# pwd
/usr/local/mfs
[root@node- mfs]# ls
bin etc sbin share var
[root@node- mfs]# cd etc/mfs/
[root@node- mfs]# cp mfschunkserver.cfg.dist mfschunkserver.cfg
[root@node- mfs]# vim mfschunkserver.cfg
......
MASTER_HOST = 192.168.30.130
MASTER_PORT =
.......
[root@node- mfs]# vim mfshdd.cfg
# mount points of HDD drives
#
#/mnt/hd1
#/mnt/hd2
#etc.
/opt #add this line
[root@node- mfs]# chown -R mfs:mfs /usr/local/mfs/
[root@node- mfs]# chown -R mfs:mfs /opt/
[root@node- mfs]# /usr/local/mfs/sbin/mfschunkserver start
working directory: /usr/local/mfs/var/mfs
lockfile created and locked
initializing mfschunkserver modules ...
hdd space manager: path to scan: /opt/
hdd space manager: start background hdd scanning (searching for available chunks)
main server module: listen on *:
no charts data file - initializing empty charts
mfschunkserver daemon initialized properly
[root@node- mfs]# yum install -y tree
Restart mfsmaster on master
[root@master ~]# /usr/local/mfs/sbin/mfsmaster stop
sending SIGTERM to lock owner (pid:)
waiting for termination ... terminated
[root@master ~]# /usr/local/mfs/sbin/mfsmaster start
working directory: /usr/local/mfs/var/mfs
lockfile created and locked
initializing mfsmaster modules ...
loading sessions ... ok
sessions file has been loaded
exports file has been loaded
mfstopology configuration file (/usr/local/mfs/etc/mfstopology.cfg) not found - using defaults
loading metadata ...
loading objects (files,directories,etc.) ... ok
loading names ... ok
loading deletion timestamps ... ok
loading chunks data ... ok
checking filesystem consistency ... ok
connecting files and chunks ... ok
all inodes:
directory inodes:
file inodes:
chunks:
metadata file has been loaded
stats file has been loaded
master <-> metaloggers module: listen on *:
master <-> chunkservers module: listen on *:
main master server module: listen on *:
mfsmaster daemon initialized properly
[root@master ~]# /usr/local/mfsmeta/sbin/mfsmetalogger stop
sending SIGTERM to lock owner (pid:)
waiting for termination ... terminated
[root@master ~]# /usr/local/mfsmeta/sbin/mfsmetalogger start
working directory: /usr/local/mfsmeta/var/mfs
lockfile created and locked
initializing mfsmetalogger modules ...
mfsmetalogger daemon initialized properly
Write data again on node-2 to test
Check the storage status on node-3
[root@node- mfs]# tree /opt/
/opt/
├──
├──
├──
├──
│ └── chunk_0000000000000003_00000001.mfs
├──
├──
│ └── chunk_0000000000000005_00000001.mfs
├──
├──
│ └── chunk_0000000000000007_00000001.mfs
├──
├──
........
Check on node-1
[root@node- ~]# tree /tmp/
/tmp/
├──
├──
│ └── chunk_0000000000000001_00000001.mfs
├──
│ └── chunk_0000000000000002_00000001.mfs
├──
├──
│ └── chunk_0000000000000004_00000001.mfs
├──
├──
│ └── chunk_0000000000000006_00000001.mfs
├──
├──
├──
........
Note the difference between the two listings!
Set the number of replicas (goal)
[root@node- ~]# cd /usr/local/mfsclient/bin/
[root@node- bin]# ./mfssetgoal -r /mfs/
/mfs/:
inodes with goal changed:
inodes with goal not changed:
inodes with permission denied:
[root@node- bin]# ./mfsfileinfo /mfs/initramfs-2.6.-.el6.x86_64.img #the new goal has not taken effect here
/mfs/initramfs-2.6.-.el6.x86_64.img:
chunk : 0000000000000003_00000001 / (id: ver:)
copy : 192.168.30.133:
Reason: the client has not been remounted.
[root@node- ~]# umount /mfs/
[root@node- ~]# mfsmount /mfs/ -H 192.168.30.130 -p
MFS Password: #enter 123456
mfsmaster accepted connection with parameters: read-write,restricted_ip ; root mapped to root:root
[root@node- ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/vg_master-LogVol00 18G .2G 13G % /
tmpfs .0G 72K .0G % /dev/shm
/dev/sda1 194M 35M 150M % /boot
/dev/sr0 .6G .6G % /media/cdrom
192.168.30.130: 25G 58M 25G % /mfs
[root@node- ~]# cd /usr/local/mfsclient/bin/
[root@node- bin]# ./mfssetgoal -r /mfs/ #even though nothing in this output explicitly says 2 copies
/mfs/:
inodes with goal changed:
inodes with goal not changed:
inodes with permission denied:
[root@node- bin]# ./mfsfileinfo /mfs/initramfs-2.6.-.el6.x86_64.img #the output shows the data now has two copies
/mfs/initramfs-2.6.-.el6.x86_64.img:
chunk : 0000000000000003_00000001 / (id: ver:)
copy : 192.168.30.131:
copy : 192.168.30.133:
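To double-check the goal that actually applies to a path, the matching query tool from the client bin directory can be used; a short sketch (mfsgetgoal appears in the tool listing above):
./mfsgetgoal /mfs/         # goal set on the mounted directory
./mfsgetgoal /mfs/passwd   # goal of an individual file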
Manually stop the chunkserver on node-1
[root@node- ~]# /usr/local/mfschunk/sbin/mfschunkserver stop
sending SIGTERM to lock owner (pid:)
waiting for termination ... terminated
[root@node- ~]# cp /etc/group /mfs/ #the client can still write
[root@node- ~]# ls /mfs/
config-2.6.-.el6.x86_64 passwd
group symvers-2.6.-.el6.x86_64.gz
initramfs-2.6.-.el6.x86_64.img System.map-2.6.-.el6.x86_64
initrd-2.6.-.el6.x86_64kdump.img vmlinuz-2.6.-.el6.x86_64
Extension: set how long deleted files stay in the trash
[root@node- bin]# cd
[root@node- ~]# cd /usr/local/mfsclient/bin/
[root@node- bin]# ls
mfsappendchunks mfsfileinfo mfsgettrashtime mfsrgettrashtime mfssetgoal
mfscheckfile mfsfilerepair mfsmakesnapshot mfsrsetgoal mfssettrashtime
mfsdeleattr mfsgeteattr mfsmount mfsrsettrashtime mfssnapshot
mfsdirinfo mfsgetgoal mfsrgetgoal mfsseteattr mfstools
[root@node- bin]# ./mfsrsettrashtime /mfs/
deprecated tool - use "mfssettrashtime -r" #this tool has been deprecated
/mfs/:
inodes with trashtime changed:
inodes with trashtime not changed:
inodes with permission denied:
[root@node- bin]# echo $?
#the previous command succeeded
The trash retention time is given in seconds. If the MFSMETA filesystem is installed or mounted separately, it contains a /trash directory (deleted files that can still be restored) and /trash/undel/ (used to recover files). Moving a deleted file into /trash/undel restores it. Besides trash and trash/undel, the MFSMETA mount contains a third directory, reserved, which holds files that have already been deleted but are still held open by other users; once those users close the files, the entries in reserved disappear and the file data is removed immediately. The reserved directory cannot be operated on directly.
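A sketch of recovering a deleted file through the meta filesystem (assumptions: mfsmount's -m option mounts MFSMETA, and the trash entry can be matched by the file's original name; adjust to what you actually see under /trash):
mkdir -p /mfsmeta
mfsmount -m /mfsmeta -H 192.168.30.130            # mount the MFSMETA filesystem
ls /mfsmeta/trash                                 # deleted-but-recoverable entries
mv /mfsmeta/trash/*passwd /mfsmeta/trash/undel/   # moving an entry into undel restores the file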
Startup and shutdown order for an MFS cluster
Startup order
1. Start the master server
2. Start the chunkservers
3. Start the metalogger
4. Start the clients and mount with mfsmount
Shutdown order (a consolidated sketch follows the list)
1. Unmount the MFS filesystem on all clients
2. Stop the chunkservers
3. Stop the metalogger
4. Stop the master server
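A consolidated sketch of the shutdown sequence, using the install prefixes from this article (run each command on the appropriate host):
umount /mfs                                    # 1. on every client
/usr/local/mfschunk/sbin/mfschunkserver stop   # 2. on node-1 (the prefix is /usr/local/mfs on node-3)
/usr/local/mfsmeta/sbin/mfsmetalogger stop     # 3. on the metalogger host (master in this setup)
/usr/local/mfs/sbin/mfsmaster stop             # 4. on the master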