You are on page 1 of 20

2.20.9.185@o2ib --param ost.

quota_type=ug3 /dev/mapper/ost_scratch_23
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_scratch_23
format filesystem? [y/N]
Creating mountpoints...
Making directory /lustre/home/client
Making directory /lustre/home/ost_6
Making directory /lustre/home/ost_7
Making directory /lustre/home/ost_8
Making directory /lustre/home/ost_9
Making directory /lustre/home/ost_10
Making directory /lustre/home/ost_11
Making directory /lustre/scratch/client
Making directory /lustre/scratch/ost_12
Making directory /lustre/scratch/ost_13
Making directory /lustre/scratch/ost_14
Making directory /lustre/scratch/ost_15
Making directory /lustre/scratch/ost_16
Making directory /lustre/scratch/ost_17
Making directory /lustre/scratch/ost_18
Making directory /lustre/scratch/ost_19
Making directory /lustre/scratch/ost_20
Making directory /lustre/scratch/ost_21
Making directory /lustre/scratch/ost_22
Making directory /lustre/scratch/ost_23
config corosync? [Y/n]
WARNING: Only one network configured for corosync
Consider adding a second network for stability
Installing file /etc/corosync/service.d/pcmk
Installing file /etc/corosync/corosync.conf
WARNING: Only one network configured for corosync
Consider adding a second network for stability
Enabling corosync at boot
Enabling pacemaker at boot
Installing file /etc/inittab
Installing file /etc/securetty
done
Installing file /etc/syslog-ng/syslog-ng.conf
Installing file /etc/logrotate.d/corosync
[root@oss4 ~]# es_mount
========
home
========
ost[9-11]:
mounted
-----
========
scratch
========
ost[18-23]:
mounted
-----
[root@oss4 ~]# mount
/dev/mapper/VolGroup00-LogVol01 on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs (rw)
/dev/sda1 on /boot type ext4 (rw)
/dev/mapper/VolGroup00-LogVol05 on /scratch type ext4 (rw)
/dev/mapper/VolGroup00-LogVol03 on /var type ext4 (rw)

none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)


/dev/mapper/ost_home_11 on /lustre/home/ost_11 type lustre (rw)
/dev/mapper/ost_scratch_23 on /lustre/scratch/ost_23 type lustre
/dev/mapper/ost_scratch_19 on /lustre/scratch/ost_19 type lustre
/dev/mapper/ost_home_10 on /lustre/home/ost_10 type lustre (rw)
/dev/mapper/ost_scratch_22 on /lustre/scratch/ost_22 type lustre
/dev/mapper/ost_scratch_21 on /lustre/scratch/ost_21 type lustre
/dev/mapper/ost_scratch_18 on /lustre/scratch/ost_18 type lustre
/dev/mapper/ost_home_9 on /lustre/home/ost_9 type lustre (rw)
/dev/mapper/ost_scratch_20 on /lustre/scratch/ost_20 type lustre
[root@oss4 ~]# lsscsi
[0:0:0:0]
disk
HP
LOGICAL VOLUME 1.34 /dev/sda
[0:3:0:0]
storage HP
P440ar
1.34 [6:0:0:0]
cd/dvd hp
DVD RAM UJ8E2
SE03 /dev/sr0
[7:0:0:0]
storage DDN
SFA12KXN
2.31 [7:0:0:20] disk
DDN
SFA12KXN
2.31 /dev/sdb
[7:0:0:21] disk
DDN
SFA12KXN
2.31 /dev/sdc
[7:0:0:22] disk
DDN
SFA12KXN
2.31 /dev/sdd
[7:0:0:23] disk
DDN
SFA12KXN
2.31 /dev/sde
[7:0:0:24] disk
DDN
SFA12KXN
2.31 /dev/sdf
[7:0:0:25] disk
DDN
SFA12KXN
2.31 /dev/sdg
[7:0:0:26] disk
DDN
SFA12KXN
2.31 /dev/sdh
[7:0:0:27] disk
DDN
SFA12KXN
2.31 /dev/sdi
[7:0:0:28] disk
DDN
SFA12KXN
2.31 /dev/sdj
[7:0:0:29] disk
DDN
SFA12KXN
2.31 /dev/sdk
[7:0:0:30] disk
DDN
SFA12KXN
2.31 /dev/sdl
[7:0:0:31] disk
DDN
SFA12KXN
2.31 /dev/sdm
[7:0:0:32] disk
DDN
SFA12KXN
2.31 /dev/sdn
[7:0:0:33] disk
DDN
SFA12KXN
2.31 /dev/sdo
[7:0:0:34] disk
DDN
SFA12KXN
2.31 /dev/sdp
[7:0:0:35] disk
DDN
SFA12KXN
2.31 /dev/sdq
[7:0:0:36] disk
DDN
SFA12KXN
2.31 /dev/sdr
[7:0:0:37] disk
DDN
SFA12KXN
2.31 /dev/sds
[8:0:0:0]
storage DDN
SFA12KXN
2.31 [8:0:0:20] disk
DDN
SFA12KXN
2.31 /dev/sdt
[8:0:0:21] disk
DDN
SFA12KXN
2.31 /dev/sdu
[8:0:0:22] disk
DDN
SFA12KXN
2.31 /dev/sdv
[8:0:0:23] disk
DDN
SFA12KXN
2.31 /dev/sdw
[8:0:0:24] disk
DDN
SFA12KXN
2.31 /dev/sdx
[8:0:0:25] disk
DDN
SFA12KXN
2.31 /dev/sdy
[8:0:0:26] disk
DDN
SFA12KXN
2.31 /dev/sdz
[8:0:0:27] disk
DDN
SFA12KXN
2.31 /dev/sdaa
[8:0:0:28] disk
DDN
SFA12KXN
2.31 /dev/sdab
[8:0:0:29] disk
DDN
SFA12KXN
2.31 /dev/sdac
[8:0:0:30] disk
DDN
SFA12KXN
2.31 /dev/sdad
[8:0:0:31] disk
DDN
SFA12KXN
2.31 /dev/sdae
[8:0:0:32] disk
DDN
SFA12KXN
2.31 /dev/sdaf
[8:0:0:33] disk
DDN
SFA12KXN
2.31 /dev/sdag
[8:0:0:34] disk
DDN
SFA12KXN
2.31 /dev/sdah
[8:0:0:35] disk
DDN
SFA12KXN
2.31 /dev/sdai
[8:0:0:36] disk
DDN
SFA12KXN
2.31 /dev/sdaj
[8:0:0:37] disk
DDN
SFA12KXN
2.31 /dev/sdak
[root@oss4 ~]# mount
/dev/mapper/VolGroup00-LogVol01 on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs (rw)
/dev/sda1 on /boot type ext4 (rw)
/dev/mapper/VolGroup00-LogVol05 on /scratch type ext4 (rw)

(rw)
(rw)
(rw)
(rw)
(rw)
(rw)

/dev/mapper/VolGroup00-LogVol03 on /var type ext4 (rw)


none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
/dev/mapper/ost_home_11 on /lustre/home/ost_11 type lustre (rw)
/dev/mapper/ost_scratch_23 on /lustre/scratch/ost_23 type lustre (rw)
/dev/mapper/ost_scratch_19 on /lustre/scratch/ost_19 type lustre (rw)
/dev/mapper/ost_home_10 on /lustre/home/ost_10 type lustre (rw)
/dev/mapper/ost_scratch_22 on /lustre/scratch/ost_22 type lustre (rw)
/dev/mapper/ost_scratch_21 on /lustre/scratch/ost_21 type lustre (rw)
/dev/mapper/ost_scratch_18 on /lustre/scratch/ost_18 type lustre (rw)
/dev/mapper/ost_home_9 on /lustre/home/ost_9 type lustre (rw)
/dev/mapper/ost_scratch_20 on /lustre/scratch/ost_20 type lustre (rw)
[root@oss4 ~]# mount | sort
/dev/mapper/ost_home_10 on /lustre/home/ost_10 type lustre (rw)
/dev/mapper/ost_home_11 on /lustre/home/ost_11 type lustre (rw)
/dev/mapper/ost_home_9 on /lustre/home/ost_9 type lustre (rw)
/dev/mapper/ost_scratch_18 on /lustre/scratch/ost_18 type lustre (rw)
/dev/mapper/ost_scratch_19 on /lustre/scratch/ost_19 type lustre (rw)
/dev/mapper/ost_scratch_20 on /lustre/scratch/ost_20 type lustre (rw)
/dev/mapper/ost_scratch_21 on /lustre/scratch/ost_21 type lustre (rw)
/dev/mapper/ost_scratch_22 on /lustre/scratch/ost_22 type lustre (rw)
/dev/mapper/ost_scratch_23 on /lustre/scratch/ost_23 type lustre (rw)
/dev/mapper/VolGroup00-LogVol01 on / type ext4 (rw)
/dev/mapper/VolGroup00-LogVol03 on /var type ext4 (rw)
/dev/mapper/VolGroup00-LogVol05 on /scratch type ext4 (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
/dev/sda1 on /boot type ext4 (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
tmpfs on /dev/shm type tmpfs (rw)
[root@oss4 ~]# mount -t lustre | sort
/dev/mapper/ost_home_10 on /lustre/home/ost_10 type lustre (rw)
/dev/mapper/ost_home_11 on /lustre/home/ost_11 type lustre (rw)
/dev/mapper/ost_home_9 on /lustre/home/ost_9 type lustre (rw)
/dev/mapper/ost_scratch_18 on /lustre/scratch/ost_18 type lustre (rw)
/dev/mapper/ost_scratch_19 on /lustre/scratch/ost_19 type lustre (rw)
/dev/mapper/ost_scratch_20 on /lustre/scratch/ost_20 type lustre (rw)
/dev/mapper/ost_scratch_21 on /lustre/scratch/ost_21 type lustre (rw)
/dev/mapper/ost_scratch_22 on /lustre/scratch/ost_22 type lustre (rw)
/dev/mapper/ost_scratch_23 on /lustre/scratch/ost_23 type lustre (rw)
[root@oss4 ~]# logout
Connection to oss4 closed.
[root@oss1 ~]# ssh oss6
Last login: Fri Sep 25 12:45:22 2015 from oss5
[root@oss6 ~]# /opt/ddn/config/install
hostname of this machine [oss6]:
This is host oss6
EXAScaler version: 2.1.2
EXAScaler flavour: CentOS
Peers of this host are: oss5
Network interface: eth0 (172.20.5.188)
Network interface: bond0 (172.20.9.188)
Lustre is exported on interfaces: o2ib(bond0)
Stonith type: ipmi
Details for filesystem home
ost 12 can failover to this host: /dev/mapper/ost_home_12 /lustre/home/ost_12
ost 13 can failover to this host: /dev/mapper/ost_home_13 /lustre/home/ost_13
ost 14 can failover to this host: /dev/mapper/ost_home_14 /lustre/home/ost_14
ost 15 runs on this host: /dev/mapper/ost_home_15 /lustre/home/ost_15
ost 16 runs on this host: /dev/mapper/ost_home_16 /lustre/home/ost_16

ost 17 runs on this host: /dev/mapper/ost_home_17 /lustre/home/ost_17


FSCK logs are saved to /scratch/log/home
Details for filesystem scratch
ost 24 can failover to this host: /dev/mapper/ost_scratch_24 /lustre/scratch/ost
_24
ost 25 can failover to this host: /dev/mapper/ost_scratch_25 /lustre/scratch/ost
_25
ost 26 can failover to this host: /dev/mapper/ost_scratch_26 /lustre/scratch/ost
_26
ost 27 can failover to this host: /dev/mapper/ost_scratch_27 /lustre/scratch/ost
_27
ost 28 can failover to this host: /dev/mapper/ost_scratch_28 /lustre/scratch/ost
_28
ost 29 can failover to this host: /dev/mapper/ost_scratch_29 /lustre/scratch/ost
_29
ost 30 runs on this host: /dev/mapper/ost_scratch_30 /lustre/scratch/ost_30
ost 31 runs on this host: /dev/mapper/ost_scratch_31 /lustre/scratch/ost_31
ost 32 runs on this host: /dev/mapper/ost_scratch_32 /lustre/scratch/ost_32
ost 33 runs on this host: /dev/mapper/ost_scratch_33 /lustre/scratch/ost_33
ost 34 runs on this host: /dev/mapper/ost_scratch_34 /lustre/scratch/ost_34
ost 35 runs on this host: /dev/mapper/ost_scratch_35 /lustre/scratch/ost_35
FSCK logs are saved to /scratch/log/scratch
WARNING: Only one network configured for corosync. Consider adding a second netw
ork for stability
WARNING: mgs_standalone not set for filesystem home. If you want to use the IR(I
mperative Recovery) feature of lustre, you must enable it.This requires the crea
tion of a separate storage device for the MGS, so it is not possible to enable t
his after the filesystem has been created.
Device missing: /dev/mapper/ost_scratch_24
Device missing: /dev/mapper/ost_scratch_25
Device missing: /dev/mapper/ost_scratch_26
Device missing: /dev/mapper/ost_scratch_27
Device missing: /dev/mapper/ost_scratch_28
Device missing: /dev/mapper/ost_scratch_29
Errors detected!!
[root@oss6 ~]# service multipathd restart
ok
Stopping multipathd daemon:
[ OK ]
Starting multipathd daemon:
[ OK ]
[root@oss6 ~]# ls -l /dev/mapper/
total 0
crw-rw---- 1 root root 10, 58 Sep 22 08:23 control
lrwxrwxrwx 1 root root
7 Sep 25 18:32 ost_home_12 -> ../dm-4
lrwxrwxrwx 1 root root
7 Sep 25 18:32 ost_home_13 -> ../dm-5
lrwxrwxrwx 1 root root
7 Sep 25 18:32 ost_home_14 -> ../dm-6
lrwxrwxrwx 1 root root
7 Sep 25 18:32 ost_home_15 -> ../dm-7
lrwxrwxrwx 1 root root
7 Sep 25 18:32 ost_home_16 -> ../dm-8
lrwxrwxrwx 1 root root
7 Sep 25 18:32 ost_home_17 -> ../dm-9
lrwxrwxrwx 1 root root
8 Sep 25 18:32 ost_scratch_24 -> ../dm-10
lrwxrwxrwx 1 root root
8 Sep 25 18:32 ost_scratch_25 -> ../dm-11
lrwxrwxrwx 1 root root
8 Sep 25 18:32 ost_scratch_26 -> ../dm-12
lrwxrwxrwx 1 root root
8 Sep 25 18:32 ost_scratch_27 -> ../dm-13
lrwxrwxrwx 1 root root
8 Sep 25 18:32 ost_scratch_28 -> ../dm-14
lrwxrwxrwx 1 root root
8 Sep 25 18:32 ost_scratch_29 -> ../dm-15
lrwxrwxrwx 1 root root
8 Sep 25 18:32 ost_scratch_30 -> ../dm-16
lrwxrwxrwx 1 root root
8 Sep 25 18:32 ost_scratch_31 -> ../dm-17
lrwxrwxrwx 1 root root
8 Sep 25 18:32 ost_scratch_32 -> ../dm-18
lrwxrwxrwx 1 root root
8 Sep 25 18:32 ost_scratch_33 -> ../dm-19
lrwxrwxrwx 1 root root
8 Sep 25 18:32 ost_scratch_34 -> ../dm-20
lrwxrwxrwx 1 root root
8 Sep 25 18:32 ost_scratch_35 -> ../dm-21

lrwxrwxrwx 1 root root


7 Sep 22 08:23 VolGroup00-LogVol01 -> ../dm-0
lrwxrwxrwx 1 root root
7 Sep 22 08:23 VolGroup00-LogVol03 -> ../dm-2
lrwxrwxrwx 1 root root
7 Sep 22 08:23 VolGroup00-LogVol04 -> ../dm-1
lrwxrwxrwx 1 root root
7 Sep 22 08:23 VolGroup00-LogVol05 -> ../dm-3
[root@oss6 ~]# /opt/ddn/config/install
hostname of this machine [oss6]:
This is host oss6
EXAScaler version: 2.1.2
EXAScaler flavour: CentOS
Peers of this host are: oss5
Network interface: eth0 (172.20.5.188)
Network interface: bond0 (172.20.9.188)
Lustre is exported on interfaces: o2ib(bond0)
Stonith type: ipmi
Details for filesystem home
ost 12 can failover to this host: /dev/mapper/ost_home_12 /lustre/home/ost_12
ost 13 can failover to this host: /dev/mapper/ost_home_13 /lustre/home/ost_13
ost 14 can failover to this host: /dev/mapper/ost_home_14 /lustre/home/ost_14
ost 15 runs on this host: /dev/mapper/ost_home_15 /lustre/home/ost_15
ost 16 runs on this host: /dev/mapper/ost_home_16 /lustre/home/ost_16
ost 17 runs on this host: /dev/mapper/ost_home_17 /lustre/home/ost_17
FSCK logs are saved to /scratch/log/home
Details for filesystem scratch
ost 24 can failover to this host: /dev/mapper/ost_scratch_24 /lustre/scratch/ost
_24
ost 25 can failover to this host: /dev/mapper/ost_scratch_25 /lustre/scratch/ost
_25
ost 26 can failover to this host: /dev/mapper/ost_scratch_26 /lustre/scratch/ost
_26
ost 27 can failover to this host: /dev/mapper/ost_scratch_27 /lustre/scratch/ost
_27
ost 28 can failover to this host: /dev/mapper/ost_scratch_28 /lustre/scratch/ost
_28
ost 29 can failover to this host: /dev/mapper/ost_scratch_29 /lustre/scratch/ost
_29
ost 30 runs on this host: /dev/mapper/ost_scratch_30 /lustre/scratch/ost_30
ost 31 runs on this host: /dev/mapper/ost_scratch_31 /lustre/scratch/ost_31
ost 32 runs on this host: /dev/mapper/ost_scratch_32 /lustre/scratch/ost_32
ost 33 runs on this host: /dev/mapper/ost_scratch_33 /lustre/scratch/ost_33
ost 34 runs on this host: /dev/mapper/ost_scratch_34 /lustre/scratch/ost_34
ost 35 runs on this host: /dev/mapper/ost_scratch_35 /lustre/scratch/ost_35
FSCK logs are saved to /scratch/log/scratch
WARNING: Only one network configured for corosync. Consider adding a second netw
ork for stability
WARNING: mgs_standalone not set for filesystem home. If you want to use the IR(I
mperative Recovery) feature of lustre, you must enable it.This requires the crea
tion of a separate storage device for the MGS, so it is not possible to enable t
his after the filesystem has been created.
Installing file /etc/sysctl.conf
Not installing new version of "/etc/sysctl.conf": file is unchanged
Installing file /etc/sysconfig/pacemaker
Not installing new version of "/etc/sysconfig/pacemaker": file is unchanged
LNET configured
Making directory /scratch/crash
Installing file /etc/kdump.conf
config networking? [Y/n] n
config IPMI hardware? [Y/n] n
Checking connectivity
Pinging hosts...
mds1: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.191@o2ib FAIL

mds2: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.192@o2ib FAIL


oss1: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.183@o2ib FAIL
oss2: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.184@o2ib FAIL
oss3: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.185@o2ib FAIL
oss4: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.186@o2ib FAIL
oss5: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.187@o2ib FAIL
oss6: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.188@o2ib FAIL
oss7: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.189@o2ib FAIL
oss8: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.190@o2ib FAIL
done
continue? [Y/n] n
[root@oss6 ~]# lustre_rmmod
Modules still loaded:
lnet/klnds/socklnd/ksocklnd.o lnet/lnet/lnet.o libcfs/libcfs/libcfs.o
[root@oss6 ~]# lctl net down
LNET ready to unload
[root@oss6 ~]# lustre_rmmod
[root@oss6 ~]# /opt/ddn/config/install
hostname of this machine [oss6]:
This is host oss6
EXAScaler version: 2.1.2
EXAScaler flavour: CentOS
Peers of this host are: oss5
Network interface: eth0 (172.20.5.188)
Network interface: bond0 (172.20.9.188)
Lustre is exported on interfaces: o2ib(bond0)
Stonith type: ipmi
Details for filesystem home
ost 12 can failover to this host: /dev/mapper/ost_home_12 /lustre/home/ost_12
ost 13 can failover to this host: /dev/mapper/ost_home_13 /lustre/home/ost_13
ost 14 can failover to this host: /dev/mapper/ost_home_14 /lustre/home/ost_14
ost 15 runs on this host: /dev/mapper/ost_home_15 /lustre/home/ost_15
ost 16 runs on this host: /dev/mapper/ost_home_16 /lustre/home/ost_16
ost 17 runs on this host: /dev/mapper/ost_home_17 /lustre/home/ost_17
FSCK logs are saved to /scratch/log/home
Details for filesystem scratch
ost 24 can failover to this host: /dev/mapper/ost_scratch_24 /lustre/scratch/ost
_24
ost 25 can failover to this host: /dev/mapper/ost_scratch_25 /lustre/scratch/ost
_25
ost 26 can failover to this host: /dev/mapper/ost_scratch_26 /lustre/scratch/ost
_26
ost 27 can failover to this host: /dev/mapper/ost_scratch_27 /lustre/scratch/ost
_27
ost 28 can failover to this host: /dev/mapper/ost_scratch_28 /lustre/scratch/ost
_28
ost 29 can failover to this host: /dev/mapper/ost_scratch_29 /lustre/scratch/ost
_29
ost 30 runs on this host: /dev/mapper/ost_scratch_30 /lustre/scratch/ost_30
ost 31 runs on this host: /dev/mapper/ost_scratch_31 /lustre/scratch/ost_31
ost 32 runs on this host: /dev/mapper/ost_scratch_32 /lustre/scratch/ost_32
ost 33 runs on this host: /dev/mapper/ost_scratch_33 /lustre/scratch/ost_33
ost 34 runs on this host: /dev/mapper/ost_scratch_34 /lustre/scratch/ost_34
ost 35 runs on this host: /dev/mapper/ost_scratch_35 /lustre/scratch/ost_35
FSCK logs are saved to /scratch/log/scratch
WARNING: Only one network configured for corosync. Consider adding a second netw
ork for stability
WARNING: mgs_standalone not set for filesystem home. If you want to use the IR(I
mperative Recovery) feature of lustre, you must enable it.This requires the crea
tion of a separate storage device for the MGS, so it is not possible to enable t

his after the filesystem has been created.


Installing file /etc/sysctl.conf
Not installing new version of "/etc/sysctl.conf": file is unchanged
Installing file /etc/sysconfig/pacemaker
Not installing new version of "/etc/sysconfig/pacemaker": file is unchanged
LNET configured
Installing file /etc/kdump.conf
Not installing new version of "/etc/kdump.conf": file is unchanged
config networking? [Y/n] n
config IPMI hardware? [Y/n] n
Checking connectivity
Pinging hosts...
mds1: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.191@o2ib OK
mds2: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.192@o2ib OK
oss1: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.183@o2ib OK
oss2: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.184@o2ib OK
oss3: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.185@o2ib OK
oss4: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.186@o2ib OK
oss5: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.187@o2ib OK
oss6: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.188@o2ib OK
oss7: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.189@o2ib FAIL
oss8: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.190@o2ib FAIL
done
continue? [Y/n] y
Installing file /etc/lvm/lvm.conf
Traceback (most recent call last):
File "/opt/ddn/config/config-lvm.py", line 28, in <module>
main()
File "/opt/ddn/config/config-lvm.py", line 24, in main
es.files.load_by_name('lvm')(conf, prefix).install()
File "/opt/ddn/es/files/common.py", line 125, in install
single_file.install()
File "/opt/ddn/es/files/common.py", line 322, in install
file_contents = self.file_contents()
File "/opt/ddn/es/files/common.py", line 297, in file_contents
self.msg = self._cb_method(contents)
File "/opt/ddn/es/files/lvm.py", line 72, in _lvm
raise Exception(errmsg)
Exception: Volume Group vg_home does not exist
Failed to configure auto_activation_volume_list in lvm.conf
This is host oss6
EXAScaler version: 2.1.2
EXAScaler flavour: CentOS
Peers of this host are: oss5
Network interface: eth0 (172.20.5.188)
Network interface: bond0 (172.20.9.188)
Lustre is exported on interfaces: o2ib(bond0)
Stonith type: ipmi
Details for filesystem home
ost 12 can failover to this host: /dev/mapper/ost_home_12 /lustre/home/ost_12
ost 13 can failover to this host: /dev/mapper/ost_home_13 /lustre/home/ost_13
ost 14 can failover to this host: /dev/mapper/ost_home_14 /lustre/home/ost_14
ost 15 runs on this host: /dev/mapper/ost_home_15 /lustre/home/ost_15
ost 16 runs on this host: /dev/mapper/ost_home_16 /lustre/home/ost_16
ost 17 runs on this host: /dev/mapper/ost_home_17 /lustre/home/ost_17
FSCK logs are saved to /scratch/log/home
Details for filesystem scratch
ost 24 can failover to this host: /dev/mapper/ost_scratch_24 /lustre/scratch/ost
_24

ost 25 can failover to this host: /dev/mapper/ost_scratch_25 /lustre/scratch/ost


_25
ost 26 can failover to this host: /dev/mapper/ost_scratch_26 /lustre/scratch/ost
_26
ost 27 can failover to this host: /dev/mapper/ost_scratch_27 /lustre/scratch/ost
_27
ost 28 can failover to this host: /dev/mapper/ost_scratch_28 /lustre/scratch/ost
_28
ost 29 can failover to this host: /dev/mapper/ost_scratch_29 /lustre/scratch/ost
_29
ost 30 runs on this host: /dev/mapper/ost_scratch_30 /lustre/scratch/ost_30
ost 31 runs on this host: /dev/mapper/ost_scratch_31 /lustre/scratch/ost_31
ost 32 runs on this host: /dev/mapper/ost_scratch_32 /lustre/scratch/ost_32
ost 33 runs on this host: /dev/mapper/ost_scratch_33 /lustre/scratch/ost_33
ost 34 runs on this host: /dev/mapper/ost_scratch_34 /lustre/scratch/ost_34
ost 35 runs on this host: /dev/mapper/ost_scratch_35 /lustre/scratch/ost_35
FSCK logs are saved to /scratch/log/scratch
WARNING: Only one network configured for corosync. Consider adding a second netw
ork for stability
WARNING: mgs_standalone not set for filesystem home. If you want to use the IR(I
mperative Recovery) feature of lustre, you must enable it.This requires the crea
tion of a separate storage device for the MGS, so it is not possible to enable t
his after the filesystem has been created.
## LUSTRE ##
## The following commands will be run to format the file system: ##
WARNING: mgs_standalone not set for filesystem. If you want to use the IR(Impera
tive Recovery) feature of lustre, you must enable it.This requires the creation
of a separate storage device for the MGS, so it is not possible to enable this a
fter the filesystem has been created.
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=home --ost
--mkfsoptions=-m1 -i 131072 --index=15 /dev/mapper/ost_home_15
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.188@o2ib --servicenode=172.20.9.187@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_home_15
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_home_15
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=home --ost
--mkfsoptions=-m1 -i 131072 --index=16 /dev/mapper/ost_home_16
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.188@o2ib --servicenode=172.20.9.187@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_home_16
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_home_16
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=home --ost
--mkfsoptions=-m1 -i 131072 --index=17 /dev/mapper/ost_home_17
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.188@o2ib --servicenode=172.20.9.187@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_home_17
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_home_17
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=scratch --ost --mkfsoptions=-m1 -i 131072 --index=30 /dev/mapper/ost_scratch_30
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.188@o2ib --servicenode=172.20.9.187@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_scratch_30
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_scratch_30
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=scratch --ost --mkfsoptions=-m1 -i 131072 --index=31 /dev/mapper/ost_scratch_31
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.188@o2ib --servicenode=172.20.9.187@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_scratch_31
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_scratch_31
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=scratch --

ost --mkfsoptions=-m1 -i 131072 --index=32 /dev/mapper/ost_scratch_32


tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.188@o2ib --servicenode=172.20.9.187@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_scratch_32
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_scratch_32
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=scratch --ost --mkfsoptions=-m1 -i 131072 --index=33 /dev/mapper/ost_scratch_33
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.188@o2ib --servicenode=172.20.9.187@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_scratch_33
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_scratch_33
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=scratch --ost --mkfsoptions=-m1 -i 131072 --index=34 /dev/mapper/ost_scratch_34
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.188@o2ib --servicenode=172.20.9.187@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_scratch_34
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_scratch_34
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=scratch --ost --mkfsoptions=-m1 -i 131072 --index=35 /dev/mapper/ost_scratch_35
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.188@o2ib --servicenode=172.20.9.187@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_scratch_35
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_scratch_35
format filesystem? [y/N]
Creating mountpoints...
Making directory /scratch/log/home
Making directory /lustre/home/client
Making directory /lustre/home/ost_12
Making directory /lustre/home/ost_13
Making directory /lustre/home/ost_14
Making directory /lustre/home/ost_15
Making directory /lustre/home/ost_16
Making directory /lustre/home/ost_17
Making directory /scratch/log/scratch
Making directory /lustre/scratch/client
Making directory /lustre/scratch/ost_24
Making directory /lustre/scratch/ost_25
Making directory /lustre/scratch/ost_26
Making directory /lustre/scratch/ost_27
Making directory /lustre/scratch/ost_28
Making directory /lustre/scratch/ost_29
Making directory /lustre/scratch/ost_30
Making directory /lustre/scratch/ost_31
Making directory /lustre/scratch/ost_32
Making directory /lustre/scratch/ost_33
Making directory /lustre/scratch/ost_34
Making directory /lustre/scratch/ost_35
config corosync? [Y/n]
WARNING: Only one network configured for corosync
Consider adding a second network for stability
Installing file /etc/corosync/service.d/pcmk
Installing file /etc/corosync/corosync.conf
WARNING: Only one network configured for corosync
Consider adding a second network for stability
Enabling corosync at boot
Enabling pacemaker at boot
Installing file /etc/inittab
Installing file /etc/securetty
done
Installing file /etc/syslog-ng/syslog-ng.conf

Installing file /etc/logrotate.d/corosync


[root@oss6 ~]# es_mkfs --reformat --dry-run
WARNING: mgs_standalone not set for filesystem. If you want to use the IR(Impera
tive Recovery) feature of lustre, you must enable it.This requires the creation
of a separate storage device for the MGS, so it is not possible to enable this a
fter the filesystem has been created.
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=home --ref
ormat --ost --mkfsoptions=-m1 -i 131072 --index=15 /dev/mapper/ost_home_15
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.188@o2ib --servicenode=172.20.9.187@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_home_15
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_home_15
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=home --ref
ormat --ost --mkfsoptions=-m1 -i 131072 --index=16 /dev/mapper/ost_home_16
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.188@o2ib --servicenode=172.20.9.187@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_home_16
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_home_16
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=home --ref
ormat --ost --mkfsoptions=-m1 -i 131072 --index=17 /dev/mapper/ost_home_17
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.188@o2ib --servicenode=172.20.9.187@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_home_17
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_home_17
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=scratch --reformat --ost --mkfsoptions=-m1 -i 131072 --index=30 /dev/mapper/ost_scratch_30
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.188@o2ib --servicenode=172.20.9.187@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_scratch_30
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_scratch_30
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=scratch --reformat --ost --mkfsoptions=-m1 -i 131072 --index=31 /dev/mapper/ost_scratch_31
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.188@o2ib --servicenode=172.20.9.187@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_scratch_31
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_scratch_31
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=scratch --reformat --ost --mkfsoptions=-m1 -i 131072 --index=32 /dev/mapper/ost_scratch_32
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.188@o2ib --servicenode=172.20.9.187@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_scratch_32
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_scratch_32
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=scratch --reformat --ost --mkfsoptions=-m1 -i 131072 --index=33 /dev/mapper/ost_scratch_33
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.188@o2ib --servicenode=172.20.9.187@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_scratch_33
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_scratch_33
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=scratch --reformat --ost --mkfsoptions=-m1 -i 131072 --index=34 /dev/mapper/ost_scratch_34
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.188@o2ib --servicenode=172.20.9.187@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_scratch_34
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_scratch_34
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=scratch --reformat --ost --mkfsoptions=-m1 -i 131072 --index=35 /dev/mapper/ost_scratch_35
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.188@o2ib --servicenode=172.20.9.187@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_scratch_35
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_scratch_35

[root@oss6 ~]# es_mkfs --reformat


WARNING: mgs_standalone not set for filesystem. If you want to use the IR(Impera
tive Recovery) feature of lustre, you must enable it.This requires the creation
of a separate storage device for the MGS, so it is not possible to enable this a
fter the filesystem has been created.
Format and tunefs completed
[root@oss6 ~]# es_mount
========
home
========
ost[15-17]:
mounted
-----
========
scratch
========
ost[30-35]:
mounted
-----
[root@oss6 ~]# mount
/dev/mapper/VolGroup00-LogVol01 on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs (rw)
/dev/sda1 on /boot type ext4 (rw)
/dev/mapper/VolGroup00-LogVol05 on /scratch type ext4 (rw)
/dev/mapper/VolGroup00-LogVol03 on /var type ext4 (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
/dev/mapper/ost_scratch_30 on /lustre/scratch/ost_30 type lustre (rw)
/dev/mapper/ost_home_16 on /lustre/home/ost_16 type lustre (rw)
/dev/mapper/ost_home_17 on /lustre/home/ost_17 type lustre (rw)
/dev/mapper/ost_scratch_34 on /lustre/scratch/ost_34 type lustre (rw)
/dev/mapper/ost_scratch_35 on /lustre/scratch/ost_35 type lustre (rw)
/dev/mapper/ost_scratch_33 on /lustre/scratch/ost_33 type lustre (rw)
/dev/mapper/ost_scratch_31 on /lustre/scratch/ost_31 type lustre (rw)
/dev/mapper/ost_scratch_32 on /lustre/scratch/ost_32 type lustre (rw)
/dev/mapper/ost_home_15 on /lustre/home/ost_15 type lustre (rw)
[root@oss6 ~]# logout
Connection to oss6 closed.
[root@oss1 ~]# ssh oss8
Last login: Fri Sep 25 12:49:05 2015 from oss7
[root@oss8 ~]# ls -l /dev/mapper/
total 0
crw-rw---- 1 root root 10, 58 Sep 22 23:52 control
lrwxrwxrwx 1 root root
7 Sep 25 12:49 ost_home_18 -> ../dm-4
lrwxrwxrwx 1 root root
7 Sep 25 12:49 ost_home_19 -> ../dm-5
lrwxrwxrwx 1 root root
7 Sep 25 12:49 ost_home_20 -> ../dm-6
lrwxrwxrwx 1 root root
7 Sep 25 12:49 ost_home_21 -> ../dm-7
lrwxrwxrwx 1 root root
8 Sep 25 12:49 ost_home_22 -> ../dm-10
lrwxrwxrwx 1 root root
7 Sep 25 12:49 ost_home_23 -> ../dm-9
lrwxrwxrwx 1 root root
7 Sep 25 12:49 ost_scratch_36 -> ../dm-8
lrwxrwxrwx 1 root root
8 Sep 25 12:49 ost_scratch_37 -> ../dm-11
lrwxrwxrwx 1 root root
8 Sep 25 12:49 ost_scratch_38 -> ../dm-12
lrwxrwxrwx 1 root root
8 Sep 25 12:49 ost_scratch_39 -> ../dm-14
lrwxrwxrwx 1 root root
8 Sep 25 12:49 ost_scratch_40 -> ../dm-13
lrwxrwxrwx 1 root root
8 Sep 25 12:49 ost_scratch_41 -> ../dm-15
lrwxrwxrwx 1 root root
8 Sep 25 12:49 ost_scratch_42 -> ../dm-16
lrwxrwxrwx 1 root root
8 Sep 25 12:49 ost_scratch_43 -> ../dm-17
lrwxrwxrwx 1 root root
8 Sep 25 12:49 ost_scratch_44 -> ../dm-18
lrwxrwxrwx 1 root root
8 Sep 25 12:49 ost_scratch_45 -> ../dm-19
lrwxrwxrwx 1 root root
8 Sep 25 12:49 ost_scratch_46 -> ../dm-20
lrwxrwxrwx 1 root root
8 Sep 25 12:49 ost_scratch_47 -> ../dm-21
lrwxrwxrwx 1 root root
7 Sep 22 23:52 VolGroup00-LogVol01 -> ../dm-0
lrwxrwxrwx 1 root root
7 Sep 22 23:52 VolGroup00-LogVol03 -> ../dm-2
lrwxrwxrwx 1 root root
7 Sep 22 23:52 VolGroup00-LogVol04 -> ../dm-1
lrwxrwxrwx 1 root root
7 Sep 22 23:52 VolGroup00-LogVol05 -> ../dm-3
[root@oss8 ~]# /opt/ddn/config/install
hostname of this machine [oss8]:
This is host oss8
EXAScaler version: 2.1.2
EXAScaler flavour: CentOS
Peers of this host are: oss7
Network interface: eth0 (172.20.5.190)
Network interface: bond0 (172.20.9.190)
Lustre is exported on interfaces: o2ib(bond0)
Stonith type: ipmi
Details for filesystem home
ost 18 can failover to this host: /dev/mapper/ost_home_18 /lustre/home/ost_18
ost 19 can failover to this host: /dev/mapper/ost_home_19 /lustre/home/ost_19
ost 20 can failover to this host: /dev/mapper/ost_home_20 /lustre/home/ost_20
ost 21 runs on this host: /dev/mapper/ost_home_21 /lustre/home/ost_21
ost 22 runs on this host: /dev/mapper/ost_home_22 /lustre/home/ost_22
ost 23 runs on this host: /dev/mapper/ost_home_23 /lustre/home/ost_23
FSCK logs are saved to /scratch/log/home
Details for filesystem scratch
ost 36 can failover to this host: /dev/mapper/ost_scratch_36 /lustre/scratch/ost
_36
ost 37 can failover to this host: /dev/mapper/ost_scratch_37 /lustre/scratch/ost
_37
ost 38 can failover to this host: /dev/mapper/ost_scratch_38 /lustre/scratch/ost
_38
ost 39 can failover to this host: /dev/mapper/ost_scratch_39 /lustre/scratch/ost
_39
ost 40 can failover to this host: /dev/mapper/ost_scratch_40 /lustre/scratch/ost
_40
ost 41 can failover to this host: /dev/mapper/ost_scratch_41 /lustre/scratch/ost
_41
ost 42 runs on this host: /dev/mapper/ost_scratch_42 /lustre/scratch/ost_42
ost 43 runs on this host: /dev/mapper/ost_scratch_43 /lustre/scratch/ost_43
ost 44 runs on this host: /dev/mapper/ost_scratch_44 /lustre/scratch/ost_44
ost 45 runs on this host: /dev/mapper/ost_scratch_45 /lustre/scratch/ost_45
ost 46 runs on this host: /dev/mapper/ost_scratch_46 /lustre/scratch/ost_46
ost 47 runs on this host: /dev/mapper/ost_scratch_47 /lustre/scratch/ost_47
FSCK logs are saved to /scratch/log/scratch
WARNING: Only one network configured for corosync. Consider adding a second netw
ork for stability
WARNING: mgs_standalone not set for filesystem home. If you want to use the IR(I
mperative Recovery) feature of lustre, you must enable it.This requires the crea
tion of a separate storage device for the MGS, so it is not possible to enable t
his after the filesystem has been created.
Installing file /etc/sysctl.conf
Not installing new version of "/etc/sysctl.conf": file is unchanged
Installing file /etc/sysconfig/pacemaker
Not installing new version of "/etc/sysconfig/pacemaker": file is unchanged
LNET configured
Making directory /scratch/crash
Installing file /etc/kdump.conf
config networking? [Y/n] n
config IPMI hardware? [Y/n] n

Checking connectivity
Pinging hosts...
mds1: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.191@o2ib FAIL
mds2: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.192@o2ib FAIL
oss1: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.183@o2ib FAIL
oss2: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.184@o2ib FAIL
oss3: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.185@o2ib FAIL
oss4: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.186@o2ib FAIL
oss5: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.187@o2ib FAIL
oss6: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.188@o2ib FAIL
oss7: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.189@o2ib FAIL
oss8: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.190@o2ib FAIL
done
continue? [Y/n] n
[root@oss8 ~]# lustre_rmmod
Modules still loaded:
lnet/klnds/socklnd/ksocklnd.o lnet/lnet/lnet.o libcfs/libcfs/libcfs.o
[root@oss8 ~]# lctl net down
LNET ready to unload
[root@oss8 ~]# lustre_rmmod
[root@oss8 ~]# /opt/ddn/config/install
hostname of this machine [oss8]:
This is host oss8
EXAScaler version: 2.1.2
EXAScaler flavour: CentOS
Peers of this host are: oss7
Network interface: eth0 (172.20.5.190)
Network interface: bond0 (172.20.9.190)
Lustre is exported on interfaces: o2ib(bond0)
Stonith type: ipmi
Details for filesystem home
ost 18 can failover to this host: /dev/mapper/ost_home_18 /lustre/home/ost_18
ost 19 can failover to this host: /dev/mapper/ost_home_19 /lustre/home/ost_19
ost 20 can failover to this host: /dev/mapper/ost_home_20 /lustre/home/ost_20
ost 21 runs on this host: /dev/mapper/ost_home_21 /lustre/home/ost_21
ost 22 runs on this host: /dev/mapper/ost_home_22 /lustre/home/ost_22
ost 23 runs on this host: /dev/mapper/ost_home_23 /lustre/home/ost_23
FSCK logs are saved to /scratch/log/home
Details for filesystem scratch
ost 36 can failover to this host: /dev/mapper/ost_scratch_36 /lustre/scratch/ost
_36
ost 37 can failover to this host: /dev/mapper/ost_scratch_37 /lustre/scratch/ost
_37
ost 38 can failover to this host: /dev/mapper/ost_scratch_38 /lustre/scratch/ost
_38
ost 39 can failover to this host: /dev/mapper/ost_scratch_39 /lustre/scratch/ost
_39
ost 40 can failover to this host: /dev/mapper/ost_scratch_40 /lustre/scratch/ost
_40
ost 41 can failover to this host: /dev/mapper/ost_scratch_41 /lustre/scratch/ost
_41
ost 42 runs on this host: /dev/mapper/ost_scratch_42 /lustre/scratch/ost_42
ost 43 runs on this host: /dev/mapper/ost_scratch_43 /lustre/scratch/ost_43
ost 44 runs on this host: /dev/mapper/ost_scratch_44 /lustre/scratch/ost_44
ost 45 runs on this host: /dev/mapper/ost_scratch_45 /lustre/scratch/ost_45
ost 46 runs on this host: /dev/mapper/ost_scratch_46 /lustre/scratch/ost_46
ost 47 runs on this host: /dev/mapper/ost_scratch_47 /lustre/scratch/ost_47
FSCK logs are saved to /scratch/log/scratch
WARNING: Only one network configured for corosync. Consider adding a second netw
ork for stability

WARNING: mgs_standalone not set for filesystem home. If you want to use the IR(I
mperative Recovery) feature of lustre, you must enable it.This requires the crea
tion of a separate storage device for the MGS, so it is not possible to enable t
his after the filesystem has been created.
Installing file /etc/sysctl.conf
Not installing new version of "/etc/sysctl.conf": file is unchanged
Installing file /etc/sysconfig/pacemaker
Not installing new version of "/etc/sysconfig/pacemaker": file is unchanged
LNET configured
Installing file /etc/kdump.conf
Not installing new version of "/etc/kdump.conf": file is unchanged
config networking? [Y/n] n
config IPMI hardware? [Y/n] n
Checking connectivity
Pinging hosts...
mds1: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.191@o2ib OK
mds2: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.192@o2ib OK
oss1: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.183@o2ib OK
oss2: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.184@o2ib OK
oss3: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.185@o2ib OK
oss4: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.186@o2ib OK
oss5: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.187@o2ib OK
oss6: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.188@o2ib OK
oss7: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.189@o2ib OK
oss8: eth0 OK bond0 OK ipmi OK IPMI ON 172.20.9.190@o2ib OK
done
continue? [Y/n]
Installing file /etc/lvm/lvm.conf
Traceback (most recent call last):
File "/opt/ddn/config/config-lvm.py", line 28, in <module>
main()
File "/opt/ddn/config/config-lvm.py", line 24, in main
es.files.load_by_name('lvm')(conf, prefix).install()
File "/opt/ddn/es/files/common.py", line 125, in install
single_file.install()
File "/opt/ddn/es/files/common.py", line 322, in install
file_contents = self.file_contents()
File "/opt/ddn/es/files/common.py", line 297, in file_contents
self.msg = self._cb_method(contents)
File "/opt/ddn/es/files/lvm.py", line 72, in _lvm
raise Exception(errmsg)
Exception: Volume Group vg_home does not exist
Failed to configure auto_activation_volume_list in lvm.conf
This is host oss8
EXAScaler version: 2.1.2
EXAScaler flavour: CentOS
Peers of this host are: oss7
Network interface: eth0 (172.20.5.190)
Network interface: bond0 (172.20.9.190)
Lustre is exported on interfaces: o2ib(bond0)
Stonith type: ipmi
Details for filesystem home
ost 18 can failover to this host: /dev/mapper/ost_home_18 /lustre/home/ost_18
ost 19 can failover to this host: /dev/mapper/ost_home_19 /lustre/home/ost_19
ost 20 can failover to this host: /dev/mapper/ost_home_20 /lustre/home/ost_20
ost 21 runs on this host: /dev/mapper/ost_home_21 /lustre/home/ost_21
ost 22 runs on this host: /dev/mapper/ost_home_22 /lustre/home/ost_22
ost 23 runs on this host: /dev/mapper/ost_home_23 /lustre/home/ost_23
FSCK logs are saved to /scratch/log/home
Details for filesystem scratch
ost 36 can failover to this host: /dev/mapper/ost_scratch_36 /lustre/scratch/ost
_36
ost 37 can failover to this host: /dev/mapper/ost_scratch_37 /lustre/scratch/ost
_37
ost 38 can failover to this host: /dev/mapper/ost_scratch_38 /lustre/scratch/ost
_38
ost 39 can failover to this host: /dev/mapper/ost_scratch_39 /lustre/scratch/ost
_39
ost 40 can failover to this host: /dev/mapper/ost_scratch_40 /lustre/scratch/ost
_40
ost 41 can failover to this host: /dev/mapper/ost_scratch_41 /lustre/scratch/ost
_41
ost 42 runs on this host: /dev/mapper/ost_scratch_42 /lustre/scratch/ost_42
ost 43 runs on this host: /dev/mapper/ost_scratch_43 /lustre/scratch/ost_43
ost 44 runs on this host: /dev/mapper/ost_scratch_44 /lustre/scratch/ost_44
ost 45 runs on this host: /dev/mapper/ost_scratch_45 /lustre/scratch/ost_45
ost 46 runs on this host: /dev/mapper/ost_scratch_46 /lustre/scratch/ost_46
ost 47 runs on this host: /dev/mapper/ost_scratch_47 /lustre/scratch/ost_47
FSCK logs are saved to /scratch/log/scratch
WARNING: Only one network configured for corosync. Consider adding a second netw
ork for stability
WARNING: mgs_standalone not set for filesystem home. If you want to use the IR(I
mperative Recovery) feature of lustre, you must enable it.This requires the crea
tion of a separate storage device for the MGS, so it is not possible to enable t
his after the filesystem has been created.
## LUSTRE ##
## The following commands will be run to format the file system: ##
WARNING: mgs_standalone not set for filesystem. If you want to use the IR(Impera
tive Recovery) feature of lustre, you must enable it.This requires the creation
of a separate storage device for the MGS, so it is not possible to enable this a
fter the filesystem has been created.
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=home --ost
--mkfsoptions=-m1 -i 131072 --index=21 /dev/mapper/ost_home_21
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.190@o2ib --servicenode=172.20.9.189@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_home_21
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_home_21
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=home --ost
--mkfsoptions=-m1 -i 131072 --index=22 /dev/mapper/ost_home_22
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.190@o2ib --servicenode=172.20.9.189@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_home_22
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_home_22
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=home --ost
--mkfsoptions=-m1 -i 131072 --index=23 /dev/mapper/ost_home_23
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.190@o2ib --servicenode=172.20.9.189@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_home_23
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_home_23
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=scratch --ost --mkfsoptions=-m1 -i 131072 --index=42 /dev/mapper/ost_scratch_42
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.190@o2ib --servicenode=172.20.9.189@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_scratch_42
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_scratch_42
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=scratch --ost --mkfsoptions=-m1 -i 131072 --index=43 /dev/mapper/ost_scratch_43
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.190@o2ib --servicenode=172.20.9.189@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_scratch_43
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_scratch_43
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=scratch --ost --mkfsoptions=-m1 -i 131072 --index=44 /dev/mapper/ost_scratch_44
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.190@o2ib --servicenode=172.20.9.189@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_scratch_44
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_scratch_44
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=scratch --ost --mkfsoptions=-m1 -i 131072 --index=45 /dev/mapper/ost_scratch_45
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.190@o2ib --servicenode=172.20.9.189@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_scratch_45
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_scratch_45
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=scratch --ost --mkfsoptions=-m1 -i 131072 --index=46 /dev/mapper/ost_scratch_46
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.190@o2ib --servicenode=172.20.9.189@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_scratch_46
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_scratch_46
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=scratch --ost --mkfsoptions=-m1 -i 131072 --index=47 /dev/mapper/ost_scratch_47
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.190@o2ib --servicenode=172.20.9.189@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_scratch_47
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_scratch_47
format filesystem? [y/N]
Creating mountpoints...
Making directory /scratch/log/home
Making directory /lustre/home/client
Making directory /lustre/home/ost_18
Making directory /lustre/home/ost_19
Making directory /lustre/home/ost_20
Making directory /lustre/home/ost_21
Making directory /lustre/home/ost_22
Making directory /lustre/home/ost_23
Making directory /scratch/log/scratch
Making directory /lustre/scratch/client
Making directory /lustre/scratch/ost_36
Making directory /lustre/scratch/ost_37
Making directory /lustre/scratch/ost_38
Making directory /lustre/scratch/ost_39
Making directory /lustre/scratch/ost_40
Making directory /lustre/scratch/ost_41
Making directory /lustre/scratch/ost_42
Making directory /lustre/scratch/ost_43
Making directory /lustre/scratch/ost_44
Making directory /lustre/scratch/ost_45
Making directory /lustre/scratch/ost_46
Making directory /lustre/scratch/ost_47
config corosync? [Y/n]
WARNING: Only one network configured for corosync
Consider adding a second network for stability
Installing file /etc/corosync/service.d/pcmk
Installing file /etc/corosync/corosync.conf
WARNING: Only one network configured for corosync
Consider adding a second network for stability
Enabling corosync at boot
Enabling pacemaker at boot
Installing file /etc/inittab
Installing file /etc/securetty
done
Installing file /etc/syslog-ng/syslog-ng.conf
Installing file /etc/logrotate.d/corosync
[root@oss8 ~]# mount
/dev/mapper/VolGroup00-LogVol01 on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs (rw)
/dev/sda1 on /boot type ext4 (rw)
/dev/mapper/VolGroup00-LogVol05 on /scratch type ext4 (rw)
/dev/mapper/VolGroup00-LogVol03 on /var type ext4 (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
[root@oss8 ~]# es_mkfs --reformat --dry-run
WARNING: mgs_standalone not set for filesystem. If you want to use the IR(Impera
tive Recovery) feature of lustre, you must enable it.This requires the creation
of a separate storage device for the MGS, so it is not possible to enable this a
fter the filesystem has been created.
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=home --ref
ormat --ost --mkfsoptions=-m1 -i 131072 --index=21 /dev/mapper/ost_home_21
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.190@o2ib --servicenode=172.20.9.189@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_home_21
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_home_21
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=home --ref
ormat --ost --mkfsoptions=-m1 -i 131072 --index=22 /dev/mapper/ost_home_22
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.190@o2ib --servicenode=172.20.9.189@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_home_22
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_home_22
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=home --ref
ormat --ost --mkfsoptions=-m1 -i 131072 --index=23 /dev/mapper/ost_home_23
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.190@o2ib --servicenode=172.20.9.189@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_home_23
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_home_23
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=scratch --reformat --ost --mkfsoptions=-m1 -i 131072 --index=42 /dev/mapper/ost_scratch_42
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.190@o2ib --servicenode=172.20.9.189@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_scratch_42
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_scratch_42
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=scratch --reformat --ost --mkfsoptions=-m1 -i 131072 --index=43 /dev/mapper/ost_scratch_43
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.190@o2ib --servicenode=172.20.9.189@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_scratch_43
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_scratch_43
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=scratch --reformat --ost --mkfsoptions=-m1 -i 131072 --index=44 /dev/mapper/ost_scratch_44
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.190@o2ib --servicenode=172.20.9.189@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_scratch_44
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_scratch_44
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=scratch --reformat --ost --mkfsoptions=-m1 -i 131072 --index=45 /dev/mapper/ost_scratch_45
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.190@o2ib --servicenode=172.20.9.189@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_scratch_45
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_scratch_45
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=scratch --reformat --ost --mkfsoptions=-m1 -i 131072 --index=46 /dev/mapper/ost_scratch_46
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.190@o2ib --servicenode=172.20.9.189@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_scratch_46
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_scratch_46
mkfs.lustre --mgsnode=127.0.0.2@tcp --failnode=127.0.0.2@tcp --fsname=scratch --reformat --ost --mkfsoptions=-m1 -i 131072 --index=47 /dev/mapper/ost_scratch_47
tunefs.lustre --erase-params --mgsnode=172.20.9.191@o2ib --mgsnode=172.20.9.192@
o2ib --servicenode=172.20.9.190@o2ib --servicenode=172.20.9.189@o2ib --param ost
.quota_type=ug3 /dev/mapper/ost_scratch_47
tune2fs -E mmp_update_interval=5 /dev/mapper/ost_scratch_47
[root@oss8 ~]# es_mkfs --reformat
WARNING: mgs_standalone not set for filesystem. If you want to use the IR(Impera
tive Recovery) feature of lustre, you must enable it.This requires the creation
of a separate storage device for the MGS, so it is not possible to enable this a
fter the filesystem has been created.
Format and tunefs completed
[root@oss8 ~]# es_mount
========
home
========
ost[21-23]:
mounted
-----========
scratch
========
ost[42-47]:
mounted
-----[root@oss8 ~]# mount
/dev/mapper/VolGroup00-LogVol01 on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs (rw)
/dev/sda1 on /boot type ext4 (rw)
/dev/mapper/VolGroup00-LogVol05 on /scratch type ext4 (rw)
/dev/mapper/VolGroup00-LogVol03 on /var type ext4 (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
/dev/mapper/ost_scratch_42 on /lustre/scratch/ost_42 type lustre (rw)
/dev/mapper/ost_scratch_47 on /lustre/scratch/ost_47 type lustre (rw)
/dev/mapper/ost_home_23 on /lustre/home/ost_23 type lustre (rw)
/dev/mapper/ost_scratch_43 on /lustre/scratch/ost_43 type lustre (rw)
/dev/mapper/ost_scratch_45 on /lustre/scratch/ost_45 type lustre (rw)
/dev/mapper/ost_scratch_46 on /lustre/scratch/ost_46 type lustre (rw)
/dev/mapper/ost_home_21 on /lustre/home/ost_21 type lustre (rw)
/dev/mapper/ost_home_22 on /lustre/home/ost_22 type lustre (rw)
/dev/mapper/ost_scratch_44 on /lustre/scratch/ost_44 type lustre (rw)
[root@oss8 ~]# mount -t lustre | sort
/dev/mapper/ost_home_21 on /lustre/home/ost_21 type lustre (rw)
/dev/mapper/ost_home_22 on /lustre/home/ost_22 type lustre (rw)
/dev/mapper/ost_home_23 on /lustre/home/ost_23 type lustre (rw)
/dev/mapper/ost_scratch_42 on /lustre/scratch/ost_42 type lustre (rw)
/dev/mapper/ost_scratch_43 on /lustre/scratch/ost_43 type lustre (rw)
/dev/mapper/ost_scratch_44 on /lustre/scratch/ost_44 type lustre (rw)
/dev/mapper/ost_scratch_45 on /lustre/scratch/ost_45 type lustre (rw)
/dev/mapper/ost_scratch_46 on /lustre/scratch/ost_46 type lustre (rw)
/dev/mapper/ost_scratch_47 on /lustre/scratch/ost_47 type lustre (rw)
[root@oss8 ~]# logout
Connection to oss8 closed.
[root@oss1 ~]# logout
Connection to oss1 closed.
[root@mds1 ~]# logout
Connection to 172.20.5.191 closed.
[root@login01 ~]# pdsh
20150916.gz
.gconf/
Pictures/
STDIN.e24
STDIN.o17
STDIN.o32
anaconda-ks.cfg
.gconfd/
Public/
STDIN.e25
STDIN.o18
STDIN.o33
backup/
.gnome2/
.pulse/
STDIN.e26
STDIN.o19
STDIN.o34
.bash_history
.gnome2_private/
.pulse-cookie
STDIN.e27
STDIN.o20
STDIN.o46
.bash_logout
.gnupg/
.ssh/
STDIN.e28
STDIN.o21
.tcshrc
.bash_profile
.gstreamer-0.10/
STDIN.e14
STDIN.e29
STDIN.o22
Templates/
.bashrc
.gvfs/
STDIN.e15
STDIN.e30
STDIN.o23
Videos/
.cache/
.ICEauthority
STDIN.e16
STDIN.e31
STDIN.o24
.viminfo
.config/
install.log
STDIN.e17
STDIN.e32
STDIN.o25
.Xauthority
.cshrc
install.log.syslog STDIN.e18
STDIN.e33
STDIN.o26
.xsession-errors
.dbus/
.kde/
STDIN.e19
STDIN.e34
STDIN.o27
Desktop/
.mozilla/
STDIN.e20
STDIN.e46
STDIN.o28
Documents/
Music/
STDIN.e21
STDIN.o14
STDIN.o29
Downloads/
.nautilus/
STDIN.e22
STDIN.o15
STDIN.o30
.esd_auth
.pbs_qmgr_history STDIN.e23
STDIN.o16
STDIN.o31
[root@login01 ~]# pdsh a f 1 /opt/ddn/config/corosync/config-corosync.py^C
[root@login01 ~]# ^C
[root@login01 ~]# ssh mds1
ssh: Could not resolve hostname mds1: Name or service not known
[root@login01 ~]# ssh 172.20.5.191
root@172.20.5.191's password:
Last login: Fri Sep 25 14:11:21 2015 from 172.20.9.175
[root@mds1 ~]# pdsh a f 1 /opt/ddn/config/corosync/config-corosync.py
pdsh@mds1: no remote hosts specified
[root@mds1 ~]# pdsh w mds2 f 1 /opt/ddn/config/corosync/config-corosync.py
pdsh@mds1: no remote hosts specified
[root@mds1 ~]# pdsh w mds[1-2] f 1 /opt/ddn/config/corosync/config-corosync.py
pdsh@mds1: no remote hosts specified
[root@mds1 ~]# host
host
hostid
hostname
[root@mds1 ~]# pdsh w mds[1-2] /opt/ddn/config/corosync/config-corosync.py
pdsh@mds1: no remote hosts specified
[root@mds1 ~]# cat /etc/machines
mds1
mds2
oss1
oss2
oss3
oss4
oss5
oss6
oss7
oss8
[root@mds1 ~]# cp /etc/machines /etc/p
pam.d/
php.ini
profile
pam_ldap.conf
pkcs11/
profile.d/
pango/
pki/
protocols
passwd
plymouth/
polkit-1/
passwd-
pm/
ppp/
php.d/
pm-utils-hd-apm-restore.conf printcap
popt.d/
portreserve/
postfix/
[root@mds1 ~]# pdsh a f 1 /opt/ddn/config/corosync/config-corosync.py
pdsh@mds1: no remote hosts specified
[root@mds1 ~]# pdsh a /opt/ddn/config/corosync/config-corosync.py
pdsh@mds1: no remote hosts specified
[root@mds1 ~]# pdsh a "/opt/ddn/config/corosync/config-corosync.py"
pdsh@mds1: no remote hosts specified
[root@mds1 ~]# pdsh a hostname
pdsh@mds1: no remote hosts specified
[root@mds1 ~]# ssh oss2
Last login: Fri Sep 25 18:09:30 2015 from oss1
[root@oss2 ~]#

You might also like