lustre/scripts/lustre_createcsv.in (fs/lustre-release.git @ 986e6e5f2b4687979e19f06c153c07bc72927fb3)
1 #!/bin/bash
2 #
3 # lustre_createcsv - generate a csv file from a running lustre cluster
4 #
5 # This script collects lustre target information, linux MD/LVM device
6 # information and HA software configuration from a lustre cluster and generates
7 # a csv file. The csv file can in turn be parsed by lustre_config to
8 # configure multiple lustre servers in parallel.
9 #
10 # This script should be run on the MGS node.
11 #
12 ################################################################################
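#
# Each record in the generated csv file describes one lustre target as a
# comma-separated line. The sketch below is only illustrative; lustre_config
# defines the authoritative field layout:
#   hostname,module_opts,device name,mount point,device type,fsname,
#   mgs nids,index,format options,mkfs options,mount options,failover nids
#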
13
14 # Usage
15 usage() {
16         cat >&2 <<EOF
17
18 Usage:  `basename $0` [-t HAtype] [-d] [-h] [-v] [-f csv_filename]
19
20         This script collects lustre target information, linux MD/LVM
21         device information and HA software configuration from a running
22         lustre cluster to generate a csv file. It should be run on the MGS node.
23
24         -t HAtype       collect High-Availability software configurations
25                         The argument following -t indicates the High-
26                         Availability software type. The HA software types
27                         currently supported are: hbv1 (Heartbeat version 1)
28                         and hbv2 (Heartbeat version 2).
29         -d              collect linux MD/LVM device information
30         -h              help
31         -v              verbose mode
32         -f csv_filename designate a name for the csv file
33                         Default is lustre_config.csv.
34
35 EOF
36         exit 1
37 }
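# Illustrative invocation (hypothetical file name): collect Heartbeat v2 and
# MD/LVM information from the running cluster into mycluster.csv:
#   ./lustre_createcsv -t hbv2 -d -v -f mycluster.csv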
38
39 # Get the library of functions
40 . @scriptlibdir@/lc_common
41
42 #**************************** Global variables ****************************#
43 # csv file
44 LUSTRE_CSV_FILE=${LUSTRE_CSV_FILE:-"lustre_config.csv"}
45
46 # Lustre proc files
47 LUSTRE_PROC=${LUSTRE_PROC:-"/proc/fs/lustre"}
48 LUSTRE_PROC_DEVICES=${LUSTRE_PROC}/devices
49
50 LNET_PROC=${LNET_PROC:-"/proc/sys/lnet"}
51 LNET_PROC_PEERS=${LNET_PROC}/peers
52
53 # Default network module options
54 DEFAULT_MOD_OPTS=${DEFAULT_MOD_OPTS:-"options lnet networks=tcp"}
55
56 # Lustre target obd device types
57 MGS_TYPE=${MGS_TYPE:-"mgs"}
58 MDT_TYPE=${MDT_TYPE:-"mds"}
59 OST_TYPE=${OST_TYPE:-"obdfilter"}
60
61 # The obd name of MGS target server
62 MGS_SVNAME=${MGS_SVNAME:-"MGS"}         
63
64 # Hostnames of the lustre cluster nodes
65 declare -a HOST_NAMES                   
66 MGS_HOSTNAME=${MGS_HOSTNAME:-"`hostname`"} # Hostname of the MGS node
67
68 # Configs of lustre targets in one cluster node
69 declare -a TARGET_CONFIGS               
70 declare -a TARGET_SVNAMES TARGET_DEVNAMES TARGET_DEVSIZES TARGET_MNTPNTS
71 declare -a TARGET_DEVTYPES TARGET_FSNAMES TARGET_MGSNIDS TARGET_INDEXES
72 declare -a TARGET_FMTOPTS TARGET_MKFSOPTS TARGET_MNTOPTS TARGET_FAILNIDS
73 declare -a HA_CONFIGS
74 declare -a ALL_TARGET_SVNAMES           # All the target services in the cluster
75 declare -a FAILOVER_FMTOPTS             # "--noformat"  
76
77 # Information about linux MD/LVM devices in one cluster node
78 declare -a MD_NAME MD_LEVEL MD_DEVS     # MD
79 declare -a VG_NAME VG_PVNAMES           # VG
80 declare -a LV_NAME LV_SIZE LV_VGNAME    # LV
81
82 # Lustre target service types
83 let "LDD_F_SV_TYPE_MDT = 0x0001"
84 let "LDD_F_SV_TYPE_OST = 0x0002"
85 let "LDD_F_SV_TYPE_MGS = 0x0004"
86
87 # Permanent mount options for ext3 or ldiskfs
88 ALWAYS_MNTOPTS=${ALWAYS_MNTOPTS:-"errors=remount-ro"}
89 MDT_MGS_ALWAYS_MNTOPTS=${MDT_MGS_ALWAYS_MNTOPTS:-",iopen_nopriv,user_xattr"}
90 OST_ALWAYS_MNTOPTS=${OST_ALWAYS_MNTOPTS:-",asyncdel"}
91 OST_DEFAULT_MNTOPTS=${OST_DEFAULT_MNTOPTS:-",extents,mballoc"}
92
93 # User-settable parameter keys
94 PARAM_MGSNODE=${PARAM_MGSNODE:-"mgsnode="}
95 PARAM_FAILNODE=${PARAM_FAILNODE:-"failover.node="}
96
97 # Block size
98 L_BLOCK_SIZE=4096
99
100 # Option string of mkfs.lustre
101 OPTSTR_STRIPE_COUNT=${OPTSTR_STRIPE_COUNT:-"--stripe-count-hint="}
102
103
104 # Get and check the positional parameters
105 VERBOSE_OUTPUT=false
106 GET_MDLVM_INFO=false
107 while getopts "t:dhvf:" OPTION; do
108         case $OPTION in
109         t) 
110                 HATYPE_OPT=$OPTARG
111                 if [ "${HATYPE_OPT}" != "${HBVER_HBV1}" ] \
112                 && [ "${HATYPE_OPT}" != "${HBVER_HBV2}" ] \
113                 && [ "${HATYPE_OPT}" != "${HATYPE_CLUMGR}" ]; then
114                         echo >&2 "`basename $0`: Invalid HA software type" \
115                                  "- ${HATYPE_OPT}!"
116                         usage
117                 fi
118                 ;;
119         d)      GET_MDLVM_INFO=true;;
120         h)      usage;;
121         v)      VERBOSE_OUTPUT=true;;
122         f)      LUSTRE_CSV_FILE=$OPTARG;;
123         ?)      usage 
124         esac
125 done
126
127 # Verify the local host is the MGS node
128 mgs_node() {
129         if [ ! -e ${LUSTRE_PROC_DEVICES} ]; then
130                 echo >&2 "`basename $0`: error: ${LUSTRE_PROC_DEVICES} does" \
131                          "not exist. Lustre kernel modules may not be loaded!"
132                 return 1
133         fi
134
135         if [ -z "`cat ${LUSTRE_PROC_DEVICES}`" ]; then
136                 echo >&2 "`basename $0`: error: ${LUSTRE_PROC_DEVICES} is" \
137                          "empty. Lustre services may not be started!"
138                 return 1
139         fi
140
141         if [ -z "`grep ${MGS_TYPE} ${LUSTRE_PROC_DEVICES}`" ]; then
142                 echo >&2 "`basename $0`: error: This node is not an MGS node." \
143                          "The script should be run on the MGS node!"
144                 return 1
145         fi
146
147         return 0
148 }
149
150 # get_hostnames
151 # Get lustre cluster node names
152 get_hostnames() {
153         declare -a HOST_NIDS
154         declare -i idx          # Index of HOST_NIDS array
155         declare -i i            # Index of HOST_NAMES array
156
157         if ! mgs_node; then
158                 return 1
159         fi
160
161         if [ ! -e ${LNET_PROC_PEERS} ]; then
162                 echo >&2 "`basename $0`: error: ${LNET_PROC_PEERS} does not" \
163                          "exist. LNET kernel modules may not be loaded" \
164                          "or LNET network may not be up!"
165                 return 1
166         fi
167
168         HOST_NAMES[0]=${MGS_HOSTNAME} # MGS node
169         HOST_NIDS[0]=${HOST_NAMES[0]}
170
171         # Get the nids of the nodes that have contacted the MGS
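        # The first field of each ${LNET_PROC_PEERS} line is the peer nid;
        # a typical line looks roughly like (illustrative):
        #   192.168.1.21@tcp  1  up  ...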
172         idx=1
173         for nid in `cat ${LNET_PROC_PEERS} | awk '{print $1}'`; do
174                 if [ "${nid}" = "nid" ]; then
175                         continue
176                 fi
177
178                 HOST_NIDS[idx]=${nid}
179                 let "idx += 1"
180         done
181
182         if [ ${idx} -eq 1 ]; then
183                 verbose_output "Only one node is running in the lustre" \
184                                "cluster: ${HOST_NAMES[0]}."
185                 return 0                
186         fi
187
188         # Get the hostnames of the nodes
189         for ((idx = 1, i = 1; idx < ${#HOST_NIDS[@]}; idx++, i++)); do
190                 if [ -z "${HOST_NIDS[idx]}" ]; then
191                         echo >&2 "`basename $0`: get_hostnames() error:" \
192                                  "Invalid nid - \"${HOST_NIDS[idx]}\"!"
193                         return 1
194                 fi
195
196                 HOST_NAMES[i]=$(nid2hostname ${HOST_NIDS[idx]})
197                 if [ $? -ne 0 ]; then
198                         echo >&2 "${HOST_NAMES[i]}"
199                         return 1
200                 fi
201
202                 if [ "${HOST_NAMES[i]}" = "${HOST_NAMES[0]}" ]; then
203                         unset HOST_NAMES[i]
204                         let "i -= 1"
205                 fi
206         done
207
208         return 0
209 }
210
211 #*********************** Linux MD/LVM device information ***********************#
212 # get_md_configs hostname
213 # Get all the active MD device information from the node @hostname
214 get_md_configs() {
215         declare -i i=0
216         declare -i j=0
217         local host_name=$1
218         local ret_line line first_item
219
220         # Initialize the arrays
221         unset MD_NAME
222         unset MD_LEVEL
223         unset MD_DEVS
224         
225         # Execute remote command to the node ${host_name} and get all the
226         # active MD device information.
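        # Typical "mdadm --detail --scan --verbose" output parsed below
        # (illustrative values):
        #   ARRAY /dev/md0 level=raid1 num-devices=2 UUID=...
        #      devices=/dev/sdb1,/dev/sdc1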
227         while read -r ret_line; do
228                 if is_pdsh; then
229                         set -- ${ret_line}
230                         shift
231                         line="$*"
232                 else
233                         line="${ret_line}"
234                 fi
235
236                 first_item=`echo "${line}" | awk '{print $1}'`
237
238                 # Get the MD device name and raid level
239                 if [ "${first_item}" = "ARRAY" ]; then
240                         MD_NAME[i]=`echo "${line}" | awk '{print $2}'`
241                         MD_LEVEL[i]=`echo "${line}" | awk '{print $3}' | sed -e 's/level=//'`
242                         let "j = i"
243                         let "i += 1"
244                 fi
245
246                 # Get the MD component devices
247                 if [ "${first_item}" != "${first_item#devices=}" ]; then
248                         MD_DEVS[j]=`echo "${line}" | sed -e 's/devices=//' -e 's/,/ /g'`
249                 fi
250         done < <(${REMOTE} ${host_name} "${MDADM} --detail --scan --verbose")
251
252         if [ $i -eq 0 ]; then
253                 verbose_output "There are no active MD devices" \
254                                "in the host ${host_name}!"
255         fi
256
257         return 0
258 }
259
260 # get_pv_configs hostname
261 # Get all the LVM PV information from the node @hostname
262 get_pv_configs() {
263         PV_NAMES=
264         local host_name=$1
265         local cmd ret_str
266
267         # Execute remote command to get all the PV information.
268         cmd="${EXPORT_PATH} pvdisplay -c | awk -F: '{print \$1}' | xargs"
269         ret_str=`${REMOTE} ${host_name} "${cmd}" 2>&1`
270         if [ $? -ne 0 ]; then
271                 if [ -n "${ret_str}" ]; then
272                         echo >&2 "`basename $0`: get_pv_configs() error:" \
273                         "remote command to ${host_name} error: ${ret_str}"
274                 else
275                         remote_error "get_pv_configs" ${host_name}
276                 fi
277                 return 1
278         fi
279
280         PV_NAMES=`echo "${ret_str}" | sed -e 's/^'${host_name}':[[:space:]]//'`
281         if [ -z "${PV_NAMES}" ]; then
282                 verbose_output "There are no PVs in the host ${host_name}!"
283                 return 0
284         fi
285
286         return 0
287 }
288
289 # get_vg_pvnames hostname vgname
290 # Get the PVs contained in @vgname from the node @hostname
291 get_vg_pvnames() {
292         local host_name=$1
293         local vg_name=$2
294         local pv_names=
295         local cmd ret_str
296
297         # Execute remote command to get the PV names.
298         cmd="${EXPORT_PATH} vgdisplay -v ${vg_name} 2>/dev/null\
299              | grep \"PV Name\" | awk '{print \$3}' | xargs"
300         ret_str=`${REMOTE} ${host_name} "${cmd}" 2>&1`
301         if [ $? -ne 0 ]; then
302                 if [ -n "${ret_str}" ]; then
303                         echo "`basename $0`: get_vg_pvnames() error:" \
304                         "remote command to ${host_name} error: ${ret_str}"
305                 else
306                         remote_error "get_vg_pvnames" ${host_name}
307                 fi
308                 return 1
309         fi
310
311         pv_names=`echo "${ret_str}" | sed -e 's/^'${host_name}':[[:space:]]//'`
312         if [ -z "${pv_names}" ]; then
313                 echo "`basename $0`: get_vg_pvnames() error:" \
314                 "There are no PVs in VG ${vg_name} in the host ${host_name}!"\
315                 "Or VG ${vg_name} does not exist."
316                 return 1
317         fi
318
319         echo "${pv_names}"
320         return 0
321 }
322
323 # get_vg_configs hostname
324 # Get all the LVM VG information from the node @hostname
325 get_vg_configs() {
326         declare -i i=0
327         local host_name=$1
328         local cmd ret_str
329         local vg_name
330
331         # Initialize the arrays
332         unset VG_NAME
333         unset VG_PVNAMES
334
335         # Execute remote command to get all the VG names.
336         cmd="${EXPORT_PATH} vgdisplay \
337              | grep \"VG Name\" | awk '{print \$3}' | xargs"
338         ret_str=`${REMOTE} ${host_name} "${cmd}" 2>&1`
339         if [ $? -ne 0 ]; then
340                 if [ -n "${ret_str}" ]; then
341                         echo >&2 "`basename $0`: get_vg_configs() error:" \
342                         "remote command to ${host_name} error: ${ret_str}"
343                 else
344                         remote_error "get_vg_configs" ${host_name}
345                 fi
346                 return 1
347         fi
348
349         if [ -z "${ret_str}" ] \
350         || [ "${ret_str}" != "${ret_str#*No volume groups found*}" ]; then
351                 verbose_output "There are no VGs in the host ${host_name}!"
352                 return 0
353         fi
354
355         # Get all the VG information
356         for vg_name in `echo "${ret_str}" | sed -e 's/^'${host_name}'://'`; do
357                 VG_NAME[i]=${vg_name}
358                 VG_PVNAMES[i]=$(get_vg_pvnames ${host_name} ${VG_NAME[i]})
359                 if [ $? -ne 0 ]; then
360                         echo >&2 "${VG_PVNAMES[i]}"
361                         return 1
362                 fi
363                 let "i += 1"
364         done
365
366         return 0
367 }
368
369 # get_lv_configs hostname
370 # Get all the LVM LV information from the node @hostname
371 get_lv_configs() {
372         declare -i i=0
373         local host_name=$1
374         local ret_line line
375
376         # Initialize the arrays
377         unset LV_NAME
378         unset LV_SIZE
379         unset LV_VGNAME
380
381         # Execute remote command to get all the LV information.
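        # "lvdisplay -c" prints one colon-separated record per LV; only
        # fields 1, 2 and 7 are used below. An illustrative record:
        #   /dev/lustre_vg/ost0_lv:lustre_vg:3:1:-1:0:4194304:...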
382         while read -r ret_line; do
383                 if is_pdsh; then
384                         set -- ${ret_line}
385                         shift
386                         line="$*"
387                 else
388                         line="${ret_line}"
389                 fi
390
391                 [ "${line}" != "${line#*volume group*}" ] && break
392
393                 LV_NAME[i]=`echo "${line}" | awk -F: '{print $1}' | sed -e 's/.*\///g'`
394                 LV_VGNAME[i]=`echo "${line}" | awk -F: '{print $2}'`
395                 LV_SIZE[i]=`echo "${line}" | awk -F: '{print $7}' | sed -e 's/.*/&K/'`
396
397                 let "i += 1"
398         done < <(${REMOTE} ${host_name} "${EXPORT_PATH} lvdisplay -c")
399
400         if [ $i -eq 0 ]; then
401                 verbose_output "There are no LVs in the host ${host_name}"
402         fi
403
404         return 0
405 }
406
407 #*************************** Network module options ***************************#
408 # last_is_backslash line
409 # Check whether the last non-blank character of @line is a backslash
410 last_is_backslash() {
411         local line="$*"
412         declare -i i
413         declare -i length
414         local letter last_letter
415
416         length=${#line}
417         for ((i = ${length}-1; i >= 0; i--)); do
418                 letter=${line:${i}:1}
419                 [ "x${letter}" != "x " -a "x${letter}" != "x    " -a -n "${letter}" ]\
420                 && last_letter=${letter} && break
421         done
422
423         [ "x${last_letter}" = "x\\" ] && return 0
424
425         return 1
426 }
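# For example (illustrative):
#   last_is_backslash 'options lnet networks=tcp0(eth0) \'   returns 0 (true)
#   last_is_backslash 'options lnet networks=tcp'            returns 1 (false)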
427
428 # get_module_opts hostname
429 # Get the network module options from the node @hostname 
430 get_module_opts() {
431         local host_name=$1
432         local ret_str
433         local MODULE_CONF KERNEL_VER
434         local ret_line line find_options
435         local continue_flag
436
437         MODULE_OPTS=${DEFAULT_MOD_OPTS}
438
439         # Execute remote command to get the kernel version
440         ret_str=`${REMOTE} ${host_name} "uname -r" 2>&1`
441         if [ $? -ne 0 -a -n "${ret_str}" ]; then
442                 echo >&2 "`basename $0`: get_module_opts() error:" \
443                          "remote command error: ${ret_str}"
444                 return 1
445         fi
446         remote_error "get_module_opts" ${host_name} "${ret_str}" && return 1
447
448         if is_pdsh; then
449                 KERNEL_VER=`echo ${ret_str} | awk '{print $2}'`
450         else
451                 KERNEL_VER=`echo ${ret_str} | awk '{print $1}'`
452         fi
453
454         # Get the module configuration file name
455         if [ "${KERNEL_VER:0:3}" = "2.4" ]; then
456                 MODULE_CONF=/etc/modules.conf
457         else
458                 MODULE_CONF=/etc/modprobe.conf
459         fi
460
461         # Execute remote command to get the lustre network module options
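        # The loop below collects "options lnet ..." lines, including
        # backslash-continued ones. Illustrative ${MODULE_CONF} content:
        #   options lnet networks=tcp0(eth0),o2ib0(ib0) \
        #           routes="o2ib0 192.168.10.1@tcp0"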
462         continue_flag=false
463         find_options=false
464         while read -r ret_line; do
465                 if is_pdsh; then
466                         set -- ${ret_line}
467                         shift
468                         line="$*"
469                 else
470                         line="${ret_line}"
471                 fi
472
473                 # Get rid of the comment line
474                 [ -z "`echo \"${line}\"|egrep -v \"^#\"`" ] && continue
475
476                 if [ "${line}" != "${line#*options lnet*}" ]; then
477                         if ! ${find_options}; then
478                                 find_options=true
479                                 MODULE_OPTS=${line}
480                         else
481                                 MODULE_OPTS=${MODULE_OPTS}$" \n "${line}
482                         fi
483
484                         last_is_backslash "${line}" && continue_flag=true \
485                         || continue_flag=false
486                         continue
487                 fi      
488
489                 if ${continue_flag}; then
490                         MODULE_OPTS=${MODULE_OPTS}$" \n "${line}
491                         ! last_is_backslash "${line}" && continue_flag=false
492
493                 fi
494         done < <(${REMOTE} ${host_name} "cat ${MODULE_CONF}")
495
496         if [ -z "${MODULE_OPTS}" ]; then
497                 MODULE_OPTS=${DEFAULT_MOD_OPTS}
498         fi
499
500         return 0
501 }
502
503 #************************ HA software configurations ************************#
504 # is_ha_target hostname target_devname
505 # Check whether the target @target_devname was made to be high-available
506 is_ha_target() {
507         local host_name=$1
508         local target_svname=$2
509         local res_file
510         local ret_str
511
512         case "${HATYPE_OPT}" in
513         "${HBVER_HBV1}")        res_file=${HA_RES};;
514         "${HBVER_HBV2}")        res_file=${HA_CIB};;
515         "${HATYPE_CLUMGR}")     res_file=${CLUMAN_CONFIG};;
516         esac
517
518         # Execute remote command to check the resource file
519         ret_str=`${REMOTE} ${host_name} \
520                 "grep ${target_svname} ${res_file}" 2>&1`
521         if [ $? -ne 0 -a -n "${ret_str}" ]; then
522                 echo >&2 "`basename $0`: is_ha_target() error:" \
523                          "remote command error: ${ret_str}"
524                 return 1
525         fi
526
527         [ "${ret_str}" = "${ret_str#*${target_svname}*}" ] && return 1
528
529         return 0
530 }
531
532 # get_hb_configs hostname
533 # Get the Heartbeat configurations from the node @hostname
534 get_hb_configs() {
535         local host_name=$1
536         local ret_line line
537         declare -i i
538
539         unset HA_CONFIGS
540         HB_CHANNELS=
541         SRV_IPADDRS=
542         HB_OPTIONS=
543
544         # Execute remote command to get the configs of Heartbeat channels, etc
545         while read -r ret_line; do
546                 if is_pdsh; then
547                         set -- ${ret_line}
548                         shift
549                         line="$*"
550                 else
551                         line="${ret_line}"
552                 fi
553
554                 # Get rid of the comment line
555                 [ -z "`echo \"${line}\"|egrep -v \"^#\"`" ] && continue
556
557                 if [ "${line}" != "${line#*serial*}" ] \
558                 || [ "${line}" != "${line#*cast*}" ]; then
559                         if [ -z "${HB_CHANNELS}" ]; then
560                                 HB_CHANNELS=${line}
561                         else
562                                 HB_CHANNELS=${HB_CHANNELS}:${line}
563                         fi
564                 fi
565
566                 if [ "${line}" != "${line#*stonith*}" ] \
567                 || [ "${line}" != "${line#*ping*}" ] \
568                 || [ "${line}" != "${line#*respawn*}" ] \
569                 || [ "${line}" != "${line#*apiauth*}" ] \
570                 || [ "${line}" != "${line#*compression*}" ]; then
571                         if [ -z "${HB_OPTIONS}" ]; then
572                                 HB_OPTIONS=${line}
573                         else
574                                 HB_OPTIONS=${HB_OPTIONS}:${line}
575                         fi
576                 fi
577         done < <(${REMOTE} ${host_name} "cat ${HA_CF}")
578
579         if [ -z "${HB_CHANNELS}" ]; then
580                 echo >&2 "`basename $0`: get_hb_configs() error:" \
581                          "There are no heartbeat channel configs in ${HA_CF}" \
582                          "of host ${host_name} or ${HA_CF} does not exist!"
583                 return 0
584         fi
585
586         # Execute remote command to get Heartbeat service address
587         if [ "${HATYPE_OPT}" = "${HBVER_HBV1}" ]; then
588                 while read -r ret_line; do
589                         if is_pdsh; then
590                                 set -- ${ret_line}
591                                 shift
592                                 line="$*"
593                         else
594                                 line="${ret_line}"
595                         fi
596
597                         # Get rid of the empty line
598                         [ -z "`echo ${line}|awk '/[[:alnum:]]/ {print $0}'`" ]\
599                         && continue
600
601                         # Get rid of the comment line
602                         [ -z "`echo \"${line}\"|egrep -v \"^#\"`" ] && continue
603
604                         SRV_IPADDRS=`echo ${line} | awk '{print $2}'`
605                         [ -n "${SRV_IPADDRS}" ] \
606                         && [ "`echo ${line} | awk '{print $1}'`" = "${host_name}" ] && break
607                 done < <(${REMOTE} ${host_name} "cat ${HA_RES}")
608         
609                 if [ -z "${SRV_IPADDRS}" ]; then
610                         echo >&2 "`basename $0`: get_hb_configs() error: There"\
611                                  "are no service address in ${HA_RES} of host"\
612                                  "${host_name} or ${HA_RES} does not exist!"
613                         return 0
614                 fi
615         fi
616
617         # Construct HA configuration items 
618         for ((i = 0; i < ${#TARGET_DEVNAMES[@]}; i++)); do
619                 [ -z "${TARGET_DEVNAMES[i]}" ] && continue
620
621                 # Execute remote command to check whether this target service 
622                 # was made to be high-available
623                 if is_ha_target ${host_name} ${TARGET_DEVNAMES[i]}; then
624                         HA_CONFIGS[i]=${HB_CHANNELS},${SRV_IPADDRS},${HB_OPTIONS}
625                 fi
626         done
627
628         return 0
629 }
630
631 # get_cluman_channel hostname
632 # Get the Heartbeat channel of CluManager from the node @hostname
633 get_cluman_channel() {
634         local host_name=$1
635         local ret_line line
636         local cluman_channel=
637         local mcast_ipaddr
638
639         while read -r ret_line; do
640                 if is_pdsh; then
641                         set -- ${ret_line}
642                         shift
643                         line="$*"
644                 else
645                         line="${ret_line}"
646                 fi
647
648                 if [ "${line}" != "${line#*broadcast*}" ] \
649                 && [ "`echo ${line}|awk '{print $3}'`" = "yes" ]; then
650                         cluman_channel="broadcast"
651                         break
652                 fi
653
654                 if [ "${line}" != "${line#*multicast_ipaddress*}" ]; then
655                         mcast_ipaddr=`echo ${line}|awk '{print $3}'`
656                         if [ "${mcast_ipaddr}" != "225.0.0.11" ]; then
657                                 cluman_channel="multicast ${mcast_ipaddr}"
658                                 break
659                         fi
660                 fi
661         done < <(${REMOTE} ${host_name} "${CONFIG_CMD} --clumembd")
662
663         echo ${cluman_channel}
664         return 0
665 }
666
667 # get_cluman_srvaddr hostname target_svname
668 # Get the service IP addresses of @target_svname from the node @hostname 
669 get_cluman_srvaddr() {
670         local host_name=$1
671         local target_svname=$2
672         local ret_line line
673         local srvaddr cluman_srvaddr=
674
675         while read -r ret_line; do
676                 if is_pdsh; then
677                         set -- ${ret_line}
678                         shift
679                         line="$*"
680                 else
681                         line="${ret_line}"
682                 fi
683
684                 if [ "${line}" != "${line#*ipaddress = *}" ]; then
685                         srvaddr=`echo ${line}|awk '{print $3}'`
686                         if [ -z "${cluman_srvaddr}" ]; then
687                                 cluman_srvaddr=${srvaddr}                       
688                         else
689                                 cluman_srvaddr=${cluman_srvaddr}:${srvaddr}
690                         fi
691                 fi
692         done < <(${REMOTE} ${host_name} "${CONFIG_CMD} \
693                 --service=${target_svname} --service_ipaddresses")
694
695         if [ -z "${cluman_srvaddr}" ]; then
696                 echo "`basename $0`: get_cluman_srvaddr() error: Cannot" \
697                 "get the service IP addresses of ${target_svname} in" \
698                 "${host_name}! Check ${CONFIG_CMD} command!"
699                 return 1
700         fi
701
702         echo ${cluman_srvaddr}
703         return 0
704 }
705
706 # get_cluman_configs hostname
707 # Get the CluManager configurations from the node @hostname
708 get_cluman_configs() {
709         local host_name=$1
710         local ret_str
711         declare -i i
712
713         unset HA_CONFIGS
714
715         # Execute remote command to get the configs of CluManager
716         for ((i = 0; i < ${#TARGET_DEVNAMES[@]}; i++)); do
717                 HB_CHANNELS=
718                 SRV_IPADDRS=
719                 HB_OPTIONS=
720                 [ -z "${TARGET_DEVNAMES[i]}" ] && continue
721
722                 # Execute remote command to check whether this target service 
723                 # was made to be high-available
724                 ! is_ha_target ${host_name} ${TARGET_DEVNAMES[i]} && continue
725
726                 # Execute remote command to get Heartbeat channel
727                 HB_CHANNELS=$(get_cluman_channel ${host_name})
728                 if [ $? -ne 0 ]; then
729                         echo >&2 "${HB_CHANNELS}"
730                 fi
731
732                 # Execute remote command to get service IP address 
733                 SRV_IPADDRS=$(get_cluman_srvaddr ${host_name} \
734                               ${TARGET_SVNAMES[i]})
735                 if [ $? -ne 0 ]; then
736                         echo >&2 "${SRV_IPADDRS}"
737                         return 0
738                 fi
739
740                 HA_CONFIGS[i]=${HB_CHANNELS},${SRV_IPADDRS},${HB_OPTIONS}
741         done
742
743         return 0
744 }
745
746 # get_ha_configs hostname
747 # Get the HA software configurations from the node @hostname
748 get_ha_configs() {
749         local host_name=$1
750
751         unset HA_CONFIGS
752
753         if [ -z "${HATYPE_OPT}" ]; then
754                 return 0
755         fi
756
757         verbose_output "Collecting HA software configurations from host $1..."
758
759         case "${HATYPE_OPT}" in
760         "${HBVER_HBV1}" | "${HBVER_HBV2}") # Heartbeat
761                 if ! get_hb_configs ${host_name}; then
762                         return 1
763                 fi
764                 ;;
765         "${HATYPE_CLUMGR}") # CluManager
766                 if ! get_cluman_configs ${host_name}; then
767                         return 1
768                 fi
769                 ;;
770         esac
771
772         return 0
773 }
774
775 #*********************** Lustre targets configurations ***********************#
776
777 # is_failover_service target_svname
778 # Check whether a target service @target_svname is a failover service.
779 is_failover_service() {
780         local target_svname=$1
781         declare -i i
782
783         for ((i = 0; i < ${#ALL_TARGET_SVNAMES[@]}; i++)); do
784                 [ "${target_svname}" = "${ALL_TARGET_SVNAMES[i]}" ] && return 0
785         done
786
787         return 1
788 }
789
790 # get_svnames hostname
791 # Get the lustre target server obd names from the node @hostname
792 get_svnames(){
793         declare -i i
794         declare -i j
795         local host_name=$1
796         local ret_line line
797
798         # Initialize the TARGET_SVNAMES array
799         unset TARGET_SVNAMES
800         unset FAILOVER_FMTOPTS
801         
802         # Execute remote command to the node @hostname and figure out what
803         # lustre services are running.
804         i=0
805         j=${#ALL_TARGET_SVNAMES[@]}
806         while read -r ret_line; do
807                 if is_pdsh; then
808                         set -- ${ret_line}
809                         shift
810                         line="$*"
811                 else
812                         line="${ret_line}"
813                 fi
814
815                 if [ -z "`echo ${line} | grep ${MGS_TYPE}`" ] \
816                 && [ -z "`echo ${line} | grep ${MDT_TYPE}`" ] \
817                 && [ -z "`echo ${line} | grep ${OST_TYPE}`" ]; then
818                         continue
819                 fi
820
821                 # Get target server name
822                 TARGET_SVNAMES[i]=`echo ${line} | awk '{print $4}'`
823                 if [ -n "${TARGET_SVNAMES[i]}" ]; then
824                         if is_failover_service ${TARGET_SVNAMES[i]}; then
825                                 FAILOVER_FMTOPTS[i]="--noformat"
826                         fi
827                         ALL_TARGET_SVNAMES[j]=${TARGET_SVNAMES[i]}
828                         let "i += 1"
829                         let "j += 1"
830                 else
831                         echo >&2 "`basename $0`: get_svnames() error: Invalid"\
832                               "line in ${host_name}'s ${LUSTRE_PROC_DEVICES}"\
833                               "- \"${line}\"!"
834                         return 1
835                 fi
836         done < <(${REMOTE} ${host_name} "cat ${LUSTRE_PROC_DEVICES}")
837
838         if [ $i -eq 0 ]; then
839                 verbose_output "There are no lustre services running" \
840                                "on the node ${host_name}!"
841         fi
842
843         return 0
844 }
845
846 # is_loopdev devname
847 # Check whether a device @devname is a loop device or not
848 is_loopdev() {
849         local devname=$1
850
851         if [ -z "${devname}" ] || \
852         [ -z "`echo ${devname}|awk '/\/dev\/loop[[:digit:]]/ {print $0}'`" ]
853         then
854                 return 1
855         fi
856
857         return 0
858 }
859
860 # get_devname hostname svname
861 # Get the device name of lustre target @svname from node @hostname
862 get_devname() {
863         local host_name=$1
864         local target_svname=$2
865         local target_devname=
866         local ret_str
867         local target_type target_obdtype mntdev_file
868
869         if [ "${target_svname}" = "${MGS_SVNAME}" ]; then
870                 # Execute remote command to get the device name of mgs target
871                 ret_str=`${REMOTE} ${host_name} \
872                         "/sbin/findfs LABEL=${target_svname}" 2>&1`
873                 if [ $? -ne 0 -a -n "${ret_str}" ]; then
874                         if [ "${ret_str}" = "${ret_str#*Unable to resolve*}" ]
875                         then
876                                 echo "`basename $0`: get_devname() error:" \
877                                      "remote command error: ${ret_str}"
878                                 return 1
879                         fi
880                 fi
881
882                 if [ "${ret_str}" = "${ret_str#*Unable to resolve*}" ]; then
883                         if is_pdsh; then
884                                 target_devname=`echo ${ret_str} | awk '{print $2}'`
885                         else
886                                 target_devname=`echo ${ret_str} | awk '{print $1}'`
887                         fi
888                 fi
889         else    # Execute remote command to get the device name of mdt/ost target
890                 target_type=`echo ${target_svname} | cut -d - -f 2`
891                 target_obdtype=${target_type:0:3}_TYPE
892                 
893                 mntdev_file=${LUSTRE_PROC}/${!target_obdtype}/${target_svname}/mntdev
894
895                 ret_str=`${REMOTE} ${host_name} "cat ${mntdev_file}" 2>&1`
896                 if [ $? -ne 0 -a -n "${ret_str}" ]; then
897                         echo "`basename $0`: get_devname() error:" \
898                              "remote command error: ${ret_str}"
899                         return 1
900                 fi
901
902                 if [ "${ret_str}" != "${ret_str#*No such file*}" ]; then
903                         echo "`basename $0`: get_devname() error:"\
904                              "${mntdev_file} does not exist in ${host_name}!"
905                         return 1
906                 else
907                         if is_pdsh; then
908                                 target_devname=`echo ${ret_str} | awk '{print $2}'`
909                         else
910                                 target_devname=`echo ${ret_str} | awk '{print $1}'`
911                         fi
912                 fi
913         fi
914
915         echo ${target_devname}
916         return 0
917 }
918
919 # get_devsize hostname target_devname 
920 # Get the device size (KB) of @target_devname from node @hostname
921 get_devsize() {
922         local host_name=$1
923         local target_devname=$2
924         local target_devsize=
925         local ret_str
926
927         # Execute remote command to get the device size
928         ret_str=`${REMOTE} ${host_name} \
929                 "/sbin/blockdev --getsize ${target_devname}" 2>&1`
930         if [ $? -ne 0 -a -n "${ret_str}" ]; then
931                 echo "`basename $0`: get_devsize() error:" \
932                      "remote command error: ${ret_str}"
933                 return 1
934         fi
935
936         if is_pdsh; then
937                 target_devsize=`echo ${ret_str} | awk '{print $2}'`
938         else
939                 target_devsize=`echo ${ret_str} | awk '{print $1}'`
940         fi
941         
942         if [ -z "`echo ${target_devsize}|awk '/^[[:digit:]]/ {print $0}'`" ]
943         then
944                 echo "`basename $0`: get_devsize() error: can't" \
945                 "get device size of ${target_devname} in ${host_name}!"
946                 return 1
947         fi
948
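        # blockdev --getsize reports the size in 512-byte sectors; halve it
        # to get KB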
949         let " target_devsize /= 2"
950
951         echo ${target_devsize}
952         return 0
953 }
954
955 # get_realdevname hostname loop_dev
956 # Get the real device name of loop device @loop_dev from node @hostname
957 get_realdevname() {
958         local host_name=$1
959         local loop_dev=$2
960         local target_devname=
961         local ret_str
962
963         # Execute remote command to get the real device name
964         ret_str=`${REMOTE} ${host_name} \
965                 "/sbin/losetup ${loop_dev}" 2>&1`
966         if [ $? -ne 0 -a -n "${ret_str}" ]; then
967                 echo "`basename $0`: get_realdevname() error:" \
968                      "remote command error: ${ret_str}"
969                 return 1
970         fi
971
972         if is_pdsh; then
973                 target_devname=`echo ${ret_str} | awk '{print $4}' \
974                                 | sed 's/^(//' | sed 's/)$//'`
975         else
976                 target_devname=`echo ${ret_str} | awk '{print $3}' \
977                                 | sed 's/^(//' | sed 's/)$//'`
978         fi
979
980         if [ "${ret_str}" != "${ret_str#*No such*}" ] \
981         || [ -z "${target_devname}" ]; then
982                 echo "`basename $0`: get_realdevname() error: can't" \
983                 "get info on device ${loop_dev} in ${host_name}!"
984                 return 1
985         fi
986
987         echo ${target_devname}
988         return 0
989 }
990
991 # get_mntpnt hostname target_devname
992 # Get the lustre target mount point from the node @hostname
993 get_mntpnt(){
994         local host_name=$1
995         local target_devname=$2
996         local mnt_point=
997         local ret_str
998
999         # Execute remote command to get the mount point
1000         ret_str=`${REMOTE} ${host_name} \
1001                 "cat /etc/mtab | grep ${target_devname}" 2>&1`
1002         if [ $? -ne 0 -a -n "${ret_str}" ]; then
1003                 echo "`basename $0`: get_mntpnt() error:" \
1004                      "remote command error: ${ret_str}"
1005                 return 1
1006         fi
1007
1008         if is_pdsh; then
1009                 mnt_point=`echo ${ret_str} | awk '{print $3}'`
1010         else
1011                 mnt_point=`echo ${ret_str} | awk '{print $2}'`
1012         fi
1013         
1014         if [ -z "${mnt_point}" ]; then
1015                 echo "`basename $0`: get_mntpnt() error: can't" \
1016                 "get the mount point of ${target_devname} in ${host_name}!"
1017                 return 1
1018         fi
1019
1020         echo ${mnt_point}
1021         return 0
1022 }
1023
1024 # get_devnames hostname
1025 # Get the lustre target device names, mount points
1026 # and loop device sizes from the node @hostname
1027 get_devnames(){
1028         declare -i i
1029         local host_name=$1
1030         local ret_line line
1031
1032         # Initialize the arrays
1033         unset TARGET_DEVNAMES
1034         unset TARGET_DEVSIZES
1035         unset TARGET_MNTPNTS
1036
1037         for ((i = 0; i < ${#TARGET_SVNAMES[@]}; i++)); do
1038                 TARGET_DEVNAMES[i]=$(get_devname ${host_name} \
1039                                      ${TARGET_SVNAMES[i]})
1040                 if [ $? -ne 0 ]; then
1041                         echo >&2 "${TARGET_DEVNAMES[i]}"
1042                         return 1
1043                 fi
1044
1045                 if [ -z "${TARGET_DEVNAMES[i]}" ]; then
1046                         if [ "${TARGET_SVNAMES[i]}" = "${MGS_SVNAME}" ]; then
1047                                 verbose_output "Found a combined mgs/mdt" \
1048                                                "target in ${host_name}."
1049                                 continue
1050                         else
1051                                 echo >&2 "`basename $0`: get_devname() error:"\
1052                                          "No device corresponding to target" \
1053                                          "${TARGET_SVNAMES[i]} in ${host_name}!"
1054                                 return 1
1055                         fi
1056                 fi
1057
1058                 # Get the mount point of the target
1059                 TARGET_MNTPNTS[i]=$(get_mntpnt ${host_name} \
1060                                      ${TARGET_DEVNAMES[i]})
1061                 if [ $? -ne 0 ]; then
1062                         echo >&2 "${TARGET_MNTPNTS[i]}"
1063                         return 1
1064                 fi
1065
1066                 # The target device is a loop device?
1067                 if [ -n "${TARGET_DEVNAMES[i]}" ] \
1068                 && is_loopdev ${TARGET_DEVNAMES[i]}; then 
1069                         # Get the device size
1070                         TARGET_DEVSIZES[i]=$(get_devsize ${host_name} \
1071                                              ${TARGET_DEVNAMES[i]})
1072                         if [ $? -ne 0 ]; then
1073                                 echo >&2 "${TARGET_DEVSIZES[i]}"
1074                                 return 1
1075                         fi
1076
1077                         # Get the real device name
1078                         TARGET_DEVNAMES[i]=$(get_realdevname ${host_name} \
1079                                              ${TARGET_DEVNAMES[i]})
1080                         if [ $? -ne 0 ]; then
1081                                 echo >&2 "${TARGET_DEVNAMES[i]}"
1082                                 return 1
1083                         fi
1084                 fi
1085         done
1086
1087         return 0
1088 }
1089
1090 # is_target target_svtype ldd_flags
1091 # Check the service type of a lustre target
1092 is_target() {
1093         case "$1" in
1094         "mdt") let "ret = $2 & LDD_F_SV_TYPE_MDT";;
1095         "ost") let "ret = $2 & LDD_F_SV_TYPE_OST";;
1096         "mgs") let "ret = $2 & LDD_F_SV_TYPE_MGS";;
1097         *)
1098                 echo >&2 "`basename $0`: is_target() error: Invalid" \
1099                 "target service type - \"$1\"!"
1100                 return 1
1101                 ;;
1102         esac
1103
1104         if [ ${ret} -eq 0 ]; then
1105                 return 1
1106         fi
1107
1108         return 0
1109 }
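# For example (illustrative), a combined mgs/mdt target has
# ldd_flags = 0x5 (LDD_F_SV_TYPE_MGS | LDD_F_SV_TYPE_MDT), so both
# 'is_target "mgs" 0x5' and 'is_target "mdt" 0x5' return 0 (true).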
1110
1111 # get_devtype ldd_flags
1112 # Get the service type of a lustre target from @ldd_flags
1113 get_devtype() {
1114         local target_devtype=
1115
1116         if [ -z "$1" ]; then
1117                 echo "`basename $0`: get_devtype() error: Invalid" \
1118                         "ldd_flags - its value is null!"
1119                 return 1
1120         fi
1121
1122         if is_target "mgs" $1; then
1123                 if is_target "mdt" $1; then
1124                         target_devtype="mgs|mdt"
1125                 else
1126                         target_devtype="mgs"
1127                 fi
1128         elif is_target "mdt" $1; then
1129                 target_devtype="mdt"
1130         elif is_target "ost" $1; then
1131                 target_devtype="ost"
1132         else
1133                 echo "`basename $0`: get_devtype() error: Invalid" \
1134                 "ldd_flags - \"$1\"!"
1135                 return 1
1136         fi
1137
1138         echo ${target_devtype}
1139         return 0
1140 }
1141
1142 # get_mntopts ldd_mount_opts
1143 # Get the user-specified lustre target mount options from @ldd_mount_opts
1144 get_mntopts() {
1145         local mount_opts=
1146         local ldd_mount_opts=$1
1147
1148         mount_opts="${ldd_mount_opts#${ALWAYS_MNTOPTS}}"
1149         mount_opts="${mount_opts#${MDT_MGS_ALWAYS_MNTOPTS}}"
1150         mount_opts="${mount_opts#${OST_ALWAYS_MNTOPTS}}"
1151         mount_opts="${mount_opts#${OST_DEFAULT_MNTOPTS}}"
1152         mount_opts="`echo \"${mount_opts}\" | sed 's/^,//'`"
1153
1154         [ "${mount_opts}" != "${mount_opts#*,*}" ] && echo "\""${mount_opts}"\"" \
1155         || echo ${mount_opts}
1156
1157         return 0
1158 }
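# For example (illustrative), an MDT whose ldd_mount_opts are
# "errors=remount-ro,iopen_nopriv,user_xattr,acl" has the always/default
# options stripped above, so get_mntopts echoes just "acl".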
1159
1160 # get_mgsnids ldd_params
1161 # Get the mgs nids of lustre target from @ldd_params
1162 get_mgsnids() {
1163         local mgs_nids=         # mgs nids in one mgs node
1164         local all_mgs_nids=     # mgs nids in all mgs failover nodes
1165         local param=
1166         local ldd_params="$*"
1167
1168         for param in ${ldd_params}; do
1169                 if [ -n "`echo ${param}|awk '/mgsnode=/ {print $0}'`" ]; then
1170                         mgs_nids=`echo ${param#${PARAM_MGSNODE}}`
1171
1172                         if [ -n "${all_mgs_nids}" ]; then
1173                                 all_mgs_nids=${all_mgs_nids}:${mgs_nids}
1174                         else
1175                                 all_mgs_nids=${mgs_nids}
1176                         fi
1177                 fi
1178         done
1179
1180         [ "${all_mgs_nids}" != "${all_mgs_nids#*,*}" ] \
1181         && echo "\""${all_mgs_nids}"\"" || echo ${all_mgs_nids}
1182
1183         return 0
1184 }
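# For example (illustrative), ldd_params containing
# "mgsnode=10.0.0.1@tcp mgsnode=10.0.0.2@tcp" yields
# "10.0.0.1@tcp:10.0.0.2@tcp" (nids of different mgs failover nodes are
# joined with ':'); a result containing ',' is quoted.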
1185
1186 # get_failnids ldd_params
1187 # Get the failover nids of lustre target from @ldd_params
1188 get_failnids() {
1189         local fail_nids=        # failover nids in one failover node
1190         local all_fail_nids=    # failover nids in all failover nodes
1191                                 # of this target
1192         local param=
1193         local ldd_params="$*"
1194
1195         for param in ${ldd_params}; do
1196                 if [ -n "`echo ${param}|awk '/failover.node=/ {print $0}'`" ]; then
1197                         fail_nids=`echo ${param#${PARAM_FAILNODE}}`
1198
1199                         if [ -n "${all_fail_nids}" ]; then
1200                                 all_fail_nids=${all_fail_nids}:${fail_nids}
1201                         else
1202                                 all_fail_nids=${fail_nids}
1203                         fi
1204                 fi
1205         done
1206
1207         [ "${all_fail_nids}" != "${all_fail_nids#*,*}" ] \
1208         && echo "\""${all_fail_nids}"\"" || echo ${all_fail_nids}
1209
1210         return 0
1211 }
1212
1213 # get_fmtopts target_devname hostname ldd_params
1214 # Get other format options of the lustre target @target_devname from @ldd_params
1215 get_fmtopts() {
1216         local target_devname=$1
1217         local host_name=$2
1218         shift
1219         shift
1220         local ldd_params="$*"
1221         local param= 
1222         local fmt_opts=
1223
1224         for param in ${ldd_params}; do
1225                 [ -n "`echo ${param}|awk '/mgsnode=/ {print $0}'`" ] && continue
1226                 [ -n "`echo ${param}|awk '/failover.node=/ {print $0}'`" ] && continue
1227
1228                 if [ -n "${param}" ]; then
1229                         if [ -n "${fmt_opts}" ]; then
1230                                 fmt_opts=${fmt_opts}" --param=\""${param}"\""
1231                         else
1232                                 fmt_opts="--param=\""${param}"\""
1233                         fi
1234                 fi
1235         done
1236
1237         echo ${fmt_opts}
1238         return 0
1239 }
1240
1241 # get_stripecount host_name target_fsname
1242 # Get the stripe count for @target_fsname
1243 get_stripecount() {
1244         local host_name=$1
1245         local target_fsname=$2
1246         local stripe_count=
1247         local stripecount_file
1248         local ret_str
1249
1250         # Get the stripe count
1251         stripecount_file=${LUSTRE_PROC}/lov/${target_fsname}-mdtlov/stripecount
1252         ret_str=`${REMOTE} ${host_name} "cat ${stripecount_file}" 2>&1`
1253         if [ $? -ne 0 -a -n "${ret_str}" ]; then
1254                 echo "`basename $0`: get_stripecount() error:" \
1255                 "remote command to ${host_name} error: ${ret_str}"
1256                 return 1
1257         fi
1258
1259         if is_pdsh; then
1260                 stripe_count=`echo ${ret_str} | awk '{print $2}'`
1261         else
1262                 stripe_count=`echo ${ret_str} | awk '{print $1}'`
1263         fi
1264
1265         if [ -z "`echo ${stripe_count}|awk '/^[[:digit:]]/ {print $0}'`" ]
1266         then
1267                 echo "`basename $0`: get_stripecount() error: can't" \
1268                 "get stripe count of ${target_fsname} in ${host_name}!"
1269                 return 1
1270         fi
1271
1272         echo ${stripe_count}
1273         return 0
1274 }
1275
1276 # get_stripecount_opt host_name target_fsname
1277 # Get the stripe count option for lustre mdt target
1278 get_stripecount_opt() {
1279         local host_name=$1
1280         local target_fsname=$2
1281         local stripe_count=
1282         local stripecount_opt=
1283
1284         # Get the stripe count
1285         [ -z "${target_fsname}" ] && target_fsname="lustre"
1286         stripe_count=$(get_stripecount ${host_name} ${target_fsname})
1287         if [ $? -ne 0 ]; then
1288                 echo "${stripe_count}"
1289                 return 1
1290         fi
1291         
1292         if [ "${stripe_count}" != "1" ]; then
1293                 stripecount_opt=${OPTSTR_STRIPE_COUNT}${stripe_count}
1294         fi
1295
1296         echo ${stripecount_opt}
1297         return 0
1298 }
1299
1300 # get_ldds hostname
1301 # Get the lustre target disk data from the node @hostname
1302 get_ldds(){
1303         declare -i i
1304         local host_name=$1
1305         local ret_line line
1306         local flags mnt_opts params
1307         local stripecount_opt
1308
1309         # Initialize the arrays
1310         unset TARGET_DEVTYPES TARGET_FSNAMES TARGET_MGSNIDS TARGET_INDEXES
1311         unset TARGET_FMTOPTS  TARGET_MNTOPTS TARGET_FAILNIDS
1312         
1313         # Get lustre target device type, fsname, index, etc. from
1314         # MOUNT_DATA_FILE, using tunefs.lustre to read it.
1315         for ((i = 0; i < ${#TARGET_DEVNAMES[@]}; i++)); do
1316                 flags=
1317                 mnt_opts=
1318                 params=
1319                 stripecount_opt=
1320                 [ -z "${TARGET_DEVNAMES[i]}" ] && continue
1321
1322                 # Execute remote command to read MOUNT_DATA_FILE
1323                 while read -r ret_line; do
1324                         if is_pdsh; then
1325                                 set -- ${ret_line}
1326                                 shift
1327                                 line="$*"
1328                         else
1329                                 line="${ret_line}"
1330                         fi
1331
1332                         if [ -n "`echo ${line}|awk '/Index:/ {print $0}'`" ]; then
1333                                 TARGET_INDEXES[i]=`echo ${line}|awk '{print $2}'`
1334                                 continue
1335                         fi
1336
1337                         if [ -n "`echo ${line}|awk '/Lustre FS:/ {print $0}'`" ]; then
1338                                 TARGET_FSNAMES[i]=`echo ${line}|awk '{print $3}'`
1339                                 continue
1340                         fi
1341                         
1342                         if [ -n "`echo ${line}|awk '/Flags:/ {print $0}'`" ]; then
1343                                 flags=`echo ${line}|awk '{print $2}'`
1344                                 continue
1345                         fi
1346
1347                         if [ -n "`echo ${line}|awk '/Persistent mount opts:/ {print $0}'`" ]; then
1348                                 mnt_opts=`echo ${line}|awk '{print $0}'`
1349                                 mnt_opts=`echo ${mnt_opts#Persistent mount opts: }`
1350                                 continue
1351                         fi
1352
1353                         if [ -n "`echo ${line}|awk '/Parameters:/ {print $0}'`" ]; then
1354                                 params=`echo ${line}|awk '{print $0}'`
1355                                 params=`echo ${params#Parameters:}`
1356                                 break
1357                         fi
1358                 done < <(${REMOTE} ${host_name} "${TUNEFS} --print --verbose ${TARGET_DEVNAMES[i]} 2>/dev/null")
1359
1360                 if [ -z "${flags}" ]; then
1361                         echo >&2 "`basename $0`: get_ldds() error: Invalid" \
1362                                  "ldd_flags of target ${TARGET_DEVNAMES[i]}" \
1363                                  "in host ${host_name} - its value is null!"\
1364                                  "Check ${TUNEFS} command!"
1365                         return 1
1366                 fi
1367                 
1368                 if [ "${TARGET_INDEXES[i]}" = "unassigned" ] \
1369                 || is_target "mgs" ${flags}; then
1370                         TARGET_INDEXES[i]=
1371                 fi
1372
1373                 [ "${TARGET_FSNAMES[i]}" = "lustre" ] && TARGET_FSNAMES[i]=
1374
1375                 # Get the lustre target service type
1376                 TARGET_DEVTYPES[i]=$(get_devtype ${flags})
1377                 if [ $? -ne 0 ]; then
1378                         echo >&2 "${TARGET_DEVTYPES[i]} From device" \
1379                         "${TARGET_DEVNAMES[i]} in host ${host_name}!"
1380                         return 1
1381                 fi
1382
1383                 # Get the lustre target mount options
1384                 TARGET_MNTOPTS[i]=$(get_mntopts "${mnt_opts}")
1385
1386                 # Get mgs nids of the lustre target
1387                 TARGET_MGSNIDS[i]=$(get_mgsnids "${params}")
1388
1389                 # Get failover nids of the lustre target
1390                 TARGET_FAILNIDS[i]=$(get_failnids "${params}")
1391                 if [ $? -ne 0 ]; then
1392                         echo >&2 "${TARGET_FAILNIDS[i]} From device" \
1393                         "${TARGET_DEVNAMES[i]} in host ${host_name}!"
1394                         return 1
1395                 fi
1396
1397                 # Get other format options of the lustre target
1398                 TARGET_FMTOPTS[i]=$(get_fmtopts ${TARGET_DEVNAMES[i]} ${host_name} "${params}")
1399                 if [ $? -ne 0 ]; then
1400                         echo >&2 "${TARGET_FMTOPTS[i]}"
1401                         return 1
1402                 fi
1403
1404                 if [ -n "${TARGET_DEVSIZES[i]}" ]; then
1405                         if [ -n "${TARGET_FMTOPTS[i]}" ]; then
1406                                 TARGET_FMTOPTS[i]="--device-size=${TARGET_DEVSIZES[i]} ""${TARGET_FMTOPTS[i]}"
1407                         else
1408                                 TARGET_FMTOPTS[i]="--device-size=${TARGET_DEVSIZES[i]}"
1409                         fi
1410                 fi
1411
1412                 if [ -n "${FAILOVER_FMTOPTS[i]}" ]; then
1413                         if [ -n "${TARGET_FMTOPTS[i]}" ]; then
1414                                 TARGET_FMTOPTS[i]=${TARGET_FMTOPTS[i]}" "${FAILOVER_FMTOPTS[i]}
1415                         else
1416                                 TARGET_FMTOPTS[i]=${FAILOVER_FMTOPTS[i]}
1417                         fi
1418                 fi
1419
1420                 if is_target "mdt" ${flags}; then
1421                         # Get the stripe count option
1422                         stripecount_opt=$(get_stripecount_opt ${host_name} ${TARGET_FSNAMES[i]})
1423                         if [ $? -ne 0 ]; then
1424                                 echo >&2 "${stripecount_opt}"
1425                                 return 1
1426                         fi
1427
1428                         if [ -n "${stripecount_opt}" ]; then
1429                                 if [ -n "${TARGET_FMTOPTS[i]}" ]; then
1430                                         TARGET_FMTOPTS[i]=${TARGET_FMTOPTS[i]}" "${stripecount_opt}
1431                                 else
1432                                         TARGET_FMTOPTS[i]=${stripecount_opt}
1433                                 fi
1434                         fi
1435                 fi
1436
1437                 if [ "${TARGET_FMTOPTS[i]}" != "${TARGET_FMTOPTS[i]#*,*}" ]; then
1438                         TARGET_FMTOPTS[i]="\""${TARGET_FMTOPTS[i]}"\""
1439                 fi
1440         done
1441
1442         return 0
1443 }
1444
1445 # get_journalsize target_devname hostname
1446 # Get the journal size of lustre target @target_devname from @hostname
1447 get_journalsize() {
1448         local target_devname=$1
1449         local host_name=$2
1450         local journal_inode= 
1451         local journal_size=
1452         local ret_str
1453
1454         # Execute remote command to get the journal inode number
1455         ret_str=`${REMOTE} ${host_name} "/sbin/debugfs -R 'stats -h' \
1456                  ${target_devname} | grep 'Journal inode:'" 2>&1`
1457         if [ $? -ne 0 -a -n "${ret_str}" ]; then
1458                 echo "`basename $0`: get_journalsize() error:" \
1459                      "remote command error: ${ret_str}"
1460                 return 1
1461         fi
1462
1463         ret_str=${ret_str#${ret_str%Journal inode:*}}
1464         journal_inode=`echo ${ret_str} | awk '{print $3}'`
1465         if [ -z "`echo ${journal_inode}|awk '/^[[:digit:]]/ {print $0}'`" ]
1466         then
1467                 echo "`basename $0`: get_journalsize() error: can't" \
1468                 "get journal inode of ${target_devname} in ${host_name}!"
1469                 return 1
1470         fi
1471
1472         # Execute remote command to get the journal size
1473         ret_str=`${REMOTE} ${host_name} "/sbin/debugfs -R \
1474                 'stat <${journal_inode}>' ${target_devname}|grep '^User:'" 2>&1`
1475         if [ $? -ne 0 -a -n "${ret_str}" ]; then
1476                 echo "`basename $0`: get_journalsize() error:" \
1477                      "remote command error: ${ret_str}"
1478                 return 1
1479         fi
1480
1481         ret_str=${ret_str#${ret_str%User:*}}
1482         journal_size=`echo ${ret_str} | awk '{print $6}'`
1483         if [ -z "`echo ${journal_size}|awk '/^[[:digit:]]/ {print $0}'`" ]
1484         then
1485                 echo "`basename $0`: get_journalsize() error: can't" \
1486                 "get journal size of ${target_devname} in ${host_name}!"
1487                 return 1
1488         fi
1489
1490         let "journal_size /= 1024*1024" # MB
1491
1492         echo ${journal_size}
1493         return 0
1494 }
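# Worked example (hypothetical debugfs output, for illustration only):
#   "Journal inode:            8"                    -> journal_inode=8
#   "User:     0   Group:     0   Size: 134217728"   -> journal_size=134217728 bytes
# which the division above reduces to 128 (MB), the value echoed on success.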
1495
1496 # get_defaultjournalsize target_devsize
1497 # Calculate the default journal size from target device size @target_devsize
1498 get_defaultjournalsize() {
1499         declare -i target_devsize=$1
1500         declare -i journal_size=0 
1501         declare -i max_size base_size 
1502
1503         let "base_size = 1024*1024"
1504         if [ ${target_devsize} -gt ${base_size} ]; then  # 1GB
1505                 let "journal_size = target_devsize / 102400"
1506                 let "journal_size *= 4"
1507         fi
1508
1509         let "max_size = 102400 * L_BLOCK_SIZE"
1510         let "max_size >>= 20" # 400MB
1511
1512         if [ ${journal_size} -gt ${max_size} ]; then
1513                 let "journal_size = max_size"
1514         fi
1515
1516         echo ${journal_size}
1517         return 0
1518 }
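# Worked example (assumes the device size is given in KB and that
# L_BLOCK_SIZE, defined elsewhere, is 4096):
#   target_devsize=2097152 (2 GB)  -> 2097152/102400 = 20, *4 = 80 (MB)
#   max_size = 102400*4096 >> 20   -> 400 (MB) cap
# A device of 1 GB or less leaves journal_size at 0, which makes get_J_opt
# below fall back to figure_journal_size().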
1519
1520 # figure_journal_size target_devname hostname
1521 # Find a reasonable journal file size given the number of blocks 
1522 # in the filesystem. This algorithm is derived from figure_journal_size()
1523 # function in util.c of e2fsprogs-1.38.cfs2-1.src.rpm.
1524 figure_journal_size() {
1525         local target_devname=$1
1526         local host_name=$2
1527         local ret_str
1528         declare -i block_count
1529         declare -i journal_blocks
1530         declare -i journal_size
1531
1532         # Execute remote command to get the block count 
1533         ret_str=`${REMOTE} ${host_name} "/sbin/debugfs -R 'stats -h' \
1534                  ${target_devname} | grep 'Block count:'" 2>&1`
1535         if [ $? -ne 0 -a -n "${ret_str}" ]; then
1536                 echo "`basename $0`: figure_journal_size() error:" \
1537                      "remote command error: ${ret_str}"
1538                 return 1
1539         fi
1540
1541         ret_str=${ret_str#${ret_str%Block count:*}}
1542         block_count=`echo ${ret_str} | awk '{print $3}'`
1543         if [ -z "`echo ${block_count}|awk '/^[[:digit:]]/ {print $0}'`" ]
1544         then
1545                 echo "`basename $0`: figure_journal_size() error: can't" \
1546                 "get block count of ${target_devname} in ${host_name}!"
1547                 return 1
1548         fi
1549
1550         if ((block_count < 32768)); then
1551                 let "journal_blocks = 1024"
1552         elif ((block_count < 256*1024)); then
1553                 let "journal_blocks = 4096"
1554         elif ((block_count < 512*1024)); then
1555                 let "journal_blocks = 8192"
1556         elif ((block_count < 1024*1024)); then
1557                 let "journal_blocks = 16384"
1558         else
1559                 let "journal_blocks = 32768"
1560         fi
1561
1562         let "journal_size = journal_blocks * L_BLOCK_SIZE / 1048576"
1563
1564         echo ${journal_size}
1565         return 0
1566 }
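# Worked example (assuming L_BLOCK_SIZE is 4096):
#   block_count=700000 falls in the 512K..1M range -> journal_blocks=16384
#   journal_size = 16384*4096/1048576 = 64 (MB)
# With the same block size the five branches give 4, 16, 32, 64 and 128 MB.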
1567
1568 # get_J_opt hostname target_devname target_devsize
1569 # Get the mkfs -J option of lustre target @target_devname 
1570 # from the node @hostname
1571 get_J_opt() {
1572         local host_name=$1
1573         local target_devname=$2
1574         local target_devsize=$3
1575         local journal_size=
1576         local default_journal_size=
1577         local journal_opt=
1578
1579         # Get the real journal size of lustre target
1580         journal_size=$(get_journalsize ${target_devname} ${host_name})
1581         if [ $? -ne 0 ]; then
1582                 echo "${journal_size}"
1583                 return 1
1584         fi
1585
1586         # Get the default journal size of lustre target
1587         default_journal_size=$(get_defaultjournalsize ${target_devsize})
1588         if [ "${default_journal_size}" = "0" ]; then
1589                 default_journal_size=$(figure_journal_size ${target_devname} \
1590                                        ${host_name})
1591                 if [ $? -ne 0 ]; then
1592                         echo "${default_journal_size}"
1593                         return 1
1594                 fi
1595         fi
1596
1597         if [ "${journal_size}" != "${default_journal_size}" ]; then
1598                 journal_opt="-J size=${journal_size}"
1599         fi
1600                 
1601         echo ${journal_opt}
1602         return 0
1603 }
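# Usage sketch (hypothetical host and device names):
#   journal_opt=$(get_J_opt oss01 /dev/sdb1 2097152)
# prints "-J size=<MB>" only when the journal size found on the device
# differs from the calculated default; an empty result means no -J option
# needs to be recorded in the csv file.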
1604
1605 # get_ratio target_devname hostname
1606 # Get the bytes/inode ratio of lustre target @target_devname from @hostname
1607 get_ratio() {
1608         local target_devname=$1
1609         local host_name=$2
1610         local inode_count= 
1611         local block_count=
1612         local ratio=
1613         local ret_str
1614
1615         # Execute remote command to get the inode count
1616         ret_str=`${REMOTE} ${host_name} "/sbin/debugfs -R 'stats -h' \
1617                  ${target_devname} | grep 'Inode count:'" 2>&1`
1618         if [ $? -ne 0 -a -n "${ret_str}" ]; then
1619                 echo "`basename $0`: get_ratio() error:" \
1620                      "remote command error: ${ret_str}"
1621                 return 1
1622         fi
1623
1624         ret_str=${ret_str#${ret_str%Inode count:*}}
1625         inode_count=`echo ${ret_str} | awk '{print $3}'`
1626         if [ -z "`echo ${inode_count}|awk '/^[[:digit:]]/ {print $0}'`" ]
1627         then
1628                 echo "`basename $0`: get_ratio() error: can't" \
1629                 "get inode count of ${target_devname} in ${host_name}!"
1630                 return 1
1631         fi
1632
1633         # Execute remote command to get the block count
1634         ret_str=`${REMOTE} ${host_name} "/sbin/debugfs -R 'stats -h' \
1635                  ${target_devname} | grep 'Block count:'" 2>&1`
1636         if [ $? -ne 0 -a -n "${ret_str}" ]; then
1637                 echo "`basename $0`: get_ratio() error:" \
1638                      "remote command error: ${ret_str}"
1639                 return 1
1640         fi
1641
1642         ret_str=${ret_str#${ret_str%Block count:*}}
1643         block_count=`echo ${ret_str} | awk '{print $3}'`
1644         if [ -z "`echo ${block_count}|awk '/^[[:digit:]]/ {print $0}'`" ]
1645         then
1646                 echo "`basename $0`: get_ratio() error: can't" \
1647                 "get block count of ${target_devname} in ${host_name}!"
1648                 return 1
1649         fi
1650
1651         let "ratio = block_count*L_BLOCK_SIZE/inode_count"
1652
1653         echo ${ratio}
1654         return 0
1655 }
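# Worked example (hypothetical debugfs output, assuming L_BLOCK_SIZE is 4096):
#   "Inode count:              131072"  -> inode_count=131072
#   "Block count:              524288"  -> block_count=524288
#   ratio = 524288*4096/131072 = 16384 (bytes per inode)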
1656
1657 # get_default_ratio target_devtype target_devsize
1658 # Calculate the default bytes/inode ratio from target type @target_devtype
1659 get_default_ratio() {
1660         local target_devtype=$1
1661         declare -i target_devsize=$2
1662         local ratio=
1663
1664         case "${target_devtype}" in
1665         "mdt" | "mgs|mdt" | "mdt|mgs")
1666                 ratio=4096;;
1667         "ost")
1668                 [ ${target_devsize} -gt 1000000 ] && ratio=16384;;
1669         esac
1670
1671         [ -z "${ratio}" ] && ratio=${L_BLOCK_SIZE}
1672
1673         echo ${ratio}
1674         return 0
1675 }
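# Examples (sizes in KB, assuming L_BLOCK_SIZE is 4096):
#   get_default_ratio "mdt" 500000   -> 4096
#   get_default_ratio "ost" 2000000  -> 16384  (device larger than ~1 GB)
#   get_default_ratio "ost" 500000   -> 4096   (falls back to L_BLOCK_SIZE)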
1676
1677 # get_i_opt hostname target_devname target_devtype target_devsize
1678 # Get the mkfs -i option of lustre target @target_devname 
1679 # from the node @hostname
1680 get_i_opt() {
1681         local host_name=$1
1682         local target_devname=$2
1683         local target_devtype=$3
1684         local target_devsize=$4
1685         local ratio=
1686         local default_ratio=
1687         local ratio_opt=
1688
1689         # Get the real bytes/inode ratio of lustre target
1690         ratio=$(get_ratio ${target_devname} ${host_name})
1691         if [ $? -ne 0 ]; then
1692                 echo "${ratio}"
1693                 return 1
1694         fi
1695
1696         # Get the default bytes/inode ratio of lustre target
1697         default_ratio=$(get_default_ratio ${target_devtype} ${target_devsize})
1698
1699         if [ "${ratio}" != "${default_ratio}" ]; then
1700                 ratio_opt="-i ${ratio}"
1701         fi
1702                 
1703         echo ${ratio_opt}
1704         return 0
1705 }
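# Usage sketch (hypothetical arguments):
#   ratio_opt=$(get_i_opt oss01 /dev/sdb1 "ost" 2000000)
# prints "-i <ratio>" only when the bytes/inode ratio read from the device
# differs from the default; an empty result means the mkfs default suffices.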
1706
1707 # get_isize target_devname hostname
1708 # Get the inode size of lustre target @target_devname from @hostname
1709 get_isize() {
1710         local target_devname=$1
1711         local host_name=$2
1712         local inode_size= 
1713         local ret_str
1714
1715         # Execute remote command to get the inode size 
1716         ret_str=`${REMOTE} ${host_name} "/sbin/debugfs -R 'stats -h' \
1717                  ${target_devname} | grep 'Inode size:'" 2>&1`
1718         if [ $? -ne 0 -a -n "${ret_str}" ]; then
1719                 echo "`basename $0`: get_isize() error:" \
1720                      "remote command error: ${ret_str}"
1721                 return 1
1722         fi
1723
1724         ret_str=${ret_str#${ret_str%Inode size:*}}
1725         inode_size=`echo ${ret_str} | awk '{print $3}'`
1726         if [ -z "`echo ${inode_size}|awk '/^[[:digit:]]/ {print $0}'`" ]
1727         then
1728                 echo "`basename $0`: get_isize() error: can't" \
1729                 "get inode size of ${target_devname} in ${host_name}!"
1730                 return 1
1731         fi
1732
1733         echo ${inode_size}
1734         return 0
1735 }
1736
1737 # get_mdt_default_isize host_name target_fsname
1738 # Calculate the default inode size of lustre mdt target
1739 get_mdt_default_isize() {
1740         local host_name=$1
1741         local target_fsname=$2
1742         declare -i stripe_count
1743         local inode_size=
1744
1745         # Get the stripe count
1746         stripe_count=$(get_stripecount ${host_name} ${target_fsname})
1747         if [ $? -ne 0 ]; then
1748                 echo "${stripe_count}"
1749                 return 1
1750         fi
1751
1752         if ((stripe_count > 77)); then
1753                 inode_size=512
1754         elif ((stripe_count > 34)); then
1755                 inode_size=2048
1756         elif ((stripe_count > 13)); then
1757                 inode_size=1024
1758         else
1759                 inode_size=512
1760         fi
1761
1762         echo ${inode_size}
1763         return 0
1764 }
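# Quick reference for the branches above (restating the code, not a
# recommendation): stripe counts of 0-13 and >77 give a 512-byte inode,
# 14-34 gives 1024 and 35-77 gives 2048.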
1765
1766 # get_default_isize host_name target_devtype target_fsname
1767 # Calculate the default inode size of lustre target type @target_devtype
1768 get_default_isize() {
1769         local host_name=$1
1770         local target_devtype=$2
1771         local target_fsname=$3
1772         local inode_size=
1773
1774         case "${target_devtype}" in
1775         "mdt" | "mgs|mdt" | "mdt|mgs")
1776                 inode_size=$(get_mdt_default_isize ${host_name} ${target_fsname})
1777                 if [ $? -ne 0 ]; then
1778                         echo "${inode_size}"
1779                         return 1
1780                 fi
1781                 ;;
1782         "ost")
1783                 inode_size=256;;
1784         esac
1785
1786         [ -z "${inode_size}" ] && inode_size=128
1787
1788         echo ${inode_size}
1789         return 0
1790 }
1791
1792 # get_I_opt hostname target_devname target_devtype target_fsname
1793 # Get the mkfs -I option of lustre target @target_devname 
1794 # from the node @hostname
1795 get_I_opt() {
1796         local host_name=$1
1797         local target_devname=$2
1798         local target_devtype=$3
1799         local target_fsname=$4
1800         local isize=
1801         local default_isize=
1802         local isize_opt=
1803
1804         # Get the real inode size of lustre target
1805         isize=$(get_isize ${target_devname} ${host_name})
1806         if [ $? -ne 0 ]; then
1807                 echo "${isize}"
1808                 return 1
1809         fi
1810
1811         # Get the default inode size of lustre target
1812         [ -z "${target_fsname}" ] && target_fsname="lustre"
1813         default_isize=$(get_default_isize ${host_name} ${target_devtype} \
1814                         ${target_fsname})
1815         if [ $? -ne 0 ]; then
1816                 echo "${default_isize}"
1817                 return 1
1818         fi
1819
1820         if [ "${isize}" != "${default_isize}" ]; then
1821                 isize_opt="-I ${isize}"
1822         fi
1823                 
1824         echo ${isize_opt}
1825         return 0
1826 }
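# Usage sketch (hypothetical arguments):
#   isize_opt=$(get_I_opt mds01 /dev/sda1 "mgs|mdt" testfs)
# prints "-I <size>" only when the inode size on the device differs from
# the default derived from the target type and stripe count.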
1827
1828 # get_mkfsopts hostname
1829 # Get the mkfs options of lustre targets from the node @hostname
1830 get_mkfsopts() {
1831         declare -i i
1832         local host_name=$1
1833         local journal_opt
1834         local ratio_opt
1835         local inode_size_opt
1836
1837         # Initialize the arrays
1838         unset TARGET_MKFSOPTS
1839         
1840         # FIXME: Get other mkfs options of ext3/ldiskfs besides -J, -i and -I
1841         for ((i = 0; i < ${#TARGET_DEVNAMES[@]}; i++)); do
1842                 journal_opt=
1843                 ratio_opt=
1844                 inode_size_opt=
1845
1846                 [ -z "${TARGET_DEVNAMES[i]}" ] && continue
1847
1848                 if [ -z "${TARGET_DEVSIZES[i]}" ]; then
1849                         # Get the device size
1850                         TARGET_DEVSIZES[i]=$(get_devsize ${host_name} \
1851                                          ${TARGET_DEVNAMES[i]})
1852                         if [ $? -ne 0 ]; then
1853                                 echo >&2 "${TARGET_DEVSIZES[i]}"
1854                                 return 1
1855                         fi
1856                 fi
1857
1858                 # Get the journal option
1859                 journal_opt=$(get_J_opt ${host_name} ${TARGET_DEVNAMES[i]} \
1860                               ${TARGET_DEVSIZES[i]})
1861                 if [ $? -ne 0 ]; then
1862                         echo >&2 "${journal_opt}"
1863                         return 1
1864                 fi
1865
1866                 if [ -n "${journal_opt}" ]; then
1867                         if [ -z "${TARGET_MKFSOPTS[i]}" ]; then
1868                                 TARGET_MKFSOPTS[i]="${journal_opt}"
1869                         else
1870                                 TARGET_MKFSOPTS[i]=${TARGET_MKFSOPTS[i]}" ${journal_opt}"
1871                         fi
1872                 fi
1873                 
1874                 # Get the bytes-per-inode ratio option
1875                 ratio_opt=$(get_i_opt ${host_name} ${TARGET_DEVNAMES[i]} \
1876                             ${TARGET_DEVTYPES[i]} ${TARGET_DEVSIZES[i]})
1877                 if [ $? -ne 0 ]; then
1878                         echo >&2 "${ratio_opt}"
1879                         return 1
1880                 fi
1881
1882                 if [ -n "${ratio_opt}" ]; then
1883                         if [ -z "${TARGET_MKFSOPTS[i]}" ]; then
1884                                 TARGET_MKFSOPTS[i]="${ratio_opt}"
1885                         else
1886                                 TARGET_MKFSOPTS[i]=${TARGET_MKFSOPTS[i]}" ${ratio_opt}"
1887                         fi
1888                 fi
1889
1890                 # Get the inode size option
1891                 inode_size_opt=$(get_I_opt ${host_name} ${TARGET_DEVNAMES[i]} \
1892                                  ${TARGET_DEVTYPES[i]} ${TARGET_FSNAMES[i]})
1893                 if [ $? -ne 0 ]; then
1894                         echo >&2 "${inode_size_opt}"
1895                         return 1
1896                 fi
1897
1898                 if [ -n "${inode_size_opt}" ]; then
1899                         if [ -z "${TARGET_MKFSOPTS[i]}" ]; then
1900                                 TARGET_MKFSOPTS[i]="${inode_size_opt}"
1901                         else
1902                                 TARGET_MKFSOPTS[i]=${TARGET_MKFSOPTS[i]}" ${inode_size_opt}"
1903                         fi
1904                 fi
1905
1906                 if [ "${TARGET_MKFSOPTS[i]}" != "${TARGET_MKFSOPTS[i]#*,*}" ]; then
1907                         TARGET_MKFSOPTS[i]="\""${TARGET_MKFSOPTS[i]}"\""
1908                 fi
1909         done
1910         return 0
1911 }
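# Illustrative result (hypothetical values): after the loop above an OST
# entry might hold
#   TARGET_MKFSOPTS[i]="-J size=128 -i 16384 -I 256"
# i.e. only the options whose on-disk values differ from the calculated
# defaults; a value containing a comma is quoted before being written to
# the csv file.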
1912
1913 # get_target_configs hostname
1914 # Get the lustre target information from the node @hostname
1915 get_target_configs() {
1916         declare -i i
1917         local host_name=$1
1918         local ret_line line
1919
1920         # Initialize the arrays
1921         unset TARGET_CONFIGS
1922
1923         # Get lustre target server names
1924         if ! get_svnames ${host_name}; then
1925                 return 1
1926         fi
1927
1928         # Get lustre target device names, mount points and loop device sizes
1929         if ! get_devnames ${host_name}; then
1930                 return 1
1931         fi
1932
1933         # Get lustre target device type, fsname, index, etc.
1934         if ! get_ldds ${host_name}; then
1935                 return 1
1936         fi
1937
1938         # Get mkfs options of lustre targets
1939         if ! get_mkfsopts ${host_name}; then
1940                 return 1
1941         fi
1942
1943         # Construct lustre target configs
1944         for ((i = 0; i < ${#TARGET_DEVNAMES[@]}; i++)); do
1945                 [ -z "${TARGET_DEVNAMES[i]}" ] && continue
1946                 TARGET_CONFIGS[i]=${TARGET_DEVNAMES[i]},${TARGET_MNTPNTS[i]},${TARGET_DEVTYPES[i]},${TARGET_FSNAMES[i]},${TARGET_MGSNIDS[i]},${TARGET_INDEXES[i]},${TARGET_FMTOPTS[i]},${TARGET_MKFSOPTS[i]},${TARGET_MNTOPTS[i]},${TARGET_FAILNIDS[i]}
1947         done
1948
1949         return 0
1950 }
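# Illustrative sketch (hypothetical values): one element of TARGET_CONFIGS
# might read
#   /dev/sdb1,/mnt/ost0,ost,testfs,mgsnode@tcp,0,,"-J size=128",defaults,failnode@tcp
# following the field order device name, mount point, device type, fsname,
# mgs nids, index, format options, mkfs options, mount options, failover nids.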
1951
1952 # get_configs hostname
1953 # Get all the information needed to generate a csv file from 
1954 # the node @hostname
1955 get_configs() {
1956         # Check the hostname
1957         if [ -z "$1" ]; then
1958                 echo >&2 "`basename $0`: get_configs() error:" \
1959                          "Missing hostname!"
1960                 return 1
1961         fi
1962
1963         # Get network module options
1964         verbose_output ""
1965         verbose_output "Collecting network module options from host $1..."
1966         if ! get_module_opts $1; then
1967                 return 1
1968         fi
1969         verbose_output "OK"
1970
1971         # Get lustre target information
1972         verbose_output "Collecting Lustre target information from host $1..."
1973         if ! get_target_configs $1; then
1974                 return 1
1975         fi
1976         verbose_output "OK"
1977
1978         # Get HA software configurations
1979         if ! get_ha_configs $1; then
1980                 return 1
1981         fi
1982
1983         return 0
1984 }
1985
1986 # Collect linux MD/LVM device information from the lustre cluster and
1987 # append it to the csv file
1988 get_mdlvm_info() {
1989         declare -i idx
1990         declare -i i
1991         local line
1992
1993         # Collect and append linux MD/LVM information to the csv file
1994         for ((idx = 0; idx < ${#HOST_NAMES[@]}; idx++)); do
1995                 [ -z "${HOST_NAMES[idx]}" ] && continue
1996
1997                 # Collect MD device information
1998                 ! get_md_configs ${HOST_NAMES[idx]} && return 1
1999
2000                 # Append MD device information to the csv file
2001                 for ((i = 0; i < ${#MD_NAME[@]}; i++)); do
2002                         line=${HOST_NAMES[idx]},${MD_MARKER},${MD_NAME[i]},,,${MD_LEVEL[i]},${MD_DEVS[i]}
2003                         verbose_output "Information about MD device ${MD_NAME[i]}" \
2004                                        "in host ${HOST_NAMES[idx]} is as follows:"
2005                         verbose_output "${line}"
2006                         echo "${line}" >> ${LUSTRE_CSV_FILE}
2007                 done
2008
2009                 # Collect PV information
2010                 ! get_pv_configs ${HOST_NAMES[idx]} && return 1
2011
2012                 # Append PV information to the csv file
2013                 if [ -n "${PV_NAMES}" ]; then
2014                         line=${HOST_NAMES[idx]},${PV_MARKER},${PV_NAMES}
2015                         verbose_output "Information about PVs" \
2016                                        "in host ${HOST_NAMES[idx]} is as follows:"
2017                         verbose_output "${line}"
2018                         echo "${line}" >> ${LUSTRE_CSV_FILE}
2019                 fi
2020
2021                 # Collect VG information
2022                 ! get_vg_configs ${HOST_NAMES[idx]} && return 1
2023
2024                 # Append VG information to the csv file
2025                 for ((i = 0; i < ${#VG_NAME[@]}; i++)); do
2026                         line=${HOST_NAMES[idx]},${VG_MARKER},${VG_NAME[i]},,,${VG_PVNAMES[i]}
2027                         verbose_output "Information about VG ${VG_NAME[i]}" \
2028                                        "in host ${HOST_NAMES[idx]} is as follows:"
2029                         verbose_output "${line}"
2030                         echo "${line}" >> ${LUSTRE_CSV_FILE}
2031                 done
2032
2033                 # Collect LV information
2034                 ! get_lv_configs ${HOST_NAMES[idx]} && return 1
2035
2036                 # Append LV information to the csv file
2037                 for ((i = 0; i < ${#LV_NAME[@]}; i++)); do
2038                         line=${HOST_NAMES[idx]},${LV_MARKER},${LV_NAME[i]},,,${LV_SIZE[i]},${LV_VGNAME[i]}
2039                         verbose_output "Information about LV /dev/${LV_VGNAME[i]}/${LV_NAME[i]}"\
2040                                        "in host ${HOST_NAMES[idx]} is as follows:"
2041                         verbose_output "${line}"
2042                         echo "${line}" >> ${LUSTRE_CSV_FILE}
2043                 done
2044         done
2045         return 0
2046 }
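# Illustrative csv lines appended by this function (hypothetical names; the
# actual MD/PV/VG/LV marker strings come from the ${MD_MARKER}-style
# variables used above):
#   node1,<MD marker>,/dev/md0,,,raid1,/dev/sdb /dev/sdc
#   node1,<PV marker>,/dev/sdd1 /dev/sde1
#   node1,<VG marker>,vg_lustre,,,/dev/sdd1 /dev/sde1
#   node1,<LV marker>,lv_ost0,,,100G,vg_lustre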
2047
2048 # Generate the csv file from the lustre cluster
2049 gen_csvfile() {
2050         declare -i idx
2051         declare -i i
2052         local line
2053
2054         # Get lustre cluster node names
2055         verbose_output "Collecting Lustre cluster node names..."
2056         if ! get_hostnames; then
2057                 return 1
2058         fi
2059         verbose_output "OK"
2060
2061         : > ${LUSTRE_CSV_FILE}
2062
2063         ${GET_MDLVM_INFO} && get_mdlvm_info
2064
2065         # Collect and append lustre target information to the csv file
2066         for ((idx = 0; idx < ${#HOST_NAMES[@]}; idx++)); do
2067                 # Collect information
2068                 if ! get_configs ${HOST_NAMES[idx]}; then
2069                         rm -f ${LUSTRE_CSV_FILE}
2070                         return 1
2071                 fi
2072
2073                 # Append information to the csv file
2074                 for ((i = 0; i < ${#TARGET_DEVNAMES[@]}; i++)); do
2075                         [ -z "${TARGET_DEVNAMES[i]}" ] && continue
2076
2077                         if [ -z "${HA_CONFIGS[i]}" ]; then
2078                                 line=${HOST_NAMES[idx]},${MODULE_OPTS},${TARGET_CONFIGS[i]}
2079                         else
2080                                 line=${HOST_NAMES[idx]},${MODULE_OPTS},${TARGET_CONFIGS[i]},${HA_CONFIGS[i]}
2081                         fi
2082                         verbose_output "Information about target ${TARGET_DEVNAMES[i]}" \
2083                                        "in host ${HOST_NAMES[idx]} is as follows:"
2084                         verbose_output "${line}"
2085                         echo "" >> ${LUSTRE_CSV_FILE}
2086                         echo "${line}" >> ${LUSTRE_CSV_FILE}
2087                 done
2088         done
2089
2090         return 0
2091 }
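# Illustrative sketch of one resulting target line (purely hypothetical values):
#   node1,options lnet networks=tcp,/dev/sdb1,/mnt/ost0,ost,testfs,mgsnode@tcp,0,,,errors=remount-ro,failnode@tcp
# i.e. hostname, network module options, the fields assembled by
# get_target_configs(), and the HA configuration appended when present.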
2092
2093 # Main flow
2094 echo "`basename $0`: ******** Generate csv file -- ${LUSTRE_CSV_FILE} START ********"
2095 if ! gen_csvfile; then
2096         exit 1
2097 fi
2098 echo "`basename $0`: ******** Generate csv file -- ${LUSTRE_CSV_FILE} OK **********"
2099
2100 exit 0