#!/bin/bash
#
#set -vx

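# parallel-scale-nfsv4.sh: run the parallel-scale benchmarks (compilebench,
# metabench, connectathon, IOR) against an NFSv4 export of a Lustre mount,
# per LU-814 (automate NFS over Lustre testing).
#
# Typical invocation (illustrative only; adjust to your site configuration):
#   CLIENTS=nfsclient1,nfsclient2 NAME=ncli bash parallel-scale-nfsv4.sh
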
LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
. $LUSTRE/tests/test-framework.sh
init_test_env $@
. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
init_logging

. $LUSTRE/tests/setup-nfs.sh
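# setup-nfs.sh provides the setup_nfs/cleanup_nfs helpers used below. As
# called here, setup_nfs takes the NFS version ("4"), the directory to export,
# the NFS server node (the MDS host carrying the Lustre mount) and the list of
# NFS clients; cleanup_nfs takes the same arguments minus the version.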

# first unmount all the Lustre clients
cleanup_mount $MOUNT
# mount Lustre on the MDS; it will act as the NFS server
lustre_client=$(facet_active_host $SINGLEMDS)
zconf_mount_clients $lustre_client $MOUNT \
    "-o user_xattr,acl,flock,32bitapi" || \
    error "mount lustre on $lustre_client failed"

# set up the NFS export on the server and mount it on the clients
if ! setup_nfs "4" "$MOUNT" "$lustre_client" "$CLIENTS"; then
    error_noexit "setup nfs failed!"
    cleanup_nfs "$MOUNT" "$lustre_client" "$CLIENTS" || \
        error_noexit "failed to cleanup nfs"
    if ! zconf_umount_clients $lustre_client $MOUNT force; then
        error_noexit "failed to umount lustre on $lustre_client"
    elif ! zconf_mount_clients $CLIENTS $MOUNT; then
        error_noexit "failed to mount lustre after nfs test"
    fi
    check_and_cleanup_lustre
    exit
fi

NFSCLIENT=yes
FAIL_ON_ERROR=false

#
# common setup
#
MACHINEFILE=${MACHINEFILE:-$TMP/$(basename $0 .sh).machines}
clients=${CLIENTS:-$HOSTNAME}
generate_machine_file $clients $MACHINEFILE || \
    error "Failed to generate machine file"
num_clients=$(get_node_count ${clients//,/ })
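# The machine file lists the hosts the MPI-launched benchmarks run on (one
# entry per client, as written by generate_machine_file); the run_* helpers
# from functions.sh pick it up through MACHINEFILE.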

#
# compilebench
#
cbench_DIR=${cbench_DIR:-"/usr/bin"}
cbench_IDIRS=${cbench_IDIRS:-4}
# FIXME: the wiki page requirement is 30; do we really need 30?
cbench_RUNS=${cbench_RUNS:-4}

if [ "$SLOW" = "no" ]; then
    cbench_IDIRS=2
    cbench_RUNS=2
fi

#
# metabench
#
METABENCH=${METABENCH:-$(which metabench 2> /dev/null || true)}
mbench_NFILES=${mbench_NFILES:-30400}
[ "$SLOW" = "no" ] && mbench_NFILES=10000
# threads per client
mbench_THREADS=${mbench_THREADS:-4}

#
# connectathon
#
cnt_DIR=${cnt_DIR:-""}
cnt_NRUN=${cnt_NRUN:-10}
[ "$SLOW" = "no" ] && cnt_NRUN=2

#
# IOR
#
IOR=${IOR:-$(which IOR 2> /dev/null || true)}
# threads per client
ior_THREADS=${ior_THREADS:-2}
ior_iteration=${ior_iteration:-1}
ior_blockSize=${ior_blockSize:-6} # GB
ior_xferSize=${ior_xferSize:-2m}
ior_type=${ior_type:-POSIX}
ior_DURATION=${ior_DURATION:-60} # minutes
[ "$SLOW" = "no" ] && ior_DURATION=30

# source the common file after all parameters are set to take effect
. $LUSTRE/tests/functions.sh

build_test_filter
check_and_setup_lustre

get_mpiuser_id $MPI_USER
MPI_RUNAS=${MPI_RUNAS:-"runas -u $MPI_USER_UID -g $MPI_USER_GID"}
$GSS_KRB5 && refresh_krb5_tgt $MPI_USER_UID $MPI_USER_GID $MPI_RUNAS

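# Each benchmark below is wrapped in a test_* function and registered with
# run_test, so individual tests can be selected or skipped through the usual
# test-framework filters (e.g. ONLY/EXCEPT) built by build_test_filter above.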
test_compilebench() {
    run_compilebench
}
run_test compilebench "compilebench"

test_metabench() {
    run_metabench
}
run_test metabench "metabench"

test_connectathon() {
    run_connectathon
}
run_test connectathon "connectathon"

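# IOR runs in two modes: "ssf" (single shared file, all tasks write to one
# file) and "fpp" (file per process, each task writes its own file).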
test_iorssf() {
    run_ior "ssf"
}
run_test iorssf "iorssf"

test_iorfpp() {
    run_ior "fpp"
}
run_test iorfpp "iorfpp"

# clean up the NFS setup and remount Lustre directly on the clients
cleanup_nfs "$MOUNT" "$lustre_client" "$CLIENTS" || \
    error_noexit "cleanup_nfs failed"
if ! zconf_umount_clients $lustre_client $MOUNT force; then
    error_noexit "failed to umount lustre on $lustre_client"
elif ! zconf_mount_clients $CLIENTS $MOUNT; then
    error_noexit "failed to mount lustre after nfs test"
fi

complete $(basename $0) $SECONDS
check_and_cleanup_lustre
exit_status