LU-14755 tests: create custom pools
author     Elena Gryaznova <elena.gryaznova@hpe.com>
           Thu, 10 Jun 2021 09:51:52 +0000 (12:51 +0300)
committer  Oleg Drokin <green@whamcloud.com>
           Wed, 30 Jun 2021 03:17:12 +0000 (03:17 +0000)
We are interested in running some tests on a filesystem with
pools. The proposed enhancement allows creating $FS_NPOOLS
pools, each containing $FS_POOL_NOSTS OSTs. If $FS_NPOOLS is
not set, the number of pools created is
$OSTCOUNT / $FS_POOL_NOSTS. Pool names are derived from
$FS_POOL. No pools are created if $FS_POOL is not set.
Example 1:
  FS_POOL=global OSTCOUNT=2
lustre.global0
OST lustre-OST0000_UUID
OST lustre-OST0001_UUID
Example 2:
  FS_POOL=global OSTCOUNT=6 FS_POOL_NOSTS=3
lustre.global0
OST lustre-OST0000_UUID
OST lustre-OST0001_UUID
OST lustre-OST0002_UUID
lustre.global1
OST lustre-OST0003_UUID
OST lustre-OST0004_UUID
OST lustre-OST0005_UUID
Example 3:
  FS_POOL=p OSTCOUNT=5 KEEP_POOLS=true FS_NPOOLS=7 FS_POOL_NOSTS=3
Pool: lustre.p0
lustre-OST0000_UUID
lustre-OST0001_UUID
lustre-OST0002_UUID
Pool: lustre.p1
lustre-OST0003_UUID
lustre-OST0004_UUID
lustre-OST0000_UUID
Pool: lustre.p2
lustre-OST0001_UUID
lustre-OST0002_UUID
lustre-OST0003_UUID
Pool: lustre.p3
lustre-OST0004_UUID
lustre-OST0000_UUID
lustre-OST0001_UUID
Pool: lustre.p4
lustre-OST0002_UUID
lustre-OST0003_UUID
lustre-OST0004_UUID
Pool: lustre.p5
lustre-OST0000_UUID
lustre-OST0001_UUID
lustre-OST0002_UUID
Pool: lustre.p6
lustre-OST0003_UUID
lustre-OST0004_UUID
lustre-OST0000_UUID
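
The wrap-around in Example 3 is plain modular arithmetic over the
OST indices: pool n takes $FS_POOL_NOSTS consecutive indices
starting at (n * $FS_POOL_NOSTS) % $OSTCOUNT. A minimal standalone
sketch (pure bash; the names mirror create_pools() in the diff
below, but the snippet is illustrative and not part of the patch):

  OSTCOUNT=5 ostsn=3 npools=7
  for (( n = 0; n < npools; n++ )); do
          echo "Pool: lustre.p$n"
          # pool n starts at index (n * ostsn) % OSTCOUNT and
          # wraps past the last OST back to index 0
          for (( i = 0; i < ostsn; i++ )); do
                  printf 'lustre-OST%04x_UUID\n' \
                          $(( (n * ostsn + i) % OSTCOUNT ))
          done
  done

Running this reproduces the Example 3 listing above.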

The patch also adds the ability to remove all old pools at
start-up if DELETE_OLD_POOLS is set to true (default is false),
and to keep the new pools from being deleted at the end if
KEEP_POOLS is set to true (default is false).
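
For example, a hypothetical invocation (the variable values are
illustrative, not defaults; assumes OSTCOUNT=6 in the test config):

  FS_POOL=global FS_POOL_NOSTS=3 DELETE_OLD_POOLS=true \
          KEEP_POOLS=true bash lustre/tests/sanity.sh

This would wipe any stale pools first, create lustre.global0 and
lustre.global1 with three OSTs each, and leave both pools in place
after the run.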

Test-Parameters: trivial testlist=sanity-flr,ost-pools,ost-pools,sanity-pfl,sanity,sanityn
Signed-off-by: Elena Gryaznova <elena.gryaznova@hpe.com>
HPE-bug-id: LUS-8172
Reviewed-by: Sergey Cheremencev <sergey.cheremencev@hpe.com>
Reviewed-by: Vladimir Saveliev <vladimir.saveliev@hpe.com>
Change-Id: I73b72f9f39933b5b875978ce4fede5e9828c4c71
Reviewed-on: https://review.whamcloud.com/43966
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Vladimir Saveliev <vladimir.saveliev@hpe.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
lustre/tests/sanity.sh
lustre/tests/test-framework.sh

diff --git a/lustre/tests/sanity.sh b/lustre/tests/sanity.sh
index 6861224..558373a 100755
@@ -2755,7 +2755,8 @@ test_27I() {
        save_layout_restore_at_exit $MOUNT
        $LFS setstripe -c 2 -i 0 $MOUNT
        pool_add $pool || error "pool_add failed"
-       pool_add_targets $pool $ostrange || "pool_add_targets failed"
+       pool_add_targets $pool $ostrange ||
+               error "pool_add_targets failed"
        test_mkdir $DIR/$tdir
        $LFS setstripe -p $pool $DIR/$tdir
        $MULTIOP $DIR/$tdir/$tfile Oc || error "multiop failed"
diff --git a/lustre/tests/test-framework.sh b/lustre/tests/test-framework.sh
index 32d1cb2..f5c2aba 100755
@@ -428,6 +428,8 @@ init_test_env() {
 
        # Constants used in more than one test script
        export LOV_MAX_STRIPE_COUNT=2000
+       export DELETE_OLD_POOLS=${DELETE_OLD_POOLS:-false}
+       export KEEP_POOLS=${KEEP_POOLS:-false}
 
        export MACHINEFILE=${MACHINEFILE:-$TMP/$(basename $0 .sh).machines}
        . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
@@ -5387,6 +5389,46 @@ is_mounted () {
        echo $mounted' ' | grep -w -q $mntpt' '
 }
 
+create_pools () {
+       local pool=$1
+       local ostsn=${2:-$OSTCOUNT}
+       local npools=${FS_NPOOLS:-$((OSTCOUNT / ostsn))}
+       local n
+
+       echo ostsn=$ostsn npools=$npools
+       if [[ $ostsn -gt $OSTCOUNT ]]; then
+               echo "request to use $ostsn OSTs in the pool, \
+                       using max available OSTCOUNT=$OSTCOUNT"
+               ostsn=$OSTCOUNT
+       fi
+       for (( n=0; n < $npools; n++ )); do
+               p=${pool}$n
+               if ! $DELETE_OLD_POOLS; then
+                       log "request to not delete old pools: does $FSNAME.$p exist?"
+                       if ! check_pool_not_exist $FSNAME.$p; then
+                               echo "Using existing $FSNAME.$p"
+                               $LCTL pool_list $FSNAME.$p
+                               continue
+                       fi
+               fi
+               create_pool $FSNAME.$p $KEEP_POOLS ||
+                       error "create_pool $FSNAME.$p failed"
+
+               local first=$(( (n * ostsn) % OSTCOUNT ))
+               local last=$(( (first + ostsn - 1) % OSTCOUNT ))
+               if [[ $first -le $last ]]; then
+                       pool_add_targets $p $first $last ||
+                               error "pool_add_targets $p $first $last failed"
+               else
+                       pool_add_targets $p $first $(( OSTCOUNT - 1 )) ||
+                               error "pool_add_targets $p $first \
+                                       $(( OSTCOUNT - 1 )) failed"
+                       pool_add_targets $p 0 $last ||
+                               error "pool_add_targets $p 0 $last failed"
+               fi
+       done
+}
+
 check_and_setup_lustre() {
        sanitize_parameters
        nfs_client_mode && return
@@ -5472,6 +5514,13 @@ check_and_setup_lustre() {
                set_flavor_all $SEC
        fi
 
+       if $DELETE_OLD_POOLS; then
+               destroy_all_pools
+       fi
+       if [[ -n "$FS_POOL" ]]; then
+               create_pools $FS_POOL $FS_POOL_NOSTS
+       fi
+
        if [ "$ONLY" == "setup" ]; then
                exit 0
        fi
@@ -7648,6 +7697,7 @@ check_pool_not_exist() {
 create_pool() {
        local fsname=${1%%.*}
        local poolname=${1##$fsname.}
+       local keep_pools=${2:-false}
 
        stack_trap "destroy_test_pools $fsname" EXIT
        do_facet mgs lctl pool_new $1
@@ -7666,7 +7716,7 @@ create_pool() {
        wait_update $HOSTNAME "lctl get_param -n lov.$fsname-*.pools.$poolname \
                2>/dev/null || echo foo" "" || error "pool_new failed $1"
 
-       add_pool_to_list $1
+       $keep_pools || add_pool_to_list $1
        return $RC
 }
 
@@ -7684,10 +7734,18 @@ remove_pool_from_list () {
        local poolname=${1##$fsname.}
 
        local listvar=${fsname}_CREATED_POOLS
-       local temp=${listvar}=$(exclude_items_from_list ${!listvar} $poolname)
+       local temp=${listvar}=$(exclude_items_from_list "${!listvar}" $poolname)
        eval export $temp
 }
 
+# clean up all pools that exist on $FSNAME
+destroy_all_pools () {
+       local i
+       for i in $(list_pool $FSNAME); do
+               destroy_pool $i
+       done
+}
+
 destroy_pool_int() {
        local ost
        local OSTS=$(list_pool $1)
@@ -7708,8 +7766,7 @@ destroy_pool() {
 
        local RC
 
-       check_pool_not_exist $fsname.$poolname
-       [[ $? -eq 0 ]] && return 0
+       check_pool_not_exist $fsname.$poolname && return 0 || true
 
        destroy_pool_int $fsname.$poolname
        RC=$?