The previous change to sanity test_136, intended to improve test
reliability on ZFS servers, increased the test run time by about 8x
(from ~300s to ~2400s). Only wait for deletion and drop caches on
ZFS MDS nodes, not on ldiskfs, where this is unnecessary.
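
As a rough sketch of the pattern applied by the hunk below, the
periodic flush inside the create loop is now guarded by the MDS
backing filesystem type, using the existing sanity.sh helpers:

	if [[ "$mds1_FSTYPE" == "zfs" ]]; then
		wait_delete_completed
		do_facet mds1 "echo 1 > /proc/sys/vm/drop_caches"
	fi

On ldiskfs the per-iteration flush is skipped and a single
wait_delete_completed after the loop is sufficient.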
Test-Parameters: trivial
Test-Parameters: testlist=sanity env=ONLY=136,SLOW=yes,ONLY_MINUTES=30 fstype=zfs
Test-Parameters: testlist=sanity env=ONLY=136,SLOW=yes,ONLY_MINUTES=30
Fixes:
627cc62369 ("LU-18354 tests: avoid sanity/136 OOM on ZFS servers")
Signed-off-by: Andreas Dilger <adilger@whamcloud.com>
Change-Id: Ic5dc79f9b7e6c2df50a97d0447ef3aa9d3c73e1d
Reviewed-on: https://review.whamcloud.com/c/fs/lustre-release/+/58199
Tested-by: Maloo <maloo@whamcloud.com>
Tested-by: jenkins <devops@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
Reviewed-by: Olaf Faaland <faaland1@llnl.gov>
Reviewed-by: Timothy Day <timday@amazon.com>
 	ost_set_temp_seq_width_all $DATA_SEQ_MAX_WIDTH
 	# fill already existed 2 plain llogs each 64767 wrapping whole catalog,
-	# drop server memory periodically to avoid OOM during testing
+	# drop server memory periodically only on ZFS to avoid OOM during test
 	local items=1000
 	for ((created = 0; created < 64767 * 5 / 2; created += items)); do
 		echo "$(date +%s): create $created-$((created + items - 1))"
-		createmany -o -u $DIR/$tdir/$tfile- $items
-		wait_delete_completed
-		do_facet mds1 "echo 1 > /proc/sys/vm/drop_caches"
+		createmany -o -u $DIR/$tdir/$tfile- $created $items
+		if [[ "$mds1_FSTYPE" == "zfs" ]]; then
+			wait_delete_completed
+			do_facet mds1 "echo 1 > /proc/sys/vm/drop_caches"
+		fi
 	done
+	[[ "$mds1_FSTYPE" == "zfs" ]] || wait_delete_completed
 	createmany -o $DIR/$tdir/$tfile_ 10
 	sleep 25
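
For local verification, this subtest can be run by itself with the same
environment the Test-Parameters lines above request; a sketch, assuming
an already configured Lustre test setup (set FSTYPE=zfs when formatting
the targets to cover the fstype=zfs case):

	ONLY=136 SLOW=yes ONLY_MINUTES=30 bash lustre/tests/sanity.sh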