X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Ftest-framework.sh;h=640e737341ba0ae782f2bee7a264faffad9df0e5;hp=8ff09036e4d1a20903fa6ef2156b6f5f6b5815df;hb=2ccb34d882b01305794e7780b6dd691179ddae7e;hpb=f426fb920e3b97298a603a142d56be89c12f2006

diff --git a/lustre/tests/test-framework.sh b/lustre/tests/test-framework.sh
index 8ff0903..640e737 100644
--- a/lustre/tests/test-framework.sh
+++ b/lustre/tests/test-framework.sh
@@ -37,8 +37,13 @@ if [ -f "$EXCEPT_LIST_FILE" ]; then
     . $EXCEPT_LIST_FILE
 fi
 
-[ -z "$MODPROBECONF" -a -f /etc/modprobe.conf ] && MODPROBECONF=/etc/modprobe.conf
-[ -z "$MODPROBECONF" -a -f /etc/modprobe.d/Lustre ] && MODPROBECONF=/etc/modprobe.d/Lustre
+# check config files for options in decreasing order of preference
+[ -z "$MODPROBECONF" -a -f /etc/modprobe.d/lustre.conf ] &&
+    MODPROBECONF=/etc/modprobe.d/lustre.conf
+[ -z "$MODPROBECONF" -a -f /etc/modprobe.d/Lustre ] &&
+    MODPROBECONF=/etc/modprobe.d/Lustre
+[ -z "$MODPROBECONF" -a -f /etc/modprobe.conf ] &&
+    MODPROBECONF=/etc/modprobe.conf
 
 assert_DIR () {
     local failed=""
@@ -125,13 +130,6 @@ init_test_env() {
     export LFSCK_ALWAYS=${LFSCK_ALWAYS:-"no"} # check fs after each test suite
     export FSCK_MAX_ERR=4   # File system errors left uncorrected
 
-    # This is used by a small number of tests to share state between the client
-    # running the tests, or in some cases between the servers (e.g. lfsck.sh).
-    # It needs to be a non-lustre filesystem that is available on all the nodes.
-    export SHARED_DIRECTORY=${SHARED_DIRECTORY:-"/tmp"}
-    export MDSDB=${MDSDB:-$SHARED_DIRECTORY/mdsdb}
-    export OSTDB=${OSTDB:-$SHARED_DIRECTORY/ostdb}
-
     #[ -d /r ] && export ROOT=${ROOT:-/r}
     export TMP=${TMP:-$ROOT/tmp}
     export TESTSUITELOG=${TMP}/${TESTSUITE}.log
@@ -139,7 +137,7 @@ init_test_env() {
         export LOGDIR=${LOGDIR:-${TMP}/test_logs/}/$(date +%s)
         export LOGDIRSET=true
     fi
-    export HOSTNAME=${HOSTNAME:-`hostname`}
+    export HOSTNAME=${HOSTNAME:-$(hostname -s)}
     if ! echo $PATH | grep -q $LUSTRE/utils; then
         export PATH=$LUSTRE/utils:$PATH
     fi
@@ -258,6 +256,13 @@ init_test_env() {
     rm -f $TMP/*active
 }
 
+kernel_version() {
+    echo -n $((($1 << 16) | ($2 << 8) | $3))
+}
+
+export LINUX_VERSION=$(uname -r | sed -e "s/[-.]/ /3" -e "s/ .*//")
+export LINUX_VERSION_CODE=$(kernel_version ${LINUX_VERSION//\./ })
+
 case `uname -r` in
 2.4.*) EXT=".o"; USE_QUOTA=no; [ ! "$CLIENTONLY" ] && FSTYPE=ext3;;
     *) EXT=".ko"; USE_QUOTA=yes;;
@@ -371,10 +376,15 @@ load_modules_local() {
     load_module lov/lov
     load_module mgc/mgc
     if ! client_only; then
-        grep -q crc16 /proc/kallsyms || { modprobe crc16 2>/dev/null || true; }
-        grep -q -w jbd /proc/kallsyms || { modprobe jbd 2>/dev/null || true; }
-        grep -q -w jbd2 /proc/kallsyms || { modprobe jbd2 2>/dev/null || true; }
-        [ "$FSTYPE" = "ldiskfs" ] && load_module ../ldiskfs/ldiskfs/ldiskfs
+        SYMLIST=/proc/kallsyms
+        grep -q crc16 $SYMLIST || { modprobe crc16 2>/dev/null || true; }
+        grep -q -w jbd $SYMLIST || { modprobe jbd 2>/dev/null || true; }
+        grep -q -w jbd2 $SYMLIST || { modprobe jbd2 2>/dev/null || true; }
+        if [ "$FSTYPE" = "ldiskfs" ]; then
+            grep -q exportfs_decode_fh $SYMLIST ||
+                { modprobe exportfs 2> /dev/null || true; }
+            load_module ../ldiskfs/ldiskfs/ldiskfs
+        fi
         load_module mgs/mgs
         load_module mds/mds
         load_module mdd/mdd
@@ -581,7 +591,7 @@ ostdevlabel() {
 
 set_debug_size () {
     local dz=${1:-$DEBUG_SIZE}
-    local cpus=$(getconf _NPROCESSORS_CONF)
+    local cpus=$(($(cut -d "-" -f 2 /sys/devices/system/cpu/possible)+1))
 
     # bug 19944, adjust size to be -gt num_possible_cpus()
     # promise 2MB for every cpu at least
@@ -607,12 +617,12 @@ set_default_debug_nodes () {
     local nodes=$1
 
     if [[ ,$nodes, = *,$HOSTNAME,* ]]; then
-        nodes=$(exclude_items_from_list "$nodes" "$HOSTNAME")
-        set_default_debug
+	nodes=$(exclude_items_from_list "$nodes" "$HOSTNAME")
+	set_default_debug
     fi
     [[ -n $nodes ]] && do_rpc_nodes $nodes set_default_debug \
-        \\\"$PTLDEBUG\\\" \\\"$SUBSYSTEM\\\" $DEBUG_SIZE || true
+	\\\"$PTLDEBUG\\\" \\\"$SUBSYSTEM\\\" $DEBUG_SIZE || true
 }
 
 set_default_debug_facet () {
     local facet=$1
@@ -691,7 +701,7 @@ stop() {
     local mntpt=$(facet_mntpt $facet)
     running=$(do_facet ${facet} "grep -c $mntpt' ' /proc/mounts") || true
     if [ ${running} -ne 0 ]; then
-        echo "Stopping $mntpt (opts:$@)"
+        echo "Stopping $mntpt (opts:$@) on $HOST"
         do_facet ${facet} umount -d $@ $mntpt
     fi
 
@@ -1256,7 +1266,8 @@ wait_update () {
     while [ true ]; do
         RESULT=$(do_node $node "$TEST")
         if [ "$RESULT" == "$FINAL" ]; then
-            echo "Updated after $WAIT sec: wanted '$FINAL' got '$RESULT'"
+            [ -z "$RESULT" -o $WAIT -le $sleep ] ||
+                echo "Updated after ${WAIT}s: wanted '$FINAL' got '$RESULT'"
             return 0
         fi
         [ $WAIT -ge $MAX ] && break
@@ -1264,7 +1275,7 @@ wait_update () {
         WAIT=$((WAIT + sleep))
         sleep $sleep
     done
-    echo "Update not seen after $MAX sec: wanted '$FINAL' got '$RESULT'"
+    echo "Update not seen after ${MAX}s: wanted '$FINAL' got '$RESULT'"
     return 3
 }
@@ -1517,6 +1528,14 @@ facet_failover() {
 
     echo "Failing $facet on node $host"
 
+    # Make sure the client data is synced to disk. LU-924
+    #
+    # We don't write client data synchronously (to avoid flooding sync writes
+    # when there are many clients connecting), so if the server reboots before
+    # the client data reaches disk, the client data will be lost and the client
+    # will be evicted after recovery, which is not what we expected.
+    do_facet $facet "sync; sync; sync"
+
     local affected=$(affected_facets $facet)
 
     shutdown_facet $facet
@@ -1545,7 +1564,7 @@ obd_name() {
 
 replay_barrier() {
     local facet=$1
-    do_facet $facet sync
+    do_facet $facet "sync; sync; sync"
     df $MOUNT
 
     # make sure there will be no seq change
@@ -1561,7 +1580,7 @@ replay_barrier_nodf() {
     local facet=$1
     echo running=${running}
-    do_facet $facet sync
+    do_facet $facet "sync; sync; sync"
     local svc=${facet}_svc
     echo Replay barrier on ${!svc}
     do_facet $facet $LCTL --device %${!svc} notransno
@@ -1604,6 +1623,7 @@ fail_abort() {
     local facet=$1
     stop $facet
     change_active $facet
+    wait_for_facet $facet
     mount_facet $facet -o abort_recovery
     clients_up || echo "first df failed: $?"
     clients_up || error "post-failover df: $?"
@@ -1614,6 +1634,18 @@ do_lmc() {
     exit 1
 }
 
+host_nids_address() {
+    local nodes=$1
+    local kind=$2
+
+    if [ -n "$kind" ]; then
+        nids=$(do_nodes $nodes "$LCTL list_nids | grep $kind | cut -f 1 -d '@'")
+    else
+        nids=$(do_nodes $nodes "$LCTL list_nids all | cut -f 1 -d '@'")
+    fi
+    echo $nids
+}
+
 h2name_or_ip() {
     if [ "$1" = "client" -o "$1" = "'*'" ]; then echo \'*\'; else
         echo $1"@$2"
@@ -1654,6 +1686,95 @@ h2o2ib() {
 }
 declare -fx h2o2ib
 
+# This enables variables in cfg/"setup".sh files to support the pdsh HOSTLIST
+# expressions format. As a bonus we can then just pass in those variables
+# to pdsh. What this function does is take a HOSTLIST-type string and
+# expand it into a space-delimited list for us.
+hostlist_expand() {
+    local hostlist=$1
+    local offset=$2
+    local myList
+    local item
+    local list
+
+    [ -z "$hostlist" ] && return
+
+    # Translate the case of [..],..,[..] to [..] .. [..]
+    list="${hostlist/],/] }"
+    front=${list%%[*}
+    [[ "$front" == *,* ]] && {
+        new="${list%,*} "
+        old="${list%,*},"
+        list=${list/${old}/${new}}
+    }
+
+    for item in $list; do
+        # Test if we have any []'s at all
+        if [ "$item" != "${item/\[/}" ]; then {
+            # Expand the [*] into list
+            name=${item%%[*}
+            back=${item#*]}
+
+            if [ "$name" != "$item" ]; then
+                group=${item#$name[*}
+                group=${group%%]*}
+
+                for range in ${group//,/ }; do
+                    begin=${range%-*}
+                    end=${range#*-}
+
+                    # Number of leading zeros
+                    padlen=${#begin}
+                    padlen2=${#end}
+                    end=$(echo $end | sed 's/0*//')
+                    [[ -z "$end" ]] && end=0
+                    [[ $padlen2 -gt $padlen ]] && {
+                        [[ $padlen2 -eq ${#end} ]] && padlen2=0
+                        padlen=$padlen2
+                    }
+                    begin=$(echo $begin | sed 's/0*//')
+                    [ -z $begin ] && begin=0
+
+                    for num in $(seq -f "%0${padlen}g" $begin $end); do
+                        value="${name#*,}${num}${back}"
+                        [ "$value" != "${value/\[/}" ] && {
+                            value=$(hostlist_expand "$value")
+                        }
+                        myList="$myList $value"
+                    done
+                done
+            fi
+        } else {
+            myList="$myList $item"
+        } fi
+    done
+    myList=${myList//,/ }
+    myList=${myList:1} # Remove first character which is a space
+
+    # Filter any duplicates without sorting
+    list="$myList "
+    myList="${list%% *}"
+
+    while [[ "$list" != ${myList##* } ]]; do
+        list=${list//${list%% *} /}
+        myList="$myList ${list%% *}"
+    done
+    myList="${myList%* }";
+
+    # We can select an object at an offset in the list
+    [ $# -eq 2 ] && {
+        cnt=0
+        for item in $myList; do
+            let cnt=cnt+1
+            [ $cnt -eq $offset ] && {
+                myList=$item
+            }
+        done
+        [ $(get_node_count $myList) -ne 1 ] && myList=""
+    }
+    echo $myList
+}
+
 facet_host() {
     local facet=$1
@@ -2523,24 +2644,29 @@ run_e2fsck() {
     return 0
 }
 
+# verify a directory is shared among nodes.
+check_shared_dir() {
+    local dir=$1
+
+    [ -z "$dir" ] && return 1
+    do_rpc_nodes $(comma_list $(nodes_list)) check_logdir $dir
+    check_write_access $dir || return 1
+    return 0
+}
+
 # Run e2fsck on MDT and OST(s) to generate databases used for lfsck.
 generate_db() {
     local i
     local ostidx
     local dev
-    local tmp_file
 
-    [ $MDSCOUNT -eq 1 ] || error "CMD is not supported"
-    tmp_file=$(mktemp -p $SHARED_DIRECTORY ||
-        error "fail to create file in $SHARED_DIRECTORY")
+    check_shared_dir $SHARED_DIRECTORY ||
+        error "$SHARED_DIRECTORY isn't a shared directory"
 
-    # make sure everything gets to the backing store
-    local list=$(comma_list $CLIENTS $(facet_host $SINGLEMDS) $(osts_nodes))
-    do_nodes $list "sync; sleep 2; sync"
+    export MDSDB=$SHARED_DIRECTORY/mdsdb
+    export OSTDB=$SHARED_DIRECTORY/ostdb
 
-    do_nodes $list ls $tmp_file || \
-        error "$SHARED_DIRECTORY is not a shared directory"
-    rm $tmp_file
+    [ $MDSCOUNT -eq 1 ] || error "CMD is not supported"
 
     run_e2fsck $(mdts_nodes) $MDTDEV "--mdsdb $MDSDB"
@@ -2739,15 +2865,18 @@ at_is_enabled() {
     fi
 }
 
-at_max_get() {
+at_get() {
     local facet=$1
+    local at=$2
 
-    # suppose that all ost-s has the same at_max set
-    if [ $facet == "ost" ]; then
-        do_facet ost1 "lctl get_param -n at_max"
-    else
-        do_facet $facet "lctl get_param -n at_max"
-    fi
+    # suppose that all ost-s have the same $at value set
+    [ $facet != "ost" ] || facet=ost1
+
+    do_facet $facet "lctl get_param -n $at"
+}
+
+at_max_get() {
+    at_get $1 at_max
 }
 
@@ -2755,20 +2884,17 @@ at_max_set() {
     local at_max=$1
     shift
 
     local facet
+    local hosts
     for facet in $@; do
         if [ $facet == "ost" ]; then
-            for i in `seq $OSTCOUNT`; do
-                do_facet ost$i "lctl set_param at_max=$at_max"
-
-            done
+            facet=$(get_facets OST)
         elif [ $facet == "mds" ]; then
-            for i in `seq $MDSCOUNT`; do
-                do_facet mds$i "lctl set_param at_max=$at_max"
-            done
-        else
-            do_facet $facet "lctl set_param at_max=$at_max"
+            facet=$(get_facets MDS)
         fi
+        hosts=$(expand_list $hosts $(facets_hosts $facet))
     done
+
+    do_nodes $hosts lctl set_param at_max=$at_max
 }
 
 ##################################
@@ -2942,7 +3068,7 @@ error_noexit() {
 
     # We need to dump the logs on all nodes
     if $dump; then
-        gather_logs $(comma_list $(nodes_list))
+        gather_logs $(comma_list $(nodes_list)) 0
     fi
 
     debugrestore
@@ -3221,7 +3347,7 @@ run_one_logged() {
     echo
     log_sub_test_begin test_${1}
 
-    (run_one $1 "$2") 2>&1 | tee $test_log
+    (run_one $1 "$2") 2>&1 | tee -i $test_log
     local RC=${PIPESTATUS[0]}
 
     [ $RC -ne 0 ] && [ ! -f $LOGDIR/err ] && \
@@ -3307,7 +3433,22 @@ osc_to_ost()
 
 ostuuid_from_index()
 {
-    $LFS osts $2 | awk '/^'$1'/ { print $2 }'
+    $LFS osts $2 | sed -ne "/^$1: /s/.* \(.*\) .*$/\1/p"
+}
+
+ostname_from_index() {
+    local uuid=$(ostuuid_from_index $1)
+    echo ${uuid/_UUID/}
+}
+
+index_from_ostuuid()
+{
+    $LFS osts $2 | sed -ne "/${1}/s/\(.*\): .* .*$/\1/p"
+}
+
+mdtuuid_from_index()
+{
+    $LFS mdts $2 | awk '/^'$1'/ { print $2 }'
 }
 
 remote_node () {
@@ -3436,19 +3577,20 @@ remote_nodes_list () {
 init_clients_lists () {
     # Sanity check: exclude the local client from RCLIENTS
-    local rclients=$(echo " $RCLIENTS " | sed -re "s/\s+$HOSTNAME\s+/ /g")
+    local clients=$(hostlist_expand "$RCLIENTS")
+    local rclients=$(exclude_items_from_list "$clients" $HOSTNAME)
 
     # Sanity check: exclude the dup entries
-    rclients=$(for i in $rclients; do echo $i; done | sort -u)
+    RCLIENTS=$(for i in ${rclients//,/ }; do echo $i; done | sort -u)
 
-    local clients="$SINGLECLIENT $HOSTNAME $rclients"
+    clients="$SINGLECLIENT $HOSTNAME $RCLIENTS"
 
     # Sanity check: exclude the dup entries from CLIENTS
     # for those configs which has SINGLCLIENT set to local client
     clients=$(for i in $clients; do echo $i; done | sort -u)
 
-    CLIENTS=`comma_list $clients`
-    local -a remoteclients=($rclients)
+    CLIENTS=$(comma_list $clients)
+    local -a remoteclients=($RCLIENTS)
 
     for ((i=0; $i<${#remoteclients[@]}; i++)); do
         varname=CLIENT$((i + 2))
         eval $varname=${remoteclients[i]}
@@ -3779,10 +3921,7 @@ convert_facet2label() {
 }
 
 get_clientosc_proc_path() {
-    local ost=$1
-
-    # exclude -osc-M*
-    echo "${1}-osc-[!M]*"
+    echo "${1}-osc-[^M]*"
 }
 
 get_lustre_version () {
@@ -3852,12 +3991,19 @@ get_osc_import_name() {
 _wait_import_state () {
     local expected=$1
     local CONN_PROC=$2
-    local maxtime=${3:-max_recovery_time}
+    local maxtime=${3:-$(max_recovery_time)}
     local CONN_STATE
     local i=0
 
     CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2)
     while [ "${CONN_STATE}" != "${expected}" ]; do
+        if [ "${expected}" == "DISCONN" ]; then
+            # for disconn we can check after proc entry is removed
+            [ "x${CONN_STATE}" == "x" ] && return 0
+            # with AT enabled, a connect request timeout can land near the
+            # reconnect timeout, so the test can't see the real disconnect
+            [ "${CONN_STATE}" == "CONNECTING" ] && return 0
+        fi
         [ $i -ge $maxtime ] && \
             error "can't put import for $CONN_PROC into ${expected} state after $i sec, have ${CONN_STATE}" && \
             return 1
@@ -3873,43 +4019,78 @@ _wait_import_state () {
 wait_import_state() {
     local state=$1
     local params=$2
-    local maxtime=${3:-max_recovery_time}
+    local maxtime=${3:-$(max_recovery_time)}
     local param
 
     for param in ${params//,/ }; do
         _wait_import_state $state $param $maxtime || return
     done
 }
+
+# One client request could be timed out because the server was not ready
+# when the request was sent by the client.
+# The request timeout calculation details:
+# ptl_send_rpc ()
+#      /* We give the server rq_timeout secs to process the req, and
+#         add the network latency for our local timeout. */
+#      request->rq_deadline = request->rq_sent + request->rq_timeout +
+#                             ptlrpc_at_get_net_latency(request) ;
+#
+# ptlrpc_connect_import ()
+#      request->rq_timeout = INITIAL_CONNECT_TIMEOUT
+#
+# init_imp_at () ->
+#   -> at_init(&at->iat_net_latency, 0, 0) -> iat_net_latency=0
+# ptlrpc_at_get_net_latency(request) ->
+#       at_get (max (iat_net_latency=0, at_min)) = at_min
+#
+# i.e.:
+# request->rq_timeout + ptlrpc_at_get_net_latency(request) =
+# INITIAL_CONNECT_TIMEOUT + at_min
+#
+# We will use obd_timeout instead of INITIAL_CONNECT_TIMEOUT
+# because we cannot get this value at runtime; it depends on
+# configure options and is not stored in /proc.
+# obd_support.h:
+# #define CONNECTION_SWITCH_MIN 5U
+# #ifndef CRAY_XT3
+# #define INITIAL_CONNECT_TIMEOUT max(CONNECTION_SWITCH_MIN,obd_timeout/20)
+# #else
+# #define INITIAL_CONNECT_TIMEOUT max(CONNECTION_SWITCH_MIN,obd_timeout/2)
+
+request_timeout () {
+    local facet=$1
+
+    # request->rq_timeout = INITIAL_CONNECT_TIMEOUT
+    local init_connect_timeout=$TIMEOUT
+    [[ $init_connect_timeout -ge 5 ]] || init_connect_timeout=5
+
+    local at_min=$(at_get $facet at_min)
+
+    echo $(( init_connect_timeout + at_min ))
+}
+
 wait_osc_import_state() {
     local facet=$1
     local ost_facet=$2
     local expected=$3
     local ost=$(get_osc_import_name $facet $ost_facet)
-    local CONN_PROC
-    local CONN_STATE
-    local i=0
 
-    CONN_PROC="osc.${ost}.ost_server_uuid"
-    CONN_STATE=$(do_facet $facet lctl get_param -n $CONN_PROC 2>/dev/null | cut -f2)
-    while [ "${CONN_STATE}" != "${expected}" ]; do
-        if [ "${expected}" == "DISCONN" ]; then
-            # for disconn we can check after proc entry is removed
-            [ "x${CONN_STATE}" == "x" ] && return 0
-            # with AT we can have connect request timeout ~ reconnect timeout
-            # and test can't see real disconnect
-            [ "${CONN_STATE}" == "CONNECTING" ] && return 0
-        fi
-        # disconnect rpc should be wait not more obd_timeout
-        [ $i -ge $(($TIMEOUT * 3 / 2)) ] && \
-            error "can't put import for ${ost}(${ost_facet}) into ${expected} state" && return 1
-        sleep 1
-        CONN_STATE=$(do_facet $facet lctl get_param -n $CONN_PROC 2>/dev/null | cut -f2)
-        i=$(($i + 1))
-    done
+    local param="osc.${ost}.ost_server_uuid"
 
-    log "${ost_facet} now in ${CONN_STATE} state"
+    # 1. wait the deadline of client 1st request (it could be skipped)
+    # 2. wait the deadline of client 2nd request
+    local maxtime=$(( 2 * $(request_timeout $facet)))
+
+    if ! do_rpc_nodes $(facet_host $facet) \
+        _wait_import_state $expected $param $maxtime; then
+        error "import is not in ${expected} state"
+        return 1
+    fi
+
     return 0
 }
+
 get_clientmdc_proc_path() {
     echo "${1}-mdc-*"
 }
@@ -4066,6 +4247,7 @@ cleanup_pools () {
 
 gather_logs () {
     local list=$1
+    local tar_logs=$2
 
     local ts=$(date +%s)
     local docp=true
@@ -4091,10 +4273,12 @@ gather_logs () {
         do_nodes $list rsync -az "${prefix}.*.${suffix}" $HOSTNAME:$LOGDIR
     fi
 
-    local archive=$LOGDIR/${TESTSUITE}-$ts.tar.bz2
-    tar -jcf $archive $LOGDIR/*$ts* $LOGDIR/*${TESTSUITE}*
+    if [ $tar_logs == 1 ]; then
+        local archive=$LOGDIR/${TESTSUITE}-$ts.tar.bz2
+        tar -jcf $archive $LOGDIR/*$ts* $LOGDIR/*${TESTSUITE}*
 
-    echo $archive
+        echo $archive
+    fi
 }
 
 cleanup_logs () {
@@ -4137,7 +4321,7 @@ do_ls () {
 
 max_recovery_time () {
     local init_connect_timeout=$(( TIMEOUT / 20 ))
-    [[ $init_connect_timeout > 5 ]] || init_connect_timeout=5
+    [[ $init_connect_timeout -ge 5 ]] || init_connect_timeout=5
 
     local service_time=$(( $(at_max_get client) + $(( 2 * $(( 25 + 1 + init_connect_timeout)) )) ))
@@ -4376,7 +4560,7 @@ wait_flavor()
         echo -n "checking $dir..."
         res=$(do_check_flavor $dir $flavor)
         echo "found $res/$expect $flavor connections"
-        [ $res -eq $expect ] && return 0
+        [ $res -ge $expect ] && return 0
         sleep 4
     done
 
@@ -4458,6 +4642,7 @@ check_write_access() {
             return 1
         fi
     done
+    rm -f $dir/node.*.yml
     return 0
 }
@@ -4472,8 +4657,7 @@ init_logging() {
     mkdir -p $LOGDIR
     init_clients_lists
 
-    do_rpc_nodes $(comma_list $(nodes_list)) check_logdir $LOGDIR
-    if check_write_access $LOGDIR; then
+    if check_shared_dir $LOGDIR; then
        touch $LOGDIR/shared
        echo "Logging to shared log directory: $LOGDIR"
     else
@@ -4540,7 +4724,7 @@ remove_mdt_files() {
     local mdtdev=$2
     shift 2
     local files="$@"
-    local mntpt=${MOUNT%/*}/$facet
+    local mntpt=$(facet_mntpt $facet)
 
     echo "removing files from $mdtdev on $facet: $files"
     mount -t $FSTYPE $MDS_MOUNT_OPTS $mdtdev $mntpt || return $?
@@ -4557,7 +4741,7 @@ duplicate_mdt_files() {
     local mdtdev=$2
     shift 2
     local files="$@"
-    local mntpt=${MOUNT%/*}/$facet
+    local mntpt=$(facet_mntpt $facet)
 
     echo "duplicating files on $mdtdev on $facet: $files"
     mkdir -p $mntpt || return $?
@@ -4625,3 +4809,56 @@ is_sanity_benchmark() {
 min_ost_size () {
     $LCTL get_param -n osc.*.kbytesavail | sort -n | head -n1
 }
+
+# Get the block size of the filesystem.
+get_block_size() {
+    local facet=$1
+    local device=$2
+    local size
+
+    size=$(do_facet $facet "$DUMPE2FS -h $device 2>&1" |
+           awk '/^Block size:/ {print $3}')
+    echo $size
+}
+
+# Check whether the "large_xattr" feature is enabled or not.
+large_xattr_enabled() {
+    local mds_dev=$(mdsdevname ${SINGLEMDS//mds/})
+
+    do_facet $SINGLEMDS "$DUMPE2FS -h $mds_dev 2>&1 | grep -q large_xattr"
+    return ${PIPESTATUS[0]}
+}
+
+# Get the maximum xattr size supported by the filesystem.
+max_xattr_size() {
+    local size
+
+    if large_xattr_enabled; then
+        # include/linux/limits.h: #define XATTR_SIZE_MAX 65536
+        size=65536
+    else
+        local mds_dev=$(mdsdevname ${SINGLEMDS//mds/})
+        local block_size=$(get_block_size $SINGLEMDS $mds_dev)
+
+        # maximum xattr size = size of block - size of header -
+        #                      size of 1 entry - 4 null bytes
+        size=$((block_size - 32 - 32 - 4))
+    fi
+
+    echo $size
+}
+
+# Dump the value of the named xattr from a file.
+get_xattr_value() {
+    local xattr_name=$1
+    local file=$2
+
+    echo "$(getfattr -n $xattr_name --absolute-names --only-values $file)"
+}
+
+# Generate a string with size of $size bytes.
+generate_string() {
+    local size=${1:-1024} # in bytes
+
+    echo "$(head -c $size < /dev/zero | tr '\0' y)"
+}
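
A few worked examples of the helpers introduced by this patch follow; all values, host names, and outputs below are illustrative assumptions, not part of the patch. First, kernel_version() and LINUX_VERSION_CODE pack the running kernel's version digits into one integer, mirroring the kernel's KERNEL_VERSION() macro. A minimal sketch, assuming a hypothetical `uname -r` of 2.6.32-131.6.1.el6:

    # Illustrative only; the kernel release string is an assumption.
    release="2.6.32-131.6.1.el6"
    # "s/[-.]/ /3" blanks the third '.' or '-', "s/ .*//" drops the remainder
    version=$(echo $release | sed -e "s/[-.]/ /3" -e "s/ .*//")    # -> 2.6.32
    set -- ${version//./ }                                         # $1=2 $2=6 $3=32
    echo $((($1 << 16) | ($2 << 8) | $3))                          # -> 132640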
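
set_debug_size() now derives the possible-CPU count from sysfs instead of getconf. A small sketch of that parsing, assuming /sys/devices/system/cpu/possible holds the single range 0-7:

    possible="0-7"                                     # assumed contents of the sysfs file
    cpus=$(($(echo $possible | cut -d "-" -f 2) + 1))  # highest possible index + 1 = 8
    echo $cpus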
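
hostlist_expand() converts a pdsh-style HOSTLIST expression into a space-delimited list. A hedged usage sketch with made-up node names; the outputs are what the expansion logic above should produce once test-framework.sh is sourced:

    # Hypothetical node names; requires test-framework.sh to be sourced.
    hostlist_expand "mds[1-2],client5"       # -> "mds1 mds2 client5"
    hostlist_expand "oss[001-003]"           # -> "oss001 oss002 oss003" (zero padding kept)
    hostlist_expand "mds[1-2],client5" 2     # offset argument picks the 2nd item -> "mds2"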
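
The comment block before request_timeout() explains why a client connect request deadline is roughly INITIAL_CONNECT_TIMEOUT + at_min. A worked example of the wait budget wait_osc_import_state() derives from it, assuming TIMEOUT=20 and at_min=0 (both assumed values):

    TIMEOUT=20; at_min=0                                         # assumed configuration
    init_connect_timeout=$TIMEOUT
    [ $init_connect_timeout -ge 5 ] || init_connect_timeout=5    # CONNECTION_SWITCH_MIN floor
    request_timeout=$((init_connect_timeout + at_min))           # 20 + 0 = 20
    maxtime=$((2 * request_timeout))                             # two request deadlines = 40s
    echo $maxtime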
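
max_xattr_size() computes the in-block xattr limit when the large_xattr feature is off. For an assumed 4096-byte ldiskfs block size the arithmetic works out to:

    block_size=4096                       # assumed block size reported by dumpe2fs
    echo $((block_size - 32 - 32 - 4))    # block - header - one entry - 4 null bytes = 4028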