if (cmd == OBD_BRW_WRITE) {
struct lu_nodemap *nodemap;
+ __u32 mapped_uid, mapped_gid;
+
+ nodemap = nodemap_get_from_exp(exp);
+ mapped_uid = nodemap_map_id(nodemap, NODEMAP_UID,
+ NODEMAP_FS_TO_CLIENT,
+ oa->o_uid);
+ mapped_gid = nodemap_map_id(nodemap, NODEMAP_GID,
+ NODEMAP_FS_TO_CLIENT,
+ oa->o_gid);
+
+ if (!IS_ERR_OR_NULL(nodemap)) {
+ /* do not bypass quota enforcement if squashed uid */
+ if (unlikely(mapped_uid == nodemap->nm_squash_uid)) {
+ int idx;
+
+ for (idx = 0; idx < npages; idx++)
+ lnb[idx].lnb_flags &= ~OBD_BRW_NOQUOTA;
+ }
+ nodemap_putref(nodemap);
+ }
valid = OBD_MD_FLUID | OBD_MD_FLGID | OBD_MD_FLPROJID |
OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME;
/* Convert back to client IDs. LU-9671.
* nodemap_get_from_exp() may fail due to nodemap deactivated,
* server ID will be returned back to client in that case. */
- nodemap = nodemap_get_from_exp(exp);
- if (nodemap != NULL && !IS_ERR(nodemap)) {
- oa->o_uid = nodemap_map_id(nodemap, NODEMAP_UID,
- NODEMAP_FS_TO_CLIENT,
- oa->o_uid);
- oa->o_gid = nodemap_map_id(nodemap, NODEMAP_GID,
- NODEMAP_FS_TO_CLIENT,
- oa->o_gid);
- nodemap_putref(nodemap);
- }
+ oa->o_uid = mapped_uid;
+ oa->o_gid = mapped_gid;
} else if (cmd == OBD_BRW_READ) {
rc = ofd_commitrw_read(env, ofd, fid, objcount,
npages, lnb);
}
/* check if the file's owner/group is over quota */
- if (!(cmd & OBD_BRW_NOQUOTA)) {
+ if (!io->ci_noquota) {
struct cl_object *obj;
struct cl_attr *attr;
unsigned int qid[LL_MAXQUOTAS];
}
run_test 74 "check quota pools per user"
+# Undo the default-nodemap changes made by test_75, then run the generic
+# quota cleanup.  Installed via stack_trap in test_75 so it also runs if
+# the test aborts early.
+function cleanup_quota_test_75()
+{
+ # restore the default nodemap to its stock settings: admin and
+ # trusted re-enabled, squash ids back to 99 (nobody)
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property admin --value 1
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property trusted --value 1
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property squash_uid --value 99
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property squash_gid --value 99
+
+ # make sure all servers have seen the restored flags before
+ # deactivating the nodemap feature
+ wait_nm_sync default admin_nodemap
+ wait_nm_sync default trusted_nodemap
+
+ do_facet mgs $LCTL nodemap_activate 0
+ wait_nm_sync active
+
+ # drop the quota limit set on $TSTUSR by test_75
+ resetquota -u $TSTUSR
+
+ cleanup_quota_test
+}
+
+# Verify that quota is enforced for a squashed root: with the default
+# nodemap squashing uid/gid to $TSTID, root's writes are charged to
+# $TSTID's quota — writes within the block hard limit succeed, a write
+# past the limit must get EDQUOT, and space is released after unlink.
+test_75()
+{
+ local limit=10 # MB
+ local testfile="$DIR/$tdir/$tfile-0"
+
+ setup_quota_test || error "setup quota failed with $?"
+ # cleanup_quota_test_75 restores the nodemap even on early failure
+ stack_trap cleanup_quota_test_75 EXIT
+
+ # enable ost quota
+ set_ost_qtype $QTYPE || error "enable ost quota failed"
+
+ # test for user
+ log "User $TSTUSR quota block hardlimit:$limit MB"
+ $LFS setquota -u $TSTID -b 0 -B ${limit}M -i 0 -I 0 $DIR ||
+ error "set user quota failed"
+
+ # make sure the system is clean
+ local used=$(getquota -u $TSTID global curspace)
+ [ $used -ne 0 ] && error "Used space ($used) for user $TSTUSR not 0."
+
+ # allow the (soon-to-be-squashed) writer into the test dir
+ chmod 777 $DIR/$tdir || error "chmod 777 $DIR/$tdir failed"
+
+ # activate the default nodemap and squash root's uid/gid to $TSTID
+ # so that root's I/O below is accounted against $TSTID's quota
+ do_facet mgs $LCTL nodemap_activate 1
+ wait_nm_sync active
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property admin --value 0
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property trusted --value 0
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property deny_unknown --value 0
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property squash_uid --value $TSTID
+ do_facet mgs $LCTL nodemap_modify --name default \
+ --property squash_gid --value $TSTID
+ cancel_lru_locks mdc
+ wait_nm_sync default admin_nodemap
+ wait_nm_sync default trusted_nodemap
+ wait_nm_sync default squash_uid
+
+ # first write stays within the limit and must succeed
+ log "Write..."
+ $DD of=$testfile bs=1M count=$((limit/2)) ||
+ quota_error u $TSTID \
+ "root write failure, but expect success"
+
+ log "Write out of block quota ..."
+ # possibly a cache write, ignore failure
+ $DD of=$testfile bs=1M count=$((limit/2)) seek=$((limit/2)) || true
+ # flush cache, ensure noquota flag is set on client
+ cancel_lru_locks osc
+ sync; sync_all_data || true
+ # sync forced cache flush, but did not guarantee that slave
+ # got new edquot through glimpse, so wait to make sure
+ sleep 5
+ # now past the hard limit: a synchronous write must fail
+ $DD of=$testfile bs=1M count=1 seek=$limit conv=fsync &&
+ quota_error u $TSTID \
+ "user write success, but expect EDQUOT"
+ rm -f $testfile
+ wait_delete_completed || error "wait_delete_completed failed"
+ sync_all_data || true
+ # NOTE(review): this queries by $TSTUSR while the earlier check used
+ # $TSTID — confirm both resolve to the same quota id
+ used=$(getquota -u $TSTUSR global curspace)
+ [ $used -eq 0 ] || quota_error u $TSTID \
+ "user quota not released after deletion"
+}
+run_test 75 "nodemap squashed root respects quota enforcement"
+
quota_fini()
{
do_nodes $(comma_list $(nodes_list)) \
fi
}
-wait_nm_sync() {
- local nodemap_name=$1
- local key=$2
- local value=$3
- local opt=$4
- local proc_param
- local is_active=$(do_facet mgs $LCTL get_param -n nodemap.active)
- local max_retries=20
- local is_sync
- local out1=""
- local out2
- local mgs_ip=$(host_nids_address $mgs_HOST $NETTYPE | cut -d' ' -f1)
- local i
-
- if [ "$nodemap_name" == "active" ]; then
- proc_param="active"
- elif [ -z "$key" ]; then
- proc_param=${nodemap_name}
- else
- proc_param="${nodemap_name}.${key}"
- fi
- if [ "$opt" == "inactive" ]; then
- # check nm sync even if nodemap is not activated
- is_active=1
- opt=""
- fi
- (( is_active == 0 )) && [ "$proc_param" != "active" ] && return
-
- if [ -z "$value" ]; then
- out1=$(do_facet mgs $LCTL get_param $opt \
- nodemap.${proc_param} 2>/dev/null)
- echo "On MGS ${mgs_ip}, ${proc_param} = $out1"
- else
- out1=$value;
- fi
-
- # wait up to 10 seconds for other servers to sync with mgs
- for i in $(seq 1 10); do
- for node in $(all_server_nodes); do
- local node_ip=$(host_nids_address $node $NETTYPE |
- cut -d' ' -f1)
-
- is_sync=true
- if [ -z "$value" ]; then
- [ $node_ip == $mgs_ip ] && continue
- fi
-
- out2=$(do_node $node_ip $LCTL get_param $opt \
- nodemap.$proc_param 2>/dev/null)
- echo "On $node ${node_ip}, ${proc_param} = $out2"
- [ "$out1" != "$out2" ] && is_sync=false && break
- done
- $is_sync && break
- sleep 1
- done
- if ! $is_sync; then
- echo MGS
- echo $out1
- echo OTHER - IP: $node_ip
- echo $out2
- error "mgs and $nodemap_name ${key} mismatch, $i attempts"
- fi
- echo "waited $((i - 1)) seconds for sync"
-}
-
# ensure that the squash defaults are the expected defaults
squash_id default 99 0
wait_nm_sync default squash_uid '' inactive
mkdir_on_mdt0() {
$LFS mkdir -i 0 -c 1 $*
}
+
+# Wait for nodemap synchronization
+# wait_nm_sync <nodemap_name> [key] [value] [opt]
+#
+# Wait (up to ~10 seconds) until every server node reports the same
+# value as the MGS for nodemap.<nodemap_name>[.<key>].  If <value> is
+# given, wait until the nodes report exactly that value instead of
+# comparing against the MGS.  Pass opt="inactive" to force the check
+# even when the nodemap feature is not active.  Calls error() on
+# timeout; returns silently when nodemap is inactive and the parameter
+# being checked is not "active".
+wait_nm_sync() {
+ local nodemap_name=$1
+ local key=$2
+ local value=$3
+ local opt=$4
+ local proc_param
+ local is_active=$(do_facet mgs $LCTL get_param -n nodemap.active)
+ # NOTE(review): max_retries is declared but unused — the wait loop
+ # below is bounded by "seq 1 10"
+ local max_retries=20
+ local is_sync
+ local out1=""
+ local out2
+ local mgs_ip=$(host_nids_address $mgs_HOST $NETTYPE | cut -d' ' -f1)
+ local i
+
+ # build the proc parameter name: "active" is global, otherwise
+ # either the whole nodemap or a single key within it
+ if [ "$nodemap_name" == "active" ]; then
+ proc_param="active"
+ elif [ -z "$key" ]; then
+ proc_param=${nodemap_name}
+ else
+ proc_param="${nodemap_name}.${key}"
+ fi
+ if [ "$opt" == "inactive" ]; then
+ # check nm sync even if nodemap is not activated
+ is_active=1
+ opt=""
+ fi
+ (( is_active == 0 )) && [ "$proc_param" != "active" ] && return
+
+ # reference value: either the caller-supplied one or whatever the
+ # MGS currently reports
+ if [ -z "$value" ]; then
+ out1=$(do_facet mgs $LCTL get_param $opt \
+ nodemap.${proc_param} 2>/dev/null)
+ echo "On MGS ${mgs_ip}, ${proc_param} = $out1"
+ else
+ out1=$value;
+ fi
+
+ # if servers run on the same node, it is impossible to tell if they get
+ # synced with the mgs, so just wait an arbitrary 10 seconds
+ # NOTE(review): compares facets "mds" and "ost1" — confirm "mds" is
+ # the right facet name on configs that use mds1/mds2
+ if [ $(facet_active_host mgs) == $(facet_active_host mds) ] &&
+ [ $(facet_active_host ost1) == $(facet_active_host ost1) ]; then
+ echo "waiting 10 secs for sync"
+ sleep 10
+ return
+ fi
+
+ # wait up to 10 seconds for other servers to sync with mgs
+ for i in $(seq 1 10); do
+ for node in $(all_server_nodes); do
+ local node_ip=$(host_nids_address $node $NETTYPE |
+ cut -d' ' -f1)
+
+ is_sync=true
+ if [ -z "$value" ]; then
+ # without an explicit value, the MGS itself is
+ # the reference and need not be re-checked
+ [ $node_ip == $mgs_ip ] && continue
+ fi
+
+ out2=$(do_node $node_ip $LCTL get_param $opt \
+ nodemap.$proc_param 2>/dev/null)
+ echo "On $node ${node_ip}, ${proc_param} = $out2"
+ [ "$out1" != "$out2" ] && is_sync=false && break
+ done
+ $is_sync && break
+ sleep 1
+ done
+ # timed out: dump both sides and fail the test
+ if ! $is_sync; then
+ echo MGS
+ echo $out1
+ echo OTHER - IP: $node_ip
+ echo $out2
+ error "mgs and $nodemap_name ${key} mismatch, $i attempts"
+ fi
+ echo "waited $((i - 1)) seconds for sync"
+}