#include <linux/sort.h>
#include <lustre_fid.h>
#include <lustre_dlm.h>
+#include <lustre_nodemap.h>
#ifndef MAX_IQ_TIME
#define MAX_IQ_TIME 604800 /* (7*24*60*60) 1 week */
struct qmt_handlers {
/* Handle quotactl request from client. */
int (*qmth_quotactl)(const struct lu_env *env, struct lu_device *d,
- struct obd_quotactl *, char *buf, int len);
+ struct lu_nodemap *nodemap, struct obd_quotactl *,
+ char *buf);
/* Handle dqacq/dqrel request from slave. */
int (*qmth_dqacq)(const struct lu_env *env, struct lu_device *d,
* on slave
*/
int lquotactl_slv(const struct lu_env *env, struct dt_device *dt,
- struct obd_quotactl *obdq, char *buf, int len);
+ struct lu_nodemap *nm, struct obd_quotactl *obdq, char *buf);
+
+/*
+ * Adjust a quota-iteration ID range according to the client's nodemap.
+ *
+ * Maps oqctl->qc_iter_qid_start/end from client-side to filesystem-side
+ * IDs and, when no end was requested, defaults the end to the nodemap
+ * offset limit for the quota type.  The mapped range is written back
+ * into oqctl so the subsequent iteration uses filesystem IDs.
+ *
+ * \param nodemap - client nodemap, or NULL when no mapping applies
+ * \param oqctl   - quotactl request; qc_iter_qid_start/end updated in place
+ *
+ * \retval 0       on success (also when nodemap is NULL)
+ * \retval -EINVAL on unknown quota type or an empty/inverted range
+ */
+static inline int lquota_iter_change_qid(struct lu_nodemap *nodemap,
+					 struct obd_quotactl *oqctl)
+{
+	unsigned long end, start;
+
+	/* no nodemap attached: keep the client-supplied range as is */
+	if (!nodemap)
+		return 0;
+
+	start = nodemap_map_id(nodemap, oqctl->qc_type, NODEMAP_CLIENT_TO_FS,
+			       oqctl->qc_iter_qid_start);
+	end = oqctl->qc_iter_qid_end;
+	if (!end) {
+		/* no explicit end requested: default to the highest
+		 * client-visible ID for this type (the offset limit looks
+		 * like an exclusive bound, hence the -1 -- confirm against
+		 * the nodemap offset code)
+		 */
+		switch (oqctl->qc_type) {
+		case USRQUOTA:
+			end = nodemap->nm_offset_limit_uid - 1;
+			break;
+		case GRPQUOTA:
+			end = nodemap->nm_offset_limit_gid - 1;
+			break;
+		case PRJQUOTA:
+			end = nodemap->nm_offset_limit_projid - 1;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	/* the (possibly defaulted) end is a client-side ID too; map it */
+	end = nodemap_map_id(nodemap, oqctl->qc_type, NODEMAP_CLIENT_TO_FS,
+			     end);
+
+	/* start and end sanity check */
+	if ((!start && !end) || start > end)
+		return -EINVAL;
+
+	if (oqctl->qc_iter_qid_start != start ||
+	    oqctl->qc_iter_qid_end != end) {
+		CDEBUG(D_QUOTA, "set IDs according to nodemap start:%llu->%lu end:%llu->%lu\n",
+		       oqctl->qc_iter_qid_start, start, oqctl->qc_iter_qid_end,
+		       end);
+		oqctl->qc_iter_qid_start = start;
+		oqctl->qc_iter_qid_end = end;
+	}
+
+	return 0;
+}
/** @} quota */
#endif /* _LUSTRE_QUOTA_H */
if (oqctl->qc_cmd == Q_SETINFO || oqctl->qc_cmd == Q_SETQUOTA) {
if (unlikely(!barrier_entry(tsi->tsi_tgt->lut_bottom)))
- RETURN(-EINPROGRESS);
+ GOTO(out_nodemap, -EINPROGRESS);
}
switch (oqctl->qc_cmd) {
+ case LUSTRE_Q_ITERQUOTA:
+ rc = lquota_iter_change_qid(nodemap, oqctl);
+ if (rc)
+ GOTO(out_nodemap, rc);
+ fallthrough;
case Q_GETINFO:
case Q_SETINFO:
case Q_SETQUOTA:
case LUSTRE_Q_GETDEFAULT_POOL:
case LUSTRE_Q_DELETEQID:
case LUSTRE_Q_RESETQID:
- case LUSTRE_Q_ITERQUOTA:
/* forward quotactl request to QMT */
- rc = qmt_hdls.qmth_quotactl(tsi->tsi_env, qmt, oqctl, buffer,
- buffer == NULL ? 0 :
- LQUOTA_ITER_BUFLEN);
+ rc = qmt_hdls.qmth_quotactl(tsi->tsi_env, qmt, nodemap, oqctl,
+ buffer);
break;
+ case LUSTRE_Q_ITEROQUOTA:
+ rc = lquota_iter_change_qid(nodemap, oqctl);
+ if (rc)
+ GOTO(out_nodemap, rc);
+ fallthrough;
case Q_GETOINFO:
case Q_GETOQUOTA:
- case LUSTRE_Q_ITEROQUOTA:
/* slave quotactl */
rc = lquotactl_slv(tsi->tsi_env, tsi->tsi_tgt->lut_bottom,
- oqctl, buffer,
- buffer == NULL ? 0 : LQUOTA_ITER_BUFLEN);
+ nodemap, oqctl, buffer);
break;
default:
repoqc = req_capsule_server_get(tsi->tsi_pill, &RMF_OBD_QUOTACTL);
if (repoqc == NULL)
RETURN(err_serious(-ENOMEM));
- *repoqc = *oqctl;
if (oqctl->qc_cmd == LUSTRE_Q_ITEROQUOTA) {
buffer = req_capsule_server_get(tsi->tsi_pill,
if (IS_ERR(nodemap))
RETURN(PTR_ERR(nodemap));
- id = repoqc->qc_id;
+ id = oqctl->qc_id;
if (oqctl->qc_type == USRQUOTA)
id = nodemap_map_id(nodemap, NODEMAP_UID,
- NODEMAP_CLIENT_TO_FS,
- repoqc->qc_id);
+ NODEMAP_CLIENT_TO_FS, id);
else if (oqctl->qc_type == GRPQUOTA)
id = nodemap_map_id(nodemap, NODEMAP_GID,
- NODEMAP_CLIENT_TO_FS,
- repoqc->qc_id);
+ NODEMAP_CLIENT_TO_FS, id);
else if (oqctl->qc_type == PRJQUOTA)
id = nodemap_map_id(nodemap, NODEMAP_PROJID,
- NODEMAP_CLIENT_TO_FS,
- repoqc->qc_id);
+ NODEMAP_CLIENT_TO_FS, id);
+ if (oqctl->qc_cmd == LUSTRE_Q_ITEROQUOTA)
+ rc = lquota_iter_change_qid(nodemap, oqctl);
nodemap_putref(nodemap);
+ if (rc)
+ RETURN(rc);
- if (repoqc->qc_id != id)
- swap(repoqc->qc_id, id);
+ if (oqctl->qc_id != id)
+ swap(oqctl->qc_id, id);
- rc = lquotactl_slv(tsi->tsi_env, tsi->tsi_tgt->lut_bottom, repoqc,
- buffer, buffer == NULL ? 0 : LQUOTA_ITER_BUFLEN);
+ rc = lquotactl_slv(tsi->tsi_env, tsi->tsi_tgt->lut_bottom, nodemap,
+ oqctl, buffer);
ofd_counter_incr(tsi->tsi_exp, LPROC_OFD_STATS_QUOTACTL,
tsi->tsi_jobid, ktime_us_delta(ktime_get(), kstart));
- if (repoqc->qc_id != id)
- swap(repoqc->qc_id, id);
+ if (oqctl->qc_id != id)
+ swap(oqctl->qc_id, id);
+
+ QCTL_COPY_NO_PNAME(repoqc, oqctl);
RETURN(rc);
}
struct dt_object_format qti_dof;
struct lu_fid qti_fid;
char qti_buf[LQUOTA_NAME_MAX];
+ struct lu_nodemap *nodemap;
};
#define qti_glb_rec qti_rec.lqr_glb_rec
enum lquota_type *);
const struct dt_index_features *glb_idx_feature(struct lu_fid *);
int lquota_obj_iter(const struct lu_env *env, struct dt_device *dev,
- struct dt_object *obj, struct lquota_entry *lqe_def,
- struct obd_quotactl *oqctl, char *buffer, int size,
- bool is_glb, bool is_md);
+ struct dt_object *obj, struct lu_nodemap *nm,
+ struct lquota_entry *lqe_def, struct obd_quotactl *oqctl,
+ char *buffer, int size, bool is_glb, bool is_md);
/* lquota_entry.c */
/* site create/destroy */
* \param is_md - true to iterate LQUOTA_MD quota settings
*/
int lquota_obj_iter(const struct lu_env *env, struct dt_device *dev,
- struct dt_object *obj, struct lquota_entry *lqe_def,
- struct obd_quotactl *oqctl, char *buf, int size,
- bool is_glb, bool is_md)
+ struct dt_object *obj, struct lu_nodemap *nodemap,
+ struct lquota_entry *lqe_def, struct obd_quotactl *oqctl,
+ char *buf, int size, bool is_glb, bool is_md)
{
struct lquota_thread_info *qti = lquota_info(env);
const struct dt_it_ops *iops;
offset = oqctl->qc_iter_dt_offset;
while ((size - cur) > (sizeof(__u64) + rec_size)) {
+ __u32 orig_id, cli_id, fs_id;
+
if (!skip)
goto get_setting;
GOTO(out_fini, rc);
}
+ orig_id = *(__u64 *)key;
if (oqctl->qc_iter_qid_end != 0 &&
- (*((__u64 *)key) < oqctl->qc_iter_qid_start ||
- *((__u64 *)key) > oqctl->qc_iter_qid_end))
+ (orig_id < oqctl->qc_iter_qid_start ||
+ orig_id > oqctl->qc_iter_qid_end))
+ goto next;
+
+ /* This place could be optimised for a case when trusted=0.
+ * In a such case we should return only two records for ROOT and
+ * for the squashed id.
+ */
+ cli_id = nodemap_map_id(nodemap, oqctl->qc_type,
+ NODEMAP_FS_TO_CLIENT,
+ orig_id);
+ fs_id = nodemap_map_id(nodemap, oqctl->qc_type,
+ NODEMAP_CLIENT_TO_FS, cli_id);
+	/* If the remapped fs id does not match the original id, the
+	 * original id is not directly visible to this client (it would
+	 * be squashed on the way back), so skip this record.
+	 */
+ if (fs_id != orig_id)
goto next;
+ *(__u64 *)key = cli_id;
memcpy(buf + cur, key, sizeof(__u64));
cur += sizeof(__u64);
* \param oqctl - is the quotactl request
*/
int lquotactl_slv(const struct lu_env *env, struct dt_device *dev,
- struct obd_quotactl *oqctl, char *buffer, int size)
+ struct lu_nodemap *nodemap, struct obd_quotactl *oqctl,
+ char *buffer)
{
struct lquota_thread_info *qti = lquota_info(env);
- __u64 key;
- struct dt_object *obj, *obj_aux = NULL;
- struct obd_dqblk *dqblk = &oqctl->qc_dqblk;
- int rc;
+ struct dt_object *obj, *obj_aux = NULL;
+ struct obd_dqblk *dqblk = &oqctl->qc_dqblk;
+ int size = buffer == NULL ? 0 : LQUOTA_ITER_BUFLEN;
+ __u64 key;
+ int rc;
ENTRY;
if (oqctl->qc_cmd != Q_GETOQUOTA &&
if (oqctl->qc_cmd == LUSTRE_Q_ITEROQUOTA) {
if (lu_device_is_md(dev->dd_lu_dev.ld_site->ls_top_dev))
- rc = lquota_obj_iter(env, dev, obj, NULL, oqctl, buffer,
- size, false, true);
+ rc = lquota_obj_iter(env, dev, obj, nodemap, NULL,
+ oqctl, buffer, size, false, true);
else
- rc = lquota_obj_iter(env, dev, obj, NULL, oqctl, buffer,
- size, false, false);
+ rc = lquota_obj_iter(env, dev, obj, nodemap, NULL,
+ oqctl, buffer, size, false, false);
GOTO(out, rc);
}
* \param oqctl - is the quotactl request
*/
static int qmt_quotactl(const struct lu_env *env, struct lu_device *ld,
- struct obd_quotactl *oqctl, char *buffer, int size)
+ struct lu_nodemap *nodemap, struct obd_quotactl *oqctl,
+ char *buffer)
{
struct qmt_thread_info *qti = qmt_info(env);
union lquota_id *id = &qti->qti_id;
char *poolname;
int qtype = oqctl->qc_type;
int rc = 0;
+ int size = buffer == NULL ? 0 : LQUOTA_ITER_BUFLEN;
bool is_default = false;
bool is_first_iter = false;
ENTRY;
glb_obj = pool->qpi_glb_obj[qtype];
rc = lquota_obj_iter(env, lu2dt_dev(ld), glb_obj,
+ nodemap,
pool->qpi_grace_lqe[qtype], oqctl,
buffer, size / 2, true, true);
glb_obj = pool->qpi_glb_obj[qtype];
rc = lquota_obj_iter(env, lu2dt_dev(ld), glb_obj,
+ nodemap,
pool->qpi_grace_lqe[qtype], oqctl,
buffer + size / 2, size / 2,
true, false);
}
run_test 93 "update projid while client write to OST"
+test_94()
+{
+ local off=100000
+ local lim=70000
+ local squash=100
+ local idcount=5
+ local nm="tenant"
+ local ip=$(host_nids_address $HOSTNAME $NETTYPE)
+ local nid=$(h2nettype $ip)
+ local lineno=0
+ local act
+
+ (( $MDS1_VERSION >= $(version_code 2.16.55.1) )) ||
+ skip "need MDS >= v2_16_55-1-g9cd60bd to respect nodemap offset"
+
+ for ((i = off; i < off + idcount; i++)); do
+ $LFS setquota -u $i -B$i $MOUNT ||
+ error "Set quota for usr id $i failed"
+ $LFS setquota -g $i -B$i $MOUNT ||
+ error "Set quota for grp id $i failed"
+ is_project_quota_supported && $LFS setquota -p $i -B$i \
+ $MOUNT || error "Set quota for prj id $i failed"
+ done
+ stack_trap "cleanup_lqes $off $((off + idcount))"
+ for ((i = 2 * off; i < 2 * off + idcount; i++)); do
+ $LFS setquota -u $i -B$i $MOUNT ||
+ error "Set quota for usr id $i failed"
+ $LFS setquota -g $i -B$i $MOUNT ||
+ error "Set quota for grp id $i failed"
+ is_project_quota_supported && $LFS setquota -p $i -B$i \
+ $MOUNT || error "Set quota for prj id $i failed"
+ done
+ stack_trap "cleanup_lqes $((2 * off)) $((2 * off + idcount))"
+
+ $LFS setquota -u $((off + squash)) -B$off $MOUNT ||
+ error "Set quota for usr id $i failed"
+ stack_trap "$LFS setquota -u $((off + squash)) -B0 $MOUNT"
+ $LFS setquota -g $((off + squash)) -B$off $MOUNT ||
+ error "Set quota for usr id $i failed"
+ stack_trap "$LFS setquota -g $((off + squash)) -B0 $MOUNT"
+ $LFS setquota -p $((off + squash)) -B$off $MOUNT ||
+ error "Set quota for usr id $i failed"
+ stack_trap "$LFS setquota -p $((off + squash)) -B0 $MOUNT"
+
+ act=$(do_facet mgs $LCTL get_param -n nodemap.active)
+ do_facet mgs $LCTL nodemap_activate 1
+ stack_trap "do_facet mgs $LCTL nodemap_activate $act; \
+ wait_nm_sync active"
+
+ do_facet mgs $LCTL nodemap_add $nm ||
+ error "unable to add $nm as nodemap"
+ stack_trap "do_facet mgs $LCTL nodemap_del $nm"
+
+ do_facet mgs $LCTL nodemap_add_range --name $nm --range $nid ||
+ error "Add range $nid to $nm failed"
+
+ do_facet mgs "$LCTL nodemap_modify --name $nm \
+ --property squash_uid --value $squash"
+ do_facet mgs "$LCTL nodemap_modify --name $nm \
+ --property squash_gid --value $squash"
+ do_facet mgs "$LCTL nodemap_modify --name $nm \
+ --property squash_projid --value $squash"
+ do_facet mgs $LCTL nodemap_add_offset --name $nm \
+ --offset $off --limit $lim ||
+ error "cannot set offset $off-$((lim + off - 1))"
+ do_facet mgs $LCTL nodemap_modify --name $nm \
+ --property admin --value 1
+ do_facet mgs $LCTL nodemap_modify --name $nm \
+ --property trusted --value 1
+ wait_nm_sync $nm trusted_nodemap
+
+ $LFS quota -u -a $MOUNT | head -n 10
+ while IFS= read -r line; do
+ (( lineno++ >= 2 )) || continue
+ read -r qid qval <<< "$line"
+ numid=$(id -u "$qid" 2>/dev/null || echo "$qid")
+ ((numid <= lim)) ||
+ error "Access to foreign quota uid range $qid"
+ # squash id is not sequential plus it might have QIDs set in
+ # prevous tests. So check limits only for the first idcount QIDs
+ # that have been set in a current test.
+ if ((lineno - 2 <= idcount)); then
+ local exp=$((off + lineno -3))
+ ((qval == exp)) ||
+ error "Quota uid $qid is $qval expect $exp"
+ fi
+ done < <($LFS quota -u -a --bhardlimit $MOUNT)
+
+ $LFS quota -g -a $MOUNT | head -n 10
+ lineno=0
+ while IFS= read -r line; do
+ (( lineno++ >= 2 )) || continue
+ read -r qid qval <<< "$line"
+ numid=$(getent group $qid | cut -d: -f3)
+ ((numid <= lim)) ||
+ error "Access to foreign quota gid range $qid"
+ if ((lineno - 2 <= idcount)); then
+ local exp=$((off + lineno -3))
+ ((qval == exp)) ||
+ error "Quota gid $qid is $qval expect $exp"
+ fi
+ done < <($LFS quota -g -a --bhardlimit $MOUNT)
+
+ is_project_quota_supported && {
+ $LFS quota -p -a $MOUNT | head -n 10
+ lineno=0
+ while IFS= read -r line; do
+ (( lineno++ >= 2 )) || continue
+ read -r qid qval <<< "$line"
+ ((qid <= lim)) ||
+ error "Access to foreign quota prjid range $qid"
+ if ((lineno - 2 <= idcount)); then
+ local exp=$((off + lineno -3))
+ ((qval == exp)) ||
+ error "Quota prj $qid lim $qval != $exp"
+ fi
+ done < <($LFS quota -p -a --bhardlimit $MOUNT)
+ }
+
+ do_facet mgs $LCTL nodemap_modify --name $nm \
+ --property trusted --value 0
+ wait_nm_sync $nm trusted_nodemap
+
+ # When trusted=0 it could return only ROOT and squash id records for
+ # user and group. And only squash id for project.
+ $LFS quota -u -a $MOUNT | head -n 10
+ lineno=0
+ while IFS= read -r line; do
+ (( lineno++ >= 2 )) || continue
+ read -r qid qval <<< "$line"
+ numid=$(id -u "$qid" 2>/dev/null || echo "$qid")
+ ((numid == 0 || numid == squash)) ||
+ error "Access to untrusted quota uid $qid"
+ ((qval == off)) ||
+ error "Quota uid $qid is now $qval expect $((off))"
+ ((lineno <= 4)) ||
+ error "Quota uid $qid val $qval not expected"
+ done < <($LFS quota -u -a --bhardlimit $MOUNT)
+
+ $LFS quota -g -a $MOUNT | head -n 10
+ lineno=0
+ while IFS= read -r line; do
+ (( lineno++ >= 2 )) || continue
+ read -r qid qval <<< "$line"
+ numid=$(getent group $qid | cut -d: -f3)
+ ((numid == 0 || numid == squash)) ||
+ error "Access to untrusted quota gid $qid"
+ ((qval == off)) ||
+ error "Quota gid $qid is now $qval expect $((off))"
+ ((lineno <= 4)) ||
+ error "Quota gid $qid val $qval not expected"
+ done < <($LFS quota -g -a --bhardlimit $MOUNT)
+
+ is_project_quota_supported && {
+ $LFS quota -p -a $MOUNT | head -n 10
+ lineno=0
+ while IFS= read -r line; do
+ (( lineno++ >= 2 )) || continue
+ read -r qid qval <<< "$line"
+ ((qid == squash)) ||
+ error "Quota prj $qid val $qval not expected"
+ ((qval == off)) ||
+ error "Quota prj $qid lim is $qval expect $off"
+ ((lineno <= 3)) ||
+ error "Quota prj $qid val $qval not expected"
+ done < <($LFS quota -p -a --bhardlimit $MOUNT)
+ }
+
+ return 0
+}
+run_test 94 "lfs quota all respects nodemap offset"
+
test_95() {
local cmd="do_facet mgs $LCTL get_param -n "
local squash=100