}
+/* Forward a quota check to the child MD device.  The client export is
+ * no longer passed explicitly through the mqo_check() method; lower
+ * layers now fetch it from the session via md_quota(env). */
static int cmm_quota_check(const struct lu_env *env, struct md_device *m,
- struct obd_export *exp, __u32 type)
+ __u32 type)
{
struct cmm_device *cmm_dev = md2cmm_dev(m);
int rc;
rc = cmm_child_ops(cmm_dev)->mdo_quota.mqo_check(env,
cmm_dev->cmm_child,
- exp, type);
+ type);
RETURN(rc);
}
* can finish a block_write or inode_create rpc. It updates the pending
* record of block and inode, acquires quota if necessary
*/
- int (*quota_chkquota) (struct obd_device *, const unsigned int [],
- int [], int, quota_acquire,
- struct obd_trans_info *, int, struct inode *,
- int);
+ int (*quota_chkquota) (struct obd_device *, struct obd_export *,
+ const unsigned int [], int [],
+ int, quota_acquire, struct obd_trans_info *,
+ int, struct inode *, int);
/**
* For quota client, the actions after the pending write is committed
#ifdef __KERNEL__
static inline int lquota_chkquota(quota_interface_t *interface,
struct obd_device *obd,
+ struct obd_export *exp,
const unsigned int id[], int pending[],
int count, struct obd_trans_info *oti,
int isblk, void *data, int frags)
QUOTA_CHECK_OP(interface, chkquota);
QUOTA_CHECK_OP(interface, acquire);
- rc = QUOTA_OP(interface, chkquota)(obd, id, pending, count,
+ rc = QUOTA_OP(interface, chkquota)(obd, exp, id, pending, count,
QUOTA_OP(interface, acquire), oti,
isblk, (struct inode *)data, frags);
RETURN(rc);
struct lustre_capa *mc_capa[MD_CAPAINFO_MAX];
};
+/*
+ * Per-session quota context.  Carries the originating client export
+ * (set from info->mti_exp at the MDT reint/quotactl entry points) so
+ * that quota code deeper in the MD stack can retrieve it through
+ * md_quota(env) instead of taking an explicit exp argument.
+ */
+struct md_quota {
+ struct obd_export *mq_exp;
+};
+
/**
* Implemented in mdd/mdd_handler.c.
*
*/
struct md_ucred *md_ucred(const struct lu_env *env);
struct md_capainfo *md_capainfo(const struct lu_env *env);
+struct md_quota *md_quota(const struct lu_env *env);
/** metadata attributes */
enum ma_valid {
int (*mqo_check)(const struct lu_env *env,
struct md_device *m,
- struct obd_export *exp,
__u32 type);
int (*mqo_on)(const struct lu_env *env,
}
EXPORT_SYMBOL(md_capainfo);
+/*
+ * context key constructor/destructor:
+ * mdd_quota_key_init, mdd_quota_key_fini
+ */
+LU_KEY_INIT_FINI(mdd_quota, struct md_quota);
+
+struct lu_context_key mdd_quota_key = {
+ .lct_tags = LCT_SESSION,
+ .lct_init = mdd_quota_key_init,
+ .lct_fini = mdd_quota_key_fini
+};
+
+/*
+ * Look up the per-session struct md_quota for @env.  Requires a
+ * session context (le_ses != NULL); the key is registered with
+ * LCT_SESSION tags above, so the value lives in env->le_ses.
+ */
+struct md_quota *md_quota(const struct lu_env *env)
+{
+ LASSERT(env->le_ses != NULL);
+ return lu_context_key_get(env->le_ses, &mdd_quota_key);
+}
+EXPORT_SYMBOL(md_quota);
+
+
static int mdd_changelog_user_register(struct mdd_device *mdd, int *id)
{
struct llog_ctxt *ctxt;
}
/* type constructor/destructor: mdd_type_init, mdd_type_fini */
-LU_TYPE_INIT_FINI(mdd, &mdd_thread_key, &mdd_ucred_key, &mdd_capainfo_key);
+LU_TYPE_INIT_FINI(mdd, &mdd_thread_key, &mdd_ucred_key, &mdd_capainfo_key,
+ &mdd_quota_key);
const struct md_device_operations mdd_ops = {
.mdo_statfs = mdd_statfs,
struct thandle *handle;
#ifdef HAVE_QUOTA_SUPPORT
struct obd_device *obd = mdd->mdd_obd_dev;
+ struct obd_export *exp = md_quota(env)->mq_exp;
struct mds_obd *mds = &obd->u.mds;
unsigned int qids[MAXQUOTAS] = { 0, 0 };
int quota_opc = 0, rec_pending[MAXQUOTAS] = { 0, 0 };
quota_opc = FSFILT_OP_LINK;
mdd_quota_wrapper(la_tmp, qids);
/* get block quota for parent */
- lquota_chkquota(mds_quota_interface_ref, obd,
+ lquota_chkquota(mds_quota_interface_ref, obd, exp,
qids, rec_pending, 1, NULL,
LQUOTA_FLAGS_BLK, data, 1);
}
#ifdef HAVE_QUOTA_SUPPORT
struct md_ucred *uc = md_ucred(env);
struct obd_device *obd = mdd->mdd_obd_dev;
+ struct obd_export *exp = md_quota(env)->mq_exp;
struct mds_obd *mds = &obd->u.mds;
unsigned int qids[MAXQUOTAS] = { 0, 0 };
int quota_opc = 0, rec_pending[MAXQUOTAS] = { 0, 0 };
mdd_quota_wrapper(la_tmp, qids);
/* get block quota for parent */
lquota_chkquota(mds_quota_interface_ref, obd,
- qids, rec_pending, 1, NULL,
+ exp, qids, rec_pending, 1, NULL,
LQUOTA_FLAGS_BLK, data, 1);
}
} else {
struct thandle *handle;
#ifdef HAVE_QUOTA_SUPPORT
struct obd_device *obd = mdd->mdd_obd_dev;
+ struct obd_export *exp = md_quota(env)->mq_exp;
struct mds_obd *mds = &obd->u.mds;
unsigned int qcids[MAXQUOTAS] = { 0, 0 };
unsigned int qpids[MAXQUOTAS] = { 0, 0 };
quota_popc = FSFILT_OP_LINK;
mdd_quota_wrapper(la_tmp, qpids);
/* get block quota for target parent */
- lquota_chkquota(mds_quota_interface_ref, obd,
+ lquota_chkquota(mds_quota_interface_ref, obd, exp,
qpids, rec_pending, 1, NULL,
LQUOTA_FLAGS_BLK, data, 1);
}
int got_def_acl = 0;
#ifdef HAVE_QUOTA_SUPPORT
struct obd_device *obd = mdd->mdd_obd_dev;
+ struct obd_export *exp = md_quota(env)->mq_exp;
struct mds_obd *mds = &obd->u.mds;
unsigned int qcids[MAXQUOTAS] = { 0, 0 };
unsigned int qpids[MAXQUOTAS] = { 0, 0 };
mdd_quota_wrapper(&ma->ma_attr, qcids);
mdd_quota_wrapper(la_tmp, qpids);
/* get file quota for child */
- lquota_chkquota(mds_quota_interface_ref, obd, qcids,
- inode_pending, 1, NULL, 0, NULL, 0);
+ lquota_chkquota(mds_quota_interface_ref, obd, exp,
+ qcids, inode_pending, 1, NULL, 0, NULL,
+ 0);
switch (ma->ma_attr.la_mode & S_IFMT) {
case S_IFLNK:
case S_IFDIR:
/* get block quota for child and parent */
if (block_count)
lquota_chkquota(mds_quota_interface_ref, obd,
- qcids, block_pending,
+ exp, qcids, block_pending,
block_count, NULL,
LQUOTA_FLAGS_BLK, NULL, 0);
if (!same)
lquota_chkquota(mds_quota_interface_ref, obd,
- qpids, parent_pending, 1, NULL,
- LQUOTA_FLAGS_BLK, NULL, 0);
+ exp, qpids, parent_pending, 1,
+ NULL, LQUOTA_FLAGS_BLK, NULL,
+ 0);
}
}
#endif
#ifdef HAVE_QUOTA_SUPPORT
struct obd_device *obd = mdd->mdd_obd_dev;
+ struct obd_export *exp = md_quota(env)->mq_exp;
struct mds_obd *mds = &obd->u.mds;
unsigned int qspids[MAXQUOTAS] = { 0, 0 };
unsigned int qtcids[MAXQUOTAS] = { 0, 0 };
mdd_quota_wrapper(la_tmp, qtpids);
/* get block quota for target parent */
lquota_chkquota(mds_quota_interface_ref,
- obd, qtpids,
+ obd, exp, qtpids,
rec_pending, 1, NULL,
LQUOTA_FLAGS_BLK,
data, 1);
int mdd_quota_cleanup(const struct lu_env *env, struct md_device *m);
int mdd_quota_recovery(const struct lu_env *env, struct md_device *m);
int mdd_quota_check(const struct lu_env *env, struct md_device *m,
- struct obd_export *exp, __u32 type);
+ __u32 type);
int mdd_quota_on(const struct lu_env *env, struct md_device *m,
__u32 type);
int mdd_quota_off(const struct lu_env *env, struct md_device *m,
struct lu_attr *la_copy = &mdd_env_info(env)->mti_la_for_fix;
#ifdef HAVE_QUOTA_SUPPORT
struct obd_device *obd = mdd->mdd_obd_dev;
+ struct obd_export *exp = md_quota(env)->mq_exp;
struct mds_obd *mds = &obd->u.mds;
unsigned int qnids[MAXQUOTAS] = { 0, 0 };
unsigned int qoids[MAXQUOTAS] = { 0, 0 };
mdd_quota_wrapper(la_copy, qnids);
mdd_quota_wrapper(la_tmp, qoids);
/* get file quota for new owner */
- lquota_chkquota(mds_quota_interface_ref, obd, qnids,
- inode_pending, 1, NULL, 0, NULL, 0);
+ lquota_chkquota(mds_quota_interface_ref, obd, exp,
+ qnids, inode_pending, 1, NULL, 0,
+ NULL, 0);
block_count = (la_tmp->la_blocks + 7) >> 3;
if (block_count) {
void *data = NULL;
mdd_data_get(env, mdd_obj, &data);
/* get block quota for new owner */
lquota_chkquota(mds_quota_interface_ref, obd,
- qnids, block_pending,
+ exp, qnids, block_pending,
block_count, NULL,
LQUOTA_FLAGS_BLK, data, 1);
}
struct thandle *handle;
#ifdef HAVE_QUOTA_SUPPORT
struct obd_device *obd = mdd->mdd_obd_dev;
+ struct obd_export *exp = md_quota(env)->mq_exp;
struct mds_obd *mds = &obd->u.mds;
unsigned int qids[MAXQUOTAS] = { 0, 0 };
int quota_opc = 0, block_count = 0;
quota_opc = FSFILT_OP_CREATE_PARTIAL_CHILD;
mdd_quota_wrapper(&ma->ma_attr, qids);
/* get file quota for child */
- lquota_chkquota(mds_quota_interface_ref, obd, qids,
- inode_pending, 1, NULL, 0, NULL, 0);
+ lquota_chkquota(mds_quota_interface_ref, obd, exp,
+ qids, inode_pending, 1, NULL, 0,
+ NULL, 0);
switch (ma->ma_attr.la_mode & S_IFMT) {
case S_IFLNK:
case S_IFDIR:
}
/* get block quota for child */
if (block_count)
- lquota_chkquota(mds_quota_interface_ref, obd, qids,
- block_pending, block_count, NULL,
- LQUOTA_FLAGS_BLK, NULL, 0);
+ lquota_chkquota(mds_quota_interface_ref, obd, exp,
+ qids, block_pending, block_count,
+ NULL, LQUOTA_FLAGS_BLK, NULL, 0);
}
#endif
}
int mdd_quota_check(const struct lu_env *env, struct md_device *m,
- struct obd_export *exp, __u32 type)
+ __u32 type)
{
struct mdd_device *mdd = lu2mdd_dev(&m->md_lu_dev);
struct obd_device *obd = mdd->mdd_obd_dev;
+ struct obd_export *exp = md_quota(env)->mq_exp;
struct obd_quotactl *oqctl = &mdd_env_info(env)->mti_oqctl;
int rc;
ENTRY;
{
struct req_capsule *pill = info->mti_pill;
struct mdt_device *mdt = info->mti_mdt;
+ struct md_quota *mq = md_quota(info->mti_env);
struct mdt_body *repbody;
int rc = 0;
ENTRY;
rc = lustre_msg_get_status(mdt_info_req(info)->rq_repmsg);
GOTO(out_ucred, rc);
}
+ mq->mq_exp = info->mti_exp;
rc = mdt_reint_rec(info, lhc);
EXIT;
out_ucred:
struct obd_quotactl *oqctl;
struct req_capsule *pill = info->mti_pill;
struct obd_export *exp = info->mti_exp;
+ struct md_quota *mq = md_quota(info->mti_env);
struct md_device *next = info->mti_mdt->mdt_child;
int rc;
ENTRY;
if (rc)
RETURN(rc);
- rc = next->md_ops->mdo_quota.mqo_check(info->mti_env, next, exp,
+ mq->mq_exp = exp;
+ rc = next->md_ops->mdo_quota.mqo_check(info->mti_env, next,
oqctl->qc_type);
RETURN(rc);
}
struct obd_quotactl *oqctl, *repoqc;
struct req_capsule *pill = info->mti_pill;
struct obd_export *exp = info->mti_exp;
+ struct md_quota *mq = md_quota(info->mti_env);
struct md_device *next = info->mti_mdt->mdt_child;
const struct md_quota_operations *mqo = &next->md_ops->mdo_quota;
int id, rc;
repoqc = req_capsule_server_get(pill, &RMF_OBD_QUOTACTL);
LASSERT(repoqc != NULL);
+ mq->mq_exp = exp;
switch (oqctl->qc_cmd) {
case Q_QUOTAON:
rc = mqo->mqo_on(info->mti_env, next, oqctl->qc_type);
/* we try to get enough quota to write here, and let ldiskfs
* decide if it is out of quota or not b=14783 */
- lquota_chkquota(filter_quota_interface_ref, obd, qcids, rec_pending,
- quota_pages, oti, LQUOTA_FLAGS_BLK, (void *)inode,
- obj->ioo_bufcnt);
-
+ rc = lquota_chkquota(filter_quota_interface_ref, obd, exp, qcids,
+ rec_pending, quota_pages, oti, LQUOTA_FLAGS_BLK,
+ (void *)inode, obj->ioo_bufcnt);
+ if (rc == -ENOTCONN)
+ GOTO(cleanup, rc);
push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
cleanup_phase = 2;
/* Must commit after prep above in all cases */
rc = obd_commitrw(OBD_BRW_WRITE, exp, &repbody->oa, objcount, ioo,
remote_nb, npages, local_nb, oti, rc);
+ if (rc == -ENOTCONN)
+ /* quota acquire process has been given up because
+ * either the client has been evicted or the client
+ * has timed out the request already */
+ no_reply = 1;
+
if (exp_connect_rmtclient(exp)) {
repbody->oa.o_uid = o_uid;
repbody->oa.o_gid = o_gid;
return q_set;
}
-static int quota_chk_acq_common(struct obd_device *obd, const unsigned int id[],
- int pending[], int count, quota_acquire acquire,
+static int quota_chk_acq_common(struct obd_device *obd, struct obd_export *exp,
+ const unsigned int id[], int pending[],
+ int count, quota_acquire acquire,
struct obd_trans_info *oti, int isblk,
struct inode *inode, int frags)
{
if (!quota_is_set(obd, id, isblk ? QB_SET : QI_SET))
RETURN(0);
+ if (isblk && (exp->exp_failed || exp->exp_abort_active_req))
+ /* If the client has been evicted or if it
+ * timed out and tried to reconnect already,
+ * abort the request immediately */
+ RETURN(-ENOTCONN);
+
CDEBUG(D_QUOTA, "check quota for %s\n", obd->obd_name);
pending[USRQUOTA] = pending[GRPQUOTA] = 0;
/* Unfortunately, if quota master is too busy to handle the
break;
}
+ if (isblk && (exp->exp_failed || exp->exp_abort_active_req))
+ /* The client has been evicted or tried to
+ * reconnect already, abort the request */
+ RETURN(-ENOTCONN);
+
/* -EBUSY and others, wait a second and try again */
if (rc < 0) {
cfs_waitq_t waitq;
LQUOTA_WAIT_FOR_CHK_INO,
timediff);
+ if (rc > 0)
+ rc = 0;
RETURN(rc);
}
$RUNAS dd if=/dev/zero of=$TESTFILE bs=$BLK_SZ count=$(($LIMIT/2)) || quota_error g $TSTUSR "(grp) write failure, but expect success"
etime=`date +%s`
delta=$((etime - stime))
- rate=$((BLK_SZ * LIMIT / 2 / delta / 1024))
- [ $rate -gt 1024 ] || error "SLOW IO for $TSTUSR (group): $rate KB/sec"
+ if [ $delta -gt 0 ]; then
+ rate=$((BLK_SZ * LIMIT / 2 / delta / 1024))
+ [ $rate -gt 1024 ] || error "SLOW IO for $TSTUSR (group): $rate KB/sec"
+ fi
log " Done"
log " Write out of block quota ..."
# this time maybe cache write, ignore it's failure