#ifdef HAVE_QUOTA_SUPPORT
static cfs_time_t last_print = 0;
-static spinlock_t last_print_lock = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t last_print_lock = CFS_SPIN_LOCK_UNLOCKED;
static int filter_quota_setup(struct obd_device *obd)
{
struct obd_device_target *obt = &obd->u.obt;
ENTRY;
- init_rwsem(&obt->obt_rwsem);
+ cfs_init_rwsem(&obt->obt_rwsem);
obt->obt_qfmt = LUSTRE_QUOTA_V2;
- atomic_set(&obt->obt_quotachecking, 1);
+ cfs_sema_init(&obt->obt_quotachecking, 1);
rc = qctxt_init(obd, NULL);
if (rc)
CERROR("initialize quota context failed! (rc:%d)\n", rc);
LASSERT(imp != NULL);
/* setup the quota context import */
- spin_lock(&qctxt->lqc_lock);
+ cfs_spin_lock(&qctxt->lqc_lock);
if (qctxt->lqc_import != NULL) {
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
if (qctxt->lqc_import == imp)
CDEBUG(D_WARNING, "%s: lqc_import(%p) of obd(%p) was "
"activated already.\n", obd->obd_name, imp, obd);
else
- CDEBUG(D_ERROR, "%s: lqc_import(%p:%p) of obd(%p) was "
+ CERROR("%s: lqc_import(%p:%p) of obd(%p) was "
"activated by others.\n", obd->obd_name,
qctxt->lqc_import, imp, obd);
} else {
imp->imp_connect_data.ocd_connect_flags |=
(exp->exp_connect_flags &
(OBD_CONNECT_QUOTA64 | OBD_CONNECT_CHANGE_QS));
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
CDEBUG(D_QUOTA, "%s: lqc_import(%p) of obd(%p) is reactivated "
"now.\n", obd->obd_name, imp, obd);
/* when exp->exp_imp_reverse is destroyed, the corresponding lqc_import
* should be invalid b=12374 */
- spin_lock(&qctxt->lqc_lock);
+ cfs_spin_lock(&qctxt->lqc_lock);
if (qctxt->lqc_import == imp) {
qctxt->lqc_import = NULL;
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
CDEBUG(D_QUOTA, "%s: lqc_import(%p) of obd(%p) is invalid now.\n",
obd->obd_name, imp, obd);
ptlrpc_cleanup_imp(imp);
dqacq_interrupt(qctxt);
} else {
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
}
RETURN(0);
}
{
ENTRY;
- if (!sb_any_quota_enabled(obd->u.obt.obt_sb))
+ if (!ll_sb_any_quota_active(obd->u.obt.obt_sb))
RETURN(0);
if (ignore) {
struct obd_quotactl *oqctl;
ENTRY;
- if (!sb_any_quota_enabled(obt->obt_sb))
+ if (!ll_sb_any_quota_active(obt->obt_sb))
RETURN(0);
OBD_ALLOC_PTR(oqctl);
- if (!oqctl) {
- CERROR("Not enough memory!");
+ if (!oqctl)
RETURN(-ENOMEM);
- }
/* set over quota flags for a uid/gid */
oa->o_valid |= OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA;
qctxt, 0);
if (lqs == NULL || IS_ERR(lqs)) {
rc = PTR_ERR(lqs);
+ if (rc)
+ CDEBUG(D_QUOTA, "search lqs for %s %d failed, "
+ "(rc = %d)\n",
+ cnt == USRQUOTA ? "user" : "group",
+ cnt == USRQUOTA ? oa->o_uid : oa->o_gid,
+ rc);
break;
} else {
- spin_lock(&lqs->lqs_lock);
+ cfs_spin_lock(&lqs->lqs_lock);
if (lqs->lqs_bunit_sz <= qctxt->lqc_sync_blk) {
oa->o_flags |= (cnt == USRQUOTA) ?
OBD_FL_NO_USRQUOTA : OBD_FL_NO_GRPQUOTA;
- spin_unlock(&lqs->lqs_lock);
+ cfs_spin_unlock(&lqs->lqs_lock);
CDEBUG(D_QUOTA, "set sync flag: bunit(%lu), "
"sync_blk(%d)\n", lqs->lqs_bunit_sz,
qctxt->lqc_sync_blk);
lqs_putref(lqs);
continue;
}
- spin_unlock(&lqs->lqs_lock);
+ cfs_spin_unlock(&lqs->lqs_lock);
/* this is for quota_search_lqs */
lqs_putref(lqs);
}
rc = err;
oa->o_valid &= ~((cnt == USRQUOTA) ? OBD_MD_FLUSRQUOTA :
OBD_MD_FLGRPQUOTA);
+ CDEBUG(D_QUOTA, "fsfilt getquota for %s %d failed, "
+ "(rc = %d)\n",
+ cnt == USRQUOTA ? "user" : "group",
+ cnt == USRQUOTA ? oa->o_uid : oa->o_gid, err);
continue;
}
if (oqctl->qc_dqblk.dqb_bhardlimit &&
(toqb(oqctl->qc_dqblk.dqb_curspace) >=
- oqctl->qc_dqblk.dqb_bhardlimit))
+ oqctl->qc_dqblk.dqb_bhardlimit)) {
oa->o_flags |= (cnt == USRQUOTA) ?
OBD_FL_NO_USRQUOTA : OBD_FL_NO_GRPQUOTA;
+ CDEBUG(D_QUOTA, "out of quota for %s %d\n",
+ cnt == USRQUOTA ? "user" : "group",
+ cnt == USRQUOTA ? oa->o_uid : oa->o_gid);
+ }
}
OBD_FREE_PTR(oqctl);
RETURN(rc);
int rc = 0, rc2[2] = { 0, 0 };
ENTRY;
- spin_lock(&qctxt->lqc_lock);
+ cfs_spin_lock(&qctxt->lqc_lock);
if (!qctxt->lqc_valid){
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
RETURN(rc);
}
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
for (i = 0; i < MAXQUOTAS; i++) {
struct lustre_qunit_size *lqs = NULL;
}
rc2[i] = compute_remquota(obd, qctxt, &qdata[i], isblk);
- spin_lock(&lqs->lqs_lock);
+ cfs_spin_lock(&lqs->lqs_lock);
if (!cycle) {
if (isblk) {
pending[i] = count * CFS_PAGE_SIZE;
if (inode) {
mb = pending[i];
rc = fsfilt_get_mblk(obd, qctxt->lqc_sb,
- &mb, inode,frags);
+ &mb, inode,
+ frags);
if (rc)
- CDEBUG(D_ERROR,
- "can't get extra "
- "meta blocks.\n");
+ CERROR("%s: can't get extra "
+ "meta blocks\n",
+ obd->obd_name);
else
pending[i] += mb;
}
rc2[i] = QUOTA_RET_ACQUOTA;
}
- spin_unlock(&lqs->lqs_lock);
+ cfs_spin_unlock(&lqs->lqs_lock);
if (lqs->lqs_blk_rec < 0 &&
qdata[i].qd_count <
struct lustre_qunit_size *lqs;
int i, q_set = 0;
- if (!sb_any_quota_enabled(obd->u.obt.obt_qctxt.lqc_sb))
+ if (!ll_sb_any_quota_active(obd->u.obt.obt_qctxt.lqc_sb))
RETURN(0);
for (i = 0; i < MAXQUOTAS; i++) {
return q_set;
}
-static int quota_chk_acq_common(struct obd_device *obd, const unsigned int id[],
- int pending[], int count, quota_acquire acquire,
+static int quota_chk_acq_common(struct obd_device *obd, struct obd_export *exp,
+ const unsigned int id[], int pending[],
+ int count, quota_acquire acquire,
struct obd_trans_info *oti, int isblk,
struct inode *inode, int frags)
{
if (!quota_is_set(obd, id, isblk ? QB_SET : QI_SET))
RETURN(0);
+ if (isblk && (exp->exp_failed || exp->exp_abort_active_req))
+ /* If the client has been evicted or if it
+ * timed out and tried to reconnect already,
+ * abort the request immediately */
+ RETURN(-ENOTCONN);
+
CDEBUG(D_QUOTA, "check quota for %s\n", obd->obd_name);
pending[USRQUOTA] = pending[GRPQUOTA] = 0;
/* Unfortunately, if quota master is too busy to handle the
* pre-dqacq in time and quota hash on ost is used up, we
* have to wait for the completion of in flight dqacq/dqrel,
* in order to get enough quota for write b=12588 */
- do_gettimeofday(&work_start);
+ cfs_gettimeofday(&work_start);
while ((rc = quota_check_common(obd, id, pending, count, cycle, isblk,
inode, frags)) &
QUOTA_RET_ACQUOTA) {
- spin_lock(&qctxt->lqc_lock);
+ cfs_spin_lock(&qctxt->lqc_lock);
if (!qctxt->lqc_import && oti) {
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
LASSERT(oti && oti->oti_thread &&
oti->oti_thread->t_watchdog);
&lwi);
CDEBUG(D_QUOTA, "wake up when quota master is back\n");
lc_watchdog_touch(oti->oti_thread->t_watchdog,
- GET_TIMEOUT(oti->oti_thread->t_svc));
+ CFS_GET_TIMEOUT(oti->oti_thread->t_svc));
} else {
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
}
cycle++;
break;
}
+ /* Related quota has been disabled by master, but enabled by
+ * slave, do not try again. */
+ if (unlikely(rc == -ESRCH)) {
+ CERROR("mismatched quota configuration, stop try.\n");
+ break;
+ }
+
+ if (isblk && (exp->exp_failed || exp->exp_abort_active_req))
+ /* The client has been evicted or tried
+ * to reconnect already, abort the request */
+ RETURN(-ENOTCONN);
+
/* -EBUSY and others, wait a second and try again */
if (rc < 0) {
cfs_waitq_t waitq;
if (oti && oti->oti_thread && oti->oti_thread->t_watchdog)
lc_watchdog_touch(oti->oti_thread->t_watchdog,
- GET_TIMEOUT(oti->oti_thread->t_svc));
+ CFS_GET_TIMEOUT(oti->oti_thread->t_svc));
CDEBUG(D_QUOTA, "rc: %d, count_err: %d\n", rc,
count_err++);
- init_waitqueue_head(&waitq);
+ cfs_waitq_init(&waitq);
lwi = LWI_TIMEOUT(cfs_time_seconds(min(cycle, 10)), NULL,
NULL);
l_wait_event(waitq, 0, &lwi);
}
- if (rc < 0 || cycle % 10 == 2) {
- spin_lock(&last_print_lock);
+ if (rc < 0 || cycle % 10 == 0) {
+ cfs_spin_lock(&last_print_lock);
if (last_print == 0 ||
cfs_time_before((last_print + cfs_time_seconds(30)),
cfs_time_current())) {
last_print = cfs_time_current();
- spin_unlock(&last_print_lock);
+ cfs_spin_unlock(&last_print_lock);
CWARN("still haven't managed to acquire quota "
"space from the quota master after %d "
"retries (err=%d, rc=%d)\n",
cycle, count_err - 1, rc);
} else {
- spin_unlock(&last_print_lock);
+ cfs_spin_unlock(&last_print_lock);
}
}
CDEBUG(D_QUOTA, "recheck quota with rc: %d, cycle: %d\n", rc,
cycle);
}
- do_gettimeofday(&work_end);
+ cfs_gettimeofday(&work_end);
timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
lprocfs_counter_add(qctxt->lqc_stats,
isblk ? LQUOTA_WAIT_FOR_CHK_BLK :
LQUOTA_WAIT_FOR_CHK_INO,
timediff);
+ if (rc > 0)
+ rc = 0;
RETURN(rc);
}
CDEBUG(D_QUOTA, "commit pending quota for %s\n", obd->obd_name);
CLASSERT(MAXQUOTAS < 4);
- if (!sb_any_quota_enabled(qctxt->lqc_sb))
+ if (!ll_sb_any_quota_active(qctxt->lqc_sb))
RETURN(0);
- do_gettimeofday(&work_start);
+ cfs_gettimeofday(&work_start);
for (i = 0; i < MAXQUOTAS; i++) {
struct lustre_qunit_size *lqs = NULL;
continue;
}
- spin_lock(&lqs->lqs_lock);
+ cfs_spin_lock(&lqs->lqs_lock);
if (isblk) {
LASSERTF(lqs->lqs_bwrite_pending >= pending[i],
"there are too many blocks! [id %u] [%c] "
obd->obd_name,
isblk ? lqs->lqs_bwrite_pending : lqs->lqs_iwrite_pending,
i, pending[i], isblk);
- spin_unlock(&lqs->lqs_lock);
+ cfs_spin_unlock(&lqs->lqs_lock);
/* for quota_search_lqs in pending_commit */
lqs_putref(lqs);
/* for quota_search_lqs in quota_check */
lqs_putref(lqs);
}
- do_gettimeofday(&work_end);
+ cfs_gettimeofday(&work_end);
timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
lprocfs_counter_add(qctxt->lqc_stats,
isblk ? LQUOTA_WAIT_FOR_COMMIT_BLK :
RETURN(0);
}
- init_rwsem(&obt->obt_rwsem);
+ cfs_init_rwsem(&obt->obt_rwsem);
obt->obt_qfmt = LUSTRE_QUOTA_V2;
mds->mds_quota_info.qi_version = LUSTRE_QUOTA_V2;
- atomic_set(&obt->obt_quotachecking, 1);
+ cfs_sema_init(&obt->obt_quotachecking, 1);
/* initialize quota master and quota context */
- sema_init(&mds->mds_qonoff_sem, 1);
+ cfs_init_rwsem(&mds->mds_qonoff_sem);
rc = qctxt_init(obd, dqacq_handler);
if (rc) {
- CERROR("initialize quota context failed! (rc:%d)\n", rc);
+ CERROR("%s: initialize quota context failed! (rc:%d)\n",
+ obd->obd_name, rc);
RETURN(rc);
}
mds->mds_quota = 1;
memset(&oqctl, 0, sizeof(oqctl));
oqctl.qc_type = UGQUOTA;
- down(&mds->mds_qonoff_sem);
+ cfs_down_write(&mds->mds_qonoff_sem);
mds_admin_quota_off(obd, &oqctl);
- up(&mds->mds_qonoff_sem);
+ cfs_up_write(&mds->mds_qonoff_sem);
RETURN(0);
}
#endif /* __KERNEL__ */
struct osc_quota_info {
- struct list_head oqi_hash; /* hash list */
+ cfs_list_t oqi_hash; /* hash list */
struct client_obd *oqi_cli; /* osc obd */
unsigned int oqi_id; /* uid/gid of a file */
short oqi_type; /* quota type */
};
-spinlock_t qinfo_list_lock = SPIN_LOCK_UNLOCKED;
+cfs_spinlock_t qinfo_list_lock = CFS_SPIN_LOCK_UNLOCKED;
-static struct list_head qinfo_hash[NR_DQHASH];
+static cfs_list_t qinfo_hash[NR_DQHASH];
/* SLAB cache for client quota context */
cfs_mem_cache_t *qinfo_cachep = NULL;
/* caller must hold qinfo_list_lock */
static inline void insert_qinfo_hash(struct osc_quota_info *oqi)
{
- struct list_head *head = qinfo_hash +
+ cfs_list_t *head = qinfo_hash +
hashfn(oqi->oqi_cli, oqi->oqi_id, oqi->oqi_type);
LASSERT_SPIN_LOCKED(&qinfo_list_lock);
- list_add(&oqi->oqi_hash, head);
+ cfs_list_add(&oqi->oqi_hash, head);
}
/* caller must hold qinfo_list_lock */
static inline void remove_qinfo_hash(struct osc_quota_info *oqi)
{
LASSERT_SPIN_LOCKED(&qinfo_list_lock);
- list_del_init(&oqi->oqi_hash);
+ cfs_list_del_init(&oqi->oqi_hash);
}
/* caller must hold qinfo_list_lock */
ENTRY;
LASSERT_SPIN_LOCKED(&qinfo_list_lock);
- list_for_each_entry(oqi, &qinfo_hash[hashent], oqi_hash) {
+ cfs_list_for_each_entry(oqi, &qinfo_hash[hashent], oqi_hash) {
if (oqi->oqi_cli == cli &&
oqi->oqi_id == id && oqi->oqi_type == type)
return oqi;
int cnt, rc = QUOTA_OK;
ENTRY;
- spin_lock(&qinfo_list_lock);
+ cfs_spin_lock(&qinfo_list_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
struct osc_quota_info *oqi = NULL;
break;
}
}
- spin_unlock(&qinfo_list_lock);
+ cfs_spin_unlock(&qinfo_list_lock);
+ if (rc == NO_QUOTA)
+ CDEBUG(D_QUOTA, "chkdq found noquota for %s %d\n",
+ cnt == USRQUOTA ? "user" : "group", id);
RETURN(rc);
}
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- struct osc_quota_info *oqi, *old;
+ struct osc_quota_info *oqi = NULL, *old;
if (!(valid & ((cnt == USRQUOTA) ?
OBD_MD_FLUSRQUOTA : OBD_MD_FLGRPQUOTA)))
noquota = (cnt == USRQUOTA) ?
(flags & OBD_FL_NO_USRQUOTA) : (flags & OBD_FL_NO_GRPQUOTA);
- oqi = alloc_qinfo(cli, id, cnt);
- if (oqi) {
- spin_lock(&qinfo_list_lock);
-
- old = find_qinfo(cli, id, cnt);
- if (old && !noquota)
- remove_qinfo_hash(old);
- else if (!old && noquota)
- insert_qinfo_hash(oqi);
-
- spin_unlock(&qinfo_list_lock);
+ if (noquota) {
+ oqi = alloc_qinfo(cli, id, cnt);
+ if (!oqi) {
+ rc = -ENOMEM;
+ CDEBUG(D_QUOTA, "setdq for %s %d failed, "
+ "(rc = %d)\n",
+ cnt == USRQUOTA ? "user" : "group",
+ id, rc);
+ break;
+ }
+ }
- if (old || !noquota)
+ cfs_spin_lock(&qinfo_list_lock);
+ old = find_qinfo(cli, id, cnt);
+ if (old && !noquota)
+ remove_qinfo_hash(old);
+ else if (!old && noquota)
+ insert_qinfo_hash(oqi);
+ cfs_spin_unlock(&qinfo_list_lock);
+
+ if (old && !noquota)
+ CDEBUG(D_QUOTA, "setdq to remove for %s %d\n",
+ cnt == USRQUOTA ? "user" : "group", id);
+ else if (!old && noquota)
+ CDEBUG(D_QUOTA, "setdq to insert for %s %d\n",
+ cnt == USRQUOTA ? "user" : "group", id);
+
+ if (old) {
+ if (noquota)
free_qinfo(oqi);
- if (old && !noquota)
+ else
free_qinfo(old);
- } else {
- CERROR("not enough mem!\n");
- rc = -ENOMEM;
- break;
}
}
int i;
ENTRY;
- spin_lock(&qinfo_list_lock);
+ cfs_spin_lock(&qinfo_list_lock);
for (i = 0; i < NR_DQHASH; i++) {
- list_for_each_entry_safe(oqi, n, &qinfo_hash[i], oqi_hash) {
+ cfs_list_for_each_entry_safe(oqi, n, &qinfo_hash[i], oqi_hash) {
if (oqi->oqi_cli != cli)
continue;
remove_qinfo_hash(oqi);
free_qinfo(oqi);
}
}
- spin_unlock(&qinfo_list_lock);
+ cfs_spin_unlock(&qinfo_list_lock);
RETURN(0);
}
int i, rc;
ENTRY;
- spin_lock(&qinfo_list_lock);
+ cfs_spin_lock(&qinfo_list_lock);
for (i = 0; i < NR_DQHASH; i++) {
- list_for_each_entry_safe(oqi, n, &qinfo_hash[i], oqi_hash) {
+ cfs_list_for_each_entry_safe(oqi, n, &qinfo_hash[i], oqi_hash) {
remove_qinfo_hash(oqi);
free_qinfo(oqi);
}
}
- spin_unlock(&qinfo_list_lock);
+ cfs_spin_unlock(&qinfo_list_lock);
rc = cfs_mem_cache_destroy(qinfo_cachep);
LASSERTF(rc == 0, "couldn't destory qinfo_cachep slab\n");