unsigned long default_iunit_sz = 5000; /* 5000 inodes */
unsigned long default_itune_ratio = 50; /* 50 percentage */
-kmem_cache_t *qunit_cachep = NULL;
+cfs_mem_cache_t *qunit_cachep = NULL;
struct list_head qunit_hash[NR_DQHASH];
spinlock_t qunit_hash_lock = SPIN_LOCK_UNLOCKED;
ENTRY;
LASSERT(imp);
- if (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_QUOTA64)
+ if ((imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_QUOTA64) &&
+ !OBD_FAIL_CHECK(OBD_FAIL_QUOTA_QD_COUNT_32BIT))
RETURN(0);
else
RETURN(1);
spin_unlock(&qunit_hash_lock);
if (qunit_cachep) {
-#ifdef HAVE_KMEM_CACHE_DESTROY_INT
int rc;
- rc = kmem_cache_destroy(qunit_cachep);
+ rc = cfs_mem_cache_destroy(qunit_cachep);
LASSERTF(rc == 0, "couldn't destory qunit_cache slab\n");
-#else
- kmem_cache_destroy(qunit_cachep);
-#endif
qunit_cachep = NULL;
}
EXIT;
ENTRY;
LASSERT(qunit_cachep == NULL);
- qunit_cachep = kmem_cache_create("ll_qunit_cache",
- sizeof(struct lustre_qunit),
- 0, 0, NULL, NULL);
+ qunit_cachep = cfs_mem_cache_create("ll_qunit_cache",
+ sizeof(struct lustre_qunit),
+ 0, 0);
if (!qunit_cachep)
RETURN(-ENOMEM);
return tmp;
}
+/* compute the remaining quota for certain gid or uid b=11693
+ *
+ * On success (QUOTA_RET_OK) qdata->qd_count is set to the remaining
+ * quota (hard limit minus current usage, clamped at 0).  Returns
+ * QUOTA_RET_NOQUOTA when quota is disabled on the superblock,
+ * QUOTA_RET_NOLIMIT when no limit applies (root user, -ESRCH from
+ * fsfilt_quotactl, or a zero hard limit), and a negative errno on
+ * allocation or quotactl failure. */
+int compute_remquota(struct obd_device *obd,
+ struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
+{
+ struct super_block *sb = qctxt->lqc_sb;
+ __u64 usage, limit;
+ struct obd_quotactl *qctl;
+ int ret = QUOTA_RET_OK;
+ /* non-zero iff this is a group quota request (QUOTA_IS_GRP flag) */
+ __u32 qdata_type = qdata->qd_flags & QUOTA_IS_GRP;
+ ENTRY;
+
+ /* quota accounting disabled on this fs: nothing to compute */
+ if (!sb_any_quota_enabled(sb))
+ RETURN(QUOTA_RET_NOQUOTA);
+
+ /* ignore root user */
+ if (qdata->qd_id == 0 && qdata_type == USRQUOTA)
+ RETURN(QUOTA_RET_NOLIMIT);
+
+ OBD_ALLOC_PTR(qctl);
+ if (qctl == NULL)
+ RETURN(-ENOMEM);
+
+ /* get fs quota usage & limit */
+ qctl->qc_cmd = Q_GETQUOTA;
+ qctl->qc_id = qdata->qd_id;
+ qctl->qc_type = qdata_type;
+ ret = fsfilt_quotactl(obd, sb, qctl);
+ if (ret) {
+ if (ret == -ESRCH) /* no limit */
+ ret = QUOTA_RET_NOLIMIT;
+ else
+ CDEBUG(D_QUOTA, "can't get fs quota usage! (rc:%d)",
+ ret);
+ GOTO(out, ret);
+ }
+
+ usage = qctl->qc_dqblk.dqb_curspace;
+ /* hard limit is in quota blocks; convert to bytes to match usage */
+ limit = qctl->qc_dqblk.dqb_bhardlimit << QUOTABLOCK_BITS;
+ if (!limit){ /* no limit */
+ ret = QUOTA_RET_NOLIMIT;
+ GOTO(out, ret);
+ }
+
+ /* clamp at zero so callers never see a negative remainder */
+ if (limit >= usage)
+ qdata->qd_count = limit - usage;
+ else
+ qdata->qd_count = 0;
+ EXIT;
+out:
+ OBD_FREE_PTR(qctl);
+ return ret;
+}
+
/* caller must hold qunit_hash_lock */
static inline struct lustre_qunit *find_qunit(unsigned int hashent,
struct lustre_quota_ctxt *qctxt,
if (!sb_any_quota_enabled(sb))
RETURN(0);
- /* ignore root user */
- if (qdata->qd_id == 0 && qdata_type == USRQUOTA)
- RETURN(0);
-
OBD_ALLOC_PTR(qctl);
if (qctl == NULL)
RETURN(-ENOMEM);
struct lustre_qunit *qunit = NULL;
ENTRY;
- OBD_SLAB_ALLOC(qunit, qunit_cachep, SLAB_NOFS, sizeof(*qunit));
+ OBD_SLAB_ALLOC(qunit, qunit_cachep, CFS_ALLOC_IO, sizeof(*qunit));
if (qunit == NULL)
RETURN(NULL);
/* FIXME check if this mds is the master of specified id */
-static int
-is_master(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
+static int
+is_master(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
unsigned int id, int type)
{
return qctxt->lqc_handler ? 1 : 0;
}
-static int
+static int
schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
struct qunit_data *qdata, int opc, int wait);
static int split_before_schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
struct qunit_data *qdata, int opc, int wait)
{
- int rc = 0, ret;
+ int rc = 0;
+ unsigned long factor;
struct qunit_data tmp_qdata;
ENTRY;
- LASSERT(qdata);
- if (qctxt->lqc_import)
- while (should_translate_quota(qctxt->lqc_import) &&
- qdata->qd_count > MAX_QUOTA_COUNT32) {
+ LASSERT(qdata && qdata->qd_count);
+ QDATA_DEBUG(qdata, "%s quota split.\n",
+ (qdata->qd_flags & QUOTA_IS_BLOCK) ? "block" : "inode");
+ if (qdata->qd_flags & QUOTA_IS_BLOCK)
+ factor = MAX_QUOTA_COUNT32 / qctxt->lqc_bunit_sz *
+ qctxt->lqc_bunit_sz;
+ else
+ factor = MAX_QUOTA_COUNT32 / qctxt->lqc_iunit_sz *
+ qctxt->lqc_iunit_sz;
+ if (qctxt->lqc_import && should_translate_quota(qctxt->lqc_import) &&
+ qdata->qd_count > factor) {
tmp_qdata = *qdata;
- tmp_qdata.qd_count = MAX_QUOTA_COUNT32;
+ tmp_qdata.qd_count = factor;
qdata->qd_count -= tmp_qdata.qd_count;
- ret = schedule_dqacq(obd, qctxt, &tmp_qdata, opc, wait);
- if (!rc)
- rc = ret;
- }
-
- if (qdata->qd_count){
- ret = schedule_dqacq(obd, qctxt, qdata, opc, wait);
- if (!rc)
- rc = ret;
+ QDATA_DEBUG((&tmp_qdata), "be split.\n");
+ rc = schedule_dqacq(obd, qctxt, &tmp_qdata, opc, wait);
+ } else{
+ QDATA_DEBUG(qdata, "not splitting.\n");
+ rc = schedule_dqacq(obd, qctxt, qdata, opc, wait);
}
RETURN(rc);
LASSERT(qdata);
qunit_sz = is_blk ? qctxt->lqc_bunit_sz : qctxt->lqc_iunit_sz;
div_r = do_div(qd_tmp, qunit_sz);
- LASSERT(!div_r);
+ LASSERTF(!div_r, "qunit_sz: %lu, return qunit_sz: "LPU64"\n",
+ qunit_sz, qd_tmp);
/* update local operational quota file */
if (rc == 0) {
switch (opc) {
case QUOTA_DQACQ:
+ CDEBUG(D_QUOTA, "%s(acq):count: %d, hardlimit: "LPU64
+ ",type: %s.\n", obd->obd_name, count, *hardlimit,
+ qdata_type ? "grp": "usr");
INC_QLIMIT(*hardlimit, count);
break;
case QUOTA_DQREL:
- LASSERT(count < *hardlimit);
+ CDEBUG(D_QUOTA, "%s(rel):count: %d, hardlimit: "LPU64
+ ",type: %s.\n", obd->obd_name, count, *hardlimit,
+ qdata_type ? "grp": "usr");
+ LASSERTF(count < *hardlimit,
+ "count: %d, hardlimit: "LPU64".\n",
+ count, *hardlimit);
*hardlimit -= count;
break;
default:
qdata = lustre_quota_old_to_new(qdata_old);
}
if (qdata == NULL) {
- DEBUG_REQ(D_ERROR, req, "error unpacking qunit_data\n");
+ DEBUG_REQ(D_ERROR, req, "error unpacking qunit_data");
RETURN(-EPROTO);
}
struct qunit_data *reqdata;
struct dqacq_async_args *aa;
int size[2] = { sizeof(struct ptlrpc_body), sizeof(*reqdata) };
+ unsigned long factor;
int rc = 0;
ENTRY;
if ((empty = alloc_qunit(qctxt, qdata, opc)) == NULL)
RETURN(-ENOMEM);
-
+
spin_lock(&qunit_hash_lock);
qunit = dqacq_in_flight(qctxt, qdata);
if (qunit) {
- if (wait)
+ if (wait)
list_add_tail(&qw.qw_entry, &qunit->lq_waiters);
spin_unlock(&qunit_hash_lock);
-
+
free_qunit(empty);
goto wait_completion;
- }
+ }
qunit = empty;
insert_qunit_nolock(qctxt, qunit);
if (wait)
RETURN(-ENOMEM);
}
+ if (qdata->qd_flags & QUOTA_IS_BLOCK)
+ factor = MAX_QUOTA_COUNT32 / qctxt->lqc_bunit_sz *
+ qctxt->lqc_bunit_sz;
+ else
+ factor = MAX_QUOTA_COUNT32 / qctxt->lqc_iunit_sz *
+ qctxt->lqc_iunit_sz;
+
LASSERT(!should_translate_quota(qctxt->lqc_import) ||
- qdata->qd_count <= MAX_QUOTA_COUNT32);
- if (should_translate_quota(qctxt->lqc_import) ||
- OBD_FAIL_CHECK(OBD_FAIL_QUOTA_QD_COUNT_32BIT))
+ qdata->qd_count <= factor);
+ if (should_translate_quota(qctxt->lqc_import))
{
struct qunit_data_old *reqdata_old, *tmp;
req->rq_interpret_reply = dqacq_interpret;
ptlrpcd_add_req(req);
- QDATA_DEBUG(qdata, "%s scheduled.\n",
+ QDATA_DEBUG(qdata, "%s scheduled.\n",
opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
wait_completion:
if (wait && qunit) {
RETURN(rc);
}
-int
+int
qctxt_wait_pending_dqacq(struct lustre_quota_ctxt *qctxt, unsigned int id,
unsigned short type, int isblk)
{
remove_qunit_nolock(qunit);
/* wake up all waiters */
- list_for_each_entry_safe(qw, tmp2, &qunit->lq_waiters,
+ list_for_each_entry_safe(qw, tmp2, &qunit->lq_waiters,
qw_entry) {
list_del_init(&qw->qw_entry);
qw->qw_rc = 0;
struct qslave_recov_thread_data *data = arg;
struct obd_device *obd = data->obd;
struct lustre_quota_ctxt *qctxt = data->qctxt;
- unsigned int type;
+ unsigned int type;
int rc = 0;
ENTRY;
rc = 0;
if (rc)
- CDEBUG(rc == -EBUSY ? D_QUOTA : D_ERROR,
+ CDEBUG(rc == -EBUSY ? D_QUOTA : D_ERROR,
"qslave recovery failed! (id:%d type:%d "
" rc:%d)\n", dqid->di_id, type, rc);
free:
RETURN(rc);
}
-void
+void
qslave_start_recovery(struct obd_device *obd, struct lustre_quota_ctxt *qctxt)
{
struct qslave_recov_thread_data data;