* GPL HEADER END
*/
/*
- * Copyright (c) 2011, 2012, Intel, Inc.
+ * Copyright (c) 2012, 2013, Intel Corporation.
* Use is subject to license terms.
*
- * Author: Johann Lombardi <johann@whamcloud.com>
- * Author: Niu Yawei <niu@whamcloud.com>
+ * Author: Johann Lombardi <johann.lombardi@intel.com>
+ * Author: Niu Yawei <yawei.niu@intel.com>
*/
#ifndef EXPORT_SYMTAB
qqi = lock->l_ast_data;
if (qqi != NULL) {
qqi_getref(qqi);
- lu_ref_add(&qqi->qqi_reference, "ast_data_get", lock);
if (reset)
lock->l_ast_data = NULL;
}
unlock_res_and_lock(lock);
+ if (qqi != NULL)
+ /* it is not safe to call lu_ref_add() under spinlock */
+ lu_ref_add(&qqi->qqi_reference, "ast_data_get", lock);
+
if (reset && qqi != NULL) {
/* release qqi reference held for the lock */
- qqi_putref(qqi);
lu_ref_del(&qqi->qqi_reference, "glb_lock", lock);
+ qqi_putref(qqi);
}
RETURN(qqi);
}
RETURN(-EFAULT);
/* prepare reply */
+ req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
+ sizeof(struct lquota_lvb));
rc = req_capsule_server_pack(&req->rq_pill);
if (rc != 0) {
CERROR("Can't pack response, rc %d\n", rc);
LDLM_DEBUG(lock, "blocking AST on global quota lock");
ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh);
+ rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
break;
}
case LDLM_CB_CANCELING: {
/* we are losing the global index lock, so let's mark the
* global & slave indexes as not up-to-date any more */
- cfs_write_lock(&qqi->qqi_qsd->qsd_lock);
+ write_lock(&qqi->qqi_qsd->qsd_lock);
qqi->qqi_glb_uptodate = false;
qqi->qqi_slv_uptodate = false;
if (lock->l_handle.h_cookie == qqi->qqi_lockh.cookie)
memset(&qqi->qqi_lockh, 0, sizeof(qqi->qqi_lockh));
- cfs_write_unlock(&qqi->qqi_qsd->qsd_lock);
+ write_unlock(&qqi->qqi_qsd->qsd_lock);
CDEBUG(D_QUOTA, "%s: losing global index lock for %s type\n",
qqi->qqi_qsd->qsd_svname, QTYPE_NAME((qqi->qqi_qtype)));
/* valid race */
GOTO(out, rc = -ELDLM_NO_LOCK_DATA);
- LCONSOLE_INFO("%s: glimpse on glb quota locks, id:"LPU64" ver:"LPU64
- " hard:" LPU64" soft:"LPU64"\n", qqi->qqi_qsd->qsd_svname,
- desc->gl_id.qid_uid, desc->gl_ver, desc->gl_hardlimit,
- desc->gl_softlimit);
+ CDEBUG(D_QUOTA, "%s: glimpse on glb quota locks, id:"LPU64" ver:"LPU64
+ " hard:" LPU64" soft:"LPU64"\n", qqi->qqi_qsd->qsd_svname,
+ desc->gl_id.qid_uid, desc->gl_ver, desc->gl_hardlimit,
+ desc->gl_softlimit);
if (desc->gl_ver == 0) {
CERROR("%s: invalid global index version "LPU64"\n",
/* extract new hard & soft limits from the glimpse descriptor */
rec.qbr_hardlimit = desc->gl_hardlimit;
rec.qbr_softlimit = desc->gl_softlimit;
- rec.qbr_time = 0;
+ rec.qbr_time = desc->gl_time;
rec.qbr_granted = 0;
/* We can't afford disk io in the context of glimpse callback handling
LDLM_DEBUG(lock, "blocking AST on ID quota lock");
ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh);
+ rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
break;
}
case LDLM_CB_CANCELING: {
 * which means there could be a short window where the slave is
 * holding spare grant without a per-ID lock. */
if (rel)
- rc = qsd_dqacq(env, lqe, QSD_REL);
+ rc = qsd_adjust(env, lqe);
/* release lqe reference grabbed by qsd_id_ast_data_get() */
lqe_putref(lqe);
/* valid race */
GOTO(out, rc = -ELDLM_NO_LOCK_DATA);
- LQUOTA_CONSOLE(lqe, "glimpse on quota locks, new qunit:"LPU64,
- desc->gl_qunit);
+ LQUOTA_DEBUG(lqe, "glimpse on quota locks, new qunit:"LPU64,
+ desc->gl_qunit);
qsd = lqe2qqi(lqe)->qqi_qsd;
if (space > 0) {
if (lqe->lqe_pending_req > 0) {
- LQUOTA_ERROR(lqe, "request in flight, postpone "
+ LQUOTA_DEBUG(lqe, "request in flight, postpone "
"release of "LPD64, space);
lvb->lvb_id_may_rel = space;
} else {
lqe->lqe_pending_req++;
/* release quota space in glimpse reply */
- LQUOTA_ERROR(lqe, "releasing "LPD64, space);
+ LQUOTA_DEBUG(lqe, "releasing "LPD64, space);
lqe->lqe_granted -= space;
lvb->lvb_id_rel = space;
ldlm_lock_dump_handle(D_QUOTA, lockh);
if (rlockh == NULL)
+ /* caller not interested in remote handle */
RETURN(0);
/* look up lock associated with local handle and extract remote handle