* GPL HEADER END
*/
/*
- * Copyright (c) 2011, 2012, Intel, Inc.
+ * Copyright (c) 2012, 2013, Intel Corporation.
* Use is subject to license terms.
*
* Author: Johann Lombardi <johann.lombardi@intel.com>
* Author: Niu Yawei <yawei.niu@intel.com>
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
-
#define DEBUG_SUBSYSTEM S_LQUOTA
#include <lustre_dlm.h>
#include "qsd_internal.h"
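+
+/* forward declarations (an assumption on placement: the blocking and glimpse
+ * AST handlers are defined further down in this file) so that the enqueue
+ * info structures below can reference them */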
+typedef int (enqi_bl_cb_t)(struct ldlm_lock *lock,
+ struct ldlm_lock_desc *desc, void *data,
+ int flag);
+static enqi_bl_cb_t qsd_glb_blocking_ast, qsd_id_blocking_ast;
+
+typedef int (enqi_gl_cb_t)(struct ldlm_lock *lock, void *data);
+static enqi_gl_cb_t qsd_glb_glimpse_ast, qsd_id_glimpse_ast;
+
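+/* ldlm enqueue parameters used for the global quota lock */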
+struct ldlm_enqueue_info qsd_glb_einfo = {
+ .ei_type = LDLM_PLAIN,
+ .ei_mode = LCK_CR,
+ .ei_cb_bl = qsd_glb_blocking_ast,
+ .ei_cb_cp = ldlm_completion_ast,
+ .ei_cb_gl = qsd_glb_glimpse_ast,
+};
+
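+/* ldlm enqueue parameters used for per-ID quota locks */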
+struct ldlm_enqueue_info qsd_id_einfo = {
+ .ei_type = LDLM_PLAIN,
+ .ei_mode = LCK_CR,
+ .ei_cb_bl = qsd_id_blocking_ast,
+ .ei_cb_cp = ldlm_completion_ast,
+ .ei_cb_gl = qsd_id_glimpse_ast,
+};
+
/*
* Return qsd_qtype_info structure associated with a global lock
*
qqi = lock->l_ast_data;
if (qqi != NULL) {
qqi_getref(qqi);
- lu_ref_add(&qqi->qqi_reference, "ast_data_get", lock);
if (reset)
lock->l_ast_data = NULL;
}
unlock_res_and_lock(lock);
+ if (qqi != NULL)
+ /* it is not safe to call lu_ref_add() under spinlock */
+ lu_ref_add(&qqi->qqi_reference, "ast_data_get", lock);
+
if (reset && qqi != NULL) {
/* release qqi reference hold for the lock */
- qqi_putref(qqi);
lu_ref_del(&qqi->qqi_reference, "glb_lock", lock);
+ qqi_putref(qqi);
}
RETURN(qqi);
}
RETURN(-EFAULT);
/* prepare reply */
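+	/* the server-side LVB returned in the reply is an lquota_lvb, so
+	 * reserve enough space for it before packing */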
+ req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
+ sizeof(struct lquota_lvb));
rc = req_capsule_server_pack(&req->rq_pill);
if (rc != 0) {
CERROR("Can't pack response, rc %d\n", rc);
LDLM_DEBUG(lock, "blocking AST on global quota lock");
ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh);
+ rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
break;
}
case LDLM_CB_CANCELING: {
/* we are losing the global index lock, so let's mark the
* global & slave indexes as not up-to-date any more */
- cfs_write_lock(&qqi->qqi_qsd->qsd_lock);
+ write_lock(&qqi->qqi_qsd->qsd_lock);
qqi->qqi_glb_uptodate = false;
qqi->qqi_slv_uptodate = false;
if (lock->l_handle.h_cookie == qqi->qqi_lockh.cookie)
memset(&qqi->qqi_lockh, 0, sizeof(qqi->qqi_lockh));
- cfs_write_unlock(&qqi->qqi_qsd->qsd_lock);
+ write_unlock(&qqi->qqi_qsd->qsd_lock);
CDEBUG(D_QUOTA, "%s: losing global index lock for %s type\n",
qqi->qqi_qsd->qsd_svname, QTYPE_NAME((qqi->qqi_qtype)));
/* kick off the reintegration thread if it is not already running;
* if it's just a local cancel (for stack clean up or eviction),
* don't re-trigger the reintegration. */
- if ((lock->l_flags & LDLM_FL_LOCAL_ONLY) == 0)
+ if (!ldlm_is_local_only(lock))
qsd_start_reint_thread(qqi);
lu_ref_del(&qqi->qqi_reference, "ast_data_get", lock);
/* extract new hard & soft limits from the glimpse descriptor */
rec.qbr_hardlimit = desc->gl_hardlimit;
rec.qbr_softlimit = desc->gl_softlimit;
- rec.qbr_time = 0;
+ rec.qbr_time = desc->gl_time;
rec.qbr_granted = 0;
/* We can't afford disk io in the context of glimpse callback handling
return rc;
}
-struct ldlm_enqueue_info qsd_glb_einfo = { LDLM_PLAIN,
- LCK_CR,
- qsd_glb_blocking_ast,
- ldlm_completion_ast,
- qsd_glb_glimpse_ast,
- NULL, NULL };
-/*
+/**
* Blocking callback handler for per-ID lock
*
* \param lock - is the lock for which ast occurred.
LDLM_DEBUG(lock, "blocking AST on ID quota lock");
ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh);
+ rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
break;
}
case LDLM_CB_CANCELING: {
/* just local cancel (for stack clean up or eviction), don't
* release quota space in this case */
- if ((lock->l_flags & LDLM_FL_LOCAL_ONLY) != 0) {
+ if (ldlm_is_local_only(lock)) {
lqe_putref(lqe);
break;
}
/* Clear lqe_lockh & reset qunit to 0 */
qsd_set_qunit(lqe, 0);
memset(&lqe->lqe_lockh, 0, sizeof(lqe->lqe_lockh));
- lqe->lqe_edquot = false;
+ qsd_set_edquot(lqe, false);
rel = true;
}
lqe_write_unlock(lqe);
* which means there could be a short window where the slave is
* holding spare grant without a per-ID lock. */
if (rel)
- rc = qsd_dqacq(env, lqe, QSD_REL);
+ rc = qsd_adjust(env, lqe);
/* release lqe reference grabbed by qsd_id_ast_data_get() */
lqe_putref(lqe);
}
}
- lqe->lqe_edquot = !!(desc->gl_flags & LQUOTA_FL_EDQUOT);
+ qsd_set_edquot(lqe, !!(desc->gl_flags & LQUOTA_FL_EDQUOT));
lqe_write_unlock(lqe);
if (wakeup)
- cfs_waitq_broadcast(&lqe->lqe_waiters);
+ wake_up_all(&lqe->lqe_waiters);
lqe_putref(lqe);
out:
req->rq_status = rc;
RETURN(rc);
}
-struct ldlm_enqueue_info qsd_id_einfo = { LDLM_PLAIN,
- LCK_CR,
- qsd_id_blocking_ast,
- ldlm_completion_ast,
- qsd_id_glimpse_ast,
- NULL, NULL };
-
-/*
+/**
* Check whether a slave already owns an ldlm lock for the quota identifier \qid.
*
* \param lockh - is the local lock handle from lquota entry.
ldlm_lock_dump_handle(D_QUOTA, lockh);
if (rlockh == NULL)
+ /* caller not interested in remote handle */
RETURN(0);
/* look up lock associated with local handle and extract remote handle
if (lustre_handle_is_used(&qti->qti_lockh)) {
memset(&lqe->lqe_lockh, 0, sizeof(lqe->lqe_lockh));
qsd_set_qunit(lqe, 0);
- lqe->lqe_edquot = false;
+ qsd_set_edquot(lqe, false);
}
lqe_write_unlock(lqe);