diff --git a/lustre/quota/qsd_lock.c b/lustre/quota/qsd_lock.c
index ae916c7..f6790a9 100644
  * GPL HEADER END
  */
 /*
- * Copyright (c) 2011, 2012, Intel, Inc.
+ * Copyright (c) 2012, 2013, Intel Corporation.
  * Use is subject to license terms.
  *
  * Author: Johann Lombardi <johann.lombardi@intel.com>
  * Author: Niu    Yawei    <yawei.niu@intel.com>
  */
 
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
-
 #define DEBUG_SUBSYSTEM S_LQUOTA
 
 #include <lustre_dlm.h>
 
 #include "qsd_internal.h"
 
+typedef int (enqi_bl_cb_t)(struct ldlm_lock *lock,
+                           struct ldlm_lock_desc *desc, void *data,
+                           int flag);
+static enqi_bl_cb_t qsd_glb_blocking_ast, qsd_id_blocking_ast;
+
+typedef int (enqi_gl_cb_t)(struct ldlm_lock *lock, void *data);
+static enqi_gl_cb_t qsd_glb_glimpse_ast, qsd_id_glimpse_ast;
+
+struct ldlm_enqueue_info qsd_glb_einfo = {
+       .ei_type        = LDLM_PLAIN,
+       .ei_mode        = LCK_CR,
+       .ei_cb_bl       = qsd_glb_blocking_ast,
+       .ei_cb_cp       = ldlm_completion_ast,
+       .ei_cb_gl       = qsd_glb_glimpse_ast,
+};
+
+struct ldlm_enqueue_info qsd_id_einfo = {
+       .ei_type        = LDLM_PLAIN,
+       .ei_mode        = LCK_CR,
+       .ei_cb_bl       = qsd_id_blocking_ast,
+       .ei_cb_cp       = ldlm_completion_ast,
+       .ei_cb_gl       = qsd_id_glimpse_ast,
+};
+
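
The two enqueue-info tables above replace the positional initializers deleted further down in this diff. With C99 designated initializers each callback is bound to a named field, so the tables stay correct even if struct ldlm_enqueue_info gains or reorders members; the typedefs exist only so the static callbacks can be forward-declared before the tables that reference them. A minimal, self-contained sketch of the difference, using a hypothetical callback table rather than the real Lustre struct:

    /* stand-in for struct ldlm_enqueue_info; all names are illustrative only */
    struct cb_table {
            int   type;
            int   mode;
            int (*cb_blocking)(void *lock, void *desc, void *data, int flag);
            int (*cb_completion)(void *lock, long flags, void *data);
            int (*cb_glimpse)(void *lock, void *data);
    };

    static int bl_cb(void *lock, void *desc, void *data, int flag) { return 0; }
    static int cp_cb(void *lock, long flags, void *data)           { return 0; }
    static int gl_cb(void *lock, void *data)                       { return 0; }

    /* old style: fields bound purely by position, fragile if the struct grows */
    static struct cb_table positional = { 0, 1, bl_cb, cp_cb, gl_cb };

    /* new style: fields bound by name, unmentioned members default to 0/NULL */
    static struct cb_table designated = {
            .type          = 0,
            .mode          = 1,
            .cb_blocking   = bl_cb,
            .cb_completion = cp_cb,
            .cb_glimpse    = gl_cb,
    };
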
 /*
  * Return qsd_qtype_info structure associated with a global lock
  *
@@ -54,16 +74,19 @@ static struct qsd_qtype_info *qsd_glb_ast_data_get(struct ldlm_lock *lock,
        qqi = lock->l_ast_data;
        if (qqi != NULL) {
                qqi_getref(qqi);
-               lu_ref_add(&qqi->qqi_reference, "ast_data_get", lock);
                if (reset)
                        lock->l_ast_data = NULL;
        }
        unlock_res_and_lock(lock);
 
+       if (qqi != NULL)
+               /* it is not safe to call lu_ref_add() under spinlock */
+               lu_ref_add(&qqi->qqi_reference, "ast_data_get", lock);
+
        if (reset && qqi != NULL) {
                /* release qqi reference hold for the lock */
-               qqi_putref(qqi);
                lu_ref_del(&qqi->qqi_reference, "glb_lock", lock);
+               qqi_putref(qqi);
        }
        RETURN(qqi);
 }
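
Two ordering details in the hunk above are worth noting: lu_ref_add() is not safe to call under the resource spinlock (hence the extra "if (qqi != NULL)" block after unlock_res_and_lock()), and the lu_ref tracking entry is now dropped before qqi_putref(), i.e. before the reference count that could free the object. A rough, self-contained sketch of the first pattern, with hypothetical names rather than the real qsd types:

    #include <linux/spinlock.h>
    #include <linux/kref.h>

    struct obj {
            spinlock_t       lock;
            struct kref      ref;
            void            *ast_data;
    };

    static void *obj_ast_data_get(struct obj *o)
    {
            void *data;

            spin_lock(&o->lock);
            data = o->ast_data;
            if (data != NULL)
                    kref_get(&o->ref);      /* atomic get, safe under spinlock */
            spin_unlock(&o->lock);

            if (data != NULL) {
                    /* anything that may sleep (the lu_ref_add() equivalent)
                     * runs here, only after the spinlock has been dropped */
            }
            return data;
    }
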
@@ -126,6 +149,8 @@ static int qsd_common_glimpse_ast(struct ptlrpc_request *req,
                RETURN(-EFAULT);
 
        /* prepare reply */
+       req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
+                            sizeof(struct lquota_lvb));
        rc = req_capsule_server_pack(&req->rq_pill);
        if (rc != 0) {
                CERROR("Can't pack response, rc %d\n", rc);
@@ -161,7 +186,7 @@ static int qsd_glb_blocking_ast(struct ldlm_lock *lock,
 
                LDLM_DEBUG(lock, "blocking AST on global quota lock");
                ldlm_lock2handle(lock, &lockh);
-               rc = ldlm_cli_cancel(&lockh);
+               rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
                break;
        }
        case LDLM_CB_CANCELING: {
@@ -175,12 +200,12 @@ static int qsd_glb_blocking_ast(struct ldlm_lock *lock,
 
                /* we are losing the global index lock, so let's mark the
                 * global & slave indexes as not up-to-date any more */
-               cfs_write_lock(&qqi->qqi_qsd->qsd_lock);
+               write_lock(&qqi->qqi_qsd->qsd_lock);
                qqi->qqi_glb_uptodate = false;
                qqi->qqi_slv_uptodate = false;
                if (lock->l_handle.h_cookie == qqi->qqi_lockh.cookie)
                        memset(&qqi->qqi_lockh, 0, sizeof(qqi->qqi_lockh));
-               cfs_write_unlock(&qqi->qqi_qsd->qsd_lock);
+               write_unlock(&qqi->qqi_qsd->qsd_lock);
 
                CDEBUG(D_QUOTA, "%s: losing global index lock for %s type\n",
                       qqi->qqi_qsd->qsd_svname, QTYPE_NAME((qqi->qqi_qtype)));
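
qsd_lock is a reader/writer lock, and this hunk simply swaps the old CFS compatibility wrappers (cfs_write_lock/cfs_write_unlock) for the native kernel rwlock primitives. As a generic illustration of the idiom, with hypothetical state in place of the real qsd_instance fields:

    #include <linux/spinlock.h>     /* rwlock_t, DEFINE_RWLOCK */
    #include <linux/types.h>

    static DEFINE_RWLOCK(state_lock);
    static bool glb_uptodate;
    static bool slv_uptodate;

    /* frequent readers take the lock shared */
    static bool state_is_uptodate(void)
    {
            bool rc;

            read_lock(&state_lock);
            rc = glb_uptodate && slv_uptodate;
            read_unlock(&state_lock);
            return rc;
    }

    /* the rare invalidation path (e.g. losing the global lock) is exclusive */
    static void state_invalidate(void)
    {
            write_lock(&state_lock);
            glb_uptodate = false;
            slv_uptodate = false;
            write_unlock(&state_lock);
    }
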
@@ -188,7 +213,7 @@ static int qsd_glb_blocking_ast(struct ldlm_lock *lock,
                /* kick off reintegration thread if not running already, if
                 * it's just local cancel (for stack clean up or eviction),
                 * don't re-trigger the reintegration. */
-               if ((lock->l_flags & LDLM_FL_LOCAL_ONLY) == 0)
+               if (!ldlm_is_local_only(lock))
                        qsd_start_reint_thread(qqi);
 
                lu_ref_del(&qqi->qqi_reference, "ast_data_get", lock);
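
The open-coded l_flags test is replaced with the ldlm_is_local_only() predicate here (and in the per-ID handler below); the named helpers read better and keep the flag definitions in one place. Purely as an illustration with made-up names, not the actual Lustre flag machinery, such a predicate boils down to:

    #define MY_FL_LOCAL_ONLY        0x1ULL          /* hypothetical flag bit */

    struct my_lock {
            unsigned long long      l_flags;
    };

    static inline int my_is_local_only(const struct my_lock *lock)
    {
            return (lock->l_flags & MY_FL_LOCAL_ONLY) != 0;
    }
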
@@ -241,7 +266,7 @@ static int qsd_glb_glimpse_ast(struct ldlm_lock *lock, void *data)
        /* extract new hard & soft limits from the glimpse descriptor */
        rec.qbr_hardlimit = desc->gl_hardlimit;
        rec.qbr_softlimit = desc->gl_softlimit;
-       rec.qbr_time      = 0;
+       rec.qbr_time      = desc->gl_time;
        rec.qbr_granted   = 0;
 
        /* We can't afford disk io in the context of glimpse callback handling
@@ -257,13 +282,7 @@ out:
        return rc;
 }
 
-struct ldlm_enqueue_info qsd_glb_einfo = { LDLM_PLAIN,
-                                          LCK_CR,
-                                          qsd_glb_blocking_ast,
-                                          ldlm_completion_ast,
-                                          qsd_glb_glimpse_ast,
-                                          NULL, NULL };
-/*
+/**
  * Blocking callback handler for per-ID lock
  *
  * \param lock - is the lock for which ast occurred.
@@ -285,7 +304,7 @@ static int qsd_id_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *de
 
                LDLM_DEBUG(lock, "blocking AST on ID quota lock");
                ldlm_lock2handle(lock, &lockh);
-               rc = ldlm_cli_cancel(&lockh);
+               rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
                break;
        }
        case LDLM_CB_CANCELING: {
@@ -302,7 +321,7 @@ static int qsd_id_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *de
 
                /* just local cancel (for stack clean up or eviction), don't
                 * release quota space in this case */
-               if ((lock->l_flags & LDLM_FL_LOCAL_ONLY) != 0) {
+               if (ldlm_is_local_only(lock)) {
                        lqe_putref(lqe);
                        break;
                }
@@ -339,7 +358,7 @@ static int qsd_id_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *de
                 * which means there could be a short window that slave is
                 * holding spare grant without per-ID lock. */
                if (rel)
-                       rc = qsd_dqacq(env, lqe, QSD_REL);
+                       rc = qsd_adjust(env, lqe);
 
                /* release lqe reference grabbed by qsd_id_ast_data_get() */
                lqe_putref(lqe);
@@ -428,21 +447,14 @@ static int qsd_id_glimpse_ast(struct ldlm_lock *lock, void *data)
        lqe_write_unlock(lqe);
 
        if (wakeup)
-               cfs_waitq_broadcast(&lqe->lqe_waiters);
+               wake_up_all(&lqe->lqe_waiters);
        lqe_putref(lqe);
 out:
        req->rq_status = rc;
        RETURN(rc);
 }
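
cfs_waitq_broadcast() is replaced above by the native wake_up_all(); lqe_waiters behaves as a standard wait queue head here. For reference, the generic waiter/waker idiom this maps onto, independent of the qsd code:

    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(wq);
    static int done;

    /* waiter side: sleeps until the condition becomes true */
    static void wait_for_done(void)
    {
            wait_event(wq, done != 0);
    }

    /* waker side: wake_up_all() resumes every waiter on the queue */
    static void signal_done(void)
    {
            done = 1;
            wake_up_all(&wq);
    }
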
 
-struct ldlm_enqueue_info qsd_id_einfo = { LDLM_PLAIN,
-                                         LCK_CR,
-                                         qsd_id_blocking_ast,
-                                         ldlm_completion_ast,
-                                         qsd_id_glimpse_ast,
-                                         NULL, NULL };
-
-/*
+/**
  * Check whether a slave already owns a ldlm lock for the quota identifier \qid.
  *
  * \param lockh  - is the local lock handle from lquota entry.
@@ -470,6 +482,7 @@ int qsd_id_lock_match(struct lustre_handle *lockh, struct lustre_handle *rlockh)
        ldlm_lock_dump_handle(D_QUOTA, lockh);
 
        if (rlockh == NULL)
+               /* caller not interested in remote handle */
                RETURN(0);
 
        /* look up lock associated with local handle and extract remote handle