LU-1347 style: removes obsolete EXPORT_SYMTAB macros v2
[fs/lustre-release.git] / lustre/quota/qsd_request.c
index c5e61e4..e4851e1 100644
  * GPL HEADER END
  */
 /*
- * Copyright (c) 2012 Intel, Inc.
+ * Copyright (c) 2012, 2013, Intel Corporation.
  * Use is subject to license terms.
  *
  * Author: Johann Lombardi <johann.lombardi@intel.com>
  * Author: Niu    Yawei    <yawei.niu@intel.com>
  */
 
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
-
 #define DEBUG_SUBSYSTEM S_LQUOTA
 
 #include <lustre_net.h>
@@ -45,7 +41,7 @@ struct qsd_async_args {
        struct obd_export     *aa_exp;
        struct qsd_qtype_info *aa_qqi;
        void                  *aa_arg;
-       union ldlm_wire_lvb   *aa_lvb;
+       struct lquota_lvb     *aa_lvb;
        struct lustre_handle   aa_lockh;
        qsd_req_completion_t   aa_completion;
 };
@@ -173,7 +169,7 @@ static int qsd_intent_interpret(const struct lu_env *env,
 
        rc = ldlm_cli_enqueue_fini(aa->aa_exp, req, LDLM_PLAIN, 0, LCK_CR,
                                   &flags, (void *)aa->aa_lvb,
-                                  sizeof(union ldlm_wire_lvb), lockh, rc);
+                                  sizeof(struct lquota_lvb), lockh, rc);
        if (rc < 0)
                /* the lock has been destroyed, forget about the lock handle */
                memset(lockh, 0, sizeof(*lockh));
@@ -208,7 +204,7 @@ static int qsd_intent_interpret(const struct lu_env *env,
 int qsd_intent_lock(const struct lu_env *env, struct obd_export *exp,
                    struct quota_body *qbody, bool sync, int it_op,
                    qsd_req_completion_t completion, struct qsd_qtype_info *qqi,
-                   union ldlm_wire_lvb *lvb, void *arg)
+                   struct lquota_lvb *lvb, void *arg)
 {
        struct qsd_thread_info  *qti = qsd_info(env);
        struct ptlrpc_request   *req;
@@ -242,6 +238,8 @@ int qsd_intent_lock(const struct lu_env *env, struct obd_export *exp,
        req_qbody = req_capsule_client_get(&req->rq_pill, &RMF_QUOTA_BODY);
        *req_qbody = *qbody;
 
+       req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
+                            sizeof(*lvb));
        ptlrpc_request_set_replen(req);
 
        switch(it_op) {
@@ -258,8 +256,8 @@ int qsd_intent_lock(const struct lu_env *env, struct obd_export *exp,
                break;
        case IT_QUOTA_DQACQ:
                /* build resource name associated for per-ID quota lock */
-               fid_build_quota_resid(&qbody->qb_fid, &qbody->qb_id,
-                                     &qti->qti_resid);
+               fid_build_quota_res_name(&qbody->qb_fid, &qbody->qb_id,
+                                        &qti->qti_resid);
 
                /* copy einfo template and fill ei_cbdata with lqe pointer */
                memcpy(&qti->qti_einfo, &qsd_id_einfo, sizeof(qti->qti_einfo));
@@ -271,8 +269,8 @@ int qsd_intent_lock(const struct lu_env *env, struct obd_export *exp,
 
        /* build lock enqueue request */
        rc = ldlm_cli_enqueue(exp, &req, &qti->qti_einfo, &qti->qti_resid, NULL,
-                             &flags, (void *)lvb, sizeof(*lvb), &qti->qti_lockh,
-                             1);
+                             &flags, (void *)lvb, sizeof(*lvb), LVB_T_LQUOTA,
+                             &qti->qti_lockh, 1);
        if (rc < 0) {
                ptlrpc_req_finished(req);
                GOTO(out, rc);
@@ -283,6 +281,7 @@ int qsd_intent_lock(const struct lu_env *env, struct obd_export *exp,
        case IT_QUOTA_CONN:
                /* grab reference on qqi for new lock */
 #ifdef USE_LU_REF
+       {
                struct ldlm_lock        *lock;
 
                lock = ldlm_handle2lock(&qti->qti_lockh);
@@ -292,6 +291,7 @@ int qsd_intent_lock(const struct lu_env *env, struct obd_export *exp,
                }
                lu_ref_add(&qqi->qqi_reference, "glb_lock", lock);
                LDLM_LOCK_PUT(lock);
+       }
 #endif
                qqi_getref(qqi);
                break;
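
Note on the hunk above: the added braces around the USE_LU_REF block are presumably needed because C does not allow a declaration to directly follow a case label; a label must be followed by a statement, and a declaration is not one. A minimal standalone sketch of that rule (hypothetical names, not Lustre code):

	#include <stdio.h>

	static void demo(int op)
	{
		switch (op) {
		case 1: {			/* block required: the declaration below
						 * may not directly follow the label */
			int tmp = op * 2;
			printf("tmp = %d\n", tmp);
			break;
		}
		default:
			break;
		}
	}

	int main(void)
	{
		demo(1);
		return 0;
	}

The extra scope also keeps the lock pointer local to the IT_QUOTA_CONN branch.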
@@ -348,7 +348,7 @@ out:
  */
 int qsd_fetch_index(const struct lu_env *env, struct obd_export *exp,
                    struct idx_info *ii, unsigned int npages,
-                   cfs_page_t **pages, bool *need_swab)
+                   struct page **pages, bool *need_swab)
 {
        struct ptlrpc_request   *req;
        struct idx_info         *req_ii;
@@ -372,7 +372,7 @@ int qsd_fetch_index(const struct lu_env *env, struct obd_export *exp,
        ptlrpc_at_set_req_timeout(req);
 
        /* allocate bulk descriptor */
-       desc = ptlrpc_prep_bulk_imp(req, npages, BULK_PUT_SINK,
+       desc = ptlrpc_prep_bulk_imp(req, npages, 1, BULK_PUT_SINK,
                                    MDS_BULK_PORTAL);
        if (desc == NULL) {
                ptlrpc_request_free(req);
@@ -381,7 +381,7 @@ int qsd_fetch_index(const struct lu_env *env, struct obd_export *exp,
 
        /* req now owns desc and will free it when it gets freed */
        for (i = 0; i < npages; i++)
-               ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, CFS_PAGE_SIZE);
+               ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
 
        /* pack index information in request */
        req_ii = req_capsule_client_get(&req->rq_pill, &RMF_IDX_INFO);
@@ -398,7 +398,10 @@ int qsd_fetch_index(const struct lu_env *env, struct obd_export *exp,
                                          req->rq_bulk->bd_nob_transferred);
        if (rc < 0)
                GOTO(out, rc);
-       rc = 0;
+       else
+               /* sptlrpc_cli_unwrap_bulk_read() returns the number of bytes
+                * transferred */
+               rc = 0;
 
        req_ii = req_capsule_server_get(&req->rq_pill, &RMF_IDX_INFO);
        *ii = *req_ii;
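
The comment added in the last hunk explains the reset of rc: the unwrap call reports the number of bytes transferred on success, while this function's callers expect 0 or a negative errno. A minimal userspace sketch of the same convention, using a hypothetical stand-in rather than the real sptlrpc API:

	#include <stdio.h>

	/* hypothetical stand-in: returns bytes transferred (>= 0) on success,
	 * negative errno on failure */
	static int unwrap_bulk(void)
	{
		return 4096;
	}

	int main(void)
	{
		int rc = unwrap_bulk();

		if (rc < 0)
			return rc;	/* propagate the error */
		rc = 0;			/* success: report 0, not the byte count */
		printf("rc = %d\n", rc);
		return rc;
	}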