Whamcloud - gitweb
LU-14462 gss: fix support for namespace in lgss_keyring
[fs/lustre-release.git] / lustre / quota / qsd_request.c
index d1cd515..8f451f8 100644 (file)
@@ -21,7 +21,7 @@
  * GPL HEADER END
  */
 /*
- * Copyright (c) 2012, 2015, Intel Corporation.
+ * Copyright (c) 2012, 2016, Intel Corporation.
  * Use is subject to license terms.
  *
  * Author: Johann Lombardi <johann.lombardi@intel.com>
@@ -119,8 +119,7 @@ int qsd_send_dqacq(const struct lu_env *env, struct obd_export *exp,
 
        ptlrpc_request_set_replen(req);
 
-       CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
-       aa = ptlrpc_req_async_args(req);
+       aa = ptlrpc_req_async_args(aa, req);
        aa->aa_exp = exp;
        aa->aa_qqi = qqi;
        aa->aa_arg = (void *)lqe;
@@ -158,27 +157,45 @@ static int qsd_intent_interpret(const struct lu_env *env,
 {
        struct lustre_handle     *lockh;
        struct quota_body        *rep_qbody = NULL, *req_qbody;
-       struct ldlm_intent       *lit;
        struct qsd_async_args    *aa = (struct qsd_async_args *)arg;
+       struct ldlm_reply        *lockrep;
        __u64                     flags = LDLM_FL_HAS_INTENT;
+       struct ldlm_enqueue_info  einfo = {
+               .ei_type = LDLM_PLAIN,
+               .ei_mode = LCK_CR,
+       };
        ENTRY;
 
        LASSERT(aa->aa_exp);
        lockh = &aa->aa_lockh;
        req_qbody = req_capsule_client_get(&req->rq_pill, &RMF_QUOTA_BODY);
-       lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
+       req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
 
-       rc = ldlm_cli_enqueue_fini(aa->aa_exp, req, LDLM_PLAIN, 0, LCK_CR,
-                                  &flags, (void *)aa->aa_lvb,
-                                  sizeof(struct lquota_lvb), lockh, rc);
-       if (rc < 0)
+       rc = ldlm_cli_enqueue_fini(aa->aa_exp, req, &einfo, 0, &flags,
+                                  aa->aa_lvb, sizeof(*(aa->aa_lvb)),
+                                  lockh, rc);
+       if (rc < 0) {
                /* the lock has been destroyed, forget about the lock handle */
                memset(lockh, 0, sizeof(*lockh));
+               /*
+                * To avoid the server being overwhelmed by LDLM locks, the
+                * server may reject the locking request by returning
+                * -EINPROGRESS; this is different from the -EINPROGRESS
+                * returned by the quota code.
+                */
+               if (rc == -EINPROGRESS)
+                       rc = -EAGAIN;
+               GOTO(out, rc);
+       }
+
+       lockrep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
+       LASSERT(lockrep != NULL);
+       rc = ptlrpc_status_ntoh(lockrep->lock_policy_res2);
 
        if (rc == 0 || rc == -EDQUOT || rc == -EINPROGRESS)
                rep_qbody = req_capsule_server_get(&req->rq_pill,
                                                   &RMF_QUOTA_BODY);
-
+out:
        aa->aa_completion(env, aa->aa_qqi, req_qbody, rep_qbody, lockh,
                          aa->aa_lvb, aa->aa_arg, rc);
        RETURN(rc);
@@ -308,8 +325,7 @@ int qsd_intent_lock(const struct lu_env *env, struct obd_export *exp,
                break;
        }
 
-       CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
-       aa = ptlrpc_req_async_args(req);
+       aa = ptlrpc_req_async_args(aa, req);
        aa->aa_exp = exp;
        aa->aa_qqi = qqi;
        aa->aa_arg = arg;
@@ -375,18 +391,16 @@ int qsd_fetch_index(const struct lu_env *env, struct obd_export *exp,
 
        /* allocate bulk descriptor */
        desc = ptlrpc_prep_bulk_imp(req, npages, 1,
-                                   PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
+                                   PTLRPC_BULK_PUT_SINK,
                                    MDS_BULK_PORTAL,
                                    &ptlrpc_bulk_kiov_pin_ops);
-       if (desc == NULL) {
-               ptlrpc_request_free(req);
-               RETURN(-ENOMEM);
-       }
+       if (desc == NULL)
+               GOTO(out, rc = -ENOMEM);
 
        /* req now owns desc and will free it when it gets freed */
        for (i = 0; i < npages; i++)
                desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0,
-                                                PAGE_CACHE_SIZE);
+                                                PAGE_SIZE);
 
        /* pack index information in request */
        req_ii = req_capsule_client_get(&req->rq_pill, &RMF_IDX_INFO);