LU-11304 misc: update all url links to whamcloud
diff --git a/lustre/osc/osc_request.c b/lustre/osc/osc_request.c
index 7ed4225..5c707de 100644
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -27,7 +23,7 @@
  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2014, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
 
 #define DEBUG_SUBSYSTEM S_OSC
 
-#include <libcfs/libcfs.h>
-
-#include <lustre_dlm.h>
-#include <lustre_net.h>
-#include <lustre/lustre_user.h>
-#include <obd_cksum.h>
-#include <lustre_ha.h>
+#include <linux/workqueue.h>
 #include <lprocfs_status.h>
-#include <lustre_ioctl.h>
 #include <lustre_debug.h>
-#include <lustre_param.h>
+#include <lustre_dlm.h>
 #include <lustre_fid.h>
+#include <lustre_ha.h>
+#include <uapi/linux/lustre/lustre_ioctl.h>
+#include <lustre_net.h>
+#include <lustre_obdo.h>
+#include <uapi/linux/lustre/lustre_param.h>
+#include <obd.h>
+#include <obd_cksum.h>
 #include <obd_class.h>
+#include <lustre_osc.h>
+
 #include "osc_internal.h"
-#include "osc_cl_internal.h"
-
-struct osc_brw_async_args {
-       struct obdo              *aa_oa;
-       int                       aa_requested_nob;
-       int                       aa_nio_count;
-       obd_count                 aa_page_count;
-       int                       aa_resends;
-       struct brw_page **aa_ppga;
-       struct client_obd        *aa_cli;
-       struct list_head          aa_oaps;
-       struct list_head          aa_exts;
-       struct obd_capa  *aa_ocapa;
-       struct cl_req            *aa_clerq;
-};
 
-#define osc_grant_args osc_brw_async_args
+atomic_t osc_pool_req_count;
+unsigned int osc_reqpool_maxreqcount;
+struct ptlrpc_request_pool *osc_rq_pool;
 
-struct osc_async_args {
-       struct obd_info *aa_oi;
-};
+/* max memory used for request pool, unit is MB */
+static unsigned int osc_reqpool_mem_max = 5;
+module_param(osc_reqpool_mem_max, uint, 0444);
+
+static int osc_idle_timeout = 20;
+module_param(osc_idle_timeout, uint, 0644);
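The new osc_reqpool_mem_max module parameter caps the memory set aside for the shared BRW request pool (osc_rq_pool above). As a rough sizing sketch only: the cap in MB divided by a per-request buffer size gives the pool's request count. The divisor used by the real setup code is the OST I/O request buffer size, which is not shown in this hunk; the 32 KiB figure below is purely an assumption for illustration.

/* Rough request-pool sizing: memory cap (MB) over an assumed per-request
 * buffer size. 32 KiB is an invented figure, not the real OST I/O size. */
#include <stdio.h>

int main(void)
{
	unsigned int reqpool_mem_max_mb = 5;		/* module parameter default */
	unsigned long per_req_bytes = 32 * 1024;	/* assumed buffer per request */
	unsigned long pool_bytes = (unsigned long)reqpool_mem_max_mb << 20;

	printf("pool of ~%lu preallocated requests\n",
	       pool_bytes / per_req_bytes);
	return 0;
}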
+
+#define osc_grant_args osc_brw_async_args
 
 struct osc_setattr_args {
        struct obdo             *sa_oa;
@@ -79,212 +69,114 @@ struct osc_setattr_args {
 };
 
 struct osc_fsync_args {
-       struct obd_info *fa_oi;
-       obd_enqueue_update_f     fa_upcall;
+       struct osc_object       *fa_obj;
+       struct obdo             *fa_oa;
+       obd_enqueue_update_f    fa_upcall;
        void                    *fa_cookie;
 };
 
-struct osc_enqueue_args {
-       struct obd_export       *oa_exp;
-       ldlm_type_t             oa_type;
-       ldlm_mode_t             oa_mode;
-       __u64                   *oa_flags;
-       osc_enqueue_upcall_f    oa_upcall;
-       void                    *oa_cookie;
-       struct ost_lvb          *oa_lvb;
-       struct lustre_handle    oa_lockh;
-       unsigned int            oa_agl:1;
+struct osc_ladvise_args {
+       struct obdo             *la_oa;
+       obd_enqueue_update_f     la_upcall;
+       void                    *la_cookie;
 };
 
-static void osc_release_ppga(struct brw_page **ppga, obd_count count);
+static void osc_release_ppga(struct brw_page **ppga, size_t count);
 static int brw_interpret(const struct lu_env *env, struct ptlrpc_request *req,
                         void *data, int rc);
 
-static inline void osc_pack_capa(struct ptlrpc_request *req,
-                                 struct ost_body *body, void *capa)
-{
-        struct obd_capa *oc = (struct obd_capa *)capa;
-        struct lustre_capa *c;
-
-        if (!capa)
-                return;
-
-        c = req_capsule_client_get(&req->rq_pill, &RMF_CAPA1);
-        LASSERT(c);
-        capa_cpy(c, oc);
-        body->oa.o_valid |= OBD_MD_FLOSSCAPA;
-        DEBUG_CAPA(D_SEC, c, "pack");
-}
-
-static inline void osc_pack_req_body(struct ptlrpc_request *req,
-                                     struct obd_info *oinfo)
+void osc_pack_req_body(struct ptlrpc_request *req, struct obdo *oa)
 {
        struct ost_body *body;
 
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
 
-       lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
-                            oinfo->oi_oa);
-       osc_pack_capa(req, body, oinfo->oi_capa);
-}
-
-static inline void osc_set_capa_size(struct ptlrpc_request *req,
-                                     const struct req_msg_field *field,
-                                     struct obd_capa *oc)
-{
-        if (oc == NULL)
-                req_capsule_set_size(&req->rq_pill, field, RCL_CLIENT, 0);
-        else
-                /* it is already calculated as sizeof struct obd_capa */
-                ;
-}
-
-static int osc_getattr_interpret(const struct lu_env *env,
-                                 struct ptlrpc_request *req,
-                                 struct osc_async_args *aa, int rc)
-{
-        struct ost_body *body;
-        ENTRY;
-
-        if (rc != 0)
-                GOTO(out, rc);
-
-        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
-        if (body) {
-               CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
-               lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
-                                    aa->aa_oi->oi_oa, &body->oa);
-
-               /* This should really be sent by the OST */
-               aa->aa_oi->oi_oa->o_blksize = DT_MAX_BRW_SIZE;
-               aa->aa_oi->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
-        } else {
-                CDEBUG(D_INFO, "can't unpack ost_body\n");
-                rc = -EPROTO;
-                aa->aa_oi->oi_oa->o_valid = 0;
-        }
-out:
-        rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
-        RETURN(rc);
-}
-
-static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
-                             struct ptlrpc_request_set *set)
-{
-        struct ptlrpc_request *req;
-        struct osc_async_args *aa;
-        int                    rc;
-        ENTRY;
-
-        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
-        if (req == NULL)
-                RETURN(-ENOMEM);
-
-        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
-        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
-        if (rc) {
-                ptlrpc_request_free(req);
-                RETURN(rc);
-        }
-
-        osc_pack_req_body(req, oinfo);
-
-        ptlrpc_request_set_replen(req);
-        req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_getattr_interpret;
-
-        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
-        aa = ptlrpc_req_async_args(req);
-        aa->aa_oi = oinfo;
-
-        ptlrpc_set_add_req(set, req);
-        RETURN(0);
+       lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
 }
 
 static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
-                       struct obd_info *oinfo)
+                      struct obdo *oa)
 {
-        struct ptlrpc_request *req;
-        struct ost_body       *body;
-        int                    rc;
-        ENTRY;
+       struct ptlrpc_request   *req;
+       struct ost_body         *body;
+       int                      rc;
 
-        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
-        if (req == NULL)
-                RETURN(-ENOMEM);
+       ENTRY;
+       req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
+       if (req == NULL)
+               RETURN(-ENOMEM);
 
-        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
-        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
-        if (rc) {
-                ptlrpc_request_free(req);
-                RETURN(rc);
-        }
+       rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
+       if (rc) {
+               ptlrpc_request_free(req);
+               RETURN(rc);
+       }
 
-        osc_pack_req_body(req, oinfo);
+       osc_pack_req_body(req, oa);
 
-        ptlrpc_request_set_replen(req);
+       ptlrpc_request_set_replen(req);
 
-        rc = ptlrpc_queue_wait(req);
-        if (rc)
-                GOTO(out, rc);
+       rc = ptlrpc_queue_wait(req);
+       if (rc)
+               GOTO(out, rc);
 
-        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
-        if (body == NULL)
-                GOTO(out, rc = -EPROTO);
+       body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+       if (body == NULL)
+               GOTO(out, rc = -EPROTO);
 
        CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
-       lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa,
-                            &body->oa);
+       lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);
 
-       oinfo->oi_oa->o_blksize = cli_brw_size(exp->exp_obd);
-       oinfo->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
+       oa->o_blksize = cli_brw_size(exp->exp_obd);
+       oa->o_valid |= OBD_MD_FLBLKSZ;
 
-        EXIT;
- out:
-        ptlrpc_req_finished(req);
-        return rc;
+       EXIT;
+out:
+       ptlrpc_req_finished(req);
+
+       return rc;
 }
 
 static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
-                       struct obd_info *oinfo, struct obd_trans_info *oti)
+                      struct obdo *oa)
 {
-        struct ptlrpc_request *req;
-        struct ost_body       *body;
-        int                    rc;
-        ENTRY;
+       struct ptlrpc_request   *req;
+       struct ost_body         *body;
+       int                      rc;
 
-        LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP);
+       ENTRY;
+       LASSERT(oa->o_valid & OBD_MD_FLGROUP);
 
-        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
-        if (req == NULL)
-                RETURN(-ENOMEM);
+       req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
+       if (req == NULL)
+               RETURN(-ENOMEM);
 
-        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
-        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
-        if (rc) {
-                ptlrpc_request_free(req);
-                RETURN(rc);
-        }
+       rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
+       if (rc) {
+               ptlrpc_request_free(req);
+               RETURN(rc);
+       }
 
-        osc_pack_req_body(req, oinfo);
+       osc_pack_req_body(req, oa);
 
-        ptlrpc_request_set_replen(req);
+       ptlrpc_request_set_replen(req);
 
-        rc = ptlrpc_queue_wait(req);
-        if (rc)
-                GOTO(out, rc);
+       rc = ptlrpc_queue_wait(req);
+       if (rc)
+               GOTO(out, rc);
 
-        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
-        if (body == NULL)
-                GOTO(out, rc = -EPROTO);
+       body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+       if (body == NULL)
+               GOTO(out, rc = -EPROTO);
 
-       lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa,
-                            &body->oa);
+       lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);
 
-        EXIT;
+       EXIT;
 out:
-        ptlrpc_req_finished(req);
-        RETURN(rc);
+       ptlrpc_req_finished(req);
+
+       RETURN(rc);
 }
 
 static int osc_setattr_interpret(const struct lu_env *env,
@@ -308,67 +200,143 @@ out:
         RETURN(rc);
 }
 
-int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
-                           struct obd_trans_info *oti,
-                           obd_enqueue_update_f upcall, void *cookie,
-                           struct ptlrpc_request_set *rqset)
+int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
+                     obd_enqueue_update_f upcall, void *cookie,
+                     struct ptlrpc_request_set *rqset)
 {
-        struct ptlrpc_request   *req;
-        struct osc_setattr_args *sa;
-        int                      rc;
-        ENTRY;
+       struct ptlrpc_request   *req;
+       struct osc_setattr_args *sa;
+       int                      rc;
 
-        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
-        if (req == NULL)
-                RETURN(-ENOMEM);
+       ENTRY;
 
-        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
-        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
-        if (rc) {
-                ptlrpc_request_free(req);
-                RETURN(rc);
-        }
+       req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
+       if (req == NULL)
+               RETURN(-ENOMEM);
 
-        if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
-                oinfo->oi_oa->o_lcookie = *oti->oti_logcookies;
+       rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
+       if (rc) {
+               ptlrpc_request_free(req);
+               RETURN(rc);
+       }
 
-        osc_pack_req_body(req, oinfo);
+       osc_pack_req_body(req, oa);
 
-        ptlrpc_request_set_replen(req);
+       ptlrpc_request_set_replen(req);
 
-        /* do mds to ost setattr asynchronously */
-        if (!rqset) {
-                /* Do not wait for response. */
-                ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
-        } else {
-                req->rq_interpret_reply =
-                        (ptlrpc_interpterer_t)osc_setattr_interpret;
-
-                CLASSERT (sizeof(*sa) <= sizeof(req->rq_async_args));
-                sa = ptlrpc_req_async_args(req);
-                sa->sa_oa = oinfo->oi_oa;
-                sa->sa_upcall = upcall;
-                sa->sa_cookie = cookie;
-
-                if (rqset == PTLRPCD_SET)
-                        ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
-                else
-                        ptlrpc_set_add_req(rqset, req);
-        }
+       /* do mds to ost setattr asynchronously */
+       if (!rqset) {
+               /* Do not wait for response. */
+               ptlrpcd_add_req(req);
+       } else {
+               req->rq_interpret_reply =
+                       (ptlrpc_interpterer_t)osc_setattr_interpret;
 
-        RETURN(0);
+               CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
+               sa = ptlrpc_req_async_args(req);
+               sa->sa_oa = oa;
+               sa->sa_upcall = upcall;
+               sa->sa_cookie = cookie;
+
+               if (rqset == PTLRPCD_SET)
+                       ptlrpcd_add_req(req);
+               else
+                       ptlrpc_set_add_req(rqset, req);
+       }
+
+       RETURN(0);
 }
 
-static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
-                             struct obd_trans_info *oti,
-                             struct ptlrpc_request_set *rqset)
+static int osc_ladvise_interpret(const struct lu_env *env,
+                                struct ptlrpc_request *req,
+                                void *arg, int rc)
 {
-        return osc_setattr_async_base(exp, oinfo, oti,
-                                      oinfo->oi_cb_up, oinfo, rqset);
+       struct osc_ladvise_args *la = arg;
+       struct ost_body *body;
+       ENTRY;
+
+       if (rc != 0)
+               GOTO(out, rc);
+
+       body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+       if (body == NULL)
+               GOTO(out, rc = -EPROTO);
+
+       *la->la_oa = body->oa;
+out:
+       rc = la->la_upcall(la->la_cookie, rc);
+       RETURN(rc);
+}
+
+/**
+ * If rqset is NULL, do not wait for response. Upcall and cookie could also
+ * be NULL in this case
+ */
+int osc_ladvise_base(struct obd_export *exp, struct obdo *oa,
+                    struct ladvise_hdr *ladvise_hdr,
+                    obd_enqueue_update_f upcall, void *cookie,
+                    struct ptlrpc_request_set *rqset)
+{
+       struct ptlrpc_request   *req;
+       struct ost_body         *body;
+       struct osc_ladvise_args *la;
+       int                      rc;
+       struct lu_ladvise       *req_ladvise;
+       struct lu_ladvise       *ladvise = ladvise_hdr->lah_advise;
+       int                      num_advise = ladvise_hdr->lah_count;
+       struct ladvise_hdr      *req_ladvise_hdr;
+       ENTRY;
+
+       req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_LADVISE);
+       if (req == NULL)
+               RETURN(-ENOMEM);
+
+       req_capsule_set_size(&req->rq_pill, &RMF_OST_LADVISE, RCL_CLIENT,
+                            num_advise * sizeof(*ladvise));
+       rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_LADVISE);
+       if (rc != 0) {
+               ptlrpc_request_free(req);
+               RETURN(rc);
+       }
+       req->rq_request_portal = OST_IO_PORTAL;
+       ptlrpc_at_set_req_timeout(req);
+
+       body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
+       LASSERT(body);
+       lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
+                            oa);
+
+       req_ladvise_hdr = req_capsule_client_get(&req->rq_pill,
+                                                &RMF_OST_LADVISE_HDR);
+       memcpy(req_ladvise_hdr, ladvise_hdr, sizeof(*ladvise_hdr));
+
+       req_ladvise = req_capsule_client_get(&req->rq_pill, &RMF_OST_LADVISE);
+       memcpy(req_ladvise, ladvise, sizeof(*ladvise) * num_advise);
+       ptlrpc_request_set_replen(req);
+
+       if (rqset == NULL) {
+               /* Do not wait for response. */
+               ptlrpcd_add_req(req);
+               RETURN(0);
+       }
+
+       req->rq_interpret_reply = osc_ladvise_interpret;
+       CLASSERT(sizeof(*la) <= sizeof(req->rq_async_args));
+       la = ptlrpc_req_async_args(req);
+       la->la_oa = oa;
+       la->la_upcall = upcall;
+       la->la_cookie = cookie;
+
+       if (rqset == PTLRPCD_SET)
+               ptlrpcd_add_req(req);
+       else
+               ptlrpc_set_add_req(rqset, req);
+
+       RETURN(0);
 }
 
 static int osc_create(const struct lu_env *env, struct obd_export *exp,
-                     struct obdo *oa, struct obd_trans_info *oti)
+                     struct obdo *oa)
 {
         struct ptlrpc_request *req;
         struct ost_body       *body;
@@ -396,14 +364,6 @@ static int osc_create(const struct lu_env *env, struct obd_export *exp,
 
         ptlrpc_request_set_replen(req);
 
-        if ((oa->o_valid & OBD_MD_FLFLAGS) &&
-            oa->o_flags == OBD_FL_DELORPHAN) {
-                DEBUG_REQ(D_HA, req,
-                          "delorphan from OST integration");
-                /* Don't resend the delorphan req */
-                req->rq_no_resend = req->rq_no_delay = 1;
-        }
-
         rc = ptlrpc_queue_wait(req);
         if (rc)
                 GOTO(out_req, rc);
@@ -418,95 +378,102 @@ static int osc_create(const struct lu_env *env, struct obd_export *exp,
        oa->o_blksize = cli_brw_size(exp->exp_obd);
        oa->o_valid |= OBD_MD_FLBLKSZ;
 
-        if (oti != NULL) {
-                if (oa->o_valid & OBD_MD_FLCOOKIE) {
-                       if (oti->oti_logcookies == NULL)
-                               oti->oti_logcookies = &oti->oti_onecookie;
-
-                        *oti->oti_logcookies = oa->o_lcookie;
-                }
-        }
-
-        CDEBUG(D_HA, "transno: "LPD64"\n",
-               lustre_msg_get_transno(req->rq_repmsg));
+       CDEBUG(D_HA, "transno: %lld\n",
+              lustre_msg_get_transno(req->rq_repmsg));
 out_req:
-        ptlrpc_req_finished(req);
+       ptlrpc_req_finished(req);
 out:
        RETURN(rc);
 }
 
-int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
-                   obd_enqueue_update_f upcall, void *cookie,
-                   struct ptlrpc_request_set *rqset)
+int osc_punch_send(struct obd_export *exp, struct obdo *oa,
+                  obd_enqueue_update_f upcall, void *cookie)
 {
-        struct ptlrpc_request   *req;
-        struct osc_setattr_args *sa;
-        struct ost_body         *body;
-        int                      rc;
-        ENTRY;
+       struct ptlrpc_request *req;
+       struct osc_setattr_args *sa;
+       struct obd_import *imp = class_exp2cliimp(exp);
+       struct ost_body *body;
+       int rc;
 
-        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH);
-        if (req == NULL)
-                RETURN(-ENOMEM);
+       ENTRY;
 
-        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
-        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
-        if (rc) {
-                ptlrpc_request_free(req);
-                RETURN(rc);
-        }
-        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
-        ptlrpc_at_set_req_timeout(req);
+       req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);
+       if (req == NULL)
+               RETURN(-ENOMEM);
+
+       rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
+       if (rc < 0) {
+               ptlrpc_request_free(req);
+               RETURN(rc);
+       }
+
+       osc_set_io_portal(req);
+
+       ptlrpc_at_set_req_timeout(req);
 
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
-       LASSERT(body);
-       lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
-                            oinfo->oi_oa);
-       osc_pack_capa(req, body, oinfo->oi_capa);
 
-        ptlrpc_request_set_replen(req);
+       lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);
 
-        req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
-        CLASSERT (sizeof(*sa) <= sizeof(req->rq_async_args));
-        sa = ptlrpc_req_async_args(req);
-        sa->sa_oa     = oinfo->oi_oa;
-        sa->sa_upcall = upcall;
-        sa->sa_cookie = cookie;
-        if (rqset == PTLRPCD_SET)
-                ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
-        else
-                ptlrpc_set_add_req(rqset, req);
+       ptlrpc_request_set_replen(req);
 
-        RETURN(0);
+       req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
+       CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
+       sa = ptlrpc_req_async_args(req);
+       sa->sa_oa = oa;
+       sa->sa_upcall = upcall;
+       sa->sa_cookie = cookie;
+
+       ptlrpcd_add_req(req);
+
+       RETURN(0);
 }
+EXPORT_SYMBOL(osc_punch_send);
 
 static int osc_sync_interpret(const struct lu_env *env,
                               struct ptlrpc_request *req,
                               void *arg, int rc)
 {
-       struct osc_fsync_args *fa = arg;
-        struct ost_body *body;
-        ENTRY;
+       struct osc_fsync_args   *fa = arg;
+       struct ost_body         *body;
+       struct cl_attr          *attr = &osc_env_info(env)->oti_attr;
+       unsigned long           valid = 0;
+       struct cl_object        *obj;
+       ENTRY;
 
-        if (rc)
-                GOTO(out, rc);
+       if (rc != 0)
+               GOTO(out, rc);
 
-        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
-        if (body == NULL) {
-                CERROR ("can't unpack ost_body\n");
-                GOTO(out, rc = -EPROTO);
-        }
+       body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+       if (body == NULL) {
+               CERROR("can't unpack ost_body\n");
+               GOTO(out, rc = -EPROTO);
+       }
+
+       *fa->fa_oa = body->oa;
+       obj = osc2cl(fa->fa_obj);
+
+       /* Update osc object's blocks attribute */
+       cl_object_attr_lock(obj);
+       if (body->oa.o_valid & OBD_MD_FLBLOCKS) {
+               attr->cat_blocks = body->oa.o_blocks;
+               valid |= CAT_BLOCKS;
+       }
+
+       if (valid != 0)
+               cl_object_attr_update(env, obj, attr, valid);
+       cl_object_attr_unlock(obj);
 
-       *fa->fa_oi->oi_oa = body->oa;
 out:
        rc = fa->fa_upcall(fa->fa_cookie, rc);
        RETURN(rc);
 }
 
-int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
+int osc_sync_base(struct osc_object *obj, struct obdo *oa,
                  obd_enqueue_update_f upcall, void *cookie,
                   struct ptlrpc_request_set *rqset)
 {
+       struct obd_export     *exp = osc_export(obj);
        struct ptlrpc_request *req;
        struct ost_body       *body;
        struct osc_fsync_args *fa;
@@ -517,7 +484,6 @@ int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
         if (req == NULL)
                 RETURN(-ENOMEM);
 
-        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
         if (rc) {
                 ptlrpc_request_free(req);
@@ -527,21 +493,20 @@ int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
        /* overload the size and blocks fields in the oa with start/end */
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
-       lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
-                            oinfo->oi_oa);
-       osc_pack_capa(req, body, oinfo->oi_capa);
+       lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
 
-        ptlrpc_request_set_replen(req);
-        req->rq_interpret_reply = osc_sync_interpret;
+       ptlrpc_request_set_replen(req);
+       req->rq_interpret_reply = osc_sync_interpret;
 
        CLASSERT(sizeof(*fa) <= sizeof(req->rq_async_args));
        fa = ptlrpc_req_async_args(req);
-       fa->fa_oi = oinfo;
+       fa->fa_obj = obj;
+       fa->fa_oa = oa;
        fa->fa_upcall = upcall;
        fa->fa_cookie = cookie;
 
        if (rqset == PTLRPCD_SET)
-               ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+               ptlrpcd_add_req(req);
        else
                ptlrpc_set_add_req(rqset, req);
 
@@ -553,13 +518,13 @@ int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
  * locks added to @cancels list. */
 static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
                                   struct list_head *cancels,
-                                  ldlm_mode_t mode, __u64 lock_flags)
+                                  enum ldlm_mode mode, __u64 lock_flags)
 {
-        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
-        struct ldlm_res_id res_id;
-        struct ldlm_resource *res;
-        int count;
-        ENTRY;
+       struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
+       struct ldlm_res_id res_id;
+       struct ldlm_resource *res;
+       int count;
+       ENTRY;
 
        /* Return, i.e. cancel nothing, only if ELC is supported (flag in
         * export) but disabled through procfs (flag in NS).
@@ -612,18 +577,8 @@ static int osc_can_send_destroy(struct client_obd *cli)
        return 0;
 }
 
-/* Destroy requests can be async always on the client, and we don't even really
- * care about the return code since the client cannot do anything at all about
- * a destroy failure.
- * When the MDS is unlinking a filename, it saves the file objects into a
- * recovery llog, and these object records are cancelled when the OST reports
- * they were destroyed and sync'd to disk (i.e. transaction committed).
- * If the client dies, or the OST is down when the object should be destroyed,
- * the records are not cancelled, and when the OST reconnects to the MDS next,
- * it will retrieve the llog unlink logs and then sends the log cancellation
- * cookies to the MDS after committing destroy transactions. */
 static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
-                      struct obdo *oa, struct obd_trans_info *oti)
+                      struct obdo *oa)
 {
         struct client_obd     *cli = &exp->exp_obd->u.cli;
         struct ptlrpc_request *req;
@@ -646,7 +601,6 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
                 RETURN(-ENOMEM);
         }
 
-       osc_set_capa_size(req, &RMF_CAPA1, NULL);
         rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
                                0, &cancels, count);
         if (rc) {
@@ -657,48 +611,46 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
         req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
         ptlrpc_at_set_req_timeout(req);
 
-       if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE)
-               oa->o_lcookie = *oti->oti_logcookies;
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
 
         ptlrpc_request_set_replen(req);
 
-       /* If osc_destory is for destroying the unlink orphan,
-        * sent from MDT to OST, which should not be blocked here,
-        * because the process might be triggered by ptlrpcd, and
-        * it is not good to block ptlrpcd thread (b=16006)*/
-       if (!(oa->o_flags & OBD_FL_DELORPHAN)) {
-                req->rq_interpret_reply = osc_destroy_interpret;
-                if (!osc_can_send_destroy(cli)) {
-                        struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP,
-                                                          NULL);
-
-                        /*
-                         * Wait until the number of on-going destroy RPCs drops
-                         * under max_rpc_in_flight
-                         */
-                        l_wait_event_exclusive(cli->cl_destroy_waitq,
-                                               osc_can_send_destroy(cli), &lwi);
-                }
-        }
+       req->rq_interpret_reply = osc_destroy_interpret;
+       if (!osc_can_send_destroy(cli)) {
+               struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
 
-        /* Do not wait for response */
-        ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
-        RETURN(0);
+               /*
+                * Wait until the number of on-going destroy RPCs drops
+                * under max_rpc_in_flight
+                */
+               rc = l_wait_event_exclusive(cli->cl_destroy_waitq,
+                                           osc_can_send_destroy(cli), &lwi);
+               if (rc) {
+                       ptlrpc_req_finished(req);
+                       RETURN(rc);
+               }
+       }
+
+       /* Do not wait for response */
+       ptlrpcd_add_req(req);
+       RETURN(0);
 }
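osc_destroy() now always throttles itself: when too many destroy RPCs are already outstanding (osc_can_send_destroy() returns 0), the caller sleeps on cl_destroy_waitq until completed destroys free a slot. A userspace pthreads analogue of that bounded in-flight pattern; the names and the limit of 3 are invented for the example.

/*
 * Sketch of the "at most N destroys outstanding" throttle implemented by
 * osc_destroy()/osc_can_send_destroy() with cl_destroy_waitq. Userspace
 * analogue using pthreads; all names here are made up for illustration.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_IN_FLIGHT 3			/* stand-in for the RPCs-in-flight limit */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  waitq = PTHREAD_COND_INITIALIZER;
static int in_flight;

/* Like osc_destroy(): block while the pipe of destroys is full. */
static void send_destroy(int id)
{
	pthread_mutex_lock(&lock);
	while (in_flight >= MAX_IN_FLIGHT)	/* "cannot send" case */
		pthread_cond_wait(&waitq, &lock);
	in_flight++;
	printf("destroy %d sent, %d in flight\n", id, in_flight);
	pthread_mutex_unlock(&lock);
}

/* Like the destroy completion callback: a slot frees up, wake a waiter. */
static void *completer(void *arg)
{
	int completed = 0;

	while (completed < 8) {
		usleep(10000);
		pthread_mutex_lock(&lock);
		if (in_flight > 0) {
			in_flight--;
			completed++;
			pthread_cond_signal(&waitq);
		}
		pthread_mutex_unlock(&lock);
	}
	return arg;
}

int main(void)
{
	pthread_t thr;
	int i;

	pthread_create(&thr, NULL, completer, NULL);
	for (i = 0; i < 8; i++)
		send_destroy(i);
	pthread_join(thr, NULL);
	return 0;
}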
 
 static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
                                 long writing_bytes)
 {
-        obd_flag bits = OBD_MD_FLBLOCKS|OBD_MD_FLGRANT;
+       u64 bits = OBD_MD_FLBLOCKS | OBD_MD_FLGRANT;
 
-        LASSERT(!(oa->o_valid & bits));
+       LASSERT(!(oa->o_valid & bits));
 
        oa->o_valid |= bits;
        spin_lock(&cli->cl_loi_list_lock);
-       oa->o_dirty = cli->cl_dirty_pages << PAGE_CACHE_SHIFT;
+       if (OCD_HAS_FLAG(&cli->cl_import->imp_connect_data, GRANT_PARAM))
+               oa->o_dirty = cli->cl_dirty_grant;
+       else
+               oa->o_dirty = cli->cl_dirty_pages << PAGE_SHIFT;
        if (unlikely(cli->cl_dirty_pages - cli->cl_dirty_transit >
                     cli->cl_dirty_max_pages)) {
                CERROR("dirty %lu - %lu > dirty_max %lu\n",
@@ -707,13 +659,12 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
                oa->o_undirty = 0;
        } else if (unlikely(atomic_long_read(&obd_dirty_pages) -
                            atomic_long_read(&obd_dirty_transit_pages) >
-                           (obd_max_dirty_pages + 1))) {
+                           (long)(obd_max_dirty_pages + 1))) {
                /* The atomic_read() allowing the atomic_inc() are
                 * not covered by a lock thus they may safely race and trip
                 * this CERROR() unless we add in a small fudge factor (+1). */
-               CERROR("%s: dirty %ld - %ld > system dirty_max %lu\n",
-                      cli->cl_import->imp_obd->obd_name,
-                      atomic_long_read(&obd_dirty_pages),
+               CERROR("%s: dirty %ld - %ld > system dirty_max %ld\n",
+                      cli_name(cli), atomic_long_read(&obd_dirty_pages),
                       atomic_long_read(&obd_dirty_transit_pages),
                       obd_max_dirty_pages);
                oa->o_undirty = 0;
@@ -723,30 +674,47 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
                       cli->cl_dirty_pages, cli->cl_dirty_max_pages);
                oa->o_undirty = 0;
        } else {
-               unsigned long max_in_flight = (cli->cl_max_pages_per_rpc <<
-                                     PAGE_CACHE_SHIFT) *
-                                    (cli->cl_max_rpcs_in_flight + 1);
-               oa->o_undirty = max(cli->cl_dirty_max_pages << PAGE_CACHE_SHIFT,
-                                   max_in_flight);
+               unsigned long nrpages;
+               unsigned long undirty;
+
+               nrpages = cli->cl_max_pages_per_rpc;
+               nrpages *= cli->cl_max_rpcs_in_flight + 1;
+               nrpages = max(nrpages, cli->cl_dirty_max_pages);
+               undirty = nrpages << PAGE_SHIFT;
+               if (OCD_HAS_FLAG(&cli->cl_import->imp_connect_data,
+                                GRANT_PARAM)) {
+                       int nrextents;
+
+                       /* take extent tax into account when asking for more
+                        * grant space */
+                       nrextents = (nrpages + cli->cl_max_extent_pages - 1)  /
+                                    cli->cl_max_extent_pages;
+                       undirty += nrextents * cli->cl_grant_extent_tax;
+               }
+               /* Do not ask for more than OBD_MAX_GRANT - a margin for server
+                * to add extent tax, etc.
+                */
+               oa->o_undirty = min(undirty, OBD_MAX_GRANT -
+                                   (PTLRPC_MAX_BRW_PAGES << PAGE_SHIFT)*4UL);
         }
        oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
         oa->o_dropped = cli->cl_lost_grant;
         cli->cl_lost_grant = 0;
        spin_unlock(&cli->cl_loi_list_lock);
-        CDEBUG(D_CACHE,"dirty: "LPU64" undirty: %u dropped %u grant: "LPU64"\n",
+       CDEBUG(D_CACHE, "dirty: %llu undirty: %u dropped %u grant: %llu\n",
                oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
-
 }
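With OBD_CONNECT_GRANT_PARAM the client asks for enough grant to cover a full pipeline of RPCs (or its dirty cap, whichever is larger) plus a per-extent "tax", bounded a safe margin below OBD_MAX_GRANT. A standalone arithmetic walk-through of the o_undirty computation above; every input value is invented for the example and only the formula mirrors the code.

/* Worked example of the o_undirty computation in osc_announce_cached().
 * All values below are made-up inputs; only the arithmetic follows the code. */
#include <stdio.h>

int main(void)
{
	/* Illustrative inputs; the real values come from the import and
	 * client_obd state, and the named constants differ per build. */
	const unsigned long page_shift = 12;		  /* 4 KiB pages */
	unsigned long max_pages_per_rpc = 256;		  /* 1 MiB RPCs */
	unsigned long max_rpcs_in_flight = 8;
	unsigned long dirty_max_pages = 2048;		  /* 8 MiB dirty cap */
	unsigned long max_extent_pages = 512;		  /* from grant params */
	unsigned long grant_extent_tax = 24 << 10;	  /* ocd_grant_tax_kb = 24 */
	unsigned long long obd_max_grant = 1ULL << 30;	  /* assumed cap */
	unsigned long long brw_bytes = 1ULL << 24;	  /* assumed stand-in for
							   * PTLRPC_MAX_BRW_PAGES << PAGE_SHIFT */
	unsigned long nrpages, nrextents;
	unsigned long long undirty, limit;

	/* Enough pages for a full pipeline of RPCs, at least the dirty cap */
	nrpages = max_pages_per_rpc * (max_rpcs_in_flight + 1);
	if (nrpages < dirty_max_pages)
		nrpages = dirty_max_pages;
	undirty = (unsigned long long)nrpages << page_shift;

	/* Each extent insertion costs grant_extent_tax bytes of grant */
	nrextents = (nrpages + max_extent_pages - 1) / max_extent_pages;
	undirty += (unsigned long long)nrextents * grant_extent_tax;

	/* Leave the server a margin of four full BRWs below the cap */
	limit = obd_max_grant - brw_bytes * 4;
	if (undirty > limit)
		undirty = limit;

	printf("asking server for o_undirty = %llu bytes\n", undirty);
	return 0;
}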
 
 void osc_update_next_shrink(struct client_obd *cli)
 {
-        cli->cl_next_shrink_grant =
-                cfs_time_shift(cli->cl_grant_shrink_interval);
-        CDEBUG(D_CACHE, "next time %ld to shrink grant \n",
-               cli->cl_next_shrink_grant);
+       cli->cl_next_shrink_grant = ktime_get_seconds() +
+                                   cli->cl_grant_shrink_interval;
+
+       CDEBUG(D_CACHE, "next time %lld to shrink grant\n",
+              cli->cl_next_shrink_grant);
 }
 
-static void __osc_update_grant(struct client_obd *cli, obd_size grant)
+static void __osc_update_grant(struct client_obd *cli, u64 grant)
 {
        spin_lock(&cli->cl_loi_list_lock);
        cli->cl_avail_grant += grant;
@@ -756,14 +724,20 @@ static void __osc_update_grant(struct client_obd *cli, obd_size grant)
 static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
 {
         if (body->oa.o_valid & OBD_MD_FLGRANT) {
-                CDEBUG(D_CACHE, "got "LPU64" extra grant\n", body->oa.o_grant);
+               CDEBUG(D_CACHE, "got %llu extra grant\n", body->oa.o_grant);
                 __osc_update_grant(cli, body->oa.o_grant);
         }
 }
 
-static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
-                              obd_count keylen, void *key, obd_count vallen,
-                              void *val, struct ptlrpc_request_set *set);
+/**
+ * grant thread data for shrinking space.
+ */
+struct grant_thread_data {
+       struct list_head        gtd_clients;
+       struct mutex            gtd_mutex;
+       unsigned long           gtd_stopped:1;
+};
+static struct grant_thread_data client_gtd;
 
 static int osc_shrink_grant_interpret(const struct lu_env *env,
                                       struct ptlrpc_request *req,
@@ -807,11 +781,11 @@ static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
 static int osc_shrink_grant(struct client_obd *cli)
 {
        __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
-                            (cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT);
+                            (cli->cl_max_pages_per_rpc << PAGE_SHIFT);
 
        spin_lock(&cli->cl_loi_list_lock);
        if (cli->cl_avail_grant <= target_bytes)
-               target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+               target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
        spin_unlock(&cli->cl_loi_list_lock);
 
        return osc_shrink_grant_to_target(cli, target_bytes);
@@ -827,8 +801,8 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
        /* Don't shrink if we are already above or below the desired limit
         * We don't want to shrink below a single RPC, as that will negatively
         * impact block allocation and long-term performance. */
-       if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT)
-               target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+       if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT)
+               target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
 
        if (target_bytes >= cli->cl_avail_grant) {
                spin_unlock(&cli->cl_loi_list_lock);
@@ -864,18 +838,20 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
 
 static int osc_should_shrink_grant(struct client_obd *client)
 {
-        cfs_time_t time = cfs_time_current();
-        cfs_time_t next_shrink = client->cl_next_shrink_grant;
+       time64_t next_shrink = client->cl_next_shrink_grant;
+
+       if (client->cl_import == NULL)
+               return 0;
 
         if ((client->cl_import->imp_connect_data.ocd_connect_flags &
              OBD_CONNECT_GRANT_SHRINK) == 0)
                 return 0;
 
-       if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) {
+       if (ktime_get_seconds() >= next_shrink - 5) {
                /* Get the current RPC size directly, instead of going via:
                 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
                 * Keep comment here so that it can be found by searching. */
-               int brw_size = client->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+               int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT;
 
                if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
                    client->cl_avail_grant > brw_size)
@@ -886,43 +862,86 @@ static int osc_should_shrink_grant(struct client_obd *client)
         return 0;
 }
 
-static int osc_grant_shrink_grant_cb(struct timeout_item *item, void *data)
+#define GRANT_SHRINK_RPC_BATCH 100
+
+static struct delayed_work work;
+
+static void osc_grant_work_handler(struct work_struct *data)
 {
-       struct client_obd *client;
+       struct client_obd *cli;
+       int rpc_sent;
+       bool init_next_shrink = true;
+       time64_t next_shrink = ktime_get_seconds() + GRANT_SHRINK_INTERVAL;
+
+       rpc_sent = 0;
+       mutex_lock(&client_gtd.gtd_mutex);
+       list_for_each_entry(cli, &client_gtd.gtd_clients,
+                           cl_grant_chain) {
+               if (++rpc_sent < GRANT_SHRINK_RPC_BATCH &&
+                   osc_should_shrink_grant(cli))
+                       osc_shrink_grant(cli);
 
-       list_for_each_entry(client, &item->ti_obd_list, cl_grant_shrink_list) {
-               if (osc_should_shrink_grant(client))
-                       osc_shrink_grant(client);
+               if (!init_next_shrink) {
+                       if (cli->cl_next_shrink_grant < next_shrink &&
+                           cli->cl_next_shrink_grant > ktime_get_seconds())
+                               next_shrink = cli->cl_next_shrink_grant;
+               } else {
+                       init_next_shrink = false;
+                       next_shrink = cli->cl_next_shrink_grant;
+               }
        }
+       mutex_unlock(&client_gtd.gtd_mutex);
+
+       if (client_gtd.gtd_stopped == 1)
+               return;
+
+       if (next_shrink > ktime_get_seconds())
+               schedule_delayed_work(&work, msecs_to_jiffies(
+                                       (next_shrink - ktime_get_seconds()) *
+                                       MSEC_PER_SEC));
+       else
+               schedule_work(&work.work);
+}
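The handler walks every client under gtd_mutex, shrinks at most GRANT_SHRINK_RPC_BATCH of them per pass, and then re-arms the delayed work for the earliest future cl_next_shrink_grant, or immediately if that time has already passed. A userspace sketch of just the re-arm computation; time_now(), the client array, and the 1200-second default are assumptions standing in for ktime_get_seconds(), the gtd_clients list and GRANT_SHRINK_INTERVAL.

/* Minimal model of how osc_grant_work_handler() picks its next run time.
 * time_now(), clients[] and the interval default are invented here. */
#include <stdio.h>
#include <time.h>

#define TOY_SHRINK_INTERVAL	1200	/* assumed default, in seconds */

static long long time_now(void)
{
	return (long long)time(NULL);
}

/* Earliest future per-client shrink time; the first entry is taken as the
 * starting point, mirroring the init_next_shrink handling above. */
static long long pick_next_shrink(const long long *clients, int n)
{
	long long now = time_now();
	long long next = now + TOY_SHRINK_INTERVAL;
	int i, init = 1;

	for (i = 0; i < n; i++) {
		if (init) {
			next = clients[i];
			init = 0;
		} else if (clients[i] < next && clients[i] > now) {
			next = clients[i];
		}
	}
	return next;
}

int main(void)
{
	long long now = time_now();
	long long clients[] = { now + 90, now - 5, now + 40 };
	long long next = pick_next_shrink(clients, 3);

	if (next > now)		/* delay fed to msecs_to_jiffies() in the code */
		printf("re-arm delayed work in %lld ms\n", (next - now) * 1000LL);
	else
		printf("run the work again immediately\n");
	return 0;
}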
+
+/**
+ * Start grant thread for returing grant to server for idle clients.
+ */
+static int osc_start_grant_work(void)
+{
+       client_gtd.gtd_stopped = 0;
+       mutex_init(&client_gtd.gtd_mutex);
+       INIT_LIST_HEAD(&client_gtd.gtd_clients);
+
+       INIT_DELAYED_WORK(&work, osc_grant_work_handler);
+       schedule_work(&work.work);
+
        return 0;
 }
 
-static int osc_add_shrink_grant(struct client_obd *client)
+static void osc_stop_grant_work(void)
 {
-        int rc;
+       client_gtd.gtd_stopped = 1;
+       cancel_delayed_work_sync(&work);
+}
 
-        rc = ptlrpc_add_timeout_client(client->cl_grant_shrink_interval,
-                                       TIMEOUT_GRANT,
-                                       osc_grant_shrink_grant_cb, NULL,
-                                       &client->cl_grant_shrink_list);
-        if (rc) {
-                CERROR("add grant client %s error %d\n",
-                        client->cl_import->imp_obd->obd_name, rc);
-                return rc;
-        }
-        CDEBUG(D_CACHE, "add grant client %s \n",
-               client->cl_import->imp_obd->obd_name);
-        osc_update_next_shrink(client);
-        return 0;
+static void osc_add_grant_list(struct client_obd *client)
+{
+       mutex_lock(&client_gtd.gtd_mutex);
+       list_add(&client->cl_grant_chain, &client_gtd.gtd_clients);
+       mutex_unlock(&client_gtd.gtd_mutex);
 }
 
-static int osc_del_shrink_grant(struct client_obd *client)
+static void osc_del_grant_list(struct client_obd *client)
 {
-        return ptlrpc_del_timeout_client(&client->cl_grant_shrink_list,
-                                         TIMEOUT_GRANT);
+       if (list_empty(&client->cl_grant_chain))
+               return;
+
+       mutex_lock(&client_gtd.gtd_mutex);
+       list_del_init(&client->cl_grant_chain);
+       mutex_unlock(&client_gtd.gtd_mutex);
 }
 
-static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
+void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
 {
        /*
         * ocd_grant is the total grant amount we're expect to hold: if we've
@@ -934,39 +953,57 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
         * left EVICTED state, then cl_dirty_pages must be 0 already.
         */
        spin_lock(&cli->cl_loi_list_lock);
-       if (cli->cl_import->imp_state == LUSTRE_IMP_EVICTED)
-               cli->cl_avail_grant = ocd->ocd_grant;
-       else
-               cli->cl_avail_grant = ocd->ocd_grant -
-                                     (cli->cl_dirty_pages << PAGE_CACHE_SHIFT);
-
-        if (cli->cl_avail_grant < 0) {
-               CWARN("%s: available grant < 0: avail/ocd/dirty %ld/%u/%ld\n",
-                     cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant,
-                     ocd->ocd_grant, cli->cl_dirty_pages << PAGE_CACHE_SHIFT);
-               /* workaround for servers which do not have the patch from
-                * LU-2679 */
-               cli->cl_avail_grant = ocd->ocd_grant;
-        }
+       cli->cl_avail_grant = ocd->ocd_grant;
+       if (cli->cl_import->imp_state != LUSTRE_IMP_EVICTED) {
+               cli->cl_avail_grant -= cli->cl_reserved_grant;
+               if (OCD_HAS_FLAG(ocd, GRANT_PARAM))
+                       cli->cl_avail_grant -= cli->cl_dirty_grant;
+               else
+                       cli->cl_avail_grant -=
+                                       cli->cl_dirty_pages << PAGE_SHIFT;
+       }
 
-       /* determine the appropriate chunk size used by osc_extent. */
-       cli->cl_chunkbits = max_t(int, PAGE_CACHE_SHIFT, ocd->ocd_blocksize);
+       if (OCD_HAS_FLAG(ocd, GRANT_PARAM)) {
+               u64 size;
+               int chunk_mask;
+
+               /* overhead for each extent insertion */
+               cli->cl_grant_extent_tax = ocd->ocd_grant_tax_kb << 10;
+               /* determine the appropriate chunk size used by osc_extent. */
+               cli->cl_chunkbits = max_t(int, PAGE_SHIFT,
+                                         ocd->ocd_grant_blkbits);
+               /* max_pages_per_rpc must be chunk aligned */
+               chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
+               cli->cl_max_pages_per_rpc = (cli->cl_max_pages_per_rpc +
+                                            ~chunk_mask) & chunk_mask;
+               /* determine maximum extent size, in #pages */
+               size = (u64)ocd->ocd_grant_max_blks << ocd->ocd_grant_blkbits;
+               cli->cl_max_extent_pages = size >> PAGE_SHIFT;
+               if (cli->cl_max_extent_pages == 0)
+                       cli->cl_max_extent_pages = 1;
+       } else {
+               cli->cl_grant_extent_tax = 0;
+               cli->cl_chunkbits = PAGE_SHIFT;
+               cli->cl_max_extent_pages = DT_MAX_BRW_PAGES;
+       }
        spin_unlock(&cli->cl_loi_list_lock);
 
        CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld."
-               "chunk bits: %d.\n", cli->cl_import->imp_obd->obd_name,
-               cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits);
+               "chunk bits: %d cl_max_extent_pages: %d\n",
+               cli_name(cli),
+               cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits,
+               cli->cl_max_extent_pages);
 
-       if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
-           list_empty(&cli->cl_grant_shrink_list))
-               osc_add_shrink_grant(cli);
+       if (OCD_HAS_FLAG(ocd, GRANT_SHRINK) && list_empty(&cli->cl_grant_chain))
+               osc_add_grant_list(cli);
 }
+EXPORT_SYMBOL(osc_init_grant);
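When the server advertises GRANT_PARAM, osc_init_grant() derives the chunk size from ocd_grant_blkbits, rounds cl_max_pages_per_rpc up to a whole number of chunks, and caps extent size from ocd_grant_max_blks; otherwise it falls back to one page per chunk. A standalone sketch of that sizing arithmetic with invented connect-data values.

/* Worked example of the chunk/extent sizing done in osc_init_grant()
 * under OBD_CONNECT_GRANT_PARAM. Inputs are invented for illustration. */
#include <stdio.h>

int main(void)
{
	const int page_shift = 12;		/* 4 KiB pages */
	int grant_blkbits = 16;			/* server blocks of 64 KiB */
	unsigned int grant_max_blks = 512;	/* max extent, in server blocks */
	unsigned int max_pages_per_rpc = 1000;	/* deliberately unaligned */
	int chunkbits, chunk_mask;
	unsigned long long max_extent_bytes;
	unsigned int max_extent_pages;

	/* Chunk is the larger of the page size and the server block size */
	chunkbits = grant_blkbits > page_shift ? grant_blkbits : page_shift;

	/* Round the RPC size up to a whole number of chunks */
	chunk_mask = ~((1 << (chunkbits - page_shift)) - 1);
	max_pages_per_rpc = (max_pages_per_rpc + ~chunk_mask) & chunk_mask;

	/* Largest extent the server will grant for, expressed in pages */
	max_extent_bytes = (unsigned long long)grant_max_blks << grant_blkbits;
	max_extent_pages = max_extent_bytes >> page_shift;
	if (max_extent_pages == 0)
		max_extent_pages = 1;

	printf("chunkbits=%d (chunk=%d KiB)\n", chunkbits,
	       (1 << chunkbits) >> 10);
	printf("max_pages_per_rpc rounded up to %u pages\n", max_pages_per_rpc);
	printf("max_extent_pages=%u\n", max_extent_pages);
	return 0;
}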
 
 /* We assume that the reason this OSC got a short read is because it read
  * beyond the end of a stripe file; i.e. lustre is reading a sparse file
  * via the LOV, and it _knows_ it's reading inside the file, it's just that
  * this stripe never got written at or beyond this stripe offset yet. */
-static void handle_short_read(int nob_read, obd_count page_count,
+static void handle_short_read(int nob_read, size_t page_count,
                               struct brw_page **pga)
 {
         char *ptr;
@@ -979,7 +1016,7 @@ static void handle_short_read(int nob_read, obd_count page_count,
                if (pga[i]->count > nob_read) {
                        /* EOF inside this page */
                        ptr = kmap(pga[i]->pg) +
-                               (pga[i]->off & ~CFS_PAGE_MASK);
+                               (pga[i]->off & ~PAGE_MASK);
                        memset(ptr + nob_read, 0, pga[i]->count - nob_read);
                        kunmap(pga[i]->pg);
                        page_count--;
@@ -994,7 +1031,7 @@ static void handle_short_read(int nob_read, obd_count page_count,
 
        /* zero remaining pages */
        while (page_count-- > 0) {
-               ptr = kmap(pga[i]->pg) + (pga[i]->off & ~CFS_PAGE_MASK);
+               ptr = kmap(pga[i]->pg) + (pga[i]->off & ~PAGE_MASK);
                memset(ptr, 0, pga[i]->count);
                kunmap(pga[i]->pg);
                i++;
@@ -1002,8 +1039,8 @@ static void handle_short_read(int nob_read, obd_count page_count,
 }
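handle_short_read() zeroes everything past the bytes the OST actually returned: first the tail of the page where EOF landed, then every remaining page. A small userspace analogue using heap buffers in place of kmapped pages; the toy_* names are invented.

/* Userspace analogue of handle_short_read(): zero-fill everything past
 * the nob_read bytes actually returned. Heap buffers replace kmapped pages. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_page {
	unsigned char	*buf;
	size_t		count;	/* bytes of this page taking part in the I/O */
};

static void toy_handle_short_read(int nob_read, size_t page_count,
				  struct toy_page *pga)
{
	size_t i = 0;

	/* Skip fully-read pages, zero the tail of the page holding EOF */
	while (nob_read > 0) {
		if (pga[i].count > (size_t)nob_read) {
			memset(pga[i].buf + nob_read, 0,
			       pga[i].count - nob_read);
			page_count--;
			i++;
			break;
		}
		nob_read -= pga[i].count;
		page_count--;
		i++;
	}

	/* Zero every page past EOF */
	while (page_count-- > 0) {
		memset(pga[i].buf, 0, pga[i].count);
		i++;
	}
}

int main(void)
{
	struct toy_page pga[3];
	size_t i;

	for (i = 0; i < 3; i++) {
		pga[i].count = 8;
		pga[i].buf = malloc(8);
		memset(pga[i].buf, 'x', 8);
	}
	/* Pretend the OST returned only 10 of the 24 requested bytes */
	toy_handle_short_read(10, 3, pga);
	for (i = 0; i < 3; i++)
		printf("page %zu: %.8s\n", i, pga[i].buf);
	for (i = 0; i < 3; i++)
		free(pga[i].buf);
	return 0;
}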
 
 static int check_write_rcs(struct ptlrpc_request *req,
-                           int requested_nob, int niocount,
-                           obd_count page_count, struct brw_page **pga)
+                          int requested_nob, int niocount,
+                          size_t page_count, struct brw_page **pga)
 {
         int     i;
         __u32   *remote_rcs;
@@ -1027,8 +1064,8 @@ static int check_write_rcs(struct ptlrpc_request *req,
                         return(-EPROTO);
                 }
         }
-
-        if (req->rq_bulk->bd_nob_transferred != requested_nob) {
+       if (req->rq_bulk != NULL &&
+           req->rq_bulk->bd_nob_transferred != requested_nob) {
                 CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
                        req->rq_bulk->bd_nob_transferred, requested_nob);
                 return(-EPROTO);
@@ -1048,7 +1085,7 @@ static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
                  * safe to combine */
                 if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
                         CWARN("Saw flags 0x%x and 0x%x in the same brw, please "
-                              "report this at https://jira.hpdd.intel.com/\n",
+                              "report this at https://jira.whamcloud.com/\n",
                               p1->flag, p2->flag);
                 }
                 return 0;
@@ -1057,15 +1094,111 @@ static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
         return (p1->off + p1->count == p2->off);
 }
 
-static obd_count osc_checksum_bulk(int nob, obd_count pg_count,
-                                  struct brw_page **pga, int opc,
-                                  cksum_type_t cksum_type)
+static int osc_checksum_bulk_t10pi(const char *obd_name, int nob,
+                                  size_t pg_count, struct brw_page **pga,
+                                  int opc, obd_dif_csum_fn *fn,
+                                  int sector_size,
+                                  u32 *check_sum)
+{
+       struct cfs_crypto_hash_desc *hdesc;
+       /* Used Adler as the default checksum type on top of DIF tags */
+       unsigned char cfs_alg = cksum_obd2cfs(OBD_CKSUM_T10_TOP);
+       struct page *__page;
+       unsigned char *buffer;
+       __u16 *guard_start;
+       unsigned int bufsize;
+       int guard_number;
+       int used_number = 0;
+       int used;
+       u32 cksum;
+       int rc = 0;
+       int i = 0;
+
+       LASSERT(pg_count > 0);
+
+       __page = alloc_page(GFP_KERNEL);
+       if (__page == NULL)
+               return -ENOMEM;
+
+       hdesc = cfs_crypto_hash_init(cfs_alg, NULL, 0);
+       if (IS_ERR(hdesc)) {
+               rc = PTR_ERR(hdesc);
+               CERROR("%s: unable to initialize checksum hash %s: rc = %d\n",
+                      obd_name, cfs_crypto_hash_name(cfs_alg), rc);
+               GOTO(out, rc);
+       }
+
+       buffer = kmap(__page);
+       guard_start = (__u16 *)buffer;
+       guard_number = PAGE_SIZE / sizeof(*guard_start);
+       while (nob > 0 && pg_count > 0) {
+               unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;
+
+               /* corrupt the data before we compute the checksum, to
+                * simulate an OST->client data error */
+               if (unlikely(i == 0 && opc == OST_READ &&
+                            OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))) {
+                       unsigned char *ptr = kmap(pga[i]->pg);
+                       int off = pga[i]->off & ~PAGE_MASK;
+
+                       memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
+                       kunmap(pga[i]->pg);
+               }
+
+               /*
+                * The left guard number should be able to hold checksums of a
+                * whole page
+                */
+               rc = obd_page_dif_generate_buffer(obd_name, pga[i]->pg, 0,
+                                                 count,
+                                                 guard_start + used_number,
+                                                 guard_number - used_number,
+                                                 &used, sector_size,
+                                                 fn);
+               if (rc)
+                       break;
+
+               used_number += used;
+               if (used_number == guard_number) {
+                       cfs_crypto_hash_update_page(hdesc, __page, 0,
+                               used_number * sizeof(*guard_start));
+                       used_number = 0;
+               }
+
+               nob -= pga[i]->count;
+               pg_count--;
+               i++;
+       }
+       kunmap(__page);
+       if (rc)
+               GOTO(out, rc);
+
+       if (used_number != 0)
+               cfs_crypto_hash_update_page(hdesc, __page, 0,
+                       used_number * sizeof(*guard_start));
+
+       bufsize = sizeof(cksum);
+       cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);
+
+       /* For sending we only compute the wrong checksum instead
+        * of corrupting the data so it is still correct on a redo */
+       if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
+               cksum++;
+
+       *check_sum = cksum;
+out:
+       __free_page(__page);
+       return rc;
+}
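The new T10-PI path computes a 16-bit guard tag per sector, packs the guards into a page-sized scratch buffer, and folds that buffer into a single top-level hash whenever it fills, plus once more for the partial batch at the end. A toy sketch of that batching shape only: guard16() and fold() below are trivial stand-ins, not the real obd_dif_csum_fn or cfs_crypto hash.

/* Shape of osc_checksum_bulk_t10pi(): per-sector guard tags are packed
 * into a scratch buffer and folded into one running hash in batches.
 * The guard and hash functions are toys, not the real DIF/crypto code. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SECTOR_SIZE	512
#define SCRATCH_GUARDS	2048	/* guards per "page" of scratch space */

static uint16_t guard16(const unsigned char *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += buf[i];
	return (uint16_t)sum;	/* toy guard, not a real CRC/IP checksum */
}

static void fold(uint32_t *hash, const uint16_t *guards, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		*hash = (*hash * 31) + guards[i];	/* toy top-level hash */
}

static uint32_t toy_checksum(const unsigned char *data, size_t len)
{
	uint16_t scratch[SCRATCH_GUARDS];
	size_t used = 0, off;
	uint32_t hash = 0;

	for (off = 0; off < len; off += SECTOR_SIZE) {
		size_t n = len - off < SECTOR_SIZE ? len - off : SECTOR_SIZE;

		scratch[used++] = guard16(data + off, n);
		if (used == SCRATCH_GUARDS) {	/* scratch buffer full: fold */
			fold(&hash, scratch, used);
			used = 0;
		}
	}
	if (used)				/* fold the partial batch */
		fold(&hash, scratch, used);
	return hash;
}

int main(void)
{
	unsigned char data[3 * SECTOR_SIZE + 100];

	memset(data, 0xab, sizeof(data));
	printf("toy t10-style checksum: 0x%08x\n",
	       toy_checksum(data, sizeof(data)));
	return 0;
}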
+
+static int osc_checksum_bulk(int nob, size_t pg_count,
+                            struct brw_page **pga, int opc,
+                            enum cksum_types cksum_type,
+                            u32 *cksum)
 {
-       __u32                           cksum;
        int                             i = 0;
        struct cfs_crypto_hash_desc     *hdesc;
        unsigned int                    bufsize;
-       int                             err;
        unsigned char                   cfs_alg = cksum_obd2cfs(cksum_type);
 
        LASSERT(pg_count > 0);
@@ -1085,49 +1218,72 @@ static obd_count osc_checksum_bulk(int nob, obd_count pg_count,
                if (i == 0 && opc == OST_READ &&
                    OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
                        unsigned char *ptr = kmap(pga[i]->pg);
-                       int off = pga[i]->off & ~CFS_PAGE_MASK;
+                       int off = pga[i]->off & ~PAGE_MASK;
 
                        memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
                        kunmap(pga[i]->pg);
                }
                cfs_crypto_hash_update_page(hdesc, pga[i]->pg,
-                                           pga[i]->off & ~CFS_PAGE_MASK,
+                                           pga[i]->off & ~PAGE_MASK,
                                            count);
                LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d\n",
-                              (int)(pga[i]->off & ~CFS_PAGE_MASK));
+                              (int)(pga[i]->off & ~PAGE_MASK));
 
                nob -= pga[i]->count;
                pg_count--;
                i++;
        }
 
-       bufsize = sizeof(cksum);
-       err = cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);
+       bufsize = sizeof(*cksum);
+       cfs_crypto_hash_final(hdesc, (unsigned char *)cksum, &bufsize);
 
        /* For sending we only compute the wrong checksum instead
         * of corrupting the data so it is still correct on a redo */
        if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
-               cksum++;
+               (*cksum)++;
 
-       return cksum;
+       return 0;
 }
 
-static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
-                                struct lov_stripe_md *lsm, obd_count page_count,
-                                struct brw_page **pga,
-                                struct ptlrpc_request **reqp,
-                                struct obd_capa *ocapa, int reserve,
-                                int resend)
+static int osc_checksum_bulk_rw(const char *obd_name,
+                               enum cksum_types cksum_type,
+                               int nob, size_t pg_count,
+                               struct brw_page **pga, int opc,
+                               u32 *check_sum)
+{
+       obd_dif_csum_fn *fn = NULL;
+       int sector_size = 0;
+       int rc;
+
+       ENTRY;
+       obd_t10_cksum2dif(cksum_type, &fn, &sector_size);
+
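+       /* A non-NULL fn means cksum_type maps onto a T10-PI guard function,
+        * so take the DIF path; otherwise use the plain whole-bulk checksum. */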
+       if (fn)
+               rc = osc_checksum_bulk_t10pi(obd_name, nob, pg_count, pga,
+                                            opc, fn, sector_size, check_sum);
+       else
+               rc = osc_checksum_bulk(nob, pg_count, pga, opc, cksum_type,
+                                      check_sum);
+
+       RETURN(rc);
+}
+
+static int
+osc_brw_prep_request(int cmd, struct client_obd *cli, struct obdo *oa,
+                    u32 page_count, struct brw_page **pga,
+                    struct ptlrpc_request **reqp, int resend)
 {
         struct ptlrpc_request   *req;
         struct ptlrpc_bulk_desc *desc;
         struct ost_body         *body;
         struct obd_ioobj        *ioobj;
         struct niobuf_remote    *niobuf;
-        int niocount, i, requested_nob, opc, rc;
+       int niocount, i, requested_nob, opc, rc, short_io_size = 0;
         struct osc_brw_async_args *aa;
         struct req_capsule      *pill;
         struct brw_page *pg_prev;
+       void *short_io_buf;
+       const char *obd_name = cli->cl_import->imp_obd->obd_name;
 
         ENTRY;
         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
@@ -1135,15 +1291,15 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
                 RETURN(-EINVAL); /* Fatal */
 
-        if ((cmd & OBD_BRW_WRITE) != 0) {
-                opc = OST_WRITE;
-                req = ptlrpc_request_alloc_pool(cli->cl_import,
-                                                cli->cl_import->imp_rq_pool,
-                                                &RQF_OST_BRW_WRITE);
-        } else {
-                opc = OST_READ;
-                req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
-        }
+       if ((cmd & OBD_BRW_WRITE) != 0) {
+               opc = OST_WRITE;
+               req = ptlrpc_request_alloc_pool(cli->cl_import,
+                                               osc_rq_pool,
+                                               &RQF_OST_BRW_WRITE);
+       } else {
+               opc = OST_READ;
+               req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
+       }
         if (req == NULL)
                 RETURN(-ENOMEM);
 
@@ -1157,28 +1313,51 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
                              sizeof(*ioobj));
         req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
                              niocount * sizeof(*niobuf));
-        osc_set_capa_size(req, &RMF_CAPA1, ocapa);
+
+       for (i = 0; i < page_count; i++)
+               short_io_size += pga[i]->count;
+
+       /* Check if we can do a short io. */
+       if (short_io_size > cli->cl_short_io_bytes || niocount != 1 ||
+           !imp_connect_shortio(cli->cl_import))
+               short_io_size = 0;
+
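+       /* Reads receive the inline data in the reply, writes carry it in the
+        * request, so only the relevant side of the buffer is sized. */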
+       req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_CLIENT,
+                            opc == OST_READ ? 0 : short_io_size);
+       if (opc == OST_READ)
+               req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_SERVER,
+                                    short_io_size);
 
         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
         if (rc) {
                 ptlrpc_request_free(req);
                 RETURN(rc);
         }
-        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
-        ptlrpc_at_set_req_timeout(req);
+       osc_set_io_portal(req);
+
+       ptlrpc_at_set_req_timeout(req);
        /* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own
         * retry logic */
        req->rq_no_retry_einprogress = 1;
 
+       if (short_io_size != 0) {
+               desc = NULL;
+               short_io_buf = NULL;
+               goto no_bulk;
+       }
+
        desc = ptlrpc_prep_bulk_imp(req, page_count,
                cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
-               opc == OST_WRITE ? BULK_GET_SOURCE : BULK_PUT_SINK,
-               OST_BULK_PORTAL);
+               (opc == OST_WRITE ? PTLRPC_BULK_GET_SOURCE :
+                       PTLRPC_BULK_PUT_SINK) |
+                       PTLRPC_BULK_BUF_KIOV,
+               OST_BULK_PORTAL,
+               &ptlrpc_bulk_kiov_pin_ops);
 
         if (desc == NULL)
                 GOTO(out, rc = -ENOMEM);
         /* NB request now owns desc and will free it when it gets freed */
-
+no_bulk:
         body = req_capsule_client_get(pill, &RMF_OST_BODY);
         ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
         niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
@@ -1186,6 +1365,15 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
 
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
 
+       /* For READ and WRITE, we can't fill o_uid and o_gid using from_kuid()
+        * and from_kgid() because these requests are asynchronous. Fortunately,
+        * the oa variable already contains valid o_uid and o_gid for these two
+        * operations. Besides, filling o_uid and o_gid is enough for nrs-tbf,
+        * see LU-9658. OBD_MD_FLUID and OBD_MD_FLGID are not set in order to
+        * avoid breaking other processing logic. */
+       body->oa.o_uid = oa->o_uid;
+       body->oa.o_gid = oa->o_gid;
+
        obdo_to_ioobj(oa, ioobj);
        ioobj->ioo_bufcnt = niocount;
        /* The high bits of ioo_max_brw tell the server the _maximum_ number of bulks
@@ -1193,35 +1381,63 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
         * when the RPC is finally sent in ptlrpc_register_bulk(). It sends
         * "max - 1" for old client compatibility sending "0", and also so the
         * the actual maximum is a power-of-two number, not one less. LU-1431 */
-       ioobj_max_brw_set(ioobj, desc->bd_md_max_brw);
-       osc_pack_capa(req, body, ocapa);
+       if (desc != NULL)
+               ioobj_max_brw_set(ioobj, desc->bd_md_max_brw);
+       else /* short io */
+               ioobj_max_brw_set(ioobj, 0);
+
+       if (short_io_size != 0) {
+               if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
+                       body->oa.o_valid |= OBD_MD_FLFLAGS;
+                       body->oa.o_flags = 0;
+               }
+               body->oa.o_flags |= OBD_FL_SHORT_IO;
+               CDEBUG(D_CACHE, "Using short io for data transfer, size = %d\n",
+                      short_io_size);
+               if (opc == OST_WRITE) {
+                       short_io_buf = req_capsule_client_get(pill,
+                                                             &RMF_SHORT_IO);
+                       LASSERT(short_io_buf != NULL);
+               }
+       }
+
        LASSERT(page_count > 0);
        pg_prev = pga[0];
         for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
                 struct brw_page *pg = pga[i];
-                int poff = pg->off & ~CFS_PAGE_MASK;
+               int poff = pg->off & ~PAGE_MASK;
 
                 LASSERT(pg->count > 0);
                 /* make sure there is no gap in the middle of the page array */
                LASSERTF(page_count == 1 ||
-                        (ergo(i == 0, poff + pg->count == PAGE_CACHE_SIZE) &&
+                        (ergo(i == 0, poff + pg->count == PAGE_SIZE) &&
                          ergo(i > 0 && i < page_count - 1,
-                              poff == 0 && pg->count == PAGE_CACHE_SIZE)   &&
+                              poff == 0 && pg->count == PAGE_SIZE)   &&
                          ergo(i == page_count - 1, poff == 0)),
-                        "i: %d/%d pg: %p off: "LPU64", count: %u\n",
+                        "i: %d/%d pg: %p off: %llu, count: %u\n",
                         i, page_count, pg, pg->off, pg->count);
                 LASSERTF(i == 0 || pg->off > pg_prev->off,
-                         "i %d p_c %u pg %p [pri %lu ind %lu] off "LPU64
-                         " prev_pg %p [pri %lu ind %lu] off "LPU64"\n",
+                        "i %d p_c %u pg %p [pri %lu ind %lu] off %llu"
+                        " prev_pg %p [pri %lu ind %lu] off %llu\n",
                          i, page_count,
                          pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
                          pg_prev->pg, page_private(pg_prev->pg),
                          pg_prev->pg->index, pg_prev->off);
                 LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
                         (pg->flag & OBD_BRW_SRVLOCK));
-
-               ptlrpc_prep_bulk_page_pin(desc, pg->pg, poff, pg->count);
-                requested_nob += pg->count;
+               if (short_io_size != 0 && opc == OST_WRITE) {
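+                       /* Short I/O write: copy the page data inline into the
+                        * request buffer instead of attaching the page to a
+                        * bulk descriptor. */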
+                       unsigned char *ptr = ll_kmap_atomic(pg->pg, KM_USER0);
+
+                       LASSERT(short_io_size >= requested_nob + pg->count);
+                       memcpy(short_io_buf + requested_nob,
+                              ptr + poff,
+                              pg->count);
+                       ll_kunmap_atomic(ptr, KM_USER0);
+               } else if (short_io_size == 0) {
+                       desc->bd_frag_ops->add_kiov_frag(desc, pg->pg, poff,
+                                                        pg->count);
+               }
+               requested_nob += pg->count;
 
                 if (i > 0 && can_merge_pages(pg_prev, pg)) {
                         niobuf--;
@@ -1257,23 +1473,31 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
                     !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
                         /* store cl_cksum_type in a local variable since
                          * it can be changed via lprocfs */
-                        cksum_type_t cksum_type = cli->cl_cksum_type;
+                       enum cksum_types cksum_type = cli->cl_cksum_type;
 
-                        if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
-                                oa->o_flags &= OBD_FL_LOCAL_MASK;
+                        if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
                                 body->oa.o_flags = 0;
-                        }
-                        body->oa.o_flags |= cksum_type_pack(cksum_type);
+
+                       body->oa.o_flags |= obd_cksum_type_pack(obd_name,
+                                                               cksum_type);
                         body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
-                        body->oa.o_cksum = osc_checksum_bulk(requested_nob,
-                                                             page_count, pga,
-                                                             OST_WRITE,
-                                                             cksum_type);
+
+                       rc = osc_checksum_bulk_rw(obd_name, cksum_type,
+                                                 requested_nob, page_count,
+                                                 pga, OST_WRITE,
+                                                 &body->oa.o_cksum);
+                       if (rc < 0) {
+                               CDEBUG(D_PAGE, "failed to checksum, rc = %d\n",
+                                      rc);
+                               GOTO(out, rc);
+                       }
                         CDEBUG(D_PAGE, "checksum at write origin: %x\n",
                                body->oa.o_cksum);
+
                         /* save this in 'oa', too, for later checking */
                         oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
-                        oa->o_flags |= cksum_type_pack(cksum_type);
+                       oa->o_flags |= obd_cksum_type_pack(obd_name,
+                                                          cksum_type);
                 } else {
                         /* clear out the checksum flag, in case this is a
                          * resend but cl_checksum is no longer set. b=11238 */
@@ -1288,24 +1512,28 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
                     !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
                         if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
                                 body->oa.o_flags = 0;
-                        body->oa.o_flags |= cksum_type_pack(cli->cl_cksum_type);
+                       body->oa.o_flags |= obd_cksum_type_pack(obd_name,
+                               cli->cl_cksum_type);
                         body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
-                }
-        }
-        ptlrpc_request_set_replen(req);
+               }
 
-        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
-        aa = ptlrpc_req_async_args(req);
-        aa->aa_oa = oa;
-        aa->aa_requested_nob = requested_nob;
-        aa->aa_nio_count = niocount;
-        aa->aa_page_count = page_count;
-        aa->aa_resends = 0;
-        aa->aa_ppga = pga;
-        aa->aa_cli = cli;
+               /* The client cksum has already been copied to the wire obdo in
+                * the previous lustre_set_wire_obdo(); in case a bulk read is
+                * being resent due to a cksum error, this allows the server to
+                * check and dump the pages on its side. */
+       }
+       ptlrpc_request_set_replen(req);
+
+       CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+       aa = ptlrpc_req_async_args(req);
+       aa->aa_oa = oa;
+       aa->aa_requested_nob = requested_nob;
+       aa->aa_nio_count = niocount;
+       aa->aa_page_count = page_count;
+       aa->aa_resends = 0;
+       aa->aa_ppga = pga;
+       aa->aa_cli = cli;
        INIT_LIST_HEAD(&aa->aa_oaps);
-        if (ocapa && reserve)
-                aa->aa_ocapa = capa_get(ocapa);
 
        *reqp = req;
        niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
@@ -1319,26 +1547,136 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
         RETURN(rc);
 }
 
-static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
-                                __u32 client_cksum, __u32 server_cksum, int nob,
-                                obd_count page_count, struct brw_page **pga,
-                                cksum_type_t client_cksum_type)
-{
-        __u32 new_cksum;
-        char *msg;
-        cksum_type_t cksum_type;
+char dbgcksum_file_name[PATH_MAX];
+
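+/* Dump the raw pages of a checksum-mismatched bulk to a file under the debug
+ * log path so the data can be inspected after the fact. */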
+static void dump_all_bulk_pages(struct obdo *oa, __u32 page_count,
+                               struct brw_page **pga, __u32 server_cksum,
+                               __u32 client_cksum)
+{
+       struct file *filp;
+       int rc, i;
+       unsigned int len;
+       char *buf;
+
+       /* Only keep a dump of the pages on the first error for the same range
+        * in the file/fid, not during the resends/retries. */
+       snprintf(dbgcksum_file_name, sizeof(dbgcksum_file_name),
+                "%s-checksum_dump-osc-"DFID":[%llu-%llu]-%x-%x",
+                (strncmp(libcfs_debug_file_path_arr, "NONE", 4) != 0 ?
+                 libcfs_debug_file_path_arr :
+                 LIBCFS_DEBUG_FILE_PATH_DEFAULT),
+                oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : 0ULL,
+                oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
+                oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
+                pga[0]->off,
+                pga[page_count-1]->off + pga[page_count-1]->count - 1,
+                client_cksum, server_cksum);
+       filp = filp_open(dbgcksum_file_name,
+                        O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE, 0600);
+       if (IS_ERR(filp)) {
+               rc = PTR_ERR(filp);
+               if (rc == -EEXIST)
+                       CDEBUG(D_INFO, "%s: can't open to dump pages with "
+                              "checksum error: rc = %d\n", dbgcksum_file_name,
+                              rc);
+               else
+                       CERROR("%s: can't open to dump pages with checksum "
+                              "error: rc = %d\n", dbgcksum_file_name, rc);
+               return;
+       }
+
+       for (i = 0; i < page_count; i++) {
+               len = pga[i]->count;
+               buf = kmap(pga[i]->pg);
+               while (len != 0) {
+                       rc = cfs_kernel_write(filp, buf, len, &filp->f_pos);
+                       if (rc < 0) {
+                               CERROR("%s: wanted to write %u bytes but got "
+                                      "error %d\n", dbgcksum_file_name, len, rc);
+                               break;
+                       }
+                       len -= rc;
+                       buf += rc;
+                       CDEBUG(D_INFO, "%s: wrote %d bytes\n",
+                              dbgcksum_file_name, rc);
+               }
+               kunmap(pga[i]->pg);
+       }
+
+       rc = ll_vfs_fsync_range(filp, 0, LLONG_MAX, 1);
+       if (rc)
+               CERROR("%s: sync returns %d\n", dbgcksum_file_name, rc);
+       filp_close(filp, NULL);
+       return;
+}
+
+static int
+check_write_checksum(struct obdo *oa, const struct lnet_process_id *peer,
+                    __u32 client_cksum, __u32 server_cksum,
+                    struct osc_brw_async_args *aa)
+{
+       const char *obd_name = aa->aa_cli->cl_import->imp_obd->obd_name;
+       enum cksum_types cksum_type;
+       obd_dif_csum_fn *fn = NULL;
+       int sector_size = 0;
+       bool t10pi = false;
+       __u32 new_cksum;
+       char *msg;
+       int rc;
 
         if (server_cksum == client_cksum) {
                 CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
                 return 0;
         }
 
-        cksum_type = cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
-                                       oa->o_flags : 0);
-        new_cksum = osc_checksum_bulk(nob, page_count, pga, OST_WRITE,
-                                      cksum_type);
+       if (aa->aa_cli->cl_checksum_dump)
+               dump_all_bulk_pages(oa, aa->aa_page_count, aa->aa_ppga,
+                                   server_cksum, client_cksum);
+
+       cksum_type = obd_cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
+                                          oa->o_flags : 0);
+
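+       /* Map the checksum type the server used onto a T10-PI guard function
+        * and sector size; other types use the plain bulk checksum below. */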
+       switch (cksum_type) {
+       case OBD_CKSUM_T10IP512:
+               t10pi = true;
+               fn = obd_dif_ip_fn;
+               sector_size = 512;
+               break;
+       case OBD_CKSUM_T10IP4K:
+               t10pi = true;
+               fn = obd_dif_ip_fn;
+               sector_size = 4096;
+               break;
+       case OBD_CKSUM_T10CRC512:
+               t10pi = true;
+               fn = obd_dif_crc_fn;
+               sector_size = 512;
+               break;
+       case OBD_CKSUM_T10CRC4K:
+               t10pi = true;
+               fn = obd_dif_crc_fn;
+               sector_size = 4096;
+               break;
+       default:
+               break;
+       }
+
+       if (t10pi)
+               rc = osc_checksum_bulk_t10pi(obd_name, aa->aa_requested_nob,
+                                            aa->aa_page_count,
+                                            aa->aa_ppga,
+                                            OST_WRITE,
+                                            fn,
+                                            sector_size,
+                                            &new_cksum);
+       else
+               rc = osc_checksum_bulk(aa->aa_requested_nob, aa->aa_page_count,
+                                      aa->aa_ppga, OST_WRITE, cksum_type,
+                                      &new_cksum);
 
-        if (cksum_type != client_cksum_type)
+       if (rc < 0)
+               msg = "failed to calculate the client write checksum";
+       else if (cksum_type != obd_cksum_type_unpack(aa->aa_oa->o_flags))
                 msg = "the server did not use the checksum type specified in "
                       "the original request - likely a protocol problem";
         else if (new_cksum == server_cksum)
@@ -1350,28 +1688,32 @@ static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
                 msg = "changed in transit AND doesn't match the original - "
                       "likely false positive due to mmap IO (bug 11742)";
 
-       LCONSOLE_ERROR_MSG(0x132, "BAD WRITE CHECKSUM: %s: from %s inode "DFID
-                          " object "DOSTID" extent ["LPU64"-"LPU64"]\n",
-                          msg, libcfs_nid2str(peer->nid),
+       LCONSOLE_ERROR_MSG(0x132, "%s: BAD WRITE CHECKSUM: %s: from %s inode "
+                          DFID " object "DOSTID" extent [%llu-%llu], original "
+                          "client csum %x (type %x), server csum %x (type %x),"
+                          " client csum now %x\n",
+                          obd_name, msg, libcfs_nid2str(peer->nid),
                           oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
                           oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
                           oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
-                          POSTID(&oa->o_oi), pga[0]->off,
-                          pga[page_count-1]->off + pga[page_count-1]->count - 1);
-       CERROR("original client csum %x (type %x), server csum %x (type %x), "
-              "client csum now %x\n", client_cksum, client_cksum_type,
-              server_cksum, cksum_type, new_cksum);
+                          POSTID(&oa->o_oi), aa->aa_ppga[0]->off,
+                          aa->aa_ppga[aa->aa_page_count - 1]->off +
+                               aa->aa_ppga[aa->aa_page_count-1]->count - 1,
+                          client_cksum,
+                          obd_cksum_type_unpack(aa->aa_oa->o_flags),
+                          server_cksum, cksum_type, new_cksum);
        return 1;
 }
 
 /* Note rc enters this function as number of bytes transferred */
 static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
 {
-        struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
-        const lnet_process_id_t *peer =
-                        &req->rq_import->imp_connection->c_peer;
-        struct client_obd *cli = aa->aa_cli;
-        struct ost_body *body;
+       struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
+       struct client_obd *cli = aa->aa_cli;
+       const char *obd_name = cli->cl_import->imp_obd->obd_name;
+       const struct lnet_process_id *peer =
+               &req->rq_import->imp_connection->c_peer;
+       struct ost_body *body;
        u32 client_cksum = 0;
         ENTRY;
 
@@ -1387,15 +1729,17 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
                 RETURN(-EPROTO);
         }
 
-        /* set/clear over quota flag for a uid/gid */
-        if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
-            body->oa.o_valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA)) {
-                unsigned int qid[MAXQUOTAS] = { body->oa.o_uid, body->oa.o_gid };
-
-                CDEBUG(D_QUOTA, "setdq for [%u %u] with valid "LPX64", flags %x\n",
-                       body->oa.o_uid, body->oa.o_gid, body->oa.o_valid,
-                       body->oa.o_flags);
-                osc_quota_setdq(cli, qid, body->oa.o_valid, body->oa.o_flags);
+       /* set/clear over quota flag for a uid/gid/projid */
+       if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
+           body->oa.o_valid & (OBD_MD_FLALLQUOTA)) {
+               unsigned qid[LL_MAXQUOTAS] = {
+                                        body->oa.o_uid, body->oa.o_gid,
+                                        body->oa.o_projid };
+               CDEBUG(D_QUOTA, "setdq for [%u %u %u] with valid %#llx, flags %x\n",
+                      body->oa.o_uid, body->oa.o_gid, body->oa.o_projid,
+                      body->oa.o_valid, body->oa.o_flags);
+               osc_quota_setdq(cli, qid, body->oa.o_valid,
+                               body->oa.o_flags);
         }
 
         osc_update_grant(cli, body);
@@ -1411,16 +1755,14 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
                         CERROR("Unexpected +ve rc %d\n", rc);
                         RETURN(-EPROTO);
                 }
-                LASSERT(req->rq_bulk->bd_nob == aa->aa_requested_nob);
 
-                if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
+               if (req->rq_bulk != NULL &&
+                   sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
                         RETURN(-EAGAIN);
 
                 if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
                     check_write_checksum(&body->oa, peer, client_cksum,
-                                         body->oa.o_cksum, aa->aa_requested_nob,
-                                         aa->aa_page_count, aa->aa_ppga,
-                                         cksum_type_unpack(aa->aa_oa->o_flags)))
+                                        body->oa.o_cksum, aa))
                         RETURN(-EAGAIN);
 
                 rc = check_write_rcs(req, aa->aa_requested_nob,aa->aa_nio_count,
@@ -1430,8 +1772,14 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
 
         /* The rest of this function executes only for OST_READs */
 
-        /* if unwrap_bulk failed, return -EAGAIN to retry */
-        rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
+       if (req->rq_bulk == NULL) {
+               rc = req_capsule_get_size(&req->rq_pill, &RMF_SHORT_IO,
+                                         RCL_SERVER);
+               LASSERT(rc == req->rq_status);
+       } else {
+               /* if unwrap_bulk failed, return -EAGAIN to retry */
+               rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
+       }
         if (rc < 0)
                 GOTO(out, rc = -EAGAIN);
 
@@ -1441,12 +1789,41 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
                 RETURN(-EPROTO);
         }
 
-        if (rc != req->rq_bulk->bd_nob_transferred) {
+       if (req->rq_bulk != NULL && rc != req->rq_bulk->bd_nob_transferred) {
                 CERROR ("Unexpected rc %d (%d transferred)\n",
                         rc, req->rq_bulk->bd_nob_transferred);
                 return (-EPROTO);
         }
 
+       if (req->rq_bulk == NULL) {
+               /* short io */
+               int nob, pg_count, i = 0;
+               unsigned char *buf;
+
+               CDEBUG(D_CACHE, "Using short io read, size %d\n", rc);
+               pg_count = aa->aa_page_count;
+               buf = req_capsule_server_sized_get(&req->rq_pill, &RMF_SHORT_IO,
+                                                  rc);
+               nob = rc;
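+               /* Scatter the inline reply buffer back into the brw pages. */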
+               while (nob > 0 && pg_count > 0) {
+                       unsigned char *ptr;
+                       int count = aa->aa_ppga[i]->count > nob ?
+                                   nob : aa->aa_ppga[i]->count;
+
+                       CDEBUG(D_CACHE, "page %p count %d\n",
+                              aa->aa_ppga[i]->pg, count);
+                       ptr = ll_kmap_atomic(aa->aa_ppga[i]->pg, KM_USER0);
+                       memcpy(ptr + (aa->aa_ppga[i]->off & ~PAGE_MASK), buf,
+                              count);
+                       ll_kunmap_atomic((void *) ptr, KM_USER0);
+
+                       buf += count;
+                       nob -= count;
+                       i++;
+                       pg_count--;
+               }
+       }
+
         if (rc < aa->aa_requested_nob)
                 handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);
 
@@ -1455,39 +1832,53 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
                u32        server_cksum = body->oa.o_cksum;
                char      *via = "";
                char      *router = "";
-                cksum_type_t cksum_type;
-
-                cksum_type = cksum_type_unpack(body->oa.o_valid &OBD_MD_FLFLAGS?
-                                               body->oa.o_flags : 0);
-                client_cksum = osc_checksum_bulk(rc, aa->aa_page_count,
-                                                 aa->aa_ppga, OST_READ,
-                                                 cksum_type);
-
-               if (peer->nid != req->rq_bulk->bd_sender) {
+               enum cksum_types cksum_type;
+               u32 o_flags = body->oa.o_valid & OBD_MD_FLFLAGS ?
+                       body->oa.o_flags : 0;
+
+               cksum_type = obd_cksum_type_unpack(o_flags);
+               rc = osc_checksum_bulk_rw(obd_name, cksum_type, rc,
+                                         aa->aa_page_count, aa->aa_ppga,
+                                         OST_READ, &client_cksum);
+               if (rc < 0)
+                       GOTO(out, rc);
+
+               if (req->rq_bulk != NULL &&
+                   peer->nid != req->rq_bulk->bd_sender) {
                        via = " via ";
                        router = libcfs_nid2str(req->rq_bulk->bd_sender);
                }
 
                if (server_cksum != client_cksum) {
+                       struct ost_body *clbody;
+                       u32 page_count = aa->aa_page_count;
+
+                       clbody = req_capsule_client_get(&req->rq_pill,
+                                                       &RMF_OST_BODY);
+                       if (cli->cl_checksum_dump)
+                               dump_all_bulk_pages(&clbody->oa, page_count,
+                                                   aa->aa_ppga, server_cksum,
+                                                   client_cksum);
+
                        LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
                                           "%s%s%s inode "DFID" object "DOSTID
-                                          " extent ["LPU64"-"LPU64"]\n",
-                                          req->rq_import->imp_obd->obd_name,
+                                          " extent [%llu-%llu], client %x, "
+                                          "server %x, cksum_type %x\n",
+                                          obd_name,
                                           libcfs_nid2str(peer->nid),
                                           via, router,
-                                          body->oa.o_valid & OBD_MD_FLFID ?
-                                               body->oa.o_parent_seq : (__u64)0,
-                                          body->oa.o_valid & OBD_MD_FLFID ?
-                                               body->oa.o_parent_oid : 0,
-                                          body->oa.o_valid & OBD_MD_FLFID ?
-                                               body->oa.o_parent_ver : 0,
+                                          clbody->oa.o_valid & OBD_MD_FLFID ?
+                                               clbody->oa.o_parent_seq : 0ULL,
+                                          clbody->oa.o_valid & OBD_MD_FLFID ?
+                                               clbody->oa.o_parent_oid : 0,
+                                          clbody->oa.o_valid & OBD_MD_FLFID ?
+                                               clbody->oa.o_parent_ver : 0,
                                           POSTID(&body->oa.o_oi),
                                           aa->aa_ppga[0]->off,
-                                          aa->aa_ppga[aa->aa_page_count-1]->off +
-                                          aa->aa_ppga[aa->aa_page_count-1]->count -
-                                                                       1);
-                       CERROR("client %x, server %x, cksum_type %x\n",
-                              client_cksum, server_cksum, cksum_type);
+                                          aa->aa_ppga[page_count-1]->off +
+                                          aa->aa_ppga[page_count-1]->count - 1,
+                                          client_cksum, server_cksum,
+                                          cksum_type);
                        cksum_counter = 0;
                        aa->aa_oa->o_cksum = client_cksum;
                        rc = -EAGAIN;
@@ -1525,12 +1916,10 @@ static int osc_brw_redo_request(struct ptlrpc_request *request,
        DEBUG_REQ(rc == -EINPROGRESS ? D_RPCTRACE : D_ERROR, request,
                  "redo for recoverable error %d", rc);
 
-        rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
-                                        OST_WRITE ? OBD_BRW_WRITE :OBD_BRW_READ,
-                                  aa->aa_cli, aa->aa_oa,
-                                  NULL /* lsm unused by osc currently */,
-                                  aa->aa_page_count, aa->aa_ppga,
-                                  &new_req, aa->aa_ocapa, 0, 1);
+       rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
+                               OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
+                                 aa->aa_cli, aa->aa_oa, aa->aa_page_count,
+                                 aa->aa_ppga, &new_req, 1);
         if (rc)
                 RETURN(rc);
 
@@ -1554,9 +1943,9 @@ static int osc_brw_redo_request(struct ptlrpc_request *request,
        /* cap the resend delay to the current request timeout; this is similar
         * to what ptlrpc does (see after_reply()) */
        if (aa->aa_resends > new_req->rq_timeout)
-               new_req->rq_sent = cfs_time_current_sec() + new_req->rq_timeout;
+               new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout;
        else
-               new_req->rq_sent = cfs_time_current_sec() + aa->aa_resends;
+               new_req->rq_sent = ktime_get_real_seconds() + aa->aa_resends;
         new_req->rq_generation_set = 1;
         new_req->rq_import_generation = request->rq_import_generation;
 
@@ -1575,14 +1964,11 @@ static int osc_brw_redo_request(struct ptlrpc_request *request,
                 }
         }
 
-        new_aa->aa_ocapa = aa->aa_ocapa;
-        aa->aa_ocapa = NULL;
-
        /* XXX: This code will run into problem if we're going to support
         * to add a series of BRW RPCs into a self-defined ptlrpc_request_set
         * and wait for all of them to be finished. We should inherit request
         * set from old request. */
-       ptlrpcd_add_req(new_req, PDL_POLICY_SAME, -1);
+       ptlrpcd_add_req(new_req);
 
        DEBUG_REQ(D_INFO, new_req, "new request");
        RETURN(0);
@@ -1619,7 +2005,7 @@ static void sort_brw_pages(struct brw_page **array, int num)
         } while (stride > 1);
 }
 
-static void osc_release_ppga(struct brw_page **ppga, obd_count count)
+static void osc_release_ppga(struct brw_page **ppga, size_t count)
 {
         LASSERT(ppga != NULL);
         OBD_FREE(ppga, sizeof(*ppga) * count);
@@ -1632,13 +2018,14 @@ static int brw_interpret(const struct lu_env *env,
        struct osc_extent *ext;
        struct osc_extent *tmp;
        struct client_obd *cli = aa->aa_cli;
+       unsigned long           transferred = 0;
         ENTRY;
 
         rc = osc_brw_fini_request(req, rc);
         CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
         /* When server return -EINPROGRESS, client should always retry
          * regardless of the number of times the bulk was resent already. */
-       if (osc_recoverable_error(rc)) {
+       if (osc_recoverable_error(rc) && !req->rq_no_delay) {
                if (req->rq_import_generation !=
                    req->rq_import->imp_generation) {
                        CDEBUG(D_HA, "%s: resend cross eviction for object: "
@@ -1650,7 +2037,7 @@ static int brw_interpret(const struct lu_env *env,
                        rc = osc_brw_redo_request(req, aa, rc);
                } else {
                        CERROR("%s: too many resent retries for object: "
-                              ""LPU64":"LPU64", rc = %d.\n",
+                              "%llu:%llu, rc = %d.\n",
                               req->rq_import->imp_obd->obd_name,
                               POSTID(&aa->aa_oa->o_oi), rc);
                }
@@ -1661,11 +2048,6 @@ static int brw_interpret(const struct lu_env *env,
                        rc = -EIO;
        }
 
-        if (aa->aa_ocapa) {
-                capa_put(aa->aa_ocapa);
-                aa->aa_ocapa = NULL;
-        }
-
        if (rc == 0) {
                struct obdo *oa = aa->aa_oa;
                struct cl_attr *attr = &osc_env_info(env)->oti_attr;
@@ -1724,15 +2106,18 @@ static int brw_interpret(const struct lu_env *env,
 
        list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
                list_del_init(&ext->oe_link);
-               osc_extent_finish(env, ext, 1, rc);
+               osc_extent_finish(env, ext, 1,
+                                 rc && req->rq_no_delay ? -EWOULDBLOCK : rc);
        }
        LASSERT(list_empty(&aa->aa_exts));
        LASSERT(list_empty(&aa->aa_oaps));
 
-       cl_req_completion(env, aa->aa_clerq, rc < 0 ? rc :
-                         req->rq_bulk->bd_nob_transferred);
+       transferred = (req->rq_bulk == NULL ? /* short io */
+                      aa->aa_requested_nob :
+                      req->rq_bulk->bd_nob_transferred);
+
        osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
-       ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred);
+       ptlrpc_lprocfs_brw(req, transferred);
 
        spin_lock(&cli->cl_loi_list_lock);
        /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
@@ -1745,7 +2130,7 @@ static int brw_interpret(const struct lu_env *env,
        osc_wake_cache_waiters(cli);
        spin_unlock(&cli->cl_loi_list_lock);
 
-       osc_io_unplug(env, cli, NULL, PDL_POLICY_SAME);
+       osc_io_unplug(env, cli, NULL);
        RETURN(rc);
 }
 
@@ -1773,7 +2158,7 @@ static void brw_commit(struct ptlrpc_request *req)
  * Extents in the list must be in OES_RPC state.
  */
 int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
-                 struct list_head *ext_list, int cmd, pdl_policy_t pol)
+                 struct list_head *ext_list, int cmd)
 {
        struct ptlrpc_request           *req = NULL;
        struct osc_extent               *ext;
@@ -1781,19 +2166,20 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
        struct osc_brw_async_args       *aa = NULL;
        struct obdo                     *oa = NULL;
        struct osc_async_page           *oap;
-       struct osc_async_page           *tmp;
-       struct cl_req                   *clerq = NULL;
-       enum cl_req_type                crt = (cmd & OBD_BRW_WRITE) ? CRT_WRITE :
-                                                                     CRT_READ;
+       struct osc_object               *obj = NULL;
        struct cl_req_attr              *crattr = NULL;
-       obd_off                         starting_offset = OBD_OBJECT_EOF;
-       obd_off                         ending_offset = 0;
+       loff_t                          starting_offset = OBD_OBJECT_EOF;
+       loff_t                          ending_offset = 0;
        int                             mpflag = 0;
        int                             mem_tight = 0;
        int                             page_count = 0;
        bool                            soft_sync = false;
+       bool                            interrupted = false;
+       bool                            ndelay = false;
        int                             i;
+       int                             grant = 0;
        int                             rc;
+       __u32                           layout_version = 0;
        struct list_head                rpc_list = LIST_HEAD_INIT(rpc_list);
        struct ost_body                 *body;
        ENTRY;
@@ -1803,30 +2189,17 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
        list_for_each_entry(ext, ext_list, oe_link) {
                LASSERT(ext->oe_state == OES_RPC);
                mem_tight |= ext->oe_memalloc;
-               list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
-                       ++page_count;
-                       list_add_tail(&oap->oap_rpc_item, &rpc_list);
-                       if (starting_offset > oap->oap_obj_off)
-                               starting_offset = oap->oap_obj_off;
-                       else
-                               LASSERT(oap->oap_page_off == 0);
-                       if (ending_offset < oap->oap_obj_off + oap->oap_count)
-                               ending_offset = oap->oap_obj_off +
-                                               oap->oap_count;
-                       else
-                               LASSERT(oap->oap_page_off + oap->oap_count ==
-                                       PAGE_CACHE_SIZE);
-               }
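+               /* First pass: only tally grant, page count and layout version
+                * per extent; the pages themselves are added to the pga in the
+                * second pass below. */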
+               grant += ext->oe_grants;
+               page_count += ext->oe_nr_pages;
+               layout_version = MAX(layout_version, ext->oe_layout_version);
+               if (obj == NULL)
+                       obj = ext->oe_obj;
        }
 
        soft_sync = osc_over_unstable_soft_limit(cli);
        if (mem_tight)
                mpflag = cfs_memory_pressure_get_and_set();
 
-       OBD_ALLOC(crattr, sizeof(*crattr));
-       if (crattr == NULL)
-               GOTO(out, rc = -ENOMEM);
-
        OBD_ALLOC(pga, sizeof(*pga) * page_count);
        if (pga == NULL)
                GOTO(out, rc = -ENOMEM);
@@ -1836,41 +2209,59 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
                GOTO(out, rc = -ENOMEM);
 
        i = 0;
-       list_for_each_entry(oap, &rpc_list, oap_rpc_item) {
-               struct cl_page *page = oap2cl_page(oap);
-               if (clerq == NULL) {
-                       clerq = cl_req_alloc(env, page, crt,
-                                            1 /* only 1-object rpcs for now */);
-                       if (IS_ERR(clerq))
-                               GOTO(out, rc = PTR_ERR(clerq));
+       list_for_each_entry(ext, ext_list, oe_link) {
+               list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
+                       if (mem_tight)
+                               oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
+                       if (soft_sync)
+                               oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
+                       pga[i] = &oap->oap_brw_page;
+                       pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
+                       i++;
+
+                       list_add_tail(&oap->oap_rpc_item, &rpc_list);
+                       if (starting_offset == OBD_OBJECT_EOF ||
+                           starting_offset > oap->oap_obj_off)
+                               starting_offset = oap->oap_obj_off;
+                       else
+                               LASSERT(oap->oap_page_off == 0);
+                       if (ending_offset < oap->oap_obj_off + oap->oap_count)
+                               ending_offset = oap->oap_obj_off +
+                                               oap->oap_count;
+                       else
+                               LASSERT(oap->oap_page_off + oap->oap_count ==
+                                       PAGE_SIZE);
+                       if (oap->oap_interrupted)
+                               interrupted = true;
                }
-               if (mem_tight)
-                       oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
-               if (soft_sync)
-                       oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
-               pga[i] = &oap->oap_brw_page;
-               pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
-               CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
-                      pga[i]->pg, page_index(oap->oap_page), oap,
-                      pga[i]->flag);
-               i++;
-               cl_req_page_add(env, clerq, page);
+               if (ext->oe_ndelay)
+                       ndelay = true;
        }
 
-       /* always get the data for the obdo for the rpc */
-       LASSERT(clerq != NULL);
+       /* first page in the list */
+       oap = list_entry(rpc_list.next, typeof(*oap), oap_rpc_item);
+
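+       /* Fill the request attributes from the object; cra_flags = ~0 asks
+        * cl_req_attr_set() to set every attribute it knows about. */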
+       crattr = &osc_env_info(env)->oti_req_attr;
+       memset(crattr, 0, sizeof(*crattr));
+       crattr->cra_type = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
+       crattr->cra_flags = ~0ULL;
+       crattr->cra_page = oap2cl_page(oap);
        crattr->cra_oa = oa;
-       cl_req_attr_set(env, clerq, crattr, ~0ULL);
+       cl_req_attr_set(env, osc2cl(obj), crattr);
 
-       rc = cl_req_prep(env, clerq);
-       if (rc != 0) {
-               CERROR("cl_req_prep failed: %d\n", rc);
-               GOTO(out, rc);
+       if (cmd == OBD_BRW_WRITE) {
+               oa->o_grant_used = grant;
+               if (layout_version > 0) {
+                       CDEBUG(D_LAYOUT, DFID": write with layout version %u\n",
+                              PFID(&oa->o_oi.oi_fid), layout_version);
+
+                       oa->o_layout_version = layout_version;
+                       oa->o_valid |= OBD_MD_LAYOUT_VERSION;
+               }
        }
 
        sort_brw_pages(pga, page_count);
-       rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count,
-                       pga, &req, crattr->cra_capa, 1, 0);
+       rc = osc_brw_prep_request(cmd, cli, oa, page_count, pga, &req, 0);
        if (rc != 0) {
                CERROR("prep_req failed: %d\n", rc);
                GOTO(out, rc);
@@ -1878,9 +2269,16 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
 
        req->rq_commit_cb = brw_commit;
        req->rq_interpret_reply = brw_interpret;
-
-       if (mem_tight != 0)
-               req->rq_memalloc = 1;
+       req->rq_memalloc = mem_tight != 0;
+       oap->oap_request = ptlrpc_request_addref(req);
+       if (interrupted && !req->rq_intr)
+               ptlrpc_mark_interrupted(req);
+       if (ndelay) {
+               req->rq_no_resend = req->rq_no_delay = 1;
+               /* Probably set a shorter timeout value here to handle
+                * ETIMEDOUT in brw_interpret() correctly. */
+               /* lustre_msg_set_timeout(req, req->rq_timeout / 2); */
+       }
 
        /* Need to update the timestamps after the request is built in case
         * we race with setattr (locally or in queue at OST).  If OST gets
@@ -1889,9 +2287,8 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
         * way to do this in a single call.  bug 10150 */
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        crattr->cra_oa = &body->oa;
-       cl_req_attr_set(env, clerq, crattr,
-                       OBD_MD_FLMTIME|OBD_MD_FLCTIME|OBD_MD_FLATIME);
-
+       crattr->cra_flags = OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME;
+       cl_req_attr_set(env, osc2cl(obj), crattr);
        lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
 
        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
@@ -1900,26 +2297,9 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
        list_splice_init(&rpc_list, &aa->aa_oaps);
        INIT_LIST_HEAD(&aa->aa_exts);
        list_splice_init(ext_list, &aa->aa_exts);
-       aa->aa_clerq = clerq;
-
-       /* queued sync pages can be torn down while the pages
-        * were between the pending list and the rpc */
-       tmp = NULL;
-       list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
-               /* only one oap gets a request reference */
-               if (tmp == NULL)
-                       tmp = oap;
-               if (oap->oap_interrupted && !req->rq_intr) {
-                       CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
-                                       oap, req);
-                       ptlrpc_mark_interrupted(req);
-               }
-       }
-       if (tmp != NULL)
-               tmp->oap_request = ptlrpc_request_addref(req);
 
        spin_lock(&cli->cl_loi_list_lock);
-       starting_offset >>= PAGE_CACHE_SHIFT;
+       starting_offset >>= PAGE_SHIFT;
        if (cmd == OBD_BRW_READ) {
                cli->cl_r_in_flight++;
                lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
@@ -1938,20 +2318,9 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
        DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %ur/%uw in flight",
                  page_count, aa, cli->cl_r_in_flight,
                  cli->cl_w_in_flight);
+       OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_IO, cfs_fail_val);
 
-       /* XXX: Maybe the caller can check the RPC bulk descriptor to
-        * see which CPU/NUMA node the majority of pages were allocated
-        * on, and try to assign the async RPC to the CPU core
-        * (PDL_POLICY_PREFERRED) to reduce cross-CPU memory traffic.
-        *
-        * But on the other hand, we expect that multiple ptlrpcd
-        * threads and the initial write sponsor can run in parallel,
-        * especially when data checksum is enabled, which is CPU-bound
-        * operation and single ptlrpcd thread cannot process in time.
-        * So more ptlrpcd threads sharing BRW load
-        * (with PDL_POLICY_ROUND) seems better.
-        */
-       ptlrpcd_add_req(req, pol, -1);
+       ptlrpcd_add_req(req);
        rc = 0;
        EXIT;
 
@@ -1959,11 +2328,6 @@ out:
        if (mem_tight != 0)
                cfs_memory_pressure_restore(mpflag);
 
-       if (crattr != NULL) {
-               capa_put(crattr->cra_capa);
-               OBD_FREE(crattr, sizeof(*crattr));
-       }
-
        if (rc != 0) {
                LASSERT(req == NULL);
 
@@ -1979,23 +2343,15 @@ out:
                        list_del_init(&ext->oe_link);
                        osc_extent_finish(env, ext, 0, rc);
                }
-               if (clerq && !IS_ERR(clerq))
-                       cl_req_completion(env, clerq, rc);
        }
        RETURN(rc);
 }
 
-static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
-                                        struct ldlm_enqueue_info *einfo)
+static int osc_set_lock_data(struct ldlm_lock *lock, void *data)
 {
-        void *data = einfo->ei_cbdata;
         int set = 0;
 
         LASSERT(lock != NULL);
-        LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl);
-        LASSERT(lock->l_resource->lr_type == einfo->ei_type);
-        LASSERT(lock->l_completion_ast == einfo->ei_cb_cp);
-        LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);
 
         lock_res_and_lock(lock);
 
@@ -2009,25 +2365,10 @@ static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
        return set;
 }
 
-static int osc_set_data_with_check(struct lustre_handle *lockh,
-                                   struct ldlm_enqueue_info *einfo)
-{
-        struct ldlm_lock *lock = ldlm_handle2lock(lockh);
-        int set = 0;
-
-        if (lock != NULL) {
-                set = osc_set_lock_data_with_check(lock, einfo);
-                LDLM_LOCK_PUT(lock);
-        } else
-                CERROR("lockh %p, data %p - client evicted?\n",
-                       lockh, einfo->ei_cbdata);
-        return set;
-}
-
-static int osc_enqueue_fini(struct ptlrpc_request *req,
-                           osc_enqueue_upcall_f upcall, void *cookie,
-                           struct lustre_handle *lockh, ldlm_mode_t mode,
-                           __u64 *flags, int agl, int errcode)
+int osc_enqueue_fini(struct ptlrpc_request *req, osc_enqueue_upcall_f upcall,
+                    void *cookie, struct lustre_handle *lockh,
+                    enum ldlm_mode mode, __u64 *flags, bool speculative,
+                    int errcode)
 {
        bool intent = *flags & LDLM_FL_HAS_INTENT;
        int rc;
@@ -2044,7 +2385,7 @@ static int osc_enqueue_fini(struct ptlrpc_request *req,
                        ptlrpc_status_ntoh(rep->lock_policy_res1);
                if (rep->lock_policy_res1)
                        errcode = rep->lock_policy_res1;
-               if (!agl)
+               if (!speculative)
                        *flags |= LDLM_FL_LVB_READY;
        } else if (errcode == ELDLM_OK) {
                *flags |= LDLM_FL_LVB_READY;
@@ -2059,16 +2400,15 @@ static int osc_enqueue_fini(struct ptlrpc_request *req,
        if (errcode == ELDLM_OK && lustre_handle_is_used(lockh))
                ldlm_lock_decref(lockh, mode);
 
-        RETURN(rc);
+       RETURN(rc);
 }
 
-static int osc_enqueue_interpret(const struct lu_env *env,
-                                 struct ptlrpc_request *req,
-                                 struct osc_enqueue_args *aa, int rc)
+int osc_enqueue_interpret(const struct lu_env *env, struct ptlrpc_request *req,
+                         struct osc_enqueue_args *aa, int rc)
 {
        struct ldlm_lock *lock;
        struct lustre_handle *lockh = &aa->oa_lockh;
-       ldlm_mode_t mode = aa->oa_mode;
+       enum ldlm_mode mode = aa->oa_mode;
        struct ost_lvb *lvb = aa->oa_lvb;
        __u32 lvb_len = sizeof(*lvb);
        __u64 flags = 0;
@@ -2079,7 +2419,7 @@ static int osc_enqueue_interpret(const struct lu_env *env,
         * be valid. */
        lock = ldlm_handle2lock(lockh);
        LASSERTF(lock != NULL,
-                "lockh "LPX64", req %p, aa %p - client evicted?\n",
+                "lockh %#llx, req %p, aa %p - client evicted?\n",
                 lockh->cookie, req, aa);
 
        /* Take an additional reference so that a blocking AST that
@@ -2094,7 +2434,7 @@ static int osc_enqueue_interpret(const struct lu_env *env,
        /* Let CP AST to grant the lock first. */
        OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
 
-       if (aa->oa_agl) {
+       if (aa->oa_speculative) {
                LASSERT(aa->oa_lvb == NULL);
                LASSERT(aa->oa_flags == NULL);
                aa->oa_flags = &flags;
@@ -2106,9 +2446,9 @@ static int osc_enqueue_interpret(const struct lu_env *env,
                                   lockh, rc);
        /* Complete osc stuff. */
        rc = osc_enqueue_fini(req, aa->oa_upcall, aa->oa_cookie, lockh, mode,
-                             aa->oa_flags, aa->oa_agl, rc);
+                             aa->oa_flags, aa->oa_speculative, rc);
 
-        OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
+       OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
 
        ldlm_lock_decref(lockh, mode);
        LDLM_LOCK_PUT(lock);
@@ -2125,34 +2465,35 @@ struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
  * is evicted from the cluster -- such scenarios make life difficult, so
  * release locks just after they are obtained. */
 int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
-                    __u64 *flags, ldlm_policy_data_t *policy,
+                    __u64 *flags, union ldlm_policy_data *policy,
                     struct ost_lvb *lvb, int kms_valid,
                     osc_enqueue_upcall_f upcall, void *cookie,
                     struct ldlm_enqueue_info *einfo,
-                    struct ptlrpc_request_set *rqset, int async, int agl)
+                    struct ptlrpc_request_set *rqset, int async,
+                    bool speculative)
 {
        struct obd_device *obd = exp->exp_obd;
        struct lustre_handle lockh = { 0 };
        struct ptlrpc_request *req = NULL;
        int intent = *flags & LDLM_FL_HAS_INTENT;
-       __u64 match_lvb = agl ? 0 : LDLM_FL_LVB_READY;
-       ldlm_mode_t mode;
+       __u64 match_flags = *flags;
+       enum ldlm_mode mode;
        int rc;
        ENTRY;
 
         /* Filesystem lock extents are extended to page boundaries so that
          * dealing with the page cache is a little smoother.  */
-        policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
-        policy->l_extent.end |= ~CFS_PAGE_MASK;
-
-        /*
-         * kms is not valid when either object is completely fresh (so that no
-         * locks are cached), or object was evicted. In the latter case cached
-         * lock cannot be used, because it would prime inode state with
-         * potentially stale LVB.
-         */
-        if (!kms_valid)
-                goto no_match;
+       policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
+       policy->l_extent.end |= ~PAGE_MASK;
+
+       /*
+        * kms is not valid when either object is completely fresh (so that no
+        * locks are cached), or object was evicted. In the latter case cached
+        * lock cannot be used, because it would prime inode state with
+        * potentially stale LVB.
+        */
+       if (!kms_valid)
+               goto no_match;
 
         /* Next, search for already existing extent locks that will cover us */
         /* If we're trying to read, we also search for an existing PW lock.  The
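The PAGE_MASK arithmetic a few lines above widens the requested byte range to whole pages before any lock matching is attempted, so DLM extents always cover complete pages. A minimal standalone sketch of the same arithmetic, assuming 4 KiB pages and defining the mask locally rather than taking it from the kernel headers:

#include <stdio.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096ULL
#define SKETCH_PAGE_MASK (~(SKETCH_PAGE_SIZE - 1))	/* ...fffff000 for 4 KiB pages */

int main(void)
{
	uint64_t start = 5000, end = 9000;	/* arbitrary byte range */

	/* Same arithmetic as osc_enqueue_base(): round start down and
	 * end up to page boundaries. */
	start -= start & ~SKETCH_PAGE_MASK;	/* 5000 -> 4096 */
	end   |= ~SKETCH_PAGE_MASK;		/* 9000 -> 12287 */

	printf("extent [%llu, %llu]\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}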
@@ -2169,7 +2510,14 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
         mode = einfo->ei_mode;
         if (einfo->ei_mode == LCK_PR)
                 mode |= LCK_PW;
-        mode = ldlm_lock_match(obd->obd_namespace, *flags | match_lvb, res_id,
+       /* Normal lock requests must wait for the LVB to be ready before
+        * matching a lock; speculative lock requests do not need to,
+        * because they will not actually use the lock. */
+       if (!speculative)
+               match_flags |= LDLM_FL_LVB_READY;
+       if (intent != 0)
+               match_flags |= LDLM_FL_BLOCK_GRANTED;
+       mode = ldlm_lock_match(obd->obd_namespace, match_flags, res_id,
                               einfo->ei_type, policy, mode, &lockh, 0);
        if (mode) {
                struct ldlm_lock *matched;
@@ -2178,14 +2526,23 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
                        RETURN(ELDLM_OK);
 
                matched = ldlm_handle2lock(&lockh);
-               if (agl) {
-                       /* AGL enqueues DLM locks speculatively. Therefore if
-                        * it already exists a DLM lock, it wll just inform the
-                        * caller to cancel the AGL process for this stripe. */
+               if (speculative) {
+                       /* This DLM lock request is speculative, and does not
+                        * have an associated IO request. Therefore, if there
+                        * is already a DLM lock, it will just inform the
+                        * caller to cancel the request for this stripe. */
+                       lock_res_and_lock(matched);
+                       if (ldlm_extent_equal(&policy->l_extent,
+                           &matched->l_policy_data.l_extent))
+                               rc = -EEXIST;
+                       else
+                               rc = -ECANCELED;
+                       unlock_res_and_lock(matched);
+
                        ldlm_lock_decref(&lockh, mode);
                        LDLM_LOCK_PUT(matched);
-                       RETURN(-ECANCELED);
-               } else if (osc_set_lock_data_with_check(matched, einfo)) {
+                       RETURN(rc);
+               } else if (osc_set_lock_data(matched, einfo->ei_cbdata)) {
                        *flags |= LDLM_FL_LVB_READY;
 
                        /* We already have a lock, and it's referenced. */
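In the speculative branch above, finding an existing granted lock ends the request either with -EEXIST (a lock with exactly the wanted extent is already cached) or -ECANCELED (a different lock happens to cover the range). A simplified sketch of that decision, with a local extent type standing in for the real l_extent policy data:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct extent { uint64_t start, end; };	/* stand-in for l_extent */

static bool extent_equal(const struct extent *a, const struct extent *b)
{
	return a->start == b->start && a->end == b->end;
}

/* Mirror of the speculative-match branch: an identical lock already
 * exists (-EEXIST), or some other matching lock covers us (-ECANCELED);
 * either way the speculative request is dropped. */
static int speculative_match_rc(const struct extent *want,
				const struct extent *matched)
{
	return extent_equal(want, matched) ? -EEXIST : -ECANCELED;
}

int main(void)
{
	struct extent want  = { 0, 4095 };
	struct extent same  = { 0, 4095 };
	struct extent wider = { 0, 1048575 };

	printf("%d %d\n", speculative_match_rc(&want, &same),
	       speculative_match_rc(&want, &wider));
	return 0;
}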
@@ -2201,7 +2558,7 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
        }
 
 no_match:
-       if (*flags & LDLM_FL_TEST_LOCK)
+       if (*flags & (LDLM_FL_TEST_LOCK | LDLM_FL_MATCH_LOCK))
                RETURN(-ENOLCK);
 
        if (intent) {
@@ -2210,8 +2567,8 @@ no_match:
                if (req == NULL)
                        RETURN(-ENOMEM);
 
-               rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_ENQUEUE);
-               if (rc < 0) {
+               rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
+               if (rc) {
                         ptlrpc_request_free(req);
                         RETURN(rc);
                 }
@@ -2231,38 +2588,38 @@ no_match:
                        struct osc_enqueue_args *aa;
                        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
                        aa = ptlrpc_req_async_args(req);
-                       aa->oa_exp    = exp;
-                       aa->oa_mode   = einfo->ei_mode;
-                       aa->oa_type   = einfo->ei_type;
+                       aa->oa_exp         = exp;
+                       aa->oa_mode        = einfo->ei_mode;
+                       aa->oa_type        = einfo->ei_type;
                        lustre_handle_copy(&aa->oa_lockh, &lockh);
-                       aa->oa_upcall = upcall;
-                       aa->oa_cookie = cookie;
-                       aa->oa_agl    = !!agl;
-                       if (!agl) {
+                       aa->oa_upcall      = upcall;
+                       aa->oa_cookie      = cookie;
+                       aa->oa_speculative = speculative;
+                       if (!speculative) {
                                aa->oa_flags  = flags;
                                aa->oa_lvb    = lvb;
                        } else {
-                               /* AGL is essentially to enqueue an DLM lock
-                                * in advance, so we don't care about the
-                                * result of AGL enqueue. */
+                               /* Speculative locks essentially enqueue a DLM
+                                * lock in advance, so we don't care about the
+                                * result of the enqueue. */
                                aa->oa_lvb    = NULL;
                                aa->oa_flags  = NULL;
                        }
 
-                        req->rq_interpret_reply =
-                                (ptlrpc_interpterer_t)osc_enqueue_interpret;
-                        if (rqset == PTLRPCD_SET)
-                                ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
-                        else
-                                ptlrpc_set_add_req(rqset, req);
-                } else if (intent) {
-                        ptlrpc_req_finished(req);
-                }
-                RETURN(rc);
-        }
+                       req->rq_interpret_reply =
+                               (ptlrpc_interpterer_t)osc_enqueue_interpret;
+                       if (rqset == PTLRPCD_SET)
+                               ptlrpcd_add_req(req);
+                       else
+                               ptlrpc_set_add_req(rqset, req);
+               } else if (intent) {
+                       ptlrpc_req_finished(req);
+               }
+               RETURN(rc);
+       }
 
        rc = osc_enqueue_fini(req, upcall, cookie, &lockh, einfo->ei_mode,
-                             flags, agl, rc);
+                             flags, speculative, rc);
        if (intent)
                ptlrpc_req_finished(req);
 
@@ -2270,22 +2627,22 @@ no_match:
 }
 
 int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
-                  __u32 type, ldlm_policy_data_t *policy, __u32 mode,
-                  __u64 *flags, void *data, struct lustre_handle *lockh,
-                  int unref)
+                  enum ldlm_type type, union ldlm_policy_data *policy,
+                  enum ldlm_mode mode, __u64 *flags, void *data,
+                  struct lustre_handle *lockh, int unref)
 {
        struct obd_device *obd = exp->exp_obd;
        __u64 lflags = *flags;
-       ldlm_mode_t rc;
+       enum ldlm_mode rc;
        ENTRY;
 
-        if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
-                RETURN(-EIO);
+       if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
+               RETURN(-EIO);
 
-        /* Filesystem lock extents are extended to page boundaries so that
-         * dealing with the page cache is a little smoother */
-        policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
-        policy->l_extent.end |= ~CFS_PAGE_MASK;
+       /* Filesystem lock extents are extended to page boundaries so that
+        * dealing with the page cache is a little smoother */
+       policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
+       policy->l_extent.end |= ~PAGE_MASK;
 
         /* Next, search for already existing extent locks that will cover us */
         /* If we're trying to read, we also search for an existing PW lock.  The
@@ -2296,33 +2653,20 @@ int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
                 rc |= LCK_PW;
         rc = ldlm_lock_match(obd->obd_namespace, lflags,
                              res_id, type, policy, rc, lockh, unref);
-        if (rc) {
-                if (data != NULL) {
-                        if (!osc_set_data_with_check(lockh, data)) {
-                                if (!(lflags & LDLM_FL_TEST_LOCK))
-                                        ldlm_lock_decref(lockh, rc);
-                                RETURN(0);
-                        }
-                }
-                if (!(lflags & LDLM_FL_TEST_LOCK) && mode != rc) {
-                        ldlm_lock_addref(lockh, LCK_PR);
-                        ldlm_lock_decref(lockh, LCK_PW);
-                }
-                RETURN(rc);
-        }
-        RETURN(rc);
-}
-
-int osc_cancel_base(struct lustre_handle *lockh, __u32 mode)
-{
-        ENTRY;
+       if (rc == 0 || lflags & LDLM_FL_TEST_LOCK)
+               RETURN(rc);
 
-        if (unlikely(mode == LCK_GROUP))
-                ldlm_lock_decref_and_cancel(lockh, mode);
-        else
-                ldlm_lock_decref(lockh, mode);
+       if (data != NULL) {
+               struct ldlm_lock *lock = ldlm_handle2lock(lockh);
 
-        RETURN(0);
+               LASSERT(lock != NULL);
+               if (!osc_set_lock_data(lock, data)) {
+                       ldlm_lock_decref(lockh, rc);
+                       rc = 0;
+               }
+               LDLM_LOCK_PUT(lock);
+       }
+       RETURN(rc);
 }
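Both osc_enqueue_base() and osc_match_base() widen a PR request to "PR or PW", relying on lock modes being single bits that can be OR-ed into one match mask. A small sketch of that convention; the mode values below are made up for illustration and are not the real enum ldlm_mode constants:

#include <stdio.h>

#define SKETCH_LCK_PR 0x1	/* hypothetical single-bit modes */
#define SKETCH_LCK_PW 0x2

/* Return the granted mode if it is one of the modes in the mask. */
static int lock_match(int granted, int mask)
{
	return (granted & mask) ? granted : 0;
}

int main(void)
{
	int mask = SKETCH_LCK_PR;

	/* A reader can reuse a cached PW lock, so widen the mask. */
	mask |= SKETCH_LCK_PW;

	printf("PW matches: %d\n", lock_match(SKETCH_LCK_PW, mask));	/* 2 */
	printf("PR matches: %d\n", lock_match(SKETCH_LCK_PR, mask));	/* 1 */
	return 0;
}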
 
 static int osc_statfs_interpret(const struct lu_env *env,
@@ -2359,13 +2703,13 @@ out:
 }
 
 static int osc_statfs_async(struct obd_export *exp,
-                            struct obd_info *oinfo, __u64 max_age,
+                           struct obd_info *oinfo, time64_t max_age,
                             struct ptlrpc_request_set *rqset)
 {
         struct obd_device     *obd = class_exp2obd(exp);
         struct ptlrpc_request *req;
         struct osc_async_args *aa;
-        int                    rc;
+       int rc;
         ENTRY;
 
         /* We could possibly pass max_age in the request (as an absolute
@@ -2383,34 +2727,35 @@ static int osc_statfs_async(struct obd_export *exp,
                 ptlrpc_request_free(req);
                 RETURN(rc);
         }
-        ptlrpc_request_set_replen(req);
-        req->rq_request_portal = OST_CREATE_PORTAL;
-        ptlrpc_at_set_req_timeout(req);
+       ptlrpc_request_set_replen(req);
+       req->rq_request_portal = OST_CREATE_PORTAL;
+       ptlrpc_at_set_req_timeout(req);
 
-        if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
-                /* procfs requests not want stat in wait for avoid deadlock */
-                req->rq_no_resend = 1;
-                req->rq_no_delay = 1;
-        }
+       if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
+               /* procfs requests should not wait on statfs to avoid deadlock */
+               req->rq_no_resend = 1;
+               req->rq_no_delay = 1;
+       }
 
-        req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
-        CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
-        aa = ptlrpc_req_async_args(req);
-        aa->aa_oi = oinfo;
+       req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
+       CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+       aa = ptlrpc_req_async_args(req);
+       aa->aa_oi = oinfo;
 
-        ptlrpc_set_add_req(rqset, req);
-        RETURN(0);
+       ptlrpc_set_add_req(rqset, req);
+       RETURN(0);
 }
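The max_age argument is what lets callers accept a cached statfs result instead of forcing a fresh OST_STATFS RPC: it names the oldest timestamp a caller is still willing to accept for cached data. A minimal sketch of that freshness check, using a hypothetical cached_statfs structure and a plain time_t clock rather than the actual obd_statfs caching code:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct cached_statfs {
	time_t cs_time;		/* when the cached result was obtained */
	long   cs_blocks_free;
};

/* Accept the cache only if it was refreshed at or after max_age,
 * i.e. max_age is an absolute "oldest acceptable" timestamp. */
static bool statfs_cache_ok(const struct cached_statfs *cs, time_t max_age)
{
	return cs->cs_time >= max_age;
}

int main(void)
{
	struct cached_statfs cs = {
		.cs_time = time(NULL) - 30,	/* refreshed 30s ago */
		.cs_blocks_free = 123456,
	};
	time_t max_age = time(NULL) - 60;	/* anything newer than 60s is fine */

	printf("use cache: %s\n", statfs_cache_ok(&cs, max_age) ? "yes" : "no");
	return 0;
}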
 
 static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
-                      struct obd_statfs *osfs, __u64 max_age, __u32 flags)
+                     struct obd_statfs *osfs, time64_t max_age, __u32 flags)
 {
-        struct obd_device     *obd = class_exp2obd(exp);
-        struct obd_statfs     *msfs;
-        struct ptlrpc_request *req;
-        struct obd_import     *imp = NULL;
-        int rc;
-        ENTRY;
+       struct obd_device     *obd = class_exp2obd(exp);
+       struct obd_statfs     *msfs;
+       struct ptlrpc_request *req;
+       struct obd_import     *imp = NULL;
+       int rc;
+       ENTRY;
 
         /* Since the request might also come from lprocfs, we need to
          * sync this with client_disconnect_export(); see bug 15684. */
@@ -2421,53 +2766,52 @@ static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
         if (!imp)
                 RETURN(-ENODEV);
 
-        /* We could possibly pass max_age in the request (as an absolute
-         * timestamp or a "seconds.usec ago") so the target can avoid doing
-         * extra calls into the filesystem if that isn't necessary (e.g.
-         * during mount that would help a bit).  Having relative timestamps
-         * is not so great if request processing is slow, while absolute
-         * timestamps are not ideal because they need time synchronization. */
-        req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
-
-        class_import_put(imp);
+       /* We could possibly pass max_age in the request (as an absolute
+        * timestamp or a "seconds.usec ago") so the target can avoid doing
+        * extra calls into the filesystem if that isn't necessary (e.g.
+        * during mount that would help a bit).  Having relative timestamps
+        * is not so great if request processing is slow, while absolute
+        * timestamps are not ideal because they need time synchronization. */
+       req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
 
-        if (req == NULL)
-                RETURN(-ENOMEM);
+       class_import_put(imp);
 
-        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
-        if (rc) {
-                ptlrpc_request_free(req);
-                RETURN(rc);
-        }
-        ptlrpc_request_set_replen(req);
-        req->rq_request_portal = OST_CREATE_PORTAL;
-        ptlrpc_at_set_req_timeout(req);
+       if (req == NULL)
+               RETURN(-ENOMEM);
 
-        if (flags & OBD_STATFS_NODELAY) {
-                /* procfs requests not want stat in wait for avoid deadlock */
-                req->rq_no_resend = 1;
-                req->rq_no_delay = 1;
-        }
+       rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
+       if (rc) {
+               ptlrpc_request_free(req);
+               RETURN(rc);
+       }
+       ptlrpc_request_set_replen(req);
+       req->rq_request_portal = OST_CREATE_PORTAL;
+       ptlrpc_at_set_req_timeout(req);
+
+       if (flags & OBD_STATFS_NODELAY) {
+               /* procfs requests should not wait on statfs to avoid deadlock */
+               req->rq_no_resend = 1;
+               req->rq_no_delay = 1;
+       }
 
-        rc = ptlrpc_queue_wait(req);
-        if (rc)
-                GOTO(out, rc);
+       rc = ptlrpc_queue_wait(req);
+       if (rc)
+               GOTO(out, rc);
 
-        msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
-        if (msfs == NULL) {
-                GOTO(out, rc = -EPROTO);
-        }
+       msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
+       if (msfs == NULL)
+               GOTO(out, rc = -EPROTO);
 
-        *osfs = *msfs;
+       *osfs = *msfs;
 
-        EXIT;
- out:
-        ptlrpc_req_finished(req);
-        return rc;
+       EXIT;
+out:
+       ptlrpc_req_finished(req);
+       return rc;
 }
 
 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
-                         void *karg, void *uarg)
+                        void *karg, void __user *uarg)
 {
         struct obd_device *obd = exp->exp_obd;
         struct obd_ioctl_data *data = karg;
@@ -2490,9 +2834,6 @@ static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
                 err = ptlrpc_set_import_active(obd->u.cli.cl_import,
                                                data->ioc_offset);
                 GOTO(out, err);
-        case OBD_IOC_POLL_QUOTACHECK:
-                err = osc_quota_poll_check(exp, (struct if_quotacheck *)karg);
-                GOTO(out, err);
         case OBD_IOC_PING_TARGET:
                 err = ptlrpc_obd_ping(obd);
                 GOTO(out, err);
@@ -2506,104 +2847,9 @@ out:
        return err;
 }
 
-static int osc_get_info(const struct lu_env *env, struct obd_export *exp,
-                        obd_count keylen, void *key, __u32 *vallen, void *val,
-                        struct lov_stripe_md *lsm)
-{
-        ENTRY;
-        if (!vallen || !val)
-                RETURN(-EFAULT);
-
-       if (KEY_IS(KEY_FIEMAP)) {
-               struct ll_fiemap_info_key *fm_key =
-                               (struct ll_fiemap_info_key *)key;
-               struct ldlm_res_id       res_id;
-               ldlm_policy_data_t       policy;
-               struct lustre_handle     lockh;
-               ldlm_mode_t              mode = 0;
-               struct ptlrpc_request   *req;
-               struct ll_user_fiemap   *reply;
-               char                    *tmp;
-               int                      rc;
-
-               if (!(fm_key->fiemap.fm_flags & FIEMAP_FLAG_SYNC))
-                       goto skip_locking;
-
-               policy.l_extent.start = fm_key->fiemap.fm_start &
-                                               CFS_PAGE_MASK;
-
-               if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <=
-                   fm_key->fiemap.fm_start + PAGE_CACHE_SIZE - 1)
-                       policy.l_extent.end = OBD_OBJECT_EOF;
-               else
-                       policy.l_extent.end = (fm_key->fiemap.fm_start +
-                               fm_key->fiemap.fm_length +
-                               PAGE_CACHE_SIZE - 1) & CFS_PAGE_MASK;
-
-               ostid_build_res_name(&fm_key->oa.o_oi, &res_id);
-               mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
-                                      LDLM_FL_BLOCK_GRANTED |
-                                      LDLM_FL_LVB_READY,
-                                      &res_id, LDLM_EXTENT, &policy,
-                                      LCK_PR | LCK_PW, &lockh, 0);
-               if (mode) { /* lock is cached on client */
-                       if (mode != LCK_PR) {
-                               ldlm_lock_addref(&lockh, LCK_PR);
-                               ldlm_lock_decref(&lockh, LCK_PW);
-                       }
-               } else { /* no cached lock, needs acquire lock on server side */
-                       fm_key->oa.o_valid |= OBD_MD_FLFLAGS;
-                       fm_key->oa.o_flags |= OBD_FL_SRVLOCK;
-               }
-
-skip_locking:
-                req = ptlrpc_request_alloc(class_exp2cliimp(exp),
-                                           &RQF_OST_GET_INFO_FIEMAP);
-                if (req == NULL)
-                       GOTO(drop_lock, rc = -ENOMEM);
-
-                req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY,
-                                     RCL_CLIENT, keylen);
-                req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
-                                     RCL_CLIENT, *vallen);
-                req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
-                                     RCL_SERVER, *vallen);
-
-                rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
-                if (rc) {
-                        ptlrpc_request_free(req);
-                       GOTO(drop_lock, rc);
-                }
-
-                tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
-                memcpy(tmp, key, keylen);
-                tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
-                memcpy(tmp, val, *vallen);
-
-                ptlrpc_request_set_replen(req);
-                rc = ptlrpc_queue_wait(req);
-                if (rc)
-                       GOTO(fini_req, rc);
-
-                reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
-                if (reply == NULL)
-                       GOTO(fini_req, rc = -EPROTO);
-
-                memcpy(val, reply, *vallen);
-fini_req:
-                ptlrpc_req_finished(req);
-drop_lock:
-               if (mode)
-                       ldlm_lock_decref(&lockh, LCK_PR);
-                RETURN(rc);
-        }
-
-        RETURN(-EINVAL);
-}
-
-static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
-                              obd_count keylen, void *key, obd_count vallen,
-                              void *val, struct ptlrpc_request_set *set)
+int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
+                      u32 keylen, void *key, u32 vallen, void *val,
+                      struct ptlrpc_request_set *set)
 {
         struct ptlrpc_request *req;
         struct obd_device     *obd = exp->exp_obd;
@@ -2690,89 +2936,124 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
        tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ?
                                                        &RMF_OST_BODY :
                                                        &RMF_SETINFO_VAL);
-        memcpy(tmp, val, vallen);
+       memcpy(tmp, val, vallen);
 
        if (KEY_IS(KEY_GRANT_SHRINK)) {
-                struct osc_grant_args *aa;
-                struct obdo *oa;
-
-                CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
-                aa = ptlrpc_req_async_args(req);
-                OBDO_ALLOC(oa);
-                if (!oa) {
-                        ptlrpc_req_finished(req);
-                        RETURN(-ENOMEM);
-                }
-                *oa = ((struct ost_body *)val)->oa;
-                aa->aa_oa = oa;
-                req->rq_interpret_reply = osc_shrink_grant_interpret;
-        }
+               struct osc_grant_args *aa;
+               struct obdo *oa;
+
+               CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+               aa = ptlrpc_req_async_args(req);
+               OBDO_ALLOC(oa);
+               if (!oa) {
+                       ptlrpc_req_finished(req);
+                       RETURN(-ENOMEM);
+               }
+               *oa = ((struct ost_body *)val)->oa;
+               aa->aa_oa = oa;
+               req->rq_interpret_reply = osc_shrink_grant_interpret;
+       }
 
-        ptlrpc_request_set_replen(req);
-        if (!KEY_IS(KEY_GRANT_SHRINK)) {
-                LASSERT(set != NULL);
-                ptlrpc_set_add_req(set, req);
-                ptlrpc_check_set(NULL, set);
-        } else
-                ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+       ptlrpc_request_set_replen(req);
+       if (!KEY_IS(KEY_GRANT_SHRINK)) {
+               LASSERT(set != NULL);
+               ptlrpc_set_add_req(set, req);
+               ptlrpc_check_set(NULL, set);
+       } else {
+               ptlrpcd_add_req(req);
+       }
 
-        RETURN(0);
+       RETURN(0);
 }
+EXPORT_SYMBOL(osc_set_info_async);
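The KEY_GRANT_SHRINK branch stashes a copy of the obdo inside the request's async-args area and registers an interpret callback that runs when the reply arrives. A generic sketch of that "private context plus completion callback" pattern, using toy structures rather than the real ptlrpc request and rq_async_args layout:

#include <stdio.h>
#include <stdlib.h>

/* Toy request, loosely modelled on rq_async_args/rq_interpret_reply. */
struct toy_request {
	char  rq_async_args[64];
	int (*rq_interpret_reply)(struct toy_request *req, int rc);
};

struct grant_args {
	long ga_grant;		/* value carried to reply time */
};

static int shrink_grant_interpret(struct toy_request *req, int rc)
{
	struct grant_args *aa = (struct grant_args *)req->rq_async_args;

	printf("reply rc=%d, grant carried in args=%ld\n", rc, aa->ga_grant);
	return rc;
}

int main(void)
{
	struct toy_request *req = calloc(1, sizeof(*req));
	struct grant_args *aa;

	if (req == NULL)
		return 1;

	/* Stash the context in the request, then register the callback,
	 * as osc_set_info_async() does for KEY_GRANT_SHRINK. */
	aa = (struct grant_args *)req->rq_async_args;
	aa->ga_grant = 4096;
	req->rq_interpret_reply = shrink_grant_interpret;

	req->rq_interpret_reply(req, 0);	/* simulate reply completion */
	free(req);
	return 0;
}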
 
-static int osc_reconnect(const struct lu_env *env,
-                         struct obd_export *exp, struct obd_device *obd,
-                         struct obd_uuid *cluuid,
-                         struct obd_connect_data *data,
-                         void *localdata)
+int osc_reconnect(const struct lu_env *env, struct obd_export *exp,
+                 struct obd_device *obd, struct obd_uuid *cluuid,
+                 struct obd_connect_data *data, void *localdata)
 {
-        struct client_obd *cli = &obd->u.cli;
+       struct client_obd *cli = &obd->u.cli;
 
-        if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
-                long lost_grant;
+       if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
+               long lost_grant;
+               long grant;
 
                spin_lock(&cli->cl_loi_list_lock);
-               data->ocd_grant = (cli->cl_avail_grant +
-                                 (cli->cl_dirty_pages << PAGE_CACHE_SHIFT)) ?:
-                                 2 * cli_brw_size(obd);
+               grant = cli->cl_avail_grant + cli->cl_reserved_grant;
+               if (data->ocd_connect_flags & OBD_CONNECT_GRANT_PARAM)
+                       grant += cli->cl_dirty_grant;
+               else
+                       grant += cli->cl_dirty_pages << PAGE_SHIFT;
+               data->ocd_grant = grant ? : 2 * cli_brw_size(obd);
                lost_grant = cli->cl_lost_grant;
                cli->cl_lost_grant = 0;
                spin_unlock(&cli->cl_loi_list_lock);
 
-                CDEBUG(D_RPCTRACE, "ocd_connect_flags: "LPX64" ocd_version: %d"
+               CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d"
                       " ocd_grant: %d, lost: %ld.\n", data->ocd_connect_flags,
                       data->ocd_version, data->ocd_grant, lost_grant);
        }
 
        RETURN(0);
 }
+EXPORT_SYMBOL(osc_reconnect);
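On reconnect the client reports the grant it believes it still holds: available plus reserved grant, plus either byte-accurate dirty grant (when the server supports OBD_CONNECT_GRANT_PARAM) or dirty pages converted to bytes, falling back to twice the BRW size if everything is zero. The same arithmetic as a standalone sketch with made-up numbers:

#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12	/* assume 4 KiB pages */

static long reconnect_grant(long avail, long reserved, long dirty_grant,
			    long dirty_pages, int grant_param, long brw_size)
{
	long grant = avail + reserved;

	if (grant_param)
		grant += dirty_grant;				/* byte-accurate dirty accounting */
	else
		grant += dirty_pages << SKETCH_PAGE_SHIFT;	/* page-granular fallback */

	return grant ? grant : 2 * brw_size;			/* ask for a sane minimum */
}

int main(void)
{
	printf("%ld\n", reconnect_grant(1 << 20, 4096, 0, 16, 0, 1 << 20));
	printf("%ld\n", reconnect_grant(0, 0, 0, 0, 1, 1 << 20));	/* 2 * brw_size */
	return 0;
}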
 
-static int osc_disconnect(struct obd_export *exp)
+int osc_disconnect(struct obd_export *exp)
 {
        struct obd_device *obd = class_exp2obd(exp);
        int rc;
 
-        rc = client_disconnect_export(exp);
-        /**
-         * Initially we put del_shrink_grant before disconnect_export, but it
-         * causes the following problem if setup (connect) and cleanup
-         * (disconnect) are tangled together.
-         *      connect p1                     disconnect p2
-         *   ptlrpc_connect_import
-         *     ...............               class_manual_cleanup
-         *                                     osc_disconnect
-         *                                     del_shrink_grant
-         *   ptlrpc_connect_interrupt
-         *     init_grant_shrink
-         *   add this client to shrink list
-         *                                      cleanup_osc
-         * Bang! pinger trigger the shrink.
-         * So the osc should be disconnected from the shrink list, after we
-         * are sure the import has been destroyed. BUG18662
-         */
-        if (obd->u.cli.cl_import == NULL)
-                osc_del_shrink_grant(&obd->u.cli);
-        return rc;
+       rc = client_disconnect_export(exp);
+       /**
+        * Initially we put del_shrink_grant before disconnect_export, but it
+        * causes the following problem if setup (connect) and cleanup
+        * (disconnect) are tangled together.
+        *      connect p1                     disconnect p2
+        *   ptlrpc_connect_import
+        *     ...............               class_manual_cleanup
+        *                                     osc_disconnect
+        *                                     del_shrink_grant
+        *   ptlrpc_connect_interrupt
+        *     osc_init_grant
+        *   add this client to shrink list
+        *                                      cleanup_osc
+        * Bang! The grant shrink thread triggers the shrink. BUG18662
+        */
+       osc_del_grant_list(&obd->u.cli);
+       return rc;
+}
+EXPORT_SYMBOL(osc_disconnect);
+
+int osc_ldlm_resource_invalidate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
+                                struct hlist_node *hnode, void *arg)
+{
+       struct lu_env *env = arg;
+       struct ldlm_resource *res = cfs_hash_object(hs, hnode);
+       struct ldlm_lock *lock;
+       struct osc_object *osc = NULL;
+       ENTRY;
+
+       lock_res(res);
+       list_for_each_entry(lock, &res->lr_granted, l_res_link) {
+               if (lock->l_ast_data != NULL && osc == NULL) {
+                       osc = lock->l_ast_data;
+                       cl_object_get(osc2cl(osc));
+               }
+
+               /* clear LDLM_FL_CLEANED flag to make sure it will be canceled
+                * by the 2nd round of ldlm_namespace_clean() call in
+                * osc_import_event(). */
+               ldlm_clear_cleaned(lock);
+       }
+       unlock_res(res);
+
+       if (osc != NULL) {
+               osc_object_invalidate(env, osc);
+               cl_object_put(env, osc2cl(osc));
+       }
+
+       RETURN(0);
 }
+EXPORT_SYMBOL(osc_ldlm_resource_invalidate);
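The callback above clears LDLM_FL_CLEANED on every granted lock so the second ldlm_namespace_cleanup() pass in osc_import_event() will cancel locks the first pass already marked, and it invalidates the first osc object found on each resource. A simplified sketch of the "clear the visited flag so a second sweep picks the item up again" idea, over a plain array instead of the namespace hash:

#include <stdbool.h>
#include <stdio.h>

struct toy_lock {
	bool cleaned;		/* stand-in for LDLM_FL_CLEANED */
	bool cancelled;
};

/* A sweep skips locks that look already cleaned; otherwise it marks
 * them cleaned and cancels them. */
static void sweep(struct toy_lock *locks, int n)
{
	for (int i = 0; i < n; i++) {
		if (locks[i].cleaned)
			continue;
		locks[i].cleaned = true;
		locks[i].cancelled = true;
	}
}

/* Mirror of the invalidate pass: clear the flag so the next sweep
 * reconsiders every lock. */
static void invalidate(struct toy_lock *locks, int n)
{
	for (int i = 0; i < n; i++)
		locks[i].cleaned = false;
}

int main(void)
{
	struct toy_lock locks[2] = { { true, false }, { false, false } };

	sweep(locks, 2);	/* lock 0 is skipped: it looks cleaned */
	invalidate(locks, 2);
	sweep(locks, 2);	/* now both locks are cancelled */

	printf("%d %d\n", locks[0].cancelled, locks[1].cancelled);	/* 1 1 */
	return 0;
}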
 
 static int osc_import_event(struct obd_device *obd,
                             struct obd_import *imp,
@@ -2794,30 +3075,32 @@ static int osc_import_event(struct obd_device *obd,
                 break;
         }
         case IMP_EVENT_INACTIVE: {
-                rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL);
+               rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE);
                 break;
         }
         case IMP_EVENT_INVALIDATE: {
                 struct ldlm_namespace *ns = obd->obd_namespace;
                 struct lu_env         *env;
-                int                    refcheck;
+               __u16                  refcheck;
+
+               ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
 
                 env = cl_env_get(&refcheck);
                 if (!IS_ERR(env)) {
-                        /* Reset grants */
-                        cli = &obd->u.cli;
-                        /* all pages go to failing rpcs due to the invalid
-                         * import */
-                       osc_io_unplug(env, cli, NULL, PDL_POLICY_ROUND);
-
-                        ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
-                        cl_env_put(env, &refcheck);
+                       osc_io_unplug(env, &obd->u.cli, NULL);
+
+                       cfs_hash_for_each_nolock(ns->ns_rs_hash,
+                                                osc_ldlm_resource_invalidate,
+                                                env, 0);
+                       cl_env_put(env, &refcheck);
+
+                       ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
                 } else
                         rc = PTR_ERR(env);
                 break;
         }
         case IMP_EVENT_ACTIVE: {
-                rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL);
+               rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE);
                 break;
         }
         case IMP_EVENT_OCD: {
@@ -2830,15 +3113,15 @@ static int osc_import_event(struct obd_device *obd,
                 if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
                         imp->imp_client->cli_request_portal =OST_REQUEST_PORTAL;
 
-                rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL);
+               rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD);
                 break;
         }
         case IMP_EVENT_DEACTIVATE: {
-                rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE, NULL);
+               rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE);
                 break;
         }
         case IMP_EVENT_ACTIVATE: {
-                rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE, NULL);
+               rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE);
                 break;
         }
         default:
@@ -2874,16 +3157,16 @@ static int brw_queue_work(const struct lu_env *env, void *data)
 
        CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli);
 
-       osc_io_unplug(env, cli, NULL, PDL_POLICY_SAME);
+       osc_io_unplug(env, cli, NULL);
        RETURN(0);
 }
 
-int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
+int osc_setup_common(struct obd_device *obd, struct lustre_cfg *lcfg)
 {
        struct client_obd *cli = &obd->u.cli;
-       struct obd_type   *type;
-       void              *handler;
-       int                rc;
+       void *handler;
+       int rc;
+
        ENTRY;
 
        rc = ptlrpcd_addref();
@@ -2894,9 +3177,10 @@ int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
        if (rc)
                GOTO(out_ptlrpcd, rc);
 
        handler = ptlrpcd_alloc_work(cli->cl_import, brw_queue_work, cli);
        if (IS_ERR(handler))
-               GOTO(out_client_setup, rc = PTR_ERR(handler));
+               GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
        cli->cl_writeback_work = handler;
 
        handler = ptlrpcd_alloc_work(cli->cl_import, lru_queue_work, cli);
@@ -2909,50 +3193,9 @@ int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
                GOTO(out_ptlrpcd_work, rc);
 
        cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
+       osc_update_next_shrink(cli);
 
-#ifdef CONFIG_PROC_FS
-       obd->obd_vars = lprocfs_osc_obd_vars;
-#endif
-       /* If this is true then both client (osc) and server (osp) are on the
-        * same node. The osp layer if loaded first will register the osc proc
-        * directory. In that case this obd_device will be attached its proc
-        * tree to type->typ_procsym instead of obd->obd_type->typ_procroot. */
-       type = class_search_type(LUSTRE_OSP_NAME);
-       if (type && type->typ_procsym) {
-               obd->obd_proc_entry = lprocfs_register(obd->obd_name,
-                                                      type->typ_procsym,
-                                                      obd->obd_vars, obd);
-               if (IS_ERR(obd->obd_proc_entry)) {
-                       rc = PTR_ERR(obd->obd_proc_entry);
-                       CERROR("error %d setting up lprocfs for %s\n", rc,
-                              obd->obd_name);
-                       obd->obd_proc_entry = NULL;
-               }
-       } else {
-               rc = lprocfs_obd_setup(obd);
-       }
-
-       /* If the basic OSC proc tree construction succeeded then
-        * lets do the rest. */
-       if (rc == 0) {
-               lproc_osc_attach_seqstat(obd);
-               sptlrpc_lprocfs_cliobd_attach(obd);
-               ptlrpc_lprocfs_register_obd(obd);
-       }
-
-       /* We need to allocate a few requests more, because
-        * brw_interpret tries to create new requests before freeing
-        * previous ones, Ideally we want to have 2x max_rpcs_in_flight
-        * reserved, but I'm afraid that might be too much wasted RAM
-        * in fact, so 2 is just my guess and still should work. */
-       cli->cl_import->imp_rq_pool =
-               ptlrpc_init_rq_pool(cli->cl_max_rpcs_in_flight + 2,
-                                   OST_MAXREQSIZE,
-                                   ptlrpc_add_rqs_to_pool);
-
-       INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
-       ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
-       RETURN(0);
+       RETURN(rc);
 
 out_ptlrpcd_work:
        if (cli->cl_writeback_work != NULL) {
@@ -2963,66 +3206,107 @@ out_ptlrpcd_work:
                ptlrpcd_destroy_work(cli->cl_lru_work);
                cli->cl_lru_work = NULL;
        }
-out_client_setup:
        client_obd_cleanup(obd);
 out_ptlrpcd:
        ptlrpcd_decref();
        RETURN(rc);
 }
+EXPORT_SYMBOL(osc_setup_common);
 
-static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
+int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
 {
-        int rc = 0;
-        ENTRY;
+       struct client_obd *cli = &obd->u.cli;
+       int                adding;
+       int                added;
+       int                req_count;
+       int                rc;
 
-        switch (stage) {
-        case OBD_CLEANUP_EARLY: {
-                struct obd_import *imp;
-                imp = obd->u.cli.cl_import;
-                CDEBUG(D_HA, "Deactivating import %s\n", obd->obd_name);
-                /* ptlrpc_abort_inflight to stop an mds_lov_synchronize */
-                ptlrpc_deactivate_import(imp);
-               spin_lock(&imp->imp_lock);
-               imp->imp_pingable = 0;
-               spin_unlock(&imp->imp_lock);
-                break;
-        }
-        case OBD_CLEANUP_EXPORTS: {
-                struct client_obd *cli = &obd->u.cli;
-                /* LU-464
-                 * for echo client, export may be on zombie list, wait for
-                 * zombie thread to cull it, because cli.cl_import will be
-                 * cleared in client_disconnect_export():
-                 *   class_export_destroy() -> obd_cleanup() ->
-                 *   echo_device_free() -> echo_client_cleanup() ->
-                 *   obd_disconnect() -> osc_disconnect() ->
-                 *   client_disconnect_export()
-                 */
-                obd_zombie_barrier();
-                if (cli->cl_writeback_work) {
-                        ptlrpcd_destroy_work(cli->cl_writeback_work);
-                        cli->cl_writeback_work = NULL;
-                }
-               if (cli->cl_lru_work) {
-                       ptlrpcd_destroy_work(cli->cl_lru_work);
-                       cli->cl_lru_work = NULL;
-               }
-                obd_cleanup_client_import(obd);
-                ptlrpc_lprocfs_unregister_obd(obd);
-                lprocfs_obd_cleanup(obd);
-                break;
-                }
-        }
-        RETURN(rc);
+       ENTRY;
+
+       rc = osc_setup_common(obd, lcfg);
+       if (rc < 0)
+               RETURN(rc);
+
+       rc = osc_tunables_init(obd);
+       if (rc)
+               RETURN(rc);
+
+       /*
+        * We try to control the total number of requests with an upper limit,
+        * osc_reqpool_maxreqcount. There might be a race that causes an
+        * over-limit allocation, but that is fine.
+        */
+       req_count = atomic_read(&osc_pool_req_count);
+       if (req_count < osc_reqpool_maxreqcount) {
+               adding = cli->cl_max_rpcs_in_flight + 2;
+               if (req_count + adding > osc_reqpool_maxreqcount)
+                       adding = osc_reqpool_maxreqcount - req_count;
+
+               added = ptlrpc_add_rqs_to_pool(osc_rq_pool, adding);
+               atomic_add(added, &osc_pool_req_count);
+       }
+
+       ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
+
+       spin_lock(&osc_shrink_lock);
+       list_add_tail(&cli->cl_shrink_list, &osc_shrink_list);
+       spin_unlock(&osc_shrink_lock);
+       cli->cl_import->imp_idle_timeout = osc_idle_timeout;
+
+       RETURN(0);
+}
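Each OSC tops up the shared request pool by cl_max_rpcs_in_flight + 2 requests but never plans to exceed the global cap osc_reqpool_maxreqcount; the race mentioned in the comment can overshoot slightly, which is tolerated. A standalone sketch of the clamping arithmetic, with local stand-ins for the pool counter and the cap:

#include <stdio.h>

static int pool_req_count;			/* stand-in for osc_pool_req_count */
static const int pool_maxreqcount = 64;		/* stand-in for osc_reqpool_maxreqcount */

/* Add up to max_rpcs_in_flight + 2 requests, clamped at the cap;
 * returns how many were added (assuming the allocation succeeds). */
static int top_up_pool(int max_rpcs_in_flight)
{
	int adding, added = 0;

	if (pool_req_count < pool_maxreqcount) {
		adding = max_rpcs_in_flight + 2;
		if (pool_req_count + adding > pool_maxreqcount)
			adding = pool_maxreqcount - pool_req_count;
		added = adding;
		pool_req_count += added;
	}
	return added;
}

int main(void)
{
	printf("%d\n", top_up_pool(8));		/* 10 */
	printf("%d\n", top_up_pool(60));	/* clamped to 54 */
	printf("%d\n", top_up_pool(8));		/* 0: already at the cap */
	return 0;
}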
+
+int osc_precleanup_common(struct obd_device *obd)
+{
+       struct client_obd *cli = &obd->u.cli;
+       ENTRY;
+
+       /* LU-464
+        * for echo client, export may be on zombie list, wait for
+        * zombie thread to cull it, because cli.cl_import will be
+        * cleared in client_disconnect_export():
+        *   class_export_destroy() -> obd_cleanup() ->
+        *   echo_device_free() -> echo_client_cleanup() ->
+        *   obd_disconnect() -> osc_disconnect() ->
+        *   client_disconnect_export()
+        */
+       obd_zombie_barrier();
+       if (cli->cl_writeback_work) {
+               ptlrpcd_destroy_work(cli->cl_writeback_work);
+               cli->cl_writeback_work = NULL;
+       }
+
+       if (cli->cl_lru_work) {
+               ptlrpcd_destroy_work(cli->cl_lru_work);
+               cli->cl_lru_work = NULL;
+       }
+
+       obd_cleanup_client_import(obd);
+       RETURN(0);
+}
+EXPORT_SYMBOL(osc_precleanup_common);
+
+static int osc_precleanup(struct obd_device *obd)
+{
+       ENTRY;
+
+       osc_precleanup_common(obd);
+
+       ptlrpc_lprocfs_unregister_obd(obd);
+       RETURN(0);
 }
 
-int osc_cleanup(struct obd_device *obd)
+int osc_cleanup_common(struct obd_device *obd)
 {
        struct client_obd *cli = &obd->u.cli;
        int rc;
 
        ENTRY;
 
+       spin_lock(&osc_shrink_lock);
+       list_del(&cli->cl_shrink_list);
+       spin_unlock(&osc_shrink_lock);
+
        /* lru cleanup */
        if (cli->cl_cache != NULL) {
                LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
@@ -3034,19 +3318,21 @@ int osc_cleanup(struct obd_device *obd)
                cli->cl_cache = NULL;
        }
 
-        /* free memory of osc quota cache */
-        osc_quota_cleanup(obd);
+       /* free memory of osc quota cache */
+       osc_quota_cleanup(obd);
 
-        rc = client_obd_cleanup(obd);
+       rc = client_obd_cleanup(obd);
 
-        ptlrpcd_decref();
-        RETURN(rc);
+       ptlrpcd_decref();
+       RETURN(rc);
 }
+EXPORT_SYMBOL(osc_cleanup_common);
 
 int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg)
 {
-       int rc = class_process_proc_param(PARAM_OSC, obd->obd_vars, lcfg, obd);
-       return rc > 0 ? 0: rc;
+       ssize_t count  = class_modify_config(lcfg, PARAM_OSC,
+                                            &obd->obd_kset.kobj);
+       return count > 0 ? 0 : count;
 }
 
 static int osc_process_config(struct obd_device *obd, size_t len, void *buf)
@@ -3058,7 +3344,7 @@ static struct obd_ops osc_obd_ops = {
         .o_owner                = THIS_MODULE,
         .o_setup                = osc_setup,
         .o_precleanup           = osc_precleanup,
-        .o_cleanup              = osc_cleanup,
+       .o_cleanup              = osc_cleanup_common,
         .o_add_conn             = client_import_add_conn,
         .o_del_conn             = client_import_del_conn,
         .o_connect              = client_connect_import,
@@ -3069,31 +3355,52 @@ static struct obd_ops osc_obd_ops = {
         .o_create               = osc_create,
         .o_destroy              = osc_destroy,
         .o_getattr              = osc_getattr,
-        .o_getattr_async        = osc_getattr_async,
         .o_setattr              = osc_setattr,
-        .o_setattr_async        = osc_setattr_async,
         .o_iocontrol            = osc_iocontrol,
-        .o_get_info             = osc_get_info,
         .o_set_info_async       = osc_set_info_async,
         .o_import_event         = osc_import_event,
         .o_process_config       = osc_process_config,
         .o_quotactl             = osc_quotactl,
-        .o_quotacheck           = osc_quotacheck,
 };
 
+static struct shrinker *osc_cache_shrinker;
+struct list_head osc_shrink_list = LIST_HEAD_INIT(osc_shrink_list);
+DEFINE_SPINLOCK(osc_shrink_lock);
+
+#ifndef HAVE_SHRINKER_COUNT
+static int osc_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+{
+       struct shrink_control scv = {
+               .nr_to_scan = shrink_param(sc, nr_to_scan),
+               .gfp_mask   = shrink_param(sc, gfp_mask)
+       };
+#if !defined(HAVE_SHRINKER_WANT_SHRINK_PTR) && !defined(HAVE_SHRINK_CONTROL)
+       struct shrinker *shrinker = NULL;
+#endif
+
+       (void)osc_cache_shrink_scan(shrinker, &scv);
+
+       return osc_cache_shrink_count(shrinker, &scv);
+}
+#endif
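Newer kernels split cache shrinkers into a count callback (how much is reclaimable) and a scan callback (reclaim up to a requested amount); the wrapper above folds both into the single legacy entry point for kernels without HAVE_SHRINKER_COUNT. A compact sketch of that count/scan split over a plain counter, not the kernel shrinker API:

#include <stdio.h>

static long cached_objects = 100;

static long cache_shrink_count(void)
{
	return cached_objects;			/* reclaimable objects */
}

static long cache_shrink_scan(long nr_to_scan)
{
	long freed = nr_to_scan < cached_objects ? nr_to_scan : cached_objects;

	cached_objects -= freed;
	return freed;				/* how many were reclaimed */
}

/* Legacy single-entry-point style, mirroring the compatibility wrapper:
 * scan first, then report the remaining count. */
static long cache_shrink_legacy(long nr_to_scan)
{
	(void)cache_shrink_scan(nr_to_scan);
	return cache_shrink_count();
}

int main(void)
{
	printf("%ld\n", cache_shrink_legacy(30));	/* 70 left */
	printf("%ld\n", cache_shrink_legacy(90));	/* 0 left */
	return 0;
}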
+
 static int __init osc_init(void)
 {
        bool enable_proc = true;
        struct obd_type *type;
+       unsigned int reqpool_size;
+       unsigned int reqsize;
        int rc;
+       DEF_SHRINKER_VAR(osc_shvar, osc_cache_shrink,
+                        osc_cache_shrink_count, osc_cache_shrink_scan);
        ENTRY;
 
-        /* print an address of _any_ initialized kernel symbol from this
-         * module, to allow debugging with gdb that doesn't support data
-         * symbols from modules.*/
-        CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
+       /* print an address of _any_ initialized kernel symbol from this
+        * module, to allow debugging with gdb that doesn't support data
+        * symbols from modules.*/
+       CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
 
-        rc = lu_kmem_init(osc_caches);
+       rc = lu_kmem_init(osc_caches);
        if (rc)
                RETURN(rc);
 
@@ -3103,22 +3410,65 @@ static int __init osc_init(void)
 
        rc = class_register_type(&osc_obd_ops, NULL, enable_proc, NULL,
                                 LUSTRE_OSC_NAME, &osc_device_type);
-        if (rc) {
-                lu_kmem_fini(osc_caches);
-                RETURN(rc);
-        }
+       if (rc)
+               GOTO(out_kmem, rc);
+
+       osc_cache_shrinker = set_shrinker(DEFAULT_SEEKS, &osc_shvar);
+
+       /* This is obviously too much memory; we only prevent overflow here */
+       if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0)
+               GOTO(out_type, rc = -EINVAL);
+
+       reqpool_size = osc_reqpool_mem_max << 20;
+
+       reqsize = 1;
+       while (reqsize < OST_IO_MAXREQSIZE)
+               reqsize = reqsize << 1;
+
+       /*
+        * We don't enlarge the request count in the OSC pool according to
+        * cl_max_rpcs_in_flight. Allocation from the pool is only tried
+        * after a normal allocation has failed, so a small OSC pool won't
+        * cause much performance degradation in most cases.
+        */
+       osc_reqpool_maxreqcount = reqpool_size / reqsize;
+
+       atomic_set(&osc_pool_req_count, 0);
+       osc_rq_pool = ptlrpc_init_rq_pool(0, OST_IO_MAXREQSIZE,
+                                         ptlrpc_add_rqs_to_pool);
+
+       if (osc_rq_pool == NULL)
+               GOTO(out_type, rc = -ENOMEM);
+
+       rc = osc_start_grant_work();
+       if (rc != 0)
+               GOTO(out_req_pool, rc);
+
+       RETURN(rc);
+
+out_req_pool:
+       ptlrpc_free_rq_pool(osc_rq_pool);
+out_type:
+       class_unregister_type(LUSTRE_OSC_NAME);
+out_kmem:
+       lu_kmem_fini(osc_caches);
 
        RETURN(rc);
 }
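osc_init() converts the osc_reqpool_mem_max module parameter (in MB) into a request count: round the maximum I/O request size up to the next power of two, then divide the byte budget by it. The arithmetic as a standalone sketch; the request size used here is a made-up value, not the real OST_IO_MAXREQSIZE:

#include <stdio.h>

static const unsigned int sketch_io_maxreqsize = 66000;	/* made-up value */

/* mem_max is in MB, like the osc_reqpool_mem_max module parameter. */
static unsigned int reqpool_maxreqcount(unsigned int mem_max)
{
	unsigned int reqpool_size = mem_max << 20;	/* MB -> bytes */
	unsigned int reqsize = 1;

	/* Round the request size up to the next power of two, as
	 * osc_init() does, so the division is conservative. */
	while (reqsize < sketch_io_maxreqsize)
		reqsize <<= 1;

	return reqpool_size / reqsize;
}

int main(void)
{
	/* Default of 5 MB with ~64 KiB requests rounded up to 128 KiB. */
	printf("%u\n", reqpool_maxreqcount(5));	/* 40 */
	return 0;
}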
 
-static void /*__exit*/ osc_exit(void)
+static void __exit osc_exit(void)
 {
+       osc_stop_grant_work();
+       remove_shrinker(osc_cache_shrinker);
        class_unregister_type(LUSTRE_OSC_NAME);
        lu_kmem_fini(osc_caches);
+       ptlrpc_free_rq_pool(osc_rq_pool);
 }
 
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
 MODULE_LICENSE("GPL");
 
-cfs_module(osc, LUSTRE_VERSION_STRING, osc_init, osc_exit);
+module_init(osc_init);
+module_exit(osc_exit);