Whamcloud - gitweb
LU-12275 sec: decryption for read path
[fs/lustre-release.git] / lustre / osc / osc_request.c
index 96a077b..cb53cc6 100644 (file)
@@ -33,6 +33,8 @@
 #define DEBUG_SUBSYSTEM S_OSC
 
 #include <linux/workqueue.h>
+#include <libcfs/libcfs.h>
+#include <linux/falloc.h>
 #include <lprocfs_status.h>
 #include <lustre_debug.h>
 #include <lustre_dlm.h>
@@ -45,6 +47,7 @@
 #include <obd_cksum.h>
 #include <obd_class.h>
 #include <lustre_osc.h>
+#include <linux/falloc.h>
 
 #include "osc_internal.h"
 
@@ -236,10 +239,7 @@ int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
                sa->sa_upcall = upcall;
                sa->sa_cookie = cookie;
 
-               if (rqset == PTLRPCD_SET)
-                       ptlrpcd_add_req(req);
-               else
-                       ptlrpc_set_add_req(rqset, req);
+               ptlrpc_set_add_req(rqset, req);
        }
 
        RETURN(0);
@@ -324,10 +324,7 @@ int osc_ladvise_base(struct obd_export *exp, struct obdo *oa,
        la->la_upcall = upcall;
        la->la_cookie = cookie;
 
-       if (rqset == PTLRPCD_SET)
-               ptlrpcd_add_req(req);
-       else
-               ptlrpc_set_add_req(rqset, req);
+       ptlrpc_set_add_req(rqset, req);
 
        RETURN(0);
 }
@@ -426,6 +423,71 @@ int osc_punch_send(struct obd_export *exp, struct obdo *oa,
 }
 EXPORT_SYMBOL(osc_punch_send);
 
+/**
+ * osc_fallocate_base() - Handles fallocate request.
+ *
+ * @exp:	Export structure
+ * @oa:		Attributes passed to OSS from client (obdo structure)
+ * @upcall:	Completion callback invoked when the request finishes
+ * @cookie:	Opaque caller data passed through to @upcall
+ * @mode:	Operation done on given range
+ *
+ *
+ * Handles fallocate requests only. Only block allocation or the standard
+ * preallocate operation (mode 0, optionally with FALLOC_FL_KEEP_SIZE) is
+ * supported currently. Other mode flags are not supported yet.
+ * ftruncate(2) or truncate(2) is supported via a SETATTR request.
+ *
+ * Return: Non-zero on failure and 0 on success.
+ */
+int osc_fallocate_base(struct obd_export *exp, struct obdo *oa,
+                      obd_enqueue_update_f upcall, void *cookie, int mode)
+{
+       struct ptlrpc_request *req;
+       struct osc_setattr_args *sa;
+       struct ost_body *body;
+       struct obd_import *imp = class_exp2cliimp(exp);
+       int rc;
+       ENTRY;
+
+       /*
+        * Only standard preallocate (mode == 0, possibly combined with
+        * FALLOC_FL_KEEP_SIZE) is supported now. Punch is not supported yet.
+        */
+       if (mode & ~FALLOC_FL_KEEP_SIZE)
+               RETURN(-EOPNOTSUPP);
+       oa->o_falloc_mode = mode;
+
+       req = ptlrpc_request_alloc(class_exp2cliimp(exp),
+                                  &RQF_OST_FALLOCATE);
+       if (req == NULL)
+               RETURN(-ENOMEM);
+
+       rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_FALLOCATE);
+       if (rc != 0) {
+               ptlrpc_request_free(req);
+               RETURN(rc);
+       }
+
+       body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
+       LASSERT(body);
+
+       lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);
+
+       ptlrpc_request_set_replen(req);
+
+       req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
+       BUILD_BUG_ON(sizeof(*sa) > sizeof(req->rq_async_args));
+       sa = ptlrpc_req_async_args(sa, req);
+       sa->sa_oa = oa;
+       sa->sa_upcall = upcall;
+       sa->sa_cookie = cookie;
+
+       ptlrpcd_add_req(req);
+
+       RETURN(0);
+}
+
 static int osc_sync_interpret(const struct lu_env *env,
                              struct ptlrpc_request *req, void *args, int rc)
 {
@@ -499,10 +561,7 @@ int osc_sync_base(struct osc_object *obj, struct obdo *oa,
        fa->fa_upcall = upcall;
        fa->fa_cookie = cookie;
 
-       if (rqset == PTLRPCD_SET)
-               ptlrpcd_add_req(req);
-       else
-               ptlrpc_set_add_req(rqset, req);
+       ptlrpc_set_add_req(rqset, req);
 
        RETURN (0);
 }
@@ -577,7 +636,7 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
         struct client_obd     *cli = &exp->exp_obd->u.cli;
         struct ptlrpc_request *req;
         struct ost_body       *body;
-       struct list_head       cancels = LIST_HEAD_INIT(cancels);
+       LIST_HEAD(cancels);
         int rc, count;
         ENTRY;
 
@@ -613,17 +672,16 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
 
        req->rq_interpret_reply = osc_destroy_interpret;
        if (!osc_can_send_destroy(cli)) {
-               struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
-
                /*
                 * Wait until the number of on-going destroy RPCs drops
                 * under max_rpc_in_flight
                 */
-               rc = l_wait_event_exclusive(cli->cl_destroy_waitq,
-                                           osc_can_send_destroy(cli), &lwi);
+               rc = l_wait_event_abortable_exclusive(
+                       cli->cl_destroy_waitq,
+                       osc_can_send_destroy(cli));
                if (rc) {
                        ptlrpc_req_finished(req);
-                       RETURN(rc);
+                       RETURN(-EINTR);
                }
        }
 
@@ -645,21 +703,18 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
                oa->o_dirty = cli->cl_dirty_grant;
        else
                oa->o_dirty = cli->cl_dirty_pages << PAGE_SHIFT;
-       if (unlikely(cli->cl_dirty_pages - cli->cl_dirty_transit >
-                    cli->cl_dirty_max_pages)) {
-               CERROR("dirty %lu - %lu > dirty_max %lu\n",
-                      cli->cl_dirty_pages, cli->cl_dirty_transit,
+       if (unlikely(cli->cl_dirty_pages > cli->cl_dirty_max_pages)) {
+               CERROR("dirty %lu > dirty_max %lu\n",
+                      cli->cl_dirty_pages,
                       cli->cl_dirty_max_pages);
                oa->o_undirty = 0;
-       } else if (unlikely(atomic_long_read(&obd_dirty_pages) -
-                           atomic_long_read(&obd_dirty_transit_pages) >
+       } else if (unlikely(atomic_long_read(&obd_dirty_pages) >
                            (long)(obd_max_dirty_pages + 1))) {
                /* The atomic_read() allowing the atomic_inc() are
                 * not covered by a lock thus they may safely race and trip
                 * this CERROR() unless we add in a small fudge factor (+1). */
-               CERROR("%s: dirty %ld - %ld > system dirty_max %ld\n",
+               CERROR("%s: dirty %ld > system dirty_max %ld\n",
                       cli_name(cli), atomic_long_read(&obd_dirty_pages),
-                      atomic_long_read(&obd_dirty_transit_pages),
                       obd_max_dirty_pages);
                oa->o_undirty = 0;
        } else if (unlikely(cli->cl_dirty_max_pages - cli->cl_dirty_pages >
@@ -845,9 +900,11 @@ static int osc_should_shrink_grant(struct client_obd *client)
        if (client->cl_import == NULL)
                return 0;
 
-        if ((client->cl_import->imp_connect_data.ocd_connect_flags &
-             OBD_CONNECT_GRANT_SHRINK) == 0)
-                return 0;
+       if (!OCD_HAS_FLAG(&client->cl_import->imp_connect_data, GRANT_SHRINK) ||
+           client->cl_import->imp_grant_shrink_disabled) {
+               osc_update_next_shrink(client);
+               return 0;
+       }
 
        if (ktime_get_seconds() >= next_shrink - 5) {
                /* Get the current RPC size directly, instead of going via:
@@ -999,11 +1056,11 @@ void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
        }
        spin_unlock(&cli->cl_loi_list_lock);
 
-       CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld."
-               "chunk bits: %d cl_max_extent_pages: %d\n",
-               cli_name(cli),
-               cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits,
-               cli->cl_max_extent_pages);
+       CDEBUG(D_CACHE,
+              "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld. chunk bits: %d cl_max_extent_pages: %d\n",
+              cli_name(cli),
+              cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits,
+              cli->cl_max_extent_pages);
 
        if (OCD_HAS_FLAG(ocd, GRANT_SHRINK) && list_empty(&cli->cl_grant_chain))
                osc_add_grant_list(cli);
@@ -1290,28 +1347,50 @@ static int osc_checksum_bulk_rw(const char *obd_name,
        RETURN(rc);
 }
 
+static inline void osc_release_bounce_pages(struct brw_page **pga,
+                                           u32 page_count)
+{
+#ifdef HAVE_LUSTRE_CRYPTO
+       int i;
+
+       for (i = 0; i < page_count; i++) {
+               if (pga[i]->pg->mapping)
+                       /* bounce pages are unmapped */
+                       continue;
+               if (pga[i]->flag & OBD_BRW_SYNC)
+                       /* sync transfer cannot have encrypted pages */
+                       continue;
+               llcrypt_finalize_bounce_page(&pga[i]->pg);
+               pga[i]->count -= pga[i]->bp_count_diff;
+               pga[i]->off += pga[i]->bp_off_diff;
+       }
+#endif
+}
+
 static int
 osc_brw_prep_request(int cmd, struct client_obd *cli, struct obdo *oa,
                     u32 page_count, struct brw_page **pga,
                     struct ptlrpc_request **reqp, int resend)
 {
-        struct ptlrpc_request   *req;
-        struct ptlrpc_bulk_desc *desc;
-        struct ost_body         *body;
-        struct obd_ioobj        *ioobj;
-        struct niobuf_remote    *niobuf;
+       struct ptlrpc_request *req;
+       struct ptlrpc_bulk_desc *desc;
+       struct ost_body *body;
+       struct obd_ioobj *ioobj;
+       struct niobuf_remote *niobuf;
        int niocount, i, requested_nob, opc, rc, short_io_size = 0;
-        struct osc_brw_async_args *aa;
-        struct req_capsule      *pill;
-        struct brw_page *pg_prev;
+       struct osc_brw_async_args *aa;
+       struct req_capsule *pill;
+       struct brw_page *pg_prev;
        void *short_io_buf;
        const char *obd_name = cli->cl_import->imp_obd->obd_name;
+       struct inode *inode;
 
-        ENTRY;
-        if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
-                RETURN(-ENOMEM); /* Recoverable */
-        if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
-                RETURN(-EINVAL); /* Fatal */
+       ENTRY;
+       inode = page2inode(pga[0]->pg);
+       if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
+               RETURN(-ENOMEM); /* Recoverable */
+       if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
+               RETURN(-EINVAL); /* Fatal */
 
        if ((cmd & OBD_BRW_WRITE) != 0) {
                opc = OST_WRITE;
@@ -1325,6 +1404,51 @@ osc_brw_prep_request(int cmd, struct client_obd *cli, struct obdo *oa,
         if (req == NULL)
                 RETURN(-ENOMEM);
 
+       if (opc == OST_WRITE && inode && IS_ENCRYPTED(inode)) {
+               for (i = 0; i < page_count; i++) {
+                       struct brw_page *pg = pga[i];
+                       struct page *data_page = NULL;
+                       bool retried = false;
+                       bool lockedbymyself;
+
+retry_encrypt:
+                       /* The page can already be locked when we arrive here.
+                        * This is possible when cl_page_assume/vvp_page_assume
+                        * is stuck on wait_on_page_writeback with page lock
+                        * held. In this case there is no risk for the lock to
+                        * be released while we are doing our encryption
+                        * processing, because writeback against that page will
+                        * end in vvp_page_completion_write/cl_page_completion,
+                        * which means only once the page is fully processed.
+                        */
+                       lockedbymyself = trylock_page(pg->pg);
+                       data_page =
+                               llcrypt_encrypt_pagecache_blocks(pg->pg,
+                                                                PAGE_SIZE, 0,
+                                                                GFP_NOFS);
+                       if (lockedbymyself)
+                               unlock_page(pg->pg);
+                       if (IS_ERR(data_page)) {
+                               rc = PTR_ERR(data_page);
+                               if (rc == -ENOMEM && !retried) {
+                                       retried = true;
+                                       rc = 0;
+                                       goto retry_encrypt;
+                               }
+                               ptlrpc_request_free(req);
+                               RETURN(rc);
+                       }
+                       /* len is forced to PAGE_SIZE, and poff to 0
+                        * so store the old, clear text info
+                        */
+                       pg->pg = data_page;
+                       pg->bp_count_diff = PAGE_SIZE - pg->count;
+                       pg->count = PAGE_SIZE;
+                       pg->bp_off_diff = pg->off & ~PAGE_MASK;
+                       pg->off = pg->off & PAGE_MASK;
+               }
+       }
+
         for (niocount = i = 1; i < page_count; i++) {
                 if (!can_merge_pages(pga[i - 1], pga[i]))
                         niocount++;
@@ -1371,8 +1495,7 @@ osc_brw_prep_request(int cmd, struct client_obd *cli, struct obdo *oa,
        desc = ptlrpc_prep_bulk_imp(req, page_count,
                cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
                (opc == OST_WRITE ? PTLRPC_BULK_GET_SOURCE :
-                       PTLRPC_BULK_PUT_SINK) |
-                       PTLRPC_BULK_BUF_KIOV,
+                       PTLRPC_BULK_PUT_SINK),
                OST_BULK_PORTAL,
                &ptlrpc_bulk_kiov_pin_ops);
 
@@ -1448,13 +1571,13 @@ no_bulk:
                 LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
                         (pg->flag & OBD_BRW_SRVLOCK));
                if (short_io_size != 0 && opc == OST_WRITE) {
-                       unsigned char *ptr = ll_kmap_atomic(pg->pg, KM_USER0);
+                       unsigned char *ptr = kmap_atomic(pg->pg);
 
                        LASSERT(short_io_size >= requested_nob + pg->count);
                        memcpy(short_io_buf + requested_nob,
                               ptr + poff,
                               pg->count);
-                       ll_kunmap_atomic(ptr, KM_USER0);
+                       kunmap_atomic(ptr);
                } else if (short_io_size == 0) {
                        desc->bd_frag_ops->add_kiov_frag(desc, pg->pg, poff,
                                                         pg->count);
@@ -1628,7 +1751,6 @@ static void dump_all_bulk_pages(struct obdo *oa, __u32 page_count,
        if (rc)
                CERROR("%s: sync returns %d\n", dbgcksum_file_name, rc);
        filp_close(filp, NULL);
-       return;
 }
 
 static int
@@ -1728,6 +1850,7 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
                &req->rq_import->imp_connection->c_peer;
        struct ost_body *body;
        u32 client_cksum = 0;
+       struct inode *inode;
 
        ENTRY;
 
@@ -1829,10 +1952,10 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
 
                        CDEBUG(D_CACHE, "page %p count %d\n",
                               aa->aa_ppga[i]->pg, count);
-                       ptr = ll_kmap_atomic(aa->aa_ppga[i]->pg, KM_USER0);
+                       ptr = kmap_atomic(aa->aa_ppga[i]->pg);
                        memcpy(ptr + (aa->aa_ppga[i]->off & ~PAGE_MASK), buf,
                               count);
-                       ll_kunmap_atomic((void *) ptr, KM_USER0);
+                       kunmap_atomic((void *) ptr);
 
                        buf += count;
                        nob -= count;
@@ -1915,6 +2038,36 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
        } else {
                rc = 0;
        }
+
+       inode = page2inode(aa->aa_ppga[0]->pg);
+       if (inode && IS_ENCRYPTED(inode)) {
+               int idx;
+
+               if (!llcrypt_has_encryption_key(inode)) {
+                       CDEBUG(D_SEC, "no enc key for ino %lu\n", inode->i_ino);
+                       GOTO(out, rc);
+               }
+               for (idx = 0; idx < aa->aa_page_count; idx++) {
+                       struct brw_page *pg = aa->aa_ppga[idx];
+                       __u64 *p, *q;
+
+                       /* do not decrypt if page is all 0s */
+                       p = q = page_address(pg->pg);
+                       while (p - q < PAGE_SIZE / sizeof(*p)) {
+                               if (*p != 0)
+                                       break;
+                               p++;
+                       }
+                       if (p - q == PAGE_SIZE / sizeof(*p))
+                               continue;
+
+                       rc = llcrypt_decrypt_pagecache_blocks(pg->pg,
+                                                             PAGE_SIZE, 0);
+                       if (rc)
+                               GOTO(out, rc);
+               }
+       }
+
 out:
        if (rc >= 0)
                lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
@@ -1943,16 +2096,12 @@ static int osc_brw_redo_request(struct ptlrpc_request *request,
                 RETURN(rc);
 
        list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
-                if (oap->oap_request != NULL) {
-                        LASSERTF(request == oap->oap_request,
-                                 "request %p != oap_request %p\n",
-                                 request, oap->oap_request);
-                        if (oap->oap_interrupted) {
-                                ptlrpc_req_finished(new_req);
-                                RETURN(-EINTR);
-                        }
-                }
-        }
+               if (oap->oap_request != NULL) {
+                       LASSERTF(request == oap->oap_request,
+                                "request %p != oap_request %p\n",
+                                request, oap->oap_request);
+               }
+       }
        /*
         * New request takes over pga and oaps from old request.
         * Note that copying a list_head doesn't work, need to move it...
@@ -2028,8 +2177,8 @@ static void sort_brw_pages(struct brw_page **array, int num)
 
 static void osc_release_ppga(struct brw_page **ppga, size_t count)
 {
-        LASSERT(ppga != NULL);
-        OBD_FREE(ppga, sizeof(*ppga) * count);
+       LASSERT(ppga != NULL);
+       OBD_FREE_PTR_ARRAY(ppga, count);
 }
 
 static int brw_interpret(const struct lu_env *env,
@@ -2045,6 +2194,10 @@ static int brw_interpret(const struct lu_env *env,
 
        rc = osc_brw_fini_request(req, rc);
        CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
+
+       /* restore clear text pages */
+       osc_release_bounce_pages(aa->aa_ppga, aa->aa_page_count);
+
        /*
         * When server returns -EINPROGRESS, client should always retry
         * regardless of the number of times the bulk was resent already.
@@ -2195,17 +2348,17 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
        struct cl_req_attr              *crattr = NULL;
        loff_t                          starting_offset = OBD_OBJECT_EOF;
        loff_t                          ending_offset = 0;
-       int                             mpflag = 0;
+       /* '1' for consistency with code that checks !mpflag to restore */
+       int mpflag = 1;
        int                             mem_tight = 0;
        int                             page_count = 0;
        bool                            soft_sync = false;
-       bool                            interrupted = false;
        bool                            ndelay = false;
        int                             i;
        int                             grant = 0;
        int                             rc;
        __u32                           layout_version = 0;
-       struct list_head                rpc_list = LIST_HEAD_INIT(rpc_list);
+       LIST_HEAD(rpc_list);
        struct ost_body                 *body;
        ENTRY;
        LASSERT(!list_empty(ext_list));
@@ -2216,16 +2369,16 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
                mem_tight |= ext->oe_memalloc;
                grant += ext->oe_grants;
                page_count += ext->oe_nr_pages;
-               layout_version = MAX(layout_version, ext->oe_layout_version);
+               layout_version = max(layout_version, ext->oe_layout_version);
                if (obj == NULL)
                        obj = ext->oe_obj;
        }
 
        soft_sync = osc_over_unstable_soft_limit(cli);
        if (mem_tight)
-               mpflag = cfs_memory_pressure_get_and_set();
+               mpflag = memalloc_noreclaim_save();
 
-       OBD_ALLOC(pga, sizeof(*pga) * page_count);
+       OBD_ALLOC_PTR_ARRAY(pga, page_count);
        if (pga == NULL)
                GOTO(out, rc = -ENOMEM);
 
@@ -2256,8 +2409,6 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
                        else
                                LASSERT(oap->oap_page_off + oap->oap_count ==
                                        PAGE_SIZE);
-                       if (oap->oap_interrupted)
-                               interrupted = true;
                }
                if (ext->oe_ndelay)
                        ndelay = true;
@@ -2296,8 +2447,6 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
        req->rq_interpret_reply = brw_interpret;
        req->rq_memalloc = mem_tight != 0;
        oap->oap_request = ptlrpc_request_addref(req);
-       if (interrupted && !req->rq_intr)
-               ptlrpc_mark_interrupted(req);
        if (ndelay) {
                req->rq_no_resend = req->rq_no_delay = 1;
                /* probably set a shorter timeout value.
@@ -2349,16 +2498,18 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
        EXIT;
 
 out:
-       if (mem_tight != 0)
-               cfs_memory_pressure_restore(mpflag);
+       if (mem_tight)
+               memalloc_noreclaim_restore(mpflag);
 
        if (rc != 0) {
                LASSERT(req == NULL);
 
                if (oa)
                        OBD_SLAB_FREE_PTR(oa, osc_obdo_kmem);
-               if (pga)
-                       OBD_FREE(pga, sizeof(*pga) * page_count);
+               if (pga) {
+                       osc_release_bounce_pages(pga, page_count);
+                       osc_release_ppga(pga, page_count);
+               }
                /* this should happen rarely and is pretty bad, it makes the
                 * pending list not follow the dirty order */
                while (!list_empty(ext_list)) {
@@ -2480,8 +2631,6 @@ int osc_enqueue_interpret(const struct lu_env *env, struct ptlrpc_request *req,
        RETURN(rc);
 }
 
-struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
-
 /* When enqueuing asynchronously, locks are not ordered, we can obtain a lock
  * from the 2nd OSC before a lock from the 1st one. This does not deadlock with
  * other synchronous requests, however keeping some locks and trying to obtain
@@ -2620,10 +2769,7 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
                        }
 
                        req->rq_interpret_reply = osc_enqueue_interpret;
-                       if (rqset == PTLRPCD_SET)
-                               ptlrpcd_add_req(req);
-                       else
-                               ptlrpc_set_add_req(rqset, req);
+                       ptlrpc_set_add_req(rqset, req);
                } else if (intent) {
                        ptlrpc_req_finished(req);
                }
@@ -2638,9 +2784,10 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
        RETURN(rc);
 }
 
-int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
-                  enum ldlm_type type, union ldlm_policy_data *policy,
-                  enum ldlm_mode mode, __u64 *flags, void *data,
+int osc_match_base(const struct lu_env *env, struct obd_export *exp,
+                  struct ldlm_res_id *res_id, enum ldlm_type type,
+                  union ldlm_policy_data *policy, enum ldlm_mode mode,
+                  __u64 *flags, struct osc_object *obj,
                   struct lustre_handle *lockh, int unref)
 {
        struct obd_device *obd = exp->exp_obd;
@@ -2668,11 +2815,19 @@ int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
        if (rc == 0 || lflags & LDLM_FL_TEST_LOCK)
                RETURN(rc);
 
-       if (data != NULL) {
+       if (obj != NULL) {
                struct ldlm_lock *lock = ldlm_handle2lock(lockh);
 
                LASSERT(lock != NULL);
-               if (!osc_set_lock_data(lock, data)) {
+               if (osc_set_lock_data(lock, obj)) {
+                       lock_res_and_lock(lock);
+                       if (!ldlm_is_lvb_cached(lock)) {
+                               LASSERT(lock->l_ast_data == obj);
+                               osc_lock_lvb_update(env, obj, lock, NULL);
+                               ldlm_set_lvb_cached(lock);
+                       }
+                       unlock_res_and_lock(lock);
+               } else {
                        ldlm_lock_decref(lockh, rc);
                        rc = 0;
                }
@@ -2864,7 +3019,7 @@ static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
        default:
                rc = -ENOTTY;
                CDEBUG(D_INODE, "%s: unrecognised ioctl %#x by %s: rc = %d\n",
-                      obd->obd_name, cmd, current_comm(), rc);
+                      obd->obd_name, cmd, current->comm, rc);
                break;
        }
 
@@ -3339,7 +3494,7 @@ int osc_cleanup_common(struct obd_device *obd)
 }
 EXPORT_SYMBOL(osc_cleanup_common);
 
-static struct obd_ops osc_obd_ops = {
+static const struct obd_ops osc_obd_ops = {
         .o_owner                = THIS_MODULE,
         .o_setup                = osc_setup,
         .o_precleanup           = osc_precleanup,
@@ -3362,7 +3517,7 @@ static struct obd_ops osc_obd_ops = {
 };
 
 static struct shrinker *osc_cache_shrinker;
-struct list_head osc_shrink_list = LIST_HEAD_INIT(osc_shrink_list);
+LIST_HEAD(osc_shrink_list);
 DEFINE_SPINLOCK(osc_shrink_lock);
 
 #ifndef HAVE_SHRINKER_COUNT
@@ -3372,10 +3527,6 @@ static int osc_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
                .nr_to_scan = shrink_param(sc, nr_to_scan),
                .gfp_mask   = shrink_param(sc, gfp_mask)
        };
-#if !defined(HAVE_SHRINKER_WANT_SHRINK_PTR) && !defined(HAVE_SHRINK_CONTROL)
-       struct shrinker *shrinker = NULL;
-#endif
-
        (void)osc_cache_shrink_scan(shrinker, &scv);
 
        return osc_cache_shrink_count(shrinker, &scv);