b=20038
author    jxiong <jxiong>
Wed, 8 Jul 2009 06:32:13 +0000 (06:32 +0000)
committer jxiong <jxiong>
Wed, 8 Jul 2009 06:32:13 +0000 (06:32 +0000)
i=eric.mei
i=rread

- Clean up client_obd_list_lock (a standalone sketch of the new debug-lock pattern follows the changed-file list below)
- When a cl_page is to be freed, its Uptodate bit has to be cleared as well, because the page may no longer be under the protection of an ldlm lock
- Add more debug code

14 files changed:
libcfs/include/libcfs/libcfs_private.h
lustre/fld/fld_request.c
lustre/include/cl_object.h
lustre/include/linux/obd.h
lustre/llite/llite_close.c
lustre/llite/vvp_io.c
lustre/llite/vvp_page.c
lustre/lov/lov_page.c
lustre/mdc/lproc_mdc.c
lustre/mdc/mdc_lib.c
lustre/obdclass/cl_lock.c
lustre/obdclass/cl_page.c
lustre/osc/osc_page.c
lustre/osc/osc_request.c
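
For reference, here is a minimal, standalone userspace sketch of the debug-lock pattern this patch introduces for client_obd_list_lock: spin on trylock, record where the lock was taken, and complain when acquisition has stalled for more than five seconds. This is an illustration only, not Lustre code; the names dbg_lock_t, dbg_lock(), DBG_LOCK and dbg_unlock() are made up, and pthreads stand in for the kernel spinlock.

#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Debug wrapper around a mutex: remembers the last acquisition site. */
typedef struct {
        pthread_mutex_t  lock;
        time_t           time;   /* when the lock was last taken or dropped */
        const char      *func;   /* function of the last acquisition */
        int              line;
} dbg_lock_t;

static void dbg_lock(dbg_lock_t *l, const char *func, int line)
{
        time_t start = time(NULL);

        for (;;) {
                if (pthread_mutex_trylock(&l->lock) == 0) {
                        /* Record who took the lock and where. */
                        l->func = func;
                        l->line = line;
                        l->time = time(NULL);
                        return;
                }
                /* We have waited > 5s and the holder has kept it > 5s. */
                if (time(NULL) - start > 5 && time(NULL) - l->time > 5) {
                        fprintf(stderr, "LOCK UP? %p taken at %s:%d, held %lds\n",
                                (void *)l, l->func, l->line,
                                (long)(time(NULL) - l->time));
                        start = time(NULL);   /* rate-limit the warning */
                }
                usleep(1000);
        }
}

#define DBG_LOCK(l) dbg_lock((l), __func__, __LINE__)

static void dbg_unlock(dbg_lock_t *l)
{
        l->time = time(NULL);
        pthread_mutex_unlock(&l->lock);
}

int main(void)
{
        dbg_lock_t l = { .lock = PTHREAD_MUTEX_INITIALIZER };

        DBG_LOCK(&l);
        dbg_unlock(&l);
        return 0;
}

The in-kernel version in lustre/include/linux/obd.h below uses spin_trylock(), LCONSOLE_WARN() and libcfs_debug_dumpstack() instead, and additionally records the owning task so its stack can be dumped.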

libcfs/include/libcfs/libcfs_private.h
index 3f138fc..336fef7 100644
@@ -270,6 +270,8 @@ void libcfs_debug_dumplog(void);
 int libcfs_debug_init(unsigned long bufsize);
 int libcfs_debug_cleanup(void);
 
+#define libcfs_debug_dumpstack(tsk)     ((void)0)
+
 /*
  * Generic compiler-dependent macros required for kernel
  * build go below this comment. Actual compiler/compiler version
lustre/fld/fld_request.c
index 7fd9246..eb02a43 100644
@@ -74,9 +74,9 @@ static int fld_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
 {
         int rc;
         ENTRY;
-        spin_lock(&cli->cl_loi_list_lock);
+        client_obd_list_lock(&cli->cl_loi_list_lock);
         rc = list_empty(&mcw->mcw_entry);
-        spin_unlock(&cli->cl_loi_list_lock);
+        client_obd_list_unlock(&cli->cl_loi_list_lock);
         RETURN(rc);
 };
 
@@ -85,15 +85,15 @@ static void fld_enter_request(struct client_obd *cli)
         struct mdc_cache_waiter mcw;
         struct l_wait_info lwi = { 0 };
 
-        spin_lock(&cli->cl_loi_list_lock);
+        client_obd_list_lock(&cli->cl_loi_list_lock);
         if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
                 list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
                 cfs_waitq_init(&mcw.mcw_waitq);
-                spin_unlock(&cli->cl_loi_list_lock);
+                client_obd_list_unlock(&cli->cl_loi_list_lock);
                 l_wait_event(mcw.mcw_waitq, fld_req_avail(cli, &mcw), &lwi);
         } else {
                 cli->cl_r_in_flight++;
-                spin_unlock(&cli->cl_loi_list_lock);
+                client_obd_list_unlock(&cli->cl_loi_list_lock);
         }
 }
 
@@ -102,7 +102,7 @@ static void fld_exit_request(struct client_obd *cli)
         struct list_head *l, *tmp;
         struct mdc_cache_waiter *mcw;
 
-        spin_lock(&cli->cl_loi_list_lock);
+        client_obd_list_lock(&cli->cl_loi_list_lock);
         cli->cl_r_in_flight--;
         list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
 
@@ -116,7 +116,7 @@ static void fld_exit_request(struct client_obd *cli)
                 cli->cl_r_in_flight++;
                 cfs_waitq_signal(&mcw->mcw_waitq);
         }
-        spin_unlock(&cli->cl_loi_list_lock);
+        client_obd_list_unlock(&cli->cl_loi_list_lock);
 }
 
 static int fld_rrb_hash(struct lu_client_fld *fld,
lustre/include/cl_object.h
index fa8c613..958324e 100644
@@ -736,6 +736,10 @@ struct cl_page {
          */
         struct cl_io            *cp_owner;
         /**
+         * Debug information: the task that currently owns the page.
+         */
+        cfs_task_t              *cp_task;
+        /**
          * Owning IO request in cl_page_state::CPS_PAGEOUT and
          * cl_page_state::CPS_PAGEIN states. This field is maintained only in
          * the top-level pages. Protected by a VM lock.
@@ -855,15 +859,13 @@ struct cl_page_operations {
                              const struct cl_page_slice *slice,
                              struct cl_io *io);
         /**
-         * Announces that page contains valid data and user space can look and
-         * them without client's involvement from now on. Effectively marks
-         * the page up-to-date. Optional.
+         * Announces whether the page contains valid data or not by @uptodate.
          *
          * \see cl_page_export()
          * \see vvp_page_export()
          */
         void  (*cpo_export)(const struct lu_env *env,
-                            const struct cl_page_slice *slice);
+                            const struct cl_page_slice *slice, int uptodate);
         /**
          * Unmaps page from the user space (if it is mapped).
          *
@@ -2718,7 +2720,8 @@ int     cl_page_unmap        (const struct lu_env *env, struct cl_io *io,
                               struct cl_page *pg);
 int     cl_page_is_vmlocked  (const struct lu_env *env,
                               const struct cl_page *pg);
-void    cl_page_export       (const struct lu_env *env, struct cl_page *pg);
+void    cl_page_export       (const struct lu_env *env,
+                              struct cl_page *pg, int uptodate);
 int     cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
                               struct cl_page *page);
 loff_t  cl_offset            (const struct cl_object *obj, pgoff_t idx);
lustre/include/linux/obd.h
index 780c2b8..eb0ab9c 100644
@@ -41,6 +41,8 @@
 #error Do not #include this file directly. #include <obd.h> instead
 #endif
 
+#include <obd_support.h>
+
 #ifdef __KERNEL__
 # include <linux/fs.h>
 # include <linux/list.h>
 # endif
 #endif
 
-typedef spinlock_t client_obd_lock_t;
+typedef struct {
+        spinlock_t          lock;
+        unsigned long       time;
+        struct task_struct *task;
+        const char         *func;
+        int                 line;
+} client_obd_lock_t;
 
 static inline void client_obd_list_lock_init(client_obd_lock_t *lock)
 {
-        spin_lock_init(lock);
+        spin_lock_init(&lock->lock);
 }
 
 static inline void client_obd_list_lock_done(client_obd_lock_t *lock)
 {}
 
-static inline void client_obd_list_lock(client_obd_lock_t *lock)
+static inline void __client_obd_list_lock(client_obd_lock_t *lock,
+                                          const char *func,
+                                          int line)
 {
-        spin_lock(lock);
+        unsigned long cur = jiffies;
+        while (1) {
+                if (spin_trylock(&lock->lock)) {
+                        LASSERT(lock->task == NULL);
+                        lock->task = current;
+                        lock->func = func;
+                        lock->line = line;
+                        lock->time = jiffies;
+                        break;
+                }
+
+                if ((jiffies - cur > 5 * HZ) &&
+                    (jiffies - lock->time > 5 * HZ)) {
+                        LCONSOLE_WARN("LOCK UP! the lock %p was acquired"
+                                      " by <%s:%d:%s:%d> %lu jiffies ago, I'm %s:%d\n",
+                                      lock, lock->task->comm, lock->task->pid,
+                                      lock->func, lock->line,
+                                      (jiffies - lock->time),
+                                      current->comm, current->pid);
+                        LCONSOLE_WARN("====== for process holding the "
+                                      "lock =====\n");
+                        libcfs_debug_dumpstack(lock->task);
+                        LCONSOLE_WARN("====== for current process =====\n");
+                        libcfs_debug_dumpstack(NULL);
+                        LCONSOLE_WARN("====== end =======\n");
+                        cfs_pause(1000 * HZ);
+                }
+        }
 }
 
+#define client_obd_list_lock(lock) \
+        __client_obd_list_lock(lock, __FUNCTION__, __LINE__)
+
 static inline void client_obd_list_unlock(client_obd_lock_t *lock)
 {
-        spin_unlock(lock);
+        LASSERT(lock->task != NULL);
+        lock->task = NULL;
+        lock->time = jiffies;
+        spin_unlock(&lock->lock);
+}
+
+static inline int client_obd_list_is_locked(client_obd_lock_t *lock)
+{
+        return spin_is_locked(&lock->lock);
 }
 
 #if defined(__KERNEL__) && !defined(HAVE_ADLER)
lustre/llite/llite_close.c
index c9ff428..28f5b07 100644
@@ -85,9 +85,10 @@ void ll_queue_done_writing(struct inode *inode, unsigned long flags)
 {
         struct ll_inode_info *lli = ll_i2info(inode);
         struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
+        ENTRY;
+
         spin_lock(&lli->lli_lock);
         lli->lli_flags |= flags;
-        ENTRY;
 
         if ((lli->lli_flags & LLIF_DONE_WRITING) &&
             list_empty(&club->cob_pending_list)) {
lustre/llite/vvp_io.c
index e398f2c..bbdfb86 100644
@@ -642,7 +642,7 @@ static int vvp_io_read_page(const struct lu_env *env,
 
         if (cp->cpg_defer_uptodate) {
                 cp->cpg_ra_used = 1;
-                cl_page_export(env, page);
+                cl_page_export(env, page, 1);
         }
         /*
          * Add page into the queue even when it is marked uptodate above.
@@ -732,7 +732,7 @@ static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io,
                  * details. -jay
                  */
                 if (result == 0)
-                        cl_page_export(env, pg);
+                        cl_page_export(env, pg, 1);
         }
         return result;
 }
@@ -857,7 +857,7 @@ static int vvp_io_commit_write(const struct lu_env *env,
         if (result == 0) {
                 if (size > i_size_read(inode))
                         i_size_write(inode, size);
-                cl_page_export(env, pg);
+                cl_page_export(env, pg, 1);
         } else if (size > i_size_read(inode))
                 cl_page_discard(env, io, pg);
         RETURN(result);
lustre/llite/vvp_page.c
index c57ce84..7285045 100644
@@ -99,6 +99,12 @@ static void vvp_page_own(const struct lu_env *env,
                                       vmpage, current->comm, current,
                                       vmpage->flags, io);
                         libcfs_debug_dumpstack(NULL);
+                        if (slice->cpl_page->cp_task) {
+                                cfs_task_t *tsk = slice->cpl_page->cp_task;
+                                LCONSOLE_WARN("The page was owned by %s\n",
+                                              tsk->comm);
+                                libcfs_debug_dumpstack(tsk);
+                        }
                         LCONSOLE_WARN("Reproduced bug #18881,please contact:"
                                "jay <jinshan.xiong@sun.com>, thanks\n");
 
@@ -202,13 +208,17 @@ static void vvp_page_delete(const struct lu_env *env,
 }
 
 static void vvp_page_export(const struct lu_env *env,
-                            const struct cl_page_slice *slice)
+                            const struct cl_page_slice *slice,
+                            int uptodate)
 {
         cfs_page_t *vmpage = cl2vm_page(slice);
 
         LASSERT(vmpage != NULL);
         LASSERT(PageLocked(vmpage));
-        SetPageUptodate(vmpage);
+        if (uptodate)
+                SetPageUptodate(vmpage);
+        else
+                ClearPageUptodate(vmpage);
 }
 
 static int vvp_page_is_vmlocked(const struct lu_env *env,
@@ -305,7 +315,7 @@ static void vvp_page_completion_read(const struct lu_env *env,
         if (ioret == 0)  {
                 /* XXX: do we need this for transient pages? */
                 if (!cp->cpg_defer_uptodate)
-                        cl_page_export(env, page);
+                        cl_page_export(env, page, 1);
         } else
                 cp->cpg_defer_uptodate = 0;
         vvp_page_completion_common(env, cp, ioret);
lustre/lov/lov_page.c
index f50d345..5d50f8a 100644
@@ -216,7 +216,7 @@ struct cl_page *lov_page_init_empty(const struct lu_env *env,
                 addr = cfs_kmap(vmpage);
                 memset(addr, 0, cl_page_size(obj));
                 cfs_kunmap(vmpage);
-                cl_page_export(env, page);
+                cl_page_export(env, page, 1);
                 result = 0;
         }
         RETURN(ERR_PTR(result));
lustre/mdc/lproc_mdc.c
index 72d3790..789b33c 100644
@@ -50,9 +50,9 @@ static int mdc_rd_max_rpcs_in_flight(char *page, char **start, off_t off,
         struct client_obd *cli = &dev->u.cli;
         int rc;
 
-        spin_lock(&cli->cl_loi_list_lock);
+        client_obd_list_lock(&cli->cl_loi_list_lock);
         rc = snprintf(page, count, "%u\n", cli->cl_max_rpcs_in_flight);
-        spin_unlock(&cli->cl_loi_list_lock);
+        client_obd_list_unlock(&cli->cl_loi_list_lock);
         return rc;
 }
 
@@ -70,9 +70,9 @@ static int mdc_wr_max_rpcs_in_flight(struct file *file, const char *buffer,
         if (val < 1 || val > MDC_MAX_RIF_MAX)
                 return -ERANGE;
 
-        spin_lock(&cli->cl_loi_list_lock);
+        client_obd_list_lock(&cli->cl_loi_list_lock);
         cli->cl_max_rpcs_in_flight = val;
-        spin_unlock(&cli->cl_loi_list_lock);
+        client_obd_list_unlock(&cli->cl_loi_list_lock);
 
         return count;
 }
lustre/mdc/mdc_lib.c
index 48b753e..aea8c87 100644
@@ -493,9 +493,9 @@ static int mdc_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
 {
         int rc;
         ENTRY;
-        spin_lock(&cli->cl_loi_list_lock);
+        client_obd_list_lock(&cli->cl_loi_list_lock);
         rc = list_empty(&mcw->mcw_entry);
-        spin_unlock(&cli->cl_loi_list_lock);
+        client_obd_list_unlock(&cli->cl_loi_list_lock);
         RETURN(rc);
 };
 
@@ -507,15 +507,15 @@ void mdc_enter_request(struct client_obd *cli)
         struct mdc_cache_waiter mcw;
         struct l_wait_info lwi = { 0 };
 
-        spin_lock(&cli->cl_loi_list_lock);
+        client_obd_list_lock(&cli->cl_loi_list_lock);
         if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
                 list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
                 cfs_waitq_init(&mcw.mcw_waitq);
-                spin_unlock(&cli->cl_loi_list_lock);
+                client_obd_list_unlock(&cli->cl_loi_list_lock);
                 l_wait_event(mcw.mcw_waitq, mdc_req_avail(cli, &mcw), &lwi);
         } else {
                 cli->cl_r_in_flight++;
-                spin_unlock(&cli->cl_loi_list_lock);
+                client_obd_list_unlock(&cli->cl_loi_list_lock);
         }
 }
 
@@ -524,7 +524,7 @@ void mdc_exit_request(struct client_obd *cli)
         struct list_head *l, *tmp;
         struct mdc_cache_waiter *mcw;
 
-        spin_lock(&cli->cl_loi_list_lock);
+        client_obd_list_lock(&cli->cl_loi_list_lock);
         cli->cl_r_in_flight--;
         list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
                 
@@ -540,5 +540,5 @@ void mdc_exit_request(struct client_obd *cli)
         }
         /* Empty waiting list? Decrease reqs in-flight number */
         
-        spin_unlock(&cli->cl_loi_list_lock);
+        client_obd_list_unlock(&cli->cl_loi_list_lock);
 }
lustre/obdclass/cl_lock.c
index 7832f34..6b0a74e 100644
@@ -763,8 +763,17 @@ static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
                  * and head->coh_nesting == 1 check assumes two level top-sub
                  * hierarchy.
                  */
-                LASSERT(ergo(head->coh_nesting == 1 &&
-                             list_empty(&head->coh_locks), !head->coh_pages));
+                /*
+                 * The page count of this object may NOT be zero, because
+                 * we don't clean up pages that are in CPS_FREEING state.
+                 * See cl_page_gang_lookup().
+                 *
+                 * It is safe to leave CPS_FREEING pages in the cache without
+                 * a lock, because those pages must not be uptodate.
+                 * See cl_page_delete0 for details.
+                 */
+                /* LASSERT(!ergo(head->coh_nesting == 1 &&
+                           list_empty(&head->coh_locks), !head->coh_pages)); */
                 spin_unlock(&head->coh_lock_guard);
                 /*
                  * From now on, no new references to this lock can be acquired
lustre/obdclass/cl_page.c
index 5dcf62d..2a53616 100644
@@ -837,6 +837,7 @@ static void cl_page_owner_clear(struct cl_page *page)
                         LASSERT(page->cp_owner->ci_owned_nr > 0);
                         page->cp_owner->ci_owned_nr--;
                         page->cp_owner = NULL;
+                        page->cp_task = NULL;
                 }
         }
         EXIT;
@@ -915,17 +916,22 @@ int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
         pg = cl_page_top(pg);
         io = cl_io_top(io);
 
-        cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_own));
-        PASSERT(env, pg, pg->cp_owner == NULL);
-        PASSERT(env, pg, pg->cp_req == NULL);
-        pg->cp_owner = io;
-        cl_page_owner_set(pg);
-        if (pg->cp_state != CPS_FREEING) {
-                cl_page_state_set(env, pg, CPS_OWNED);
-                result = 0;
-        } else {
-                cl_page_disown0(env, io, pg);
+        if (pg->cp_state == CPS_FREEING) {
                 result = -EAGAIN;
+        } else {
+                cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_own));
+                PASSERT(env, pg, pg->cp_owner == NULL);
+                PASSERT(env, pg, pg->cp_req == NULL);
+                pg->cp_owner = io;
+                pg->cp_task  = current;
+                cl_page_owner_set(pg);
+                if (pg->cp_state != CPS_FREEING) {
+                        cl_page_state_set(env, pg, CPS_OWNED);
+                        result = 0;
+                } else {
+                        cl_page_disown0(env, io, pg);
+                        result = -EAGAIN;
+                }
         }
         PINVRNT(env, pg, ergo(result == 0, cl_page_invariant(pg)));
         RETURN(result);
@@ -956,6 +962,7 @@ void cl_page_assume(const struct lu_env *env,
 
         cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
         pg->cp_owner = io;
+        pg->cp_task = current;
         cl_page_owner_set(pg);
         cl_page_state_set(env, pg, CPS_OWNED);
         EXIT;
@@ -1044,35 +1051,49 @@ EXPORT_SYMBOL(cl_page_discard);
 static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
                             int radix)
 {
+        struct cl_page *tmp = pg;
+        ENTRY;
+
         PASSERT(env, pg, pg == cl_page_top(pg));
         PASSERT(env, pg, pg->cp_state != CPS_FREEING);
 
-        ENTRY;
         /*
          * Severe all ways to obtain new pointers to @pg.
          */
         cl_page_owner_clear(pg);
+
+        /*
+         * Unexport the page before freeing it, so that its content
+         * is considered to be invalid.
+         * We have to do this because a CPS_FREEING cl_page may
+         * NOT be under the protection of a cl_lock.
+         * Afterwards, if this page is found by other threads, it
+         * will be forced to be re-read.
+         */
+        cl_page_export(env, pg, 0);
         cl_page_state_set0(env, pg, CPS_FREEING);
-        CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_delete),
-                       (const struct lu_env *, const struct cl_page_slice *));
+
         if (!radix)
                 /*
                  * !radix means that @pg is not yet in the radix tree, skip
                  * removing it.
                  */
-                pg = pg->cp_child;
-        for (; pg != NULL; pg = pg->cp_child) {
+                tmp = pg->cp_child;
+        for (; tmp != NULL; tmp = tmp->cp_child) {
                 void                    *value;
                 struct cl_object_header *hdr;
 
-                hdr = cl_object_header(pg->cp_obj);
+                hdr = cl_object_header(tmp->cp_obj);
                 spin_lock(&hdr->coh_page_guard);
-                value = radix_tree_delete(&hdr->coh_tree, pg->cp_index);
-                PASSERT(env, pg, value == pg);
-                PASSERT(env, pg, hdr->coh_pages > 0);
+                value = radix_tree_delete(&hdr->coh_tree, tmp->cp_index);
+                PASSERT(env, tmp, value == tmp);
+                PASSERT(env, tmp, hdr->coh_pages > 0);
                 hdr->coh_pages--;
                 spin_unlock(&hdr->coh_page_guard);
         }
+
+        CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_delete),
+                       (const struct lu_env *, const struct cl_page_slice *));
         EXIT;
 }
 
@@ -1133,17 +1154,17 @@ EXPORT_SYMBOL(cl_page_unmap);
  * Marks page up-to-date.
  *
  * Call cl_page_operations::cpo_export() through all layers top-to-bottom. The
- * layer responsible for VM interaction has to mark page as up-to-date. From
- * this moment on, page can be shown to the user space without Lustre being
- * notified, hence the name.
+ * layer responsible for VM interaction has to mark or clear the page's
+ * up-to-date bit according to the @uptodate argument.
  *
  * \see cl_page_operations::cpo_export()
  */
-void cl_page_export(const struct lu_env *env, struct cl_page *pg)
+void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate)
 {
         PINVRNT(env, pg, cl_page_invariant(pg));
         CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_export),
-                       (const struct lu_env *, const struct cl_page_slice *));
+                       (const struct lu_env *,
+                        const struct cl_page_slice *, int), uptodate);
 }
 EXPORT_SYMBOL(cl_page_export);
 
lustre/osc/osc_page.c
index 29d119c..b880749 100644
 
 #include "osc_cl_internal.h"
 
+/* 
+ * Comment out osc_page_protected because it may sleep inside
+ * the client_obd_list_lock.
+ * client_obd_list_lock -> osc_ap_completion -> osc_completion ->
+ *   -> osc_page_protected -> osc_page_is_dlocked -> osc_match_base
+ *   -> ldlm_lock_match -> sptlrpc_import_check_ctx -> sleep.
+ */
+#if 0
 static int osc_page_is_dlocked(const struct lu_env *env,
                                const struct osc_page *opg,
                                enum cl_lock_mode mode, int pending, int unref)
@@ -57,6 +65,8 @@ static int osc_page_is_dlocked(const struct lu_env *env,
         ldlm_mode_t             dlmmode;
         int                     flags;
 
+        might_sleep();
+
         info = osc_env_info(env);
         resname = &info->oti_resname;
         policy = &info->oti_policy;
@@ -131,6 +141,14 @@ static int osc_page_protected(const struct lu_env *env,
         }
         return result;
 }
+#else
+static int osc_page_protected(const struct lu_env *env,
+                              const struct osc_page *opg,
+                              enum cl_lock_mode mode, int unref)
+{
+        return 1;
+}
+#endif
 
 /*****************************************************************************
  *
lustre/osc/osc_request.c
index e6bbf9a..f2a163d 100644
@@ -811,7 +811,7 @@ static void osc_update_next_shrink(struct client_obd *cli)
 static void osc_consume_write_grant(struct client_obd *cli,
                                     struct brw_page *pga)
 {
-        LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock);
+        LASSERT(client_obd_list_is_locked(&cli->cl_loi_list_lock));
         LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
         atomic_inc(&obd_dirty_pages);
         cli->cl_dirty += CFS_PAGE_SIZE;
@@ -831,7 +831,7 @@ static void osc_release_write_grant(struct client_obd *cli,
         int blocksize = cli->cl_import->imp_obd->obd_osfs.os_bsize ? : 4096;
         ENTRY;
 
-        LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock);
+        LASSERT(client_obd_list_is_locked(&cli->cl_loi_list_lock));
         if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
                 EXIT;
                 return;
@@ -2517,7 +2517,7 @@ osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli,
         req = osc_build_req(env, cli, &rpc_list, page_count, cmd);
         if (IS_ERR(req)) {
                 LASSERT(list_empty(&rpc_list));
-                loi_list_maint(cli, loi);
+                /* loi_list_maint(cli, loi); */
                 RETURN(PTR_ERR(req));
         }
 
@@ -3354,7 +3354,7 @@ int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
         rc = mode;
         if (mode == LCK_PR)
                 rc |= LCK_PW;
-        rc = ldlm_lock_match(obd->obd_namespace, lflags | LDLM_FL_LVB_READY,
+        rc = ldlm_lock_match(obd->obd_namespace, lflags,
                              res_id, type, policy, rc, lockh, unref);
         if (rc) {
                 if (data != NULL)