Whamcloud - gitweb
LU-1299 clio: a combo patch to fix cl_lock
authorJinshan Xiong <jinshan.xiong@intel.com>
Fri, 7 Dec 2012 18:34:24 +0000 (10:34 -0800)
committerOleg Drokin <green@whamcloud.com>
Wed, 12 Dec 2012 02:26:50 +0000 (21:26 -0500)
In this patch, the following issues are fixed:
* redefine cl_lock_error(). There are two kinds of errors when
  enqueuing a cl_lock: internal errors returned from the server side,
  and local errors, for example being interrupted by a signal while
  waiting for a lock. Obviously the latter is not fatal, so we
  shouldn't err the lock out;
* for fault handling, only SIGKILL and SIGTERM are allowed; all other
  signals are blocked while the fault is serviced;
* handle return code of ll_fault correctly;
* redefine ->unuse() to release the resources it held if enqueuing the
  lock failed;
* in cl_lock_hold_release(), only locks in CLS_CACHED state will be
  cached; locks in other states will be cancelled voluntarily;
* fix the problem that cfs_block_sigs() was wrongly used where
  cfs_restore_sigs() was intended, and that the implementation of
  cfs_block_sigs() itself was wrong;
* the unuse method releases the underlying resources even if an error
  occurs, so we should set the lock to CLS_NEW anyway;
* other minor fixes.

Signed-off-by: Jinshan Xiong <jinshan.xiong@intel.com>
Change-Id: Iffce8be356723781b8f33ec9bdc2cf73e9e07138
Reviewed-on: http://review.whamcloud.com/2574
Tested-by: Hudson
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Lai Siyao <laisiyao@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
15 files changed:
libcfs/include/libcfs/libcfs.h
libcfs/libcfs/darwin/darwin-prim.c
libcfs/libcfs/linux/linux-prim.c
libcfs/libcfs/user-prim.c
lustre/include/cl_object.h
lustre/include/lustre_lib.h
lustre/include/obd_support.h
lustre/llite/llite_mmap.c
lustre/lov/lov_lock.c
lustre/lov/lovsub_lock.c
lustre/obdclass/cl_lock.c
lustre/obdclass/obd_mount.c
lustre/osc/osc_lock.c
lustre/ptlrpc/client.c
lustre/tests/sanity.sh

index 863ca3a..edef5e7 100644 (file)
@@ -226,7 +226,7 @@ void cfs_daemonize(char *str);
 int cfs_daemonize_ctxt(char *str);
 cfs_sigset_t cfs_get_blocked_sigs(void);
 cfs_sigset_t cfs_block_allsigs(void);
-cfs_sigset_t cfs_block_sigs(cfs_sigset_t bits);
+cfs_sigset_t cfs_block_sigs(unsigned long sigs);
 cfs_sigset_t cfs_block_sigsinv(unsigned long sigs);
 void cfs_restore_sigs(cfs_sigset_t);
 int cfs_signal_pending(void);
index 9c7b2e5..f02a97a 100644 (file)
@@ -375,12 +375,12 @@ cfs_sigset_t cfs_block_allsigs()
         return old;
 }
 
-cfs_sigset_t cfs_block_sigs(sigset_t bit)
+cfs_sigset_t cfs_block_sigs(unsigned long sigs)
 {
         cfs_sigset_t    old = 0;
 #ifdef __DARWIN8__
 #else
-        block_procsigmask(current_proc(), bit);
+        block_procsigmask(current_proc(), sigs);
 #endif
         return old;
 }
index 1b59e0b..08fc837 100644 (file)
@@ -310,30 +310,28 @@ cfs_block_allsigs(void)
         return old;
 }
 
-sigset_t
-cfs_block_sigs(sigset_t bits)
+sigset_t cfs_block_sigs(unsigned long sigs)
 {
         unsigned long  flags;
         sigset_t        old;
 
         SIGNAL_MASK_LOCK(current, flags);
         old = current->blocked;
-        current->blocked = bits;
+       sigaddsetmask(&current->blocked, sigs);
         RECALC_SIGPENDING;
         SIGNAL_MASK_UNLOCK(current, flags);
         return old;
 }
 
 /* Block all signals except for the @sigs */
-cfs_sigset_t
-cfs_block_sigsinv(unsigned long sigs)
+sigset_t cfs_block_sigsinv(unsigned long sigs)
 {
         unsigned long flags;
-        cfs_sigset_t old;
+        sigset_t old;
 
         SIGNAL_MASK_LOCK(current, flags);
         old = current->blocked;
-        siginitsetinv(&current->blocked, sigs);
+       sigaddsetmask(&current->blocked, ~sigs);
         RECALC_SIGPENDING;
         SIGNAL_MASK_UNLOCK(current, flags);
 
index ec2447e..2b451f2 100644 (file)
@@ -287,18 +287,19 @@ cfs_sigset_t cfs_block_allsigs(void)
         int            rc;
 
         sigfillset(&all);
-        rc = sigprocmask(SIG_SETMASK, &all, &old);
+        rc = sigprocmask(SIG_BLOCK, &all, &old);
         LASSERT(rc == 0);
 
         return old;
 }
 
-cfs_sigset_t cfs_block_sigs(cfs_sigset_t blocks)
+cfs_sigset_t cfs_block_sigs(unsigned long sigs)
 {
         cfs_sigset_t   old;
+       cfs_sigset_t   blocks = { { sigs } }; /* kludge */
         int   rc;
 
-        rc = sigprocmask(SIG_SETMASK, &blocks, &old);
+        rc = sigprocmask(SIG_BLOCK, &blocks, &old);
         LASSERT (rc == 0);
 
         return old;
index 66b1658..0da530e 100644 (file)
@@ -1797,6 +1797,14 @@ do {                                                                    \
         }                                                               \
 } while (0)
 
+#define CL_LOCK_ASSERT(expr, env, lock) do {                            \
+        if (likely(expr))                                               \
+                break;                                                  \
+                                                                        \
+        CL_LOCK_DEBUG(D_ERROR, env, lock, "failed at %s.\n", #expr);    \
+        LBUG();                                                         \
+} while (0)
+
 /** @} cl_lock */
 
 /** \addtogroup cl_page_list cl_page_list
index 305e69a..7c71629 100644 (file)
@@ -756,7 +756,7 @@ do {                                                                           \
                 }                                                              \
         }                                                                      \
                                                                                \
-        cfs_block_sigs(__blocked);                                             \
+        cfs_restore_sigs(__blocked);                                           \
                                                                                \
         cfs_set_current_state(CFS_TASK_RUNNING);                               \
         cfs_waitq_del(&wq, &__wait);                                           \
index e4457f9..5863332 100644 (file)
@@ -423,6 +423,7 @@ int obd_alloc_fail(const void *ptr, const char *name, const char *type,
 
 #define OBD_FAIL_LLITE                              0x1400
 #define OBD_FAIL_LLITE_FAULT_TRUNC_RACE             0x1401
+#define OBD_FAIL_LOCK_STATE_WAIT_INTR               0x1402
 
 /* Assign references to moved code to reduce code changes */
 #define OBD_FAIL_PRECHECK(id)                   CFS_FAIL_PRECHECK(id)
index 4424382..2f4772d 100644 (file)
@@ -215,6 +215,7 @@ struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
         unsigned long           ra_flags;
         pgoff_t                 pg_offset;
         int                     result;
+        cfs_sigset_t            set;
         ENTRY;
 
         pg_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
@@ -230,13 +231,20 @@ struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
         vio->u.fault.ft_vma            = vma;
         vio->u.fault.nopage.ft_address = address;
         vio->u.fault.nopage.ft_type    = type;
+        vio->u.fault.ft_vmpage         = NULL;
 
+        set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
         result = cl_io_loop(env, io);
+        cfs_restore_sigs(set);
+
+        page = vio->u.fault.ft_vmpage;
+        if (result != 0 && page != NULL) {
+                page_cache_release(page);
+                page = NOPAGE_SIGBUS;
+        }
 
 out_err:
-        if (result == 0)
-                page = vio->u.fault.ft_vmpage;
-        else if (result == -ENOMEM)
+        if (result == -ENOMEM)
                 page = NOPAGE_OOM;
 
         vma->vm_flags &= ~VM_RAND_READ;
@@ -248,6 +256,26 @@ out_err:
         RETURN(page);
 }
 #else
+
+static inline int to_fault_error(int result)
+{
+        switch(result) {
+        case 0:
+                result = VM_FAULT_LOCKED;
+                break;
+        case -EFAULT:
+                result = VM_FAULT_NOPAGE;
+                break;
+        case -ENOMEM:
+                result = VM_FAULT_OOM;
+                break;
+        default:
+                result = VM_FAULT_SIGBUS;
+                break;
+        }
+        return result;
+}
+
 /**
  * Lustre implementation of a vm_operations_struct::fault() method, called by
  * VM to server page fault (both in kernel and user space).
@@ -264,6 +292,7 @@ int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
         struct lu_env           *env;
         struct cl_io            *io;
         struct vvp_io           *vio = NULL;
+        struct page             *vmpage;
         unsigned long            ra_flags;
         struct cl_env_nest       nest;
         int                      result;
@@ -272,29 +301,30 @@ int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
 
         io = ll_fault_io_init(vma, &env,  &nest, vmf->pgoff, &ra_flags);
         if (IS_ERR(io))
-                RETURN(VM_FAULT_ERROR);
+                RETURN(to_fault_error(PTR_ERR(io)));
 
         result = io->ci_result;
-        if (result < 0)
-                goto out_err;
-
-        vio = vvp_env_io(env);
-        vio->u.fault.ft_vma       = vma;
-        vio->u.fault.ft_vmpage    = NULL;
-        vio->u.fault.fault.ft_vmf = vmf;
-
-        result = cl_io_loop(env, io);
-        fault_ret = vio->u.fault.fault.ft_flags;
-
-out_err:
-        if ((result != 0) && !(fault_ret & VM_FAULT_RETRY))
-                fault_ret |= VM_FAULT_ERROR;
+        if (result == 0) {
+                vio = vvp_env_io(env);
+                vio->u.fault.ft_vma       = vma;
+                vio->u.fault.ft_vmpage    = NULL;
+                vio->u.fault.fault.ft_vmf = vmf;
 
-        vma->vm_flags |= ra_flags;
+                result = cl_io_loop(env, io);
 
+                fault_ret = vio->u.fault.fault.ft_flags;
+                vmpage = vio->u.fault.ft_vmpage;
+                if (result != 0 && vmpage != NULL) {
+                        page_cache_release(vmpage);
+                        vmf->page = NULL;
+                }
+        }
         cl_io_fini(env, io);
         cl_env_nested_put(&nest, env);
 
+        vma->vm_flags |= ra_flags;
+        if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
+                fault_ret |= to_fault_error(result);
         RETURN(fault_ret);
 }
 
@@ -303,6 +333,12 @@ int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         int count = 0;
         bool printed = false;
         int result;
+        cfs_sigset_t set;
+
+        /* Only SIGKILL and SIGTERM is allowed for fault/nopage
+         * so that it can be killed by admin but not cause segfault by
+         * other signals. */
+        set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
 
 restart:
         result = ll_fault0(vma, vmf);
@@ -329,6 +365,7 @@ restart:
 
                 result |= VM_FAULT_LOCKED;
         }
+        cfs_restore_sigs(set);
         return result;
 }
 #endif
index 10189e7..b9b308f 100644 (file)
@@ -697,7 +697,6 @@ static int lov_lock_unuse(const struct lu_env *env,
                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
                 if (rc == 0) {
                         if (lls->sub_flags & LSF_HELD) {
-                                LASSERT(sublock->cll_state == CLS_HELD);
                                 rc = cl_unuse_try(subenv->lse_env, sublock);
                                 rc = lov_sublock_release(env, lck, i, 0, rc);
                         }
@@ -750,17 +749,9 @@ static void lov_lock_cancel(const struct lu_env *env,
 
                         switch(sublock->cll_state) {
                         case CLS_HELD:
-                                rc = cl_unuse_try(subenv->lse_env,
-                                                  sublock);
+                                rc = cl_unuse_try(subenv->lse_env, sublock);
                                 lov_sublock_release(env, lck, i, 0, 0);
                                 break;
-                        case CLS_ENQUEUED:
-                                /* TODO: it's not a good idea to cancel this
-                                 * lock because it's innocent. But it's
-                                 * acceptable. The better way would be to
-                                 * define a new lock method to unhold the
-                                 * dlm lock. */
-                                cl_lock_cancel(env, sublock);
                         default:
                                 lov_sublock_release(env, lck, i, 1, 0);
                                 break;
index 4810da4..583e516 100644 (file)
@@ -301,6 +301,11 @@ static int lovsub_lock_delete_one(const struct lu_env *env,
 
         result = 0;
         switch (parent->cll_state) {
+        case CLS_ENQUEUED:
+                /* See LU-1355 for the case that a glimpse lock is
+                 * interrupted by signal */
+                LASSERT(parent->cll_flags & CLF_CANCELLED);
+                break;
         case CLS_QUEUING:
         case CLS_FREEING:
                 cl_lock_signal(env, parent);
@@ -376,7 +381,6 @@ static int lovsub_lock_delete_one(const struct lu_env *env,
                         }
                 }
                 break;
-        case CLS_ENQUEUED:
         case CLS_HELD:
                 CL_LOCK_DEBUG(D_ERROR, env, parent, "Delete CLS_HELD lock\n");
         default:
index 5fd0aeb..ff9ffec 100644 (file)
@@ -597,7 +597,6 @@ struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
         struct cl_object_header *head;
         struct cl_object        *obj;
         struct cl_lock          *lock;
-        int ok;
 
         obj  = need->cld_obj;
         head = cl_object_header(obj);
@@ -612,20 +611,18 @@ struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
         cl_lock_mutex_get(env, lock);
         if (lock->cll_state == CLS_INTRANSIT)
                 cl_lock_state_wait(env, lock); /* Don't care return value. */
-        if (lock->cll_state == CLS_CACHED) {
-                int result;
-                result = cl_use_try(env, lock, 1);
-                if (result < 0)
-                        cl_lock_error(env, lock, result);
-        }
-        ok = lock->cll_state == CLS_HELD;
-        if (ok) {
-                cl_lock_hold_add(env, lock, scope, source);
-                cl_lock_user_add(env, lock);
+        cl_lock_hold_add(env, lock, scope, source);
+        cl_lock_user_add(env, lock);
+        if (lock->cll_state == CLS_CACHED)
+                cl_use_try(env, lock, 1);
+        if (lock->cll_state == CLS_HELD) {
+                cl_lock_mutex_put(env, lock);
+                cl_lock_lockdep_acquire(env, lock, 0);
                 cl_lock_put(env, lock);
-        }
-        cl_lock_mutex_put(env, lock);
-        if (!ok) {
+        } else {
+                cl_unuse_try(env, lock);
+                cl_lock_unhold(env, lock, scope, source);
+                cl_lock_mutex_put(env, lock);
                 cl_lock_put(env, lock);
                 lock = NULL;
         }
@@ -907,8 +904,10 @@ static void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
         lu_ref_del(&lock->cll_holders, scope, source);
         cl_lock_hold_mod(env, lock, -1);
         if (lock->cll_holds == 0) {
+                CL_LOCK_ASSERT(lock->cll_state != CLS_HELD, env, lock);
                 if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
-                    lock->cll_descr.cld_mode == CLM_GROUP)
+                    lock->cll_descr.cld_mode == CLM_GROUP ||
+                    lock->cll_state != CLS_CACHED)
                         /*
                          * If lock is still phantom or grouplock when user is
                          * done with it---destroy the lock.
@@ -973,12 +972,17 @@ int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
                 cl_lock_mutex_put(env, lock);
 
                 LASSERT(cl_lock_nr_mutexed(env) == 0);
-                cfs_waitq_wait(&waiter, CFS_TASK_INTERRUPTIBLE);
+
+                result = -EINTR;
+                if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
+                        cfs_waitq_wait(&waiter, CFS_TASK_INTERRUPTIBLE);
+                        if (!cfs_signal_pending())
+                                result = 0;
+                }
 
                 cl_lock_mutex_get(env, lock);
                 cfs_set_current_state(CFS_TASK_RUNNING);
                 cfs_waitq_del(&lock->cll_wq, &waiter);
-                result = cfs_signal_pending() ? -EINTR : 0;
 
                 /* Restore old blocked signals */
                 cfs_restore_sigs(blocked);
@@ -1188,12 +1192,12 @@ int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
         ENTRY;
         cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
         do {
-                result = 0;
-
                 LINVRNT(cl_lock_is_mutexed(lock));
 
-                if (lock->cll_error != 0)
+                result = lock->cll_error;
+                if (result != 0)
                         break;
+
                 switch (lock->cll_state) {
                 case CLS_NEW:
                         cl_lock_state_set(env, lock, CLS_QUEUING);
@@ -1226,9 +1230,7 @@ int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
                         LBUG();
                 }
         } while (result == CLO_REPEAT);
-        if (result < 0)
-                cl_lock_error(env, lock, result);
-        RETURN(result ?: lock->cll_error);
+        RETURN(result);
 }
 EXPORT_SYMBOL(cl_enqueue_try);
 
@@ -1257,6 +1259,7 @@ int cl_lock_enqueue_wait(const struct lu_env *env,
         LASSERT(cl_lock_nr_mutexed(env) == 0);
 
         cl_lock_mutex_get(env, conflict);
+        cl_lock_trace(D_DLMTRACE, env, "enqueue wait", conflict);
         cl_lock_cancel(env, conflict);
         cl_lock_delete(env, conflict);
 
@@ -1301,10 +1304,8 @@ static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
                 }
                 break;
         } while (1);
-        if (result != 0) {
-                cl_lock_user_del(env, lock);
-                cl_lock_error(env, lock, result);
-        }
+        if (result != 0)
+                cl_unuse_try(env, lock);
         LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
                      lock->cll_state == CLS_HELD));
         RETURN(result);
@@ -1341,13 +1342,11 @@ EXPORT_SYMBOL(cl_enqueue);
 /**
  * Tries to unlock a lock.
  *
- * This function is called repeatedly by cl_unuse() until either lock is
- * unlocked, or error occurs.
- * cl_unuse_try is a one-shot operation, so it must NOT return CLO_WAIT.
- *
- * \pre  lock->cll_state == CLS_HELD
+ * This function is called to release underlying resource:
+ * 1. for top lock, the resource is sublocks it held;
+ * 2. for sublock, the resource is the reference to dlmlock.
  *
- * \post ergo(result == 0, lock->cll_state == CLS_CACHED)
+ * cl_unuse_try is a one-shot operation, so it must NOT return CLO_WAIT.
  *
  * \see cl_unuse() cl_lock_operations::clo_unuse()
  * \see cl_lock_state::CLS_CACHED
@@ -1360,12 +1359,18 @@ int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
         ENTRY;
         cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);
 
-        LASSERT(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED);
         if (lock->cll_users > 1) {
                 cl_lock_user_del(env, lock);
                 RETURN(0);
         }
 
+        /* Only if the lock is in CLS_HELD or CLS_ENQUEUED state, it can hold
+         * underlying resources. */
+        if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) {
+                cl_lock_user_del(env, lock);
+                RETURN(0);
+        }
+
         /*
          * New lock users (->cll_users) are not protecting unlocking
          * from proceeding. From this point, lock eventually reaches
@@ -1405,29 +1410,16 @@ int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
                  */
                 result = 0;
         } else {
-                CERROR("result = %d, this is unlikely!\n", result);
-                cl_lock_extransit(env, lock, state);
+                CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result);
+                /* Set the lock state to CLS_NEW so it will be destroyed.
+                 * In lov_lock_unuse() it will release sublocks even if error
+                 * occurs. */
+                cl_lock_extransit(env, lock, CLS_NEW);
         }
-
-        result = result ?: lock->cll_error;
-        if (result < 0)
-                cl_lock_error(env, lock, result);
-        RETURN(result);
+        RETURN(result ?: lock->cll_error);
 }
 EXPORT_SYMBOL(cl_unuse_try);
 
-static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
-{
-        int result;
-        ENTRY;
-
-        result = cl_unuse_try(env, lock);
-        if (result)
-                CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result);
-
-        EXIT;
-}
-
 /**
  * Unlocks a lock.
  */
@@ -1435,7 +1427,7 @@ void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
 {
         ENTRY;
         cl_lock_mutex_get(env, lock);
-        cl_unuse_locked(env, lock);
+        cl_unuse_try(env, lock);
         cl_lock_mutex_put(env, lock);
         cl_lock_lockdep_release(env, lock);
         EXIT;
@@ -1468,8 +1460,8 @@ int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
                 LASSERT(lock->cll_users > 0);
                 LASSERT(lock->cll_holds > 0);
 
-                result = 0;
-                if (lock->cll_error != 0)
+                result = lock->cll_error;
+                if (result != 0)
                         break;
 
                 if (cl_lock_is_intransit(lock)) {
@@ -1495,7 +1487,7 @@ int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
                         cl_lock_state_set(env, lock, CLS_HELD);
                 }
         } while (result == CLO_REPEAT);
-        RETURN(result ?: lock->cll_error);
+        RETURN(result);
 }
 EXPORT_SYMBOL(cl_wait_try);
 
@@ -1530,8 +1522,7 @@ int cl_wait(const struct lu_env *env, struct cl_lock *lock)
                 break;
         } while (1);
         if (result < 0) {
-                cl_lock_user_del(env, lock);
-                cl_lock_error(env, lock, result);
+                cl_unuse_try(env, lock);
                 cl_lock_lockdep_release(env, lock);
         }
         cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
@@ -1799,8 +1790,8 @@ void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
         LINVRNT(cl_lock_invariant(env, lock));
 
         ENTRY;
-        cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
         if (lock->cll_error == 0 && error != 0) {
+                cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
                 lock->cll_error = error;
                 cl_lock_signal(env, lock);
                 cl_lock_cancel(env, lock);
@@ -2171,7 +2162,7 @@ struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
                                                                 lock, enqflags);
                                         break;
                                 }
-                                cl_unuse_locked(env, lock);
+                                cl_unuse_try(env, lock);
                         }
                         cl_lock_trace(D_DLMTRACE, env, "enqueue failed", lock);
                         cl_lock_hold_release(env, lock, scope, source);
index 24d4dc2..1d14021 100644 (file)
@@ -1449,7 +1449,7 @@ static void server_wait_finished(struct vfsmount *mnt)
                        (atomic_read(&mnt->mnt_count) == 1),
                        cfs_time_seconds(3),
                        rc);
-               cfs_block_sigs(blocked);
+               cfs_restore_sigs(blocked);
                if (rc < 0) {
                        LCONSOLE_EMERG("Danger: interrupted umount %s with "
                                       "%d refs!\n", mnt->mnt_devname,
index 0e4c083..e30e309 100644 (file)
@@ -196,14 +196,21 @@ static int osc_lock_unuse(const struct lu_env *env,
 {
         struct osc_lock *ols = cl2osc_lock(slice);
 
+        LINVRNT(osc_lock_invariant(ols));
         LASSERT(ols->ols_state == OLS_GRANTED ||
+                ols->ols_state == OLS_ENQUEUED ||
                 ols->ols_state == OLS_UPCALL_RECEIVED);
-        LINVRNT(osc_lock_invariant(ols));
+
+        if (ols->ols_state == OLS_ENQUEUED) {
+                ols->ols_state = OLS_CANCELLED;
+                return 0;
+        }
 
         if (ols->ols_glimpse) {
                 LASSERT(ols->ols_hold == 0);
                 return 0;
         }
+
         LASSERT(ols->ols_hold);
 
         /*
@@ -1472,6 +1479,9 @@ static int osc_lock_fits_into(const struct lu_env *env,
         if (need->cld_enq_flags & CEF_NEVER)
                 return 0;
 
+        if (ols->ols_state >= OLS_CANCELLED)
+                return 0;
+
         if (need->cld_mode == CLM_PHANTOM) {
                 /*
                  * Note: the QUEUED lock can't be matched here, otherwise
index 09374c4..af5ca76 100644 (file)
@@ -2068,7 +2068,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
                          * reentrant from userspace again */
                         if (cfs_signal_pending())
                                 ptlrpc_interrupted_set(set);
-                        cfs_block_sigs(blocked_sigs);
+                        cfs_restore_sigs(blocked_sigs);
                 }
 
                 LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
index d393e5e..a933988 100644 (file)
@@ -8555,6 +8555,18 @@ test_222b() { # LU-1039, MRP-303
 }
 run_test 222b "Don't panic on bulk IO failure"
 
+# LU-1299 Executing or running ldd on a truncated executable does not
+# cause an out-of-memory condition.
+test_222() {
+        dd if=`which date` of=$MOUNT/date bs=1k count=1
+        chmod +x $MOUNT/date
+
+        $MOUNT/date > /dev/null
+        ldd $MOUNT/date > /dev/null
+        rm -f $MOUNT/date
+}
+run_test 222 "running truncated executable does not cause OOM"
+
 #
 # tests that do cleanup/setup should be run at the end
 #