b=22147 do not reset owner for lockless lock.
fs/lustre-release.git: lustre/osc/osc_lock.c
diff --git a/lustre/osc/osc_lock.c b/lustre/osc/osc_lock.c
index 8cb79b1..26a2d2d 100644
--- a/lustre/osc/osc_lock.c
+++ b/lustre/osc/osc_lock.c
@@ -135,10 +135,10 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
 {
         struct ldlm_lock *dlmlock;
 
-        spin_lock(&osc_ast_guard);
+        cfs_spin_lock(&osc_ast_guard);
         dlmlock = olck->ols_lock;
         if (dlmlock == NULL) {
-                spin_unlock(&osc_ast_guard);
+                cfs_spin_unlock(&osc_ast_guard);
                 return;
         }
 
@@ -147,7 +147,7 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
          * call to osc_lock_detach() */
         dlmlock->l_ast_data = NULL;
         olck->ols_handle.cookie = 0ULL;
-        spin_unlock(&osc_ast_guard);
+        cfs_spin_unlock(&osc_ast_guard);
 
         lock_res_and_lock(dlmlock);
         if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
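
Note: throughout this patch the bare Linux primitives (spin_lock(),
mutex_lock(), might_sleep(), ...) are replaced by their cfs_-prefixed libcfs
equivalents so that the same code builds on every platform libcfs targets. A
minimal sketch of the assumed Linux-side shims (illustrative only; the real
definitions live in the libcfs headers):

        typedef spinlock_t cfs_spinlock_t;
        #define cfs_spin_lock_init(l)  spin_lock_init(l)
        #define cfs_spin_lock(l)       spin_lock(l)
        #define cfs_spin_unlock(l)     spin_unlock(l)
        #define cfs_mutex_lock(m)      mutex_lock(m)
        #define cfs_mutex_unlock(m)    mutex_unlock(m)
        #define cfs_might_sleep()      might_sleep()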
@@ -276,14 +276,14 @@ static int osc_enq2ldlm_flags(__u32 enqflags)
  * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
  * pointers. Initialized in osc_init().
  */
-spinlock_t osc_ast_guard;
+cfs_spinlock_t osc_ast_guard;
 
 static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
 {
         struct osc_lock *olck;
 
         lock_res_and_lock(dlm_lock);
-        spin_lock(&osc_ast_guard);
+        cfs_spin_lock(&osc_ast_guard);
         olck = dlm_lock->l_ast_data;
         if (olck != NULL) {
                 struct cl_lock *lock = olck->ols_cl.cls_lock;
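
Note: the comment above says osc_ast_guard is initialized in osc_init(). A
hedged sketch of that initialization (osc_init() does much more; only the
cfs_spin_lock_init() line follows from the comment, the surrounding shape is
an assumption):

        int __init osc_init(void)
        {
                /* the global guard must exist before any osc lock is
                 * created and its l_ast_data pointer dereferenced */
                cfs_spin_lock_init(&osc_ast_guard);
                /* ... kmem caches, lu_device type registration, etc. ... */
                return 0;
        }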
@@ -303,7 +303,7 @@ static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
                 } else
                         olck = NULL;
         }
-        spin_unlock(&osc_ast_guard);
+        cfs_spin_unlock(&osc_ast_guard);
         unlock_res_and_lock(dlm_lock);
         return olck;
 }
@@ -451,11 +451,11 @@ static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
         LASSERT(dlmlock != NULL);
 
         lock_res_and_lock(dlmlock);
-        spin_lock(&osc_ast_guard);
+        cfs_spin_lock(&osc_ast_guard);
         LASSERT(dlmlock->l_ast_data == olck);
         LASSERT(olck->ols_lock == NULL);
         olck->ols_lock = dlmlock;
-        spin_unlock(&osc_ast_guard);
+        cfs_spin_unlock(&osc_ast_guard);
 
         /*
          * Lock might be not yet granted. In this case, completion ast
@@ -515,11 +515,11 @@ static int osc_lock_upcall(void *cookie, int errcode)
                         dlmlock = ldlm_handle2lock(&olck->ols_handle);
                         if (dlmlock != NULL) {
                                 lock_res_and_lock(dlmlock);
-                                spin_lock(&osc_ast_guard);
+                                cfs_spin_lock(&osc_ast_guard);
                                 LASSERT(olck->ols_lock == NULL);
                                 dlmlock->l_ast_data = NULL;
                                 olck->ols_handle.cookie = 0ULL;
-                                spin_unlock(&osc_ast_guard);
+                                cfs_spin_unlock(&osc_ast_guard);
                                 unlock_res_and_lock(dlmlock);
                                 LDLM_LOCK_PUT(dlmlock);
                         }
@@ -890,7 +890,7 @@ static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
         unsigned long            weight;
         ENTRY;
 
-        might_sleep();
+        cfs_might_sleep();
         /*
          * osc_ldlm_weigh_ast has a complex context since it might be called
          * because of lock cancellation or from user input. We have to make
@@ -988,14 +988,14 @@ static void osc_lock_to_lockless(const struct lu_env *env,
                         io->ci_lockreq == CILR_NEVER);
 
                 ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
-                ols->ols_locklessable = (io->ci_type != CIT_TRUNC) &&
+                ols->ols_locklessable = (io->ci_type != CIT_SETATTR) &&
                                 (io->ci_lockreq == CILR_MAYBE) &&
                                 (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
                 if (io->ci_lockreq == CILR_NEVER ||
                         /* lockless IO */
                     (ols->ols_locklessable && osc_object_is_contended(oob)) ||
                         /* lockless truncate */
-                    (io->ci_type == CIT_TRUNC &&
+                    (cl_io_is_trunc(io) &&
                      (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
                       osd->od_lockless_truncate)) {
                         ols->ols_locklessable = 1;
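
Note: CIT_TRUNC no longer exists as a separate io type; truncate is now a
sub-case of CIT_SETATTR and is detected with cl_io_is_trunc(). Assumed shape
of that helper (a sketch, not taken from this diff):

        static inline int cl_io_is_trunc(const struct cl_io *io)
        {
                /* a truncate is a setattr that changes the file size */
                return io->ci_type == CIT_SETATTR &&
                       (io->u.ci_setattr.sa_valid & ATTR_SIZE);
        }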
@@ -1020,13 +1020,6 @@ static int osc_lock_compatible(const struct osc_lock *qing,
         return ((qing_mode == CLM_READ) && (qed_mode == CLM_READ));
 }
 
-#ifndef list_for_each_entry_continue 
-#define list_for_each_entry_continue(pos, head, member)                 \
-        for (pos = list_entry(pos->member.next, typeof(*pos), member);  \
-             prefetch(pos->member.next), &pos->member != (head);        \
-             pos = list_entry(pos->member.next, typeof(*pos), member))
-#endif
-
 /**
  * Cancel all conflicting locks and wait for them to be destroyed.
  *
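
Note: the hand-rolled fallback above can be dropped because libcfs provides
cfs_list_for_each_entry_continue. Presumably it mirrors the removed macro; an
assumed definition, for reference:

        #define cfs_list_for_each_entry_continue(pos, head, member)          \
                for (pos = cfs_list_entry(pos->member.next, typeof(*pos),    \
                                          member);                           \
                     prefetch(pos->member.next), &pos->member != (head);     \
                     pos = cfs_list_entry(pos->member.next, typeof(*pos),    \
                                          member))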
@@ -1058,8 +1051,8 @@ static int osc_lock_enqueue_wait(const struct lu_env *env,
         if (olck->ols_glimpse)
                 return 0;
 
-        spin_lock(&hdr->coh_lock_guard);
-        list_for_each_entry_continue(scan, &hdr->coh_locks, cll_linkage) {
+        cfs_spin_lock(&hdr->coh_lock_guard);
+        cfs_list_for_each_entry_continue(scan, &hdr->coh_locks, cll_linkage) {
                 struct cl_lock_descr *cld = &scan->cll_descr;
                 const struct osc_lock *scan_ols;
 
@@ -1101,7 +1094,7 @@ static int osc_lock_enqueue_wait(const struct lu_env *env,
                 conflict = scan;
                 break;
         }
-        spin_unlock(&hdr->coh_lock_guard);
+        cfs_spin_unlock(&hdr->coh_lock_guard);
 
         if (conflict) {
                CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, will wait\n",
@@ -1115,68 +1108,9 @@ static int osc_lock_enqueue_wait(const struct lu_env *env,
 }
 
 /**
- * Deadlock avoidance for osc_lock_enqueue(). Consider the following scenario:
- *
- *     - Thread0: obtains PR:[0, 10]. Lock is busy.
- *
- *     - Thread1: enqueues PW:[5, 50]. Blocking ast is sent to
- *       PR:[0, 10], but cancellation of busy lock is postponed.
- *
- *     - Thread0: enqueues PR:[30, 40]. Lock is locally matched to
- *       PW:[5, 50], and thread0 waits for the lock completion never
- *       releasing PR:[0, 10]---deadlock.
- *
- * The second PR lock can be a glimpse lock (it is to deal with this
- * situation that ll_glimpse_size() has a second argument, preventing local
- * match of not-yet-granted locks, see bug 10295). A similar situation is
- * possible in the case of a memory-mapped user-level buffer.
- *
- * To prevent this we can detect a situation when current "thread" or "io"
- * already holds a lock on this object and either add LDLM_FL_BLOCK_GRANTED to
- * the ols->ols_flags, or prevent local match with PW locks.
- */
-static int osc_deadlock_is_possible(const struct lu_env *env,
-                                    struct cl_lock *lock)
-{
-        struct cl_object        *obj;
-        struct cl_object_header *head;
-        struct cl_lock          *scan;
-        struct osc_io           *oio;
-
-        int result;
-
-        ENTRY;
-
-        LASSERT(cl_lock_is_mutexed(lock));
-
-        oio  = osc_env_io(env);
-        obj  = lock->cll_descr.cld_obj;
-        head = cl_object_header(obj);
-
-        result = 0;
-        spin_lock(&head->coh_lock_guard);
-        list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
-                if (scan != lock) {
-                        struct osc_lock *oscan;
-
-                        oscan = osc_lock_at(scan);
-                        LASSERT(oscan != NULL);
-                        if (oscan->ols_owner == oio) {
-                                result = 1;
-                                break;
-                        }
-                }
-        }
-        spin_unlock(&head->coh_lock_guard);
-        RETURN(result);
-}
-
-/**
  * Implementation of cl_lock_operations::clo_enqueue() method for osc
  * layer. This initiates ldlm enqueue:
  *
- *     - checks for possible dead-lock conditions (osc_deadlock_is_possible());
- *
  *     - cancels conflicting locks early (osc_lock_enqueue_wait());
  *
  *     - calls osc_enqueue_base() to do actual enqueue.
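
Note: the deleted osc_deadlock_is_possible() took coh_lock_guard and walked
every lock on the object on every enqueue. The replacement strategy, visible
in the hunks below, is to stamp each osc_lock with its owning io (ols_owner)
at grant time and compare pointers. A hypothetical helper, shown only to
illustrate the idea (not part of the patch):

        static int osc_io_owns_lock(const struct lu_env *env,
                                    const struct osc_lock *ols)
        {
                /* O(1) owner comparison instead of an O(n) list walk */
                return ols->ols_owner == osc_env_io(env);
        }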
@@ -1208,8 +1142,6 @@ static int osc_lock_enqueue(const struct lu_env *env,
         osc_lock_build_res(env, obj, resname);
         osc_lock_build_policy(env, lock, policy);
         ols->ols_flags = osc_enq2ldlm_flags(enqflags);
-        if (osc_deadlock_is_possible(env, lock))
-                ols->ols_flags |= LDLM_FL_BLOCK_GRANTED;
         if (ols->ols_flags & LDLM_FL_HAS_INTENT)
                 ols->ols_glimpse = 1;
         if (!(enqflags & CEF_MUST))
@@ -1246,6 +1178,7 @@ static int osc_lock_enqueue(const struct lu_env *env,
                         }
                 } else {
                         ols->ols_state = OLS_GRANTED;
+                        ols->ols_owner = osc_env_io(env);
                 }
         }
         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
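
Note: with the line added above, ols_owner is now also set on the directly
granted enqueue path, not only when osc_lock_state() sees the lock reach
CLS_HELD. A hedged summary of the resulting ownership lifecycle, pieced
together from this patch:

        /* upcall, lock granted:   ols->ols_owner = osc_env_io(env);
         * state -> CLS_HELD:      ols->ols_owner = oio;
         * state leaves CLS_HELD:  ols->ols_owner = NULL;  (regular locks)
         * lockless locks:         the owner is kept (see the last two
         *                         hunks), so a later CEF_NEVER enqueue by
         *                         the same io can match the lock (b=22147) */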
@@ -1413,12 +1346,13 @@ static int osc_lock_has_pages(struct osc_lock *olck)
                 plist = &osc_env_info(env)->oti_plist;
                 cl_page_list_init(plist);
 
-                mutex_lock(&oob->oo_debug_mutex);
+                cfs_mutex_lock(&oob->oo_debug_mutex);
 
                 io->ci_obj = cl_object_top(obj);
                 cl_io_init(env, io, CIT_MISC, io->ci_obj);
                 cl_page_gang_lookup(env, obj, io,
-                                    descr->cld_start, descr->cld_end, plist, 0);
+                                    descr->cld_start, descr->cld_end, plist, 0,
+                                    NULL);
                 cl_lock_page_list_fixup(env, io, lock, plist);
                 if (plist->pl_nr > 0) {
                         CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
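
Note: cl_page_gang_lookup() has grown a trailing argument, and this caller
passes NULL to opt out of it. Its meaning is not visible in this diff, so the
prototype below is an assumption (the last parameter is guessed to be an
optional out-flag such as a resched indicator):

        int cl_page_gang_lookup(const struct lu_env *env,
                                struct cl_object *obj, struct cl_io *io,
                                pgoff_t start, pgoff_t end,
                                struct cl_page_list *plist, int nonblock,
                                int *resched);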
@@ -1429,7 +1363,7 @@ static int osc_lock_has_pages(struct osc_lock *olck)
                 cl_page_list_disown(env, io, plist);
                 cl_page_list_fini(env, plist);
                 cl_io_fini(env, io);
-                mutex_unlock(&oob->oo_debug_mutex);
+                cfs_mutex_unlock(&oob->oo_debug_mutex);
                 cl_env_nested_put(&nest, env);
         } else
                 result = 0;
@@ -1476,13 +1410,14 @@ static void osc_lock_state(const struct lu_env *env,
                            enum cl_lock_state state)
 {
         struct osc_lock *lock = cl2osc_lock(slice);
-        struct osc_io   *oio  = osc_env_io(env);
 
         /*
          * XXX multiple io contexts can use the lock at the same time.
          */
         LINVRNT(osc_lock_invariant(lock));
         if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
+                struct osc_io *oio = osc_env_io(env);
+
                 LASSERT(lock->ols_owner == NULL);
                 lock->ols_owner = oio;
         } else if (state != CLS_HELD)
@@ -1529,20 +1464,17 @@ static int osc_lock_fits_into(const struct lu_env *env,
                  *     will not release sublock1. Bang!
                  */
                 if (ols->ols_state < OLS_GRANTED ||
-                        ols->ols_state > OLS_RELEASED)
+                    ols->ols_state > OLS_RELEASED)
                         return 0;
         } else if (need->cld_enq_flags & CEF_MUST) {
-                 /*
+                /*
                 * If the lock has never been enqueued, it cannot be matched,
                 * because the enqueue process brings in much information
                  * which can be used to determine things such as lockless,
                  * CEF_MUST, etc.
                  */
-                if (ols->ols_state < OLS_GRANTED ||
-                        ols->ols_state > OLS_RELEASED)
-                        return 0;
                 if (ols->ols_state < OLS_UPCALL_RECEIVED &&
-                        ols->ols_locklessable)
+                    ols->ols_locklessable)
                         return 0;
         }
         return 1;
@@ -1614,19 +1546,19 @@ static void osc_lock_lockless_state(const struct lu_env *env,
                                     enum cl_lock_state state)
 {
         struct osc_lock *lock = cl2osc_lock(slice);
-        struct osc_io   *oio  = osc_env_io(env);
 
         LINVRNT(osc_lock_invariant(lock));
         if (state == CLS_HELD) {
-                LASSERT(lock->ols_owner == NULL);
+                struct osc_io *oio  = osc_env_io(env);
+
+                LASSERT(ergo(lock->ols_owner, lock->ols_owner == oio));
                 lock->ols_owner = oio;
 
                 /* set the io to be lockless if this lock is for io's
                  * host object */
                 if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
                         oio->oi_lockless = 1;
-        } else
-                lock->ols_owner = NULL;
+        }
 }
 
 static int osc_lock_lockless_fits_into(const struct lu_env *env,
@@ -1634,7 +1566,13 @@ static int osc_lock_lockless_fits_into(const struct lu_env *env,
                                        const struct cl_lock_descr *need,
                                        const struct cl_io *io)
 {
-        return 0;
+        struct osc_lock *lock = cl2osc_lock(slice);
+
+        if (!(need->cld_enq_flags & CEF_NEVER))
+                return 0;
+
+        /* a lockless lock should only be used by its owning io (b=22147) */
+        return (lock->ols_owner == osc_env_io(env));
 }
 
 static const struct cl_lock_operations osc_lock_lockless_ops = {
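
Note: the two hunks above are the heart of b=22147. osc_lock_lockless_state()
no longer clears ols_owner when the lock leaves CLS_HELD, and
osc_lock_lockless_fits_into() now matches an existing lockless lock, but only
for a CEF_NEVER enqueue issued by the owning io. A caller's-eye sketch
(hypothetical usage; the variable names and the cl_lock_request() call shape
are assumptions):

        /* io1 created a lockless lock on this object, so ols_owner == io1
         * and is kept across unuse.  A second CEF_NEVER enqueue from io1
         * on the same extent matches the existing lock instead of creating
         * a new one; an enqueue from any other io still gets no match. */
        need->cld_enq_flags = CEF_NEVER;
        lock = cl_lock_request(env, io1, need, "osc", cfs_current());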