- * Deadlock avoidance for osc_lock_enqueue(). Consider following scenario:
- *
- * - Thread0: obtains PR:[0, 10]. Lock is busy.
- *
- * - Thread1: enqueues PW:[5, 50]. Blocking ast is sent to
- * PR:[0, 10], but cancellation of busy lock is postponed.
- *
- * - Thread0: enqueue PR:[30, 40]. Lock is locally matched to
- * PW:[5, 50], and thread0 waits for the lock completion never
- * releasing PR:[0, 10]---deadlock.
- *
- * The second PR lock can be a glimpse lock (it is to deal with that situation
- * that ll_glimpse_size() has a second argument, preventing local match of
- * not-yet-granted locks, see bug 10295). A similar situation is possible in
- * the case of a memory-mapped user-level buffer.
- *
- * To prevent this we can detect a situation when current "thread" or "io"
- * already holds a lock on this object and either add LDLM_FL_BLOCK_GRANTED to
- * the ols->ols_flags, or prevent local match with PW locks.
- */
-static int osc_deadlock_is_possible(const struct lu_env *env,
- struct cl_lock *lock)
-{
- struct cl_object *obj;
- struct cl_object_header *head;
- struct cl_lock *scan;
- struct osc_io *oio;
-
- int result;
-
- ENTRY;
-
- LASSERT(cl_lock_is_mutexed(lock));
-
- oio = osc_env_io(env);
- obj = lock->cll_descr.cld_obj;
- head = cl_object_header(obj);
-
- result = 0;
- cfs_spin_lock(&head->coh_lock_guard);
- cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
- if (scan != lock) {
- struct osc_lock *oscan;
-
- oscan = osc_lock_at(scan);
- LASSERT(oscan != NULL);
- if (oscan->ols_owner == oio) {
- result = 1;
- break;
- }
- }
- }
- cfs_spin_unlock(&head->coh_lock_guard);
- RETURN(result);
-}
-
-/**