LU-1346 libcfs: replace libcfs wrappers with kernel API
fs/lustre-release.git: lustre/obdclass/cl_lock.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Whamcloud, Inc.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Client Extent Lock.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_CLASS
42
43 #include <obd_class.h>
44 #include <obd_support.h>
45 #include <lustre_fid.h>
46 #include <libcfs/list.h>
47 /* lu_time_global_{init,fini}() */
48 #include <lu_time.h>
49
50 #include <cl_object.h>
51 #include "cl_internal.h"
52
53 /** Lock class of cl_lock::cll_guard */
54 static struct lock_class_key cl_lock_guard_class;
55 static cfs_mem_cache_t *cl_lock_kmem;
56
57 static struct lu_kmem_descr cl_lock_caches[] = {
58         {
59                 .ckd_cache = &cl_lock_kmem,
60                 .ckd_name  = "cl_lock_kmem",
61                 .ckd_size  = sizeof (struct cl_lock)
62         },
63         {
64                 .ckd_cache = NULL
65         }
66 };
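
/*
 * Illustrative note (not part of the original file): a NULL-terminated
 * lu_kmem_descr array like cl_lock_caches[] is conventionally handed to
 * lu_kmem_init()/lu_kmem_fini() on the module init/fini path, roughly:
 *
 *	rc = lu_kmem_init(cl_lock_caches);	creates cl_lock_kmem
 *	...
 *	lu_kmem_fini(cl_lock_caches);		destroys it on cleanup
 */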
67
68 /**
69  * Basic lock invariant that is maintained at all times. Caller either has a
70  * reference to \a lock, or somehow assures that \a lock cannot be freed.
71  *
72  * \see cl_lock_invariant()
73  */
74 static int cl_lock_invariant_trusted(const struct lu_env *env,
75                                      const struct cl_lock *lock)
76 {
77         return  ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
78                 cfs_atomic_read(&lock->cll_ref) >= lock->cll_holds &&
79                 lock->cll_holds >= lock->cll_users &&
80                 lock->cll_holds >= 0 &&
81                 lock->cll_users >= 0 &&
82                 lock->cll_depth >= 0;
83 }
84
85 /**
86  * Stronger lock invariant, checking that caller has a reference on a lock.
87  *
88  * \see cl_lock_invariant_trusted()
89  */
90 static int cl_lock_invariant(const struct lu_env *env,
91                              const struct cl_lock *lock)
92 {
93         int result;
94
95         result = cfs_atomic_read(&lock->cll_ref) > 0 &&
96                 cl_lock_invariant_trusted(env, lock);
97         if (!result && env != NULL)
98                 CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
99         return result;
100 }
101
102 /**
103  * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock.
104  */
105 static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
106 {
107         return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
108 }
109
110 /**
111  * Returns a set of counters for this lock, depending on a lock nesting.
112  */
113 static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
114                                                    const struct cl_lock *lock)
115 {
116         struct cl_thread_info *info;
117         enum clt_nesting_level nesting;
118
119         info = cl_env_info(env);
120         nesting = cl_lock_nesting(lock);
121         LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
122         return &info->clt_counters[nesting];
123 }
124
125 static void cl_lock_trace0(int level, const struct lu_env *env,
126                            const char *prefix, const struct cl_lock *lock,
127                            const char *func, const int line)
128 {
129         struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
130         CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)"
131                       "(%p/%d/%d) at %s():%d\n",
132                prefix, lock, cfs_atomic_read(&lock->cll_ref),
133                lock->cll_guarder, lock->cll_depth,
134                lock->cll_state, lock->cll_error, lock->cll_holds,
135                lock->cll_users, lock->cll_flags,
136                env, h->coh_nesting, cl_lock_nr_mutexed(env),
137                func, line);
138 }
139 #define cl_lock_trace(level, env, prefix, lock)                         \
140         cl_lock_trace0(level, env, prefix, lock, __FUNCTION__, __LINE__)
141
142 #define RETIP ((unsigned long)__builtin_return_address(0))
143
144 #ifdef CONFIG_LOCKDEP
145 static struct lock_class_key cl_lock_key;
146
147 static void cl_lock_lockdep_init(struct cl_lock *lock)
148 {
149         lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
150 }
151
152 static void cl_lock_lockdep_acquire(const struct lu_env *env,
153                                     struct cl_lock *lock, __u32 enqflags)
154 {
155         cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
156 #ifdef HAVE_LOCK_MAP_ACQUIRE
157         lock_map_acquire(&lock->dep_map);
158 #else  /* HAVE_LOCK_MAP_ACQUIRE */
159         lock_acquire(&lock->dep_map, !!(enqflags & CEF_ASYNC),
160                      /* try: */ 0, lock->cll_descr.cld_mode <= CLM_READ,
161                      /* check: */ 2, RETIP);
162 #endif /* HAVE_LOCK_MAP_ACQUIRE */
163 }
164
165 static void cl_lock_lockdep_release(const struct lu_env *env,
166                                     struct cl_lock *lock)
167 {
168         cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
169         lock_release(&lock->dep_map, 0, RETIP);
170 }
171
172 #else /* !CONFIG_LOCKDEP */
173
174 static void cl_lock_lockdep_init(struct cl_lock *lock)
175 {}
176 static void cl_lock_lockdep_acquire(const struct lu_env *env,
177                                     struct cl_lock *lock, __u32 enqflags)
178 {}
179 static void cl_lock_lockdep_release(const struct lu_env *env,
180                                     struct cl_lock *lock)
181 {}
182
183 #endif /* !CONFIG_LOCKDEP */
184
185 /**
186  * Adds lock slice to the compound lock.
187  *
188  * This is called by cl_object_operations::coo_lock_init() methods to add a
189  * per-layer state to the lock. New state is added at the end of
190  * cl_lock::cll_layers list, that is, it is at the bottom of the stack.
191  *
192  * \see cl_req_slice_add(), cl_page_slice_add(), cl_io_slice_add()
193  */
194 void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
195                        struct cl_object *obj,
196                        const struct cl_lock_operations *ops)
197 {
198         ENTRY;
199         slice->cls_lock = lock;
200         cfs_list_add_tail(&slice->cls_linkage, &lock->cll_layers);
201         slice->cls_obj = obj;
202         slice->cls_ops = ops;
203         EXIT;
204 }
205 EXPORT_SYMBOL(cl_lock_slice_add);
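
/*
 * Illustrative sketch (not part of the original file): a layer's
 * cl_object_operations::coo_lock_init() typically allocates its private
 * slice and attaches it with cl_lock_slice_add(). Everything named
 * "foo_*" below is hypothetical.
 */
#if 0
static int foo_lock_init(const struct lu_env *env, struct cl_object *obj,
                         struct cl_lock *lock, const struct cl_io *io)
{
        struct foo_lock *fl;

        OBD_SLAB_ALLOC_PTR_GFP(fl, foo_lock_kmem, CFS_ALLOC_IO);
        if (fl == NULL)
                return -ENOMEM;
        /* the slice goes to the tail of cll_layers: bottom of the stack */
        cl_lock_slice_add(lock, &fl->fl_cl, obj, &foo_lock_ops);
        return 0;
}
#endif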
206
207 /**
208  * Returns true iff a lock with the mode \a has provides at least the same
209  * guarantees as a lock with the mode \a need.
210  */
211 int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
212 {
213         LINVRNT(need == CLM_READ || need == CLM_WRITE ||
214                 need == CLM_PHANTOM || need == CLM_GROUP);
215         LINVRNT(has == CLM_READ || has == CLM_WRITE ||
216                 has == CLM_PHANTOM || has == CLM_GROUP);
217         CLASSERT(CLM_PHANTOM < CLM_READ);
218         CLASSERT(CLM_READ < CLM_WRITE);
219         CLASSERT(CLM_WRITE < CLM_GROUP);
220
221         if (has != CLM_GROUP)
222                 return need <= has;
223         else
224                 return need == has;
225 }
226 EXPORT_SYMBOL(cl_lock_mode_match);
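
/*
 * Illustrative examples (not part of the original file) of the rules
 * above: a non-group mode is matched by any equal or stronger mode,
 * while a group lock only matches another group lock:
 *
 *	cl_lock_mode_match(CLM_WRITE, CLM_READ)  == 1   write covers read
 *	cl_lock_mode_match(CLM_READ,  CLM_WRITE) == 0   read is too weak
 *	cl_lock_mode_match(CLM_GROUP, CLM_READ)  == 0   group matches group only
 *	cl_lock_mode_match(CLM_GROUP, CLM_GROUP) == 1
 */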
227
228 /**
229  * Returns true iff extent portions of lock descriptions match.
230  */
231 int cl_lock_ext_match(const struct cl_lock_descr *has,
232                       const struct cl_lock_descr *need)
233 {
234         return
235                 has->cld_start <= need->cld_start &&
236                 has->cld_end >= need->cld_end &&
237                 cl_lock_mode_match(has->cld_mode, need->cld_mode) &&
238                 (has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid);
239 }
240 EXPORT_SYMBOL(cl_lock_ext_match);
241
242 /**
243  * Returns true iff a lock with the description \a has provides at least the
244  * same guarantees as a lock with the description \a need.
245  */
246 int cl_lock_descr_match(const struct cl_lock_descr *has,
247                         const struct cl_lock_descr *need)
248 {
249         return
250                 cl_object_same(has->cld_obj, need->cld_obj) &&
251                 cl_lock_ext_match(has, need);
252 }
253 EXPORT_SYMBOL(cl_lock_descr_match);
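
/*
 * Illustrative sketch (not part of the original file): assuming
 * cld_start/cld_end are page indices, an existing whole-file write lock
 * satisfies a read request for pages [0, 255] of the same object. The
 * "obj" and "held" variables are hypothetical, set up by the caller.
 */
#if 0
        struct cl_lock_descr need = {
                .cld_obj   = obj,       /* same cl_object as "held" covers */
                .cld_start = 0,         /* first page of the needed extent */
                .cld_end   = 255,       /* last page, inclusive */
                .cld_mode  = CLM_READ
        };

        if (cl_lock_descr_match(&held->cll_descr, &need)) {
                /* "held" provides at least the guarantees of "need" */
        }
#endif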
254
255 static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
256 {
257         struct cl_object *obj = lock->cll_descr.cld_obj;
258
259         LINVRNT(!cl_lock_is_mutexed(lock));
260
261         ENTRY;
262         cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
263         cfs_might_sleep();
264         while (!cfs_list_empty(&lock->cll_layers)) {
265                 struct cl_lock_slice *slice;
266
267                 slice = cfs_list_entry(lock->cll_layers.next,
268                                        struct cl_lock_slice, cls_linkage);
269                 cfs_list_del_init(lock->cll_layers.next);
270                 slice->cls_ops->clo_fini(env, slice);
271         }
272         cfs_atomic_dec(&cl_object_site(obj)->cs_locks.cs_total);
273         cfs_atomic_dec(&cl_object_site(obj)->cs_locks_state[lock->cll_state]);
274         lu_object_ref_del_at(&obj->co_lu, lock->cll_obj_ref, "cl_lock", lock);
275         cl_object_put(env, obj);
276         lu_ref_fini(&lock->cll_reference);
277         lu_ref_fini(&lock->cll_holders);
278         mutex_destroy(&lock->cll_guard);
279         OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
280         EXIT;
281 }
282
283 /**
284  * Releases a reference on a lock.
285  *
286  * When the last reference is released, the lock is returned to the cache,
287  * unless it is in cl_lock_state::CLS_FREEING state, in which case it is
288  * destroyed immediately.
289  *
290  * \see cl_object_put(), cl_page_put()
291  */
292 void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
293 {
294         struct cl_object        *obj;
295         struct cl_site          *site;
296
297         LINVRNT(cl_lock_invariant(env, lock));
298         ENTRY;
299         obj = lock->cll_descr.cld_obj;
300         LINVRNT(obj != NULL);
301         site = cl_object_site(obj);
302
303         CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
304                cfs_atomic_read(&lock->cll_ref), lock, RETIP);
305
306         if (cfs_atomic_dec_and_test(&lock->cll_ref)) {
307                 if (lock->cll_state == CLS_FREEING) {
308                         LASSERT(cfs_list_empty(&lock->cll_linkage));
309                         cl_lock_free(env, lock);
310                 }
311                 cfs_atomic_dec(&site->cs_locks.cs_busy);
312         }
313         EXIT;
314 }
315 EXPORT_SYMBOL(cl_lock_put);
316
317 /**
318  * Acquires an additional reference to a lock.
319  *
320  * This can only be called by a caller already possessing a reference to
321  * \a lock.
322  *
323  * \see cl_object_get(), cl_page_get()
324  */
325 void cl_lock_get(struct cl_lock *lock)
326 {
327         LINVRNT(cl_lock_invariant(NULL, lock));
328         CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
329                cfs_atomic_read(&lock->cll_ref), lock, RETIP);
330         cfs_atomic_inc(&lock->cll_ref);
331 }
332 EXPORT_SYMBOL(cl_lock_get);
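
/*
 * Illustrative sketch (not part of the original file): a thread that
 * already owns a reference can take an extra one before handing the lock
 * to another context, which later drops it with cl_lock_put(). The
 * hand-off function and variables are hypothetical.
 */
#if 0
        cl_lock_get(lock);              /* legal: caller already has a ref */
        hand_off_to_consumer(lock);     /* hypothetical hand-off */
        /* ... in the consumer, when done: */
        cl_lock_put(env, lock);         /* frees the lock if this was the
                                         * last ref and state is CLS_FREEING */
#endif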
333
334 /**
335  * Acquires a reference to a lock.
336  *
337  * This is much like cl_lock_get(), except that this function can be used to
338  * acquire the initial reference to a cached lock. The caller has to deal
339  * with all possible races. Use with care!
340  *
341  * \see cl_page_get_trust()
342  */
343 void cl_lock_get_trust(struct cl_lock *lock)
344 {
345         struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
346
347         CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
348                cfs_atomic_read(&lock->cll_ref), lock, RETIP);
349         if (cfs_atomic_inc_return(&lock->cll_ref) == 1)
350                 cfs_atomic_inc(&site->cs_locks.cs_busy);
351 }
352 EXPORT_SYMBOL(cl_lock_get_trust);
353
354 /**
355  * Helper function to destroy a lock that wasn't completely initialized.
356  *
357  * Other threads can acquire references to the top-lock through its
358  * sub-locks. Hence, it cannot be cl_lock_free()-ed immediately.
359  */
360 static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
361 {
362         cl_lock_mutex_get(env, lock);
363         cl_lock_cancel(env, lock);
364         cl_lock_delete(env, lock);
365         cl_lock_mutex_put(env, lock);
366         cl_lock_put(env, lock);
367 }
368
369 static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
370                                      struct cl_object *obj,
371                                      const struct cl_io *io,
372                                      const struct cl_lock_descr *descr)
373 {
374         struct cl_lock          *lock;
375         struct lu_object_header *head;
376         struct cl_site          *site = cl_object_site(obj);
377
378         ENTRY;
379         OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, CFS_ALLOC_IO);
380         if (lock != NULL) {
381                 cfs_atomic_set(&lock->cll_ref, 1);
382                 lock->cll_descr = *descr;
383                 lock->cll_state = CLS_NEW;
384                 cl_object_get(obj);
385                 lock->cll_obj_ref = lu_object_ref_add(&obj->co_lu,
386                                                       "cl_lock", lock);
387                 CFS_INIT_LIST_HEAD(&lock->cll_layers);
388                 CFS_INIT_LIST_HEAD(&lock->cll_linkage);
389                 CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
390                 lu_ref_init(&lock->cll_reference);
391                 lu_ref_init(&lock->cll_holders);
392                 mutex_init(&lock->cll_guard);
393                 lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
394                 cfs_waitq_init(&lock->cll_wq);
395                 head = obj->co_lu.lo_header;
396                 cfs_atomic_inc(&site->cs_locks_state[CLS_NEW]);
397                 cfs_atomic_inc(&site->cs_locks.cs_total);
398                 cfs_atomic_inc(&site->cs_locks.cs_created);
399                 cl_lock_lockdep_init(lock);
400                 cfs_list_for_each_entry(obj, &head->loh_layers,
401                                         co_lu.lo_linkage) {
402                         int err;
403
404                         err = obj->co_ops->coo_lock_init(env, obj, lock, io);
405                         if (err != 0) {
406                                 cl_lock_finish(env, lock);
407                                 lock = ERR_PTR(err);
408                                 break;
409                         }
410                 }
411         } else
412                 lock = ERR_PTR(-ENOMEM);
413         RETURN(lock);
414 }
415
416 /**
417  * Transfer the lock into INTRANSIT state and return the original state.
418  *
419  * \pre  state: CLS_CACHED, CLS_HELD or CLS_ENQUEUED
420  * \post state: CLS_INTRANSIT
421  * \see CLS_INTRANSIT
422  */
423 enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
424                                      struct cl_lock *lock)
425 {
426         enum cl_lock_state state = lock->cll_state;
427
428         LASSERT(cl_lock_is_mutexed(lock));
429         LASSERT(state != CLS_INTRANSIT);
430         LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
431                  "Malformed lock state %d.\n", state);
432
433         cl_lock_state_set(env, lock, CLS_INTRANSIT);
434         lock->cll_intransit_owner = cfs_current();
435         cl_lock_hold_add(env, lock, "intransit", cfs_current());
436         return state;
437 }
438 EXPORT_SYMBOL(cl_lock_intransit);
439
440 /**
441  * Exits the INTRANSIT state and restores the lock to the given original state.
442  */
443 void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
444                        enum cl_lock_state state)
445 {
446         LASSERT(cl_lock_is_mutexed(lock));
447         LASSERT(lock->cll_state == CLS_INTRANSIT);
448         LASSERT(state != CLS_INTRANSIT);
449         LASSERT(lock->cll_intransit_owner == cfs_current());
450
451         lock->cll_intransit_owner = NULL;
452         cl_lock_state_set(env, lock, state);
453         cl_lock_unhold(env, lock, "intransit", cfs_current());
454 }
455 EXPORT_SYMBOL(cl_lock_extransit);
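
/*
 * Illustrative sketch (not part of the original file): the usual pairing
 * of cl_lock_intransit()/cl_lock_extransit(), as in cl_use_try() below:
 * park the lock in INTRANSIT across a blocking transition, then restore
 * (or advance) its state. "env" and "lock" are assumed from the caller.
 */
#if 0
        enum cl_lock_state was;

        was = cl_lock_intransit(env, lock);     /* owned by current thread */
        /* ... potentially blocking work on the lock ... */
        cl_lock_extransit(env, lock, was);      /* back to the saved state */
#endif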
456
457 /**
458  * Returns true iff the lock is in INTRANSIT state and is not owned by the current thread.
459  */
460 int cl_lock_is_intransit(struct cl_lock *lock)
461 {
462         LASSERT(cl_lock_is_mutexed(lock));
463         return lock->cll_state == CLS_INTRANSIT &&
464                lock->cll_intransit_owner != cfs_current();
465 }
466 EXPORT_SYMBOL(cl_lock_is_intransit);
467 /**
468  * Returns true iff lock is "suitable" for given io. E.g., locks acquired by
469  * truncate and O_APPEND cannot be reused for read/non-append-write, as they
470  * cover multiple stripes and can trigger cascading timeouts.
471  */
472 static int cl_lock_fits_into(const struct lu_env *env,
473                              const struct cl_lock *lock,
474                              const struct cl_lock_descr *need,
475                              const struct cl_io *io)
476 {
477         const struct cl_lock_slice *slice;
478
479         LINVRNT(cl_lock_invariant_trusted(env, lock));
480         ENTRY;
481         cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
482                 if (slice->cls_ops->clo_fits_into != NULL &&
483                     !slice->cls_ops->clo_fits_into(env, slice, need, io))
484                         RETURN(0);
485         }
486         RETURN(1);
487 }
488
489 static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
490                                       struct cl_object *obj,
491                                       const struct cl_io *io,
492                                       const struct cl_lock_descr *need)
493 {
494         struct cl_lock          *lock;
495         struct cl_object_header *head;
496         struct cl_site          *site;
497
498         ENTRY;
499
500         head = cl_object_header(obj);
501         site = cl_object_site(obj);
502         LINVRNT_SPIN_LOCKED(&head->coh_lock_guard);
503         cfs_atomic_inc(&site->cs_locks.cs_lookup);
504         cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
505                 int matched;
506
507                 matched = cl_lock_ext_match(&lock->cll_descr, need) &&
508                           lock->cll_state < CLS_FREEING &&
509                           lock->cll_error == 0 &&
510                           !(lock->cll_flags & CLF_CANCELLED) &&
511                           cl_lock_fits_into(env, lock, need, io);
512                 CDEBUG(D_DLMTRACE, "has: "DDESCR"(%d) need: "DDESCR": %d\n",
513                        PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
514                        matched);
515                 if (matched) {
516                         cl_lock_get_trust(lock);
517                         cfs_atomic_inc(&cl_object_site(obj)->cs_locks.cs_hit);
518                         RETURN(lock);
519                 }
520         }
521         RETURN(NULL);
522 }
523
524 /**
525  * Returns a lock matching description \a need.
526  *
527  * This is the main entry point into the cl_lock caching interface. First, a
528  * cache (implemented as a per-object linked list) is consulted. If the lock
529  * is found there, it is returned immediately. Otherwise a new lock is
530  * allocated and returned. In either case, an additional reference is taken.
531  *
532  * \see cl_object_find(), cl_page_find()
533  */
534 static struct cl_lock *cl_lock_find(const struct lu_env *env,
535                                     const struct cl_io *io,
536                                     const struct cl_lock_descr *need)
537 {
538         struct cl_object_header *head;
539         struct cl_object        *obj;
540         struct cl_lock          *lock;
541         struct cl_site          *site;
542
543         ENTRY;
544
545         obj  = need->cld_obj;
546         head = cl_object_header(obj);
547         site = cl_object_site(obj);
548
549         spin_lock(&head->coh_lock_guard);
550         lock = cl_lock_lookup(env, obj, io, need);
551         spin_unlock(&head->coh_lock_guard);
552
553         if (lock == NULL) {
554                 lock = cl_lock_alloc(env, obj, io, need);
555                 if (!IS_ERR(lock)) {
556                         struct cl_lock *ghost;
557
558                         spin_lock(&head->coh_lock_guard);
559                         ghost = cl_lock_lookup(env, obj, io, need);
560                         if (ghost == NULL) {
561                                 cfs_list_add_tail(&lock->cll_linkage,
562                                                   &head->coh_locks);
563                                 spin_unlock(&head->coh_lock_guard);
564                                 cfs_atomic_inc(&site->cs_locks.cs_busy);
565                         } else {
566                                 spin_unlock(&head->coh_lock_guard);
567                                 /*
568                                  * Other threads can acquire references to the
569                                  * top-lock through its sub-locks. Hence, it
570                                  * cannot be cl_lock_free()-ed immediately.
571                                  */
572                                 cl_lock_finish(env, lock);
573                                 lock = ghost;
574                         }
575                 }
576         }
577         RETURN(lock);
578 }
579
580 /**
581  * Returns an existing lock matching the given description. This is similar
582  * to cl_lock_find() except that no new lock is created, and the returned
583  * lock is guaranteed to be in enum cl_lock_state::CLS_HELD state.
584  */
585 struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
586                              const struct cl_lock_descr *need,
587                              const char *scope, const void *source)
588 {
589         struct cl_object_header *head;
590         struct cl_object        *obj;
591         struct cl_lock          *lock;
592
593         obj  = need->cld_obj;
594         head = cl_object_header(obj);
595
596         do {
597                 spin_lock(&head->coh_lock_guard);
598                 lock = cl_lock_lookup(env, obj, io, need);
599                 spin_unlock(&head->coh_lock_guard);
600                 if (lock == NULL)
601                         return NULL;
602
603                 cl_lock_mutex_get(env, lock);
604                 if (lock->cll_state == CLS_INTRANSIT)
605                         /* The return value is deliberately ignored. */
606                         cl_lock_state_wait(env, lock);
607                 if (lock->cll_state == CLS_FREEING) {
608                         cl_lock_mutex_put(env, lock);
609                         cl_lock_put(env, lock);
610                         lock = NULL;
611                 }
612         } while (lock == NULL);
613
614         cl_lock_hold_add(env, lock, scope, source);
615         cl_lock_user_add(env, lock);
616         if (lock->cll_state == CLS_CACHED)
617                 cl_use_try(env, lock, 1);
618         if (lock->cll_state == CLS_HELD) {
619                 cl_lock_mutex_put(env, lock);
620                 cl_lock_lockdep_acquire(env, lock, 0);
621                 cl_lock_put(env, lock);
622         } else {
623                 cl_unuse_try(env, lock);
624                 cl_lock_unhold(env, lock, scope, source);
625                 cl_lock_mutex_put(env, lock);
626                 cl_lock_put(env, lock);
627                 lock = NULL;
628         }
629
630         return lock;
631 }
632 EXPORT_SYMBOL(cl_lock_peek);
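
/*
 * Illustrative sketch (not part of the original file): probing the cache
 * with cl_lock_peek(). On success the caller owns a hold and a user on a
 * CLS_HELD lock; releasing them via cl_unuse() and cl_lock_unhold() is an
 * assumption based on how holds are paired elsewhere in this file. The
 * "env", "io" and "need" variables are assumed from the caller.
 */
#if 0
        lock = cl_lock_peek(env, io, need, "peek-example", cfs_current());
        if (lock != NULL) {
                /* ... operate under the held lock ... */
                cl_unuse(env, lock);
                cl_lock_unhold(env, lock, "peek-example", cfs_current());
        }
#endif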
633
634 /**
635  * Returns a slice within a lock, corresponding to the given layer in the
636  * device stack.
637  *
638  * \see cl_page_at()
639  */
640 const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
641                                        const struct lu_device_type *dtype)
642 {
643         const struct cl_lock_slice *slice;
644
645         LINVRNT(cl_lock_invariant_trusted(NULL, lock));
646         ENTRY;
647
648         cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
649                 if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
650                         RETURN(slice);
651         }
652         RETURN(NULL);
653 }
654 EXPORT_SYMBOL(cl_lock_at);
655
656 static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
657 {
658         struct cl_thread_counters *counters;
659
660         counters = cl_lock_counters(env, lock);
661         lock->cll_depth++;
662         counters->ctc_nr_locks_locked++;
663         lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
664         cl_lock_trace(D_TRACE, env, "got mutex", lock);
665 }
666
667 /**
668  * Locks cl_lock object.
669  *
670  * This is used to manipulate cl_lock fields, and to serialize state
671  * transitions in the lock state machine.
672  *
673  * \post cl_lock_is_mutexed(lock)
674  *
675  * \see cl_lock_mutex_put()
676  */
677 void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
678 {
679         LINVRNT(cl_lock_invariant(env, lock));
680
681         if (lock->cll_guarder == cfs_current()) {
682                 LINVRNT(cl_lock_is_mutexed(lock));
683                 LINVRNT(lock->cll_depth > 0);
684         } else {
685                 struct cl_object_header *hdr;
686                 struct cl_thread_info   *info;
687                 int i;
688
689                 LINVRNT(lock->cll_guarder != cfs_current());
690                 hdr = cl_object_header(lock->cll_descr.cld_obj);
691                 /*
692                  * Check that mutexes are taken in bottom-to-top order.
693                  */
694                 info = cl_env_info(env);
695                 for (i = 0; i < hdr->coh_nesting; ++i)
696                         LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
697                 mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
698                 lock->cll_guarder = cfs_current();
699                 LINVRNT(lock->cll_depth == 0);
700         }
701         cl_lock_mutex_tail(env, lock);
702 }
703 EXPORT_SYMBOL(cl_lock_mutex_get);
704
705 /**
706  * Try-locks cl_lock object.
707  *
708  * \retval 0 \a lock was successfully locked
709  *
710  * \retval -EBUSY \a lock cannot be locked right now
711  *
712  * \post ergo(result == 0, cl_lock_is_mutexed(lock))
713  *
714  * \see cl_lock_mutex_get()
715  */
716 int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
717 {
718         int result;
719
720         LINVRNT(cl_lock_invariant_trusted(env, lock));
721         ENTRY;
722
723         result = 0;
724         if (lock->cll_guarder == cfs_current()) {
725                 LINVRNT(lock->cll_depth > 0);
726                 cl_lock_mutex_tail(env, lock);
727         } else if (mutex_trylock(&lock->cll_guard)) {
728                 LINVRNT(lock->cll_depth == 0);
729                 lock->cll_guarder = cfs_current();
730                 cl_lock_mutex_tail(env, lock);
731         } else
732                 result = -EBUSY;
733         RETURN(result);
734 }
735 EXPORT_SYMBOL(cl_lock_mutex_try);
736
737 /**
738  * Unlocks cl_lock object.
739  *
740  * \pre cl_lock_is_mutexed(lock)
741  *
742  * \see cl_lock_mutex_get()
743  */
744 void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
745 {
746         struct cl_thread_counters *counters;
747
748         LINVRNT(cl_lock_invariant(env, lock));
749         LINVRNT(cl_lock_is_mutexed(lock));
750         LINVRNT(lock->cll_guarder == cfs_current());
751         LINVRNT(lock->cll_depth > 0);
752
753         counters = cl_lock_counters(env, lock);
754         LINVRNT(counters->ctc_nr_locks_locked > 0);
755
756         cl_lock_trace(D_TRACE, env, "put mutex", lock);
757         lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
758         counters->ctc_nr_locks_locked--;
759         if (--lock->cll_depth == 0) {
760                 lock->cll_guarder = NULL;
761                 mutex_unlock(&lock->cll_guard);
762         }
763 }
764 EXPORT_SYMBOL(cl_lock_mutex_put);
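
/*
 * Illustrative sketch (not part of the original file): the lock mutex is
 * recursive for the owning thread, so a helper called under the mutex may
 * safely take it again. "env" and "lock" are assumed from the caller.
 */
#if 0
        cl_lock_mutex_get(env, lock);   /* cll_depth becomes 1 */
        cl_lock_mutex_get(env, lock);   /* same owner: cll_depth becomes 2 */
        LASSERT(cl_lock_is_mutexed(lock));
        cl_lock_mutex_put(env, lock);   /* cll_depth back to 1 */
        cl_lock_mutex_put(env, lock);   /* cll_depth 0, mutex released */
#endif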
765
766 /**
767  * Returns true iff lock's mutex is owned by the current thread.
768  */
769 int cl_lock_is_mutexed(struct cl_lock *lock)
770 {
771         return lock->cll_guarder == cfs_current();
772 }
773 EXPORT_SYMBOL(cl_lock_is_mutexed);
774
775 /**
776  * Returns the number of cl_lock mutexes held by the current thread (environment).
777  */
778 int cl_lock_nr_mutexed(const struct lu_env *env)
779 {
780         struct cl_thread_info *info;
781         int i;
782         int locked;
783
784         /*
785          * NOTE: if summation across all nesting levels (currently 2) proves
786          *       too expensive, a summary counter can be added to
787          *       struct cl_thread_info.
788          */
789         info = cl_env_info(env);
790         for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
791                 locked += info->clt_counters[i].ctc_nr_locks_locked;
792         return locked;
793 }
794 EXPORT_SYMBOL(cl_lock_nr_mutexed);
795
796 static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
797 {
798         LINVRNT(cl_lock_is_mutexed(lock));
799         LINVRNT(cl_lock_invariant(env, lock));
800         ENTRY;
801         if (!(lock->cll_flags & CLF_CANCELLED)) {
802                 const struct cl_lock_slice *slice;
803
804                 lock->cll_flags |= CLF_CANCELLED;
805                 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
806                                                 cls_linkage) {
807                         if (slice->cls_ops->clo_cancel != NULL)
808                                 slice->cls_ops->clo_cancel(env, slice);
809                 }
810         }
811         EXIT;
812 }
813
814 static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
815 {
816         struct cl_object_header    *head;
817         const struct cl_lock_slice *slice;
818
819         LINVRNT(cl_lock_is_mutexed(lock));
820         LINVRNT(cl_lock_invariant(env, lock));
821
822         ENTRY;
823         if (lock->cll_state < CLS_FREEING) {
824                 LASSERT(lock->cll_state != CLS_INTRANSIT);
825                 cl_lock_state_set(env, lock, CLS_FREEING);
826
827                 head = cl_object_header(lock->cll_descr.cld_obj);
828
829                 spin_lock(&head->coh_lock_guard);
830                 cfs_list_del_init(&lock->cll_linkage);
831                 spin_unlock(&head->coh_lock_guard);
832
833                 /*
834                  * From now on, no new references to this lock can be acquired
835                  * by cl_lock_lookup().
836                  */
837                 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
838                                                 cls_linkage) {
839                         if (slice->cls_ops->clo_delete != NULL)
840                                 slice->cls_ops->clo_delete(env, slice);
841                 }
842                 /*
843                  * From now on, no new references to this lock can be acquired
844                  * by layer-specific means (like a pointer from struct
845                  * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
846                  * lov).
847                  *
848                  * Lock will be finally freed in cl_lock_put() when last of
849                  * existing references goes away.
850                  */
851         }
852         EXIT;
853 }
854
855 /**
856  * Mod(ifie)s cl_lock::cll_holds counter for a given lock. Also, for a
857  * top-lock (nesting == 0) accounts for this modification in the per-thread
858  * debugging counters. Sub-lock holds can be released by a thread different
859  * from the one that acquired them.
860  */
861 static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
862                              int delta)
863 {
864         struct cl_thread_counters *counters;
865         enum clt_nesting_level     nesting;
866
867         lock->cll_holds += delta;
868         nesting = cl_lock_nesting(lock);
869         if (nesting == CNL_TOP) {
870                 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
871                 counters->ctc_nr_held += delta;
872                 LASSERT(counters->ctc_nr_held >= 0);
873         }
874 }
875
876 /**
877  * Mod(ifie)s cl_lock::cll_users counter for a given lock. See
878  * cl_lock_hold_mod() for the explanation of the debugging code.
879  */
880 static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
881                              int delta)
882 {
883         struct cl_thread_counters *counters;
884         enum clt_nesting_level     nesting;
885
886         lock->cll_users += delta;
887         nesting = cl_lock_nesting(lock);
888         if (nesting == CNL_TOP) {
889                 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
890                 counters->ctc_nr_used += delta;
891                 LASSERT(counters->ctc_nr_used >= 0);
892         }
893 }
894
895 void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
896                           const char *scope, const void *source)
897 {
898         LINVRNT(cl_lock_is_mutexed(lock));
899         LINVRNT(cl_lock_invariant(env, lock));
900         LASSERT(lock->cll_holds > 0);
901
902         ENTRY;
903         cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock);
904         lu_ref_del(&lock->cll_holders, scope, source);
905         cl_lock_hold_mod(env, lock, -1);
906         if (lock->cll_holds == 0) {
907                 CL_LOCK_ASSERT(lock->cll_state != CLS_HELD, env, lock);
908                 if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
909                     lock->cll_descr.cld_mode == CLM_GROUP ||
910                     lock->cll_state != CLS_CACHED)
911                         /*
912                          * If the lock is still a phantom or group lock
913                          * when the user is done with it, destroy the lock.
914                          */
915                         lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
916                 if (lock->cll_flags & CLF_CANCELPEND) {
917                         lock->cll_flags &= ~CLF_CANCELPEND;
918                         cl_lock_cancel0(env, lock);
919                 }
920                 if (lock->cll_flags & CLF_DOOMED) {
921                         /* no longer doomed: it's dead... Jim. */
922                         lock->cll_flags &= ~CLF_DOOMED;
923                         cl_lock_delete0(env, lock);
924                 }
925         }
926         EXIT;
927 }
928 EXPORT_SYMBOL(cl_lock_hold_release);
929
930 /**
931  * Waits until lock state is changed.
932  *
933  * This function is called with cl_lock mutex locked, atomically releases
934  * mutex and goes to sleep, waiting for a lock state change (signaled by
935  * cl_lock_signal()), and re-acquires the mutex before return.
936  *
937  * This function is used to wait until the lock state machine makes some
938  * progress and to emulate synchronous operations on top of the
939  * asynchronous lock interface.
940  *
941  * \retval -EINTR wait was interrupted
942  *
943  * \retval 0 wait wasn't interrupted
944  *
945  * \pre cl_lock_is_mutexed(lock)
946  *
947  * \see cl_lock_signal()
948  */
949 int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
950 {
951         cfs_waitlink_t waiter;
952         cfs_sigset_t blocked;
953         int result;
954
955         ENTRY;
956         LINVRNT(cl_lock_is_mutexed(lock));
957         LINVRNT(cl_lock_invariant(env, lock));
958         LASSERT(lock->cll_depth == 1);
959         LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */
960
961         cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
962         result = lock->cll_error;
963         if (result == 0) {
964                 /* To avoid being interrupted by the 'non-fatal' signals
965                  * (SIGCHLD, for instance), we block them temporarily.
966                  * LU-305 */
967                 blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
968
969                 cfs_waitlink_init(&waiter);
970                 cfs_waitq_add(&lock->cll_wq, &waiter);
971                 cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
972                 cl_lock_mutex_put(env, lock);
973
974                 LASSERT(cl_lock_nr_mutexed(env) == 0);
975
976                 result = -EINTR;
977                 if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
978                         cfs_waitq_wait(&waiter, CFS_TASK_INTERRUPTIBLE);
979                         if (!cfs_signal_pending())
980                                 result = 0;
981                 }
982
983                 cl_lock_mutex_get(env, lock);
984                 cfs_set_current_state(CFS_TASK_RUNNING);
985                 cfs_waitq_del(&lock->cll_wq, &waiter);
986
987                 /* Restore old blocked signals */
988                 cfs_restore_sigs(blocked);
989         }
990         RETURN(result);
991 }
992 EXPORT_SYMBOL(cl_lock_state_wait);
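
/*
 * Illustrative sketch (not part of the original file): the canonical
 * retry loop around cl_lock_state_wait(), mirroring cl_lock_enqueue_wait()
 * below. The caller holds the mutex at depth 1; each iteration atomically
 * drops it, sleeps, and re-takes it. "rc", "env" and "lock" are assumed.
 */
#if 0
        while (lock->cll_state != CLS_FREEING) {  /* example target state */
                rc = cl_lock_state_wait(env, lock);
                if (rc != 0)
                        break;                    /* -EINTR */
        }
#endif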
993
994 static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
995                                  enum cl_lock_state state)
996 {
997         const struct cl_lock_slice *slice;
998
999         ENTRY;
1000         LINVRNT(cl_lock_is_mutexed(lock));
1001         LINVRNT(cl_lock_invariant(env, lock));
1002
1003         cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
1004                 if (slice->cls_ops->clo_state != NULL)
1005                         slice->cls_ops->clo_state(env, slice, state);
1006         cfs_waitq_broadcast(&lock->cll_wq);
1007         EXIT;
1008 }
1009
1010 /**
1011  * Notifies waiters that lock state changed.
1012  *
1013  * Wakes up all waiters sleeping in cl_lock_state_wait(), also notifies all
1014  * layers about state change by calling cl_lock_operations::clo_state()
1015  * top-to-bottom.
1016  */
1017 void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
1018 {
1019         ENTRY;
1020         cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock);
1021         cl_lock_state_signal(env, lock, lock->cll_state);
1022         EXIT;
1023 }
1024 EXPORT_SYMBOL(cl_lock_signal);
1025
1026 /**
1027  * Changes lock state.
1028  *
1029  * This function is invoked to notify layers that the lock state changed, possibly
1030  * as a result of an asynchronous event such as call-back reception.
1031  *
1032  * \post lock->cll_state == state
1033  *
1034  * \see cl_lock_operations::clo_state()
1035  */
1036 void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
1037                        enum cl_lock_state state)
1038 {
1039         struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
1040
1041         ENTRY;
1042         LASSERT(lock->cll_state <= state ||
1043                 (lock->cll_state == CLS_CACHED &&
1044                  (state == CLS_HELD || /* lock found in cache */
1045                   state == CLS_NEW  ||   /* sub-lock canceled */
1046                   state == CLS_INTRANSIT)) ||
1047                 /* lock is in transit state */
1048                 lock->cll_state == CLS_INTRANSIT);
1049
1050         if (lock->cll_state != state) {
1051                 cfs_atomic_dec(&site->cs_locks_state[lock->cll_state]);
1052                 cfs_atomic_inc(&site->cs_locks_state[state]);
1053
1054                 cl_lock_state_signal(env, lock, state);
1055                 lock->cll_state = state;
1056         }
1057         EXIT;
1058 }
1059 EXPORT_SYMBOL(cl_lock_state_set);
1060
1061 static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
1062 {
1063         const struct cl_lock_slice *slice;
1064         int result;
1065
1066         do {
1067                 result = 0;
1068
1069                 LINVRNT(cl_lock_is_mutexed(lock));
1070                 LINVRNT(cl_lock_invariant(env, lock));
1071                 LASSERT(lock->cll_state == CLS_INTRANSIT);
1072
1073                 result = -ENOSYS;
1074                 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
1075                                                 cls_linkage) {
1076                         if (slice->cls_ops->clo_unuse != NULL) {
1077                                 result = slice->cls_ops->clo_unuse(env, slice);
1078                                 if (result != 0)
1079                                         break;
1080                         }
1081                 }
1082                 LASSERT(result != -ENOSYS);
1083         } while (result == CLO_REPEAT);
1084
1085         return result;
1086 }
1087
1088 /**
1089  * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling
1090  * cl_lock_operations::clo_use() top-to-bottom to notify layers.
1091  * If \a atomic is 1 and an error occurs, the lock is unused to roll it
1092  * back, keeping the whole use operation atomic.
1093  */
1094 int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
1095 {
1096         const struct cl_lock_slice *slice;
1097         int result;
1098         enum cl_lock_state state;
1099
1100         ENTRY;
1101         cl_lock_trace(D_DLMTRACE, env, "use lock", lock);
1102
1103         LASSERT(lock->cll_state == CLS_CACHED);
1104         if (lock->cll_error)
1105                 RETURN(lock->cll_error);
1106
1107         result = -ENOSYS;
1108         state = cl_lock_intransit(env, lock);
1109         cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1110                 if (slice->cls_ops->clo_use != NULL) {
1111                         result = slice->cls_ops->clo_use(env, slice);
1112                         if (result != 0)
1113                                 break;
1114                 }
1115         }
1116         LASSERT(result != -ENOSYS);
1117
1118         LASSERTF(lock->cll_state == CLS_INTRANSIT, "Wrong state %d.\n",
1119                  lock->cll_state);
1120
1121         if (result == 0) {
1122                 state = CLS_HELD;
1123         } else {
1124                 if (result == -ESTALE) {
1125                         /*
1126                          * -ESTALE means a sub-lock is being cancelled
1127                          * at this moment; set the lock state to NEW
1128                          * and ask the caller to repeat.
1129                          */
1130                         state = CLS_NEW;
1131                         result = CLO_REPEAT;
1132                 }
1133
1134                 /* @atomic means back-off-on-failure. */
1135                 if (atomic) {
1136                         int rc;
1137                         rc = cl_unuse_try_internal(env, lock);
1138                         /* Vet the results. */
1139                         if (rc < 0 && result > 0)
1140                                 result = rc;
1141                 }
1142
1143         }
1144         cl_lock_extransit(env, lock, state);
1145         RETURN(result);
1146 }
1147 EXPORT_SYMBOL(cl_use_try);
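
/*
 * Illustrative sketch (not part of the original file): re-activating a
 * cached lock, as cl_lock_peek() above does. With atomic == 1 a failed
 * use is rolled back internally, so the lock is never left half-used.
 * "rc", "env" and "lock" are assumed from the caller.
 */
#if 0
        if (lock->cll_state == CLS_CACHED)
                rc = cl_use_try(env, lock, 1);  /* CLS_CACHED -> CLS_HELD */
#endif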
1148
1149 /**
1150  * Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers
1151  * top-to-bottom.
1152  */
1153 static int cl_enqueue_kick(const struct lu_env *env,
1154                            struct cl_lock *lock,
1155                            struct cl_io *io, __u32 flags)
1156 {
1157         int result;
1158         const struct cl_lock_slice *slice;
1159
1160         ENTRY;
1161         result = -ENOSYS;
1162         cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1163                 if (slice->cls_ops->clo_enqueue != NULL) {
1164                         result = slice->cls_ops->clo_enqueue(env,
1165                                                              slice, io, flags);
1166                         if (result != 0)
1167                                 break;
1168                 }
1169         }
1170         LASSERT(result != -ENOSYS);
1171         RETURN(result);
1172 }
1173
1174 /**
1175  * Tries to enqueue a lock.
1176  *
1177  * This function is called repeatedly by cl_enqueue() until either the lock
1178  * is enqueued or an error occurs. This function does not block waiting for
1179  * networking communication to complete.
1180  *
1181  * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1182  *                         lock->cll_state == CLS_HELD)
1183  *
1184  * \see cl_enqueue() cl_lock_operations::clo_enqueue()
1185  * \see cl_lock_state::CLS_ENQUEUED
1186  */
1187 int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
1188                    struct cl_io *io, __u32 flags)
1189 {
1190         int result;
1191
1192         ENTRY;
1193         cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
1194         do {
1195                 LINVRNT(cl_lock_is_mutexed(lock));
1196
1197                 result = lock->cll_error;
1198                 if (result != 0)
1199                         break;
1200
1201                 switch (lock->cll_state) {
1202                 case CLS_NEW:
1203                         cl_lock_state_set(env, lock, CLS_QUEUING);
1204                         /* fall-through */
1205                 case CLS_QUEUING:
1206                         /* kick layers. */
1207                         result = cl_enqueue_kick(env, lock, io, flags);
1208                         /* In the AGL case, cl_lock::cll_state may
1209                          * already have become CLS_HELD. */
1210                         if (result == 0 && lock->cll_state == CLS_QUEUING)
1211                                 cl_lock_state_set(env, lock, CLS_ENQUEUED);
1212                         break;
1213                 case CLS_INTRANSIT:
1214                         LASSERT(cl_lock_is_intransit(lock));
1215                         result = CLO_WAIT;
1216                         break;
1217                 case CLS_CACHED:
1218                         /* yank lock from the cache. */
1219                         result = cl_use_try(env, lock, 0);
1220                         break;
1221                 case CLS_ENQUEUED:
1222                 case CLS_HELD:
1223                         result = 0;
1224                         break;
1225                 default:
1226                 case CLS_FREEING:
1227                         /*
1228                          * impossible, only held locks with increased
1229                          * ->cll_holds can be enqueued, and they cannot be
1230                          * freed.
1231                          */
1232                         LBUG();
1233                 }
1234         } while (result == CLO_REPEAT);
1235         RETURN(result);
1236 }
1237 EXPORT_SYMBOL(cl_enqueue_try);
1238
1239 /**
1240  * Cancel the conflicting lock found during previous enqueue.
1241  *
1242  * \retval 0 conflicting lock has been canceled.
1243  * \retval -ve error code.
1244  */
1245 int cl_lock_enqueue_wait(const struct lu_env *env,
1246                          struct cl_lock *lock,
1247                          int keep_mutex)
1248 {
1249         struct cl_lock  *conflict;
1250         int              rc = 0;
1251         ENTRY;
1252
1253         LASSERT(cl_lock_is_mutexed(lock));
1254         LASSERT(lock->cll_state == CLS_QUEUING);
1255         LASSERT(lock->cll_conflict != NULL);
1256
1257         conflict = lock->cll_conflict;
1258         lock->cll_conflict = NULL;
1259
1260         cl_lock_mutex_put(env, lock);
1261         LASSERT(cl_lock_nr_mutexed(env) == 0);
1262
1263         cl_lock_mutex_get(env, conflict);
1264         cl_lock_trace(D_DLMTRACE, env, "enqueue wait", conflict);
1265         cl_lock_cancel(env, conflict);
1266         cl_lock_delete(env, conflict);
1267
1268         while (conflict->cll_state != CLS_FREEING) {
1269                 rc = cl_lock_state_wait(env, conflict);
1270                 if (rc != 0)
1271                         break;
1272         }
1273         cl_lock_mutex_put(env, conflict);
1274         lu_ref_del(&conflict->cll_reference, "cancel-wait", lock);
1275         cl_lock_put(env, conflict);
1276
1277         if (keep_mutex)
1278                 cl_lock_mutex_get(env, lock);
1279
1280         LASSERT(rc <= 0);
1281         RETURN(rc);
1282 }
1283 EXPORT_SYMBOL(cl_lock_enqueue_wait);
1284
1285 static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
1286                              struct cl_io *io, __u32 enqflags)
1287 {
1288         int result;
1289
1290         ENTRY;
1291
1292         LINVRNT(cl_lock_is_mutexed(lock));
1293         LINVRNT(cl_lock_invariant(env, lock));
1294         LASSERT(lock->cll_holds > 0);
1295
1296         cl_lock_user_add(env, lock);
1297         do {
1298                 result = cl_enqueue_try(env, lock, io, enqflags);
1299                 if (result == CLO_WAIT) {
1300                         if (lock->cll_conflict != NULL)
1301                                 result = cl_lock_enqueue_wait(env, lock, 1);
1302                         else
1303                                 result = cl_lock_state_wait(env, lock);
1304                         if (result == 0)
1305                                 continue;
1306                 }
1307                 break;
1308         } while (1);
1309         if (result != 0)
1310                 cl_unuse_try(env, lock);
1311         LASSERT(ergo(result == 0 && !(enqflags & CEF_AGL),
1312                      lock->cll_state == CLS_ENQUEUED ||
1313                      lock->cll_state == CLS_HELD));
1314         RETURN(result);
1315 }
1316
1317 /**
1318  * Enqueues a lock.
1319  *
1320  * \pre current thread or io owns a hold on lock.
1321  *
1322  * \post ergo(result == 0, lock->users increased)
1323  * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1324  *                         lock->cll_state == CLS_HELD)
1325  */
1326 int cl_enqueue(const struct lu_env *env, struct cl_lock *lock,
1327                struct cl_io *io, __u32 enqflags)
1328 {
1329         int result;
1330
1331         ENTRY;
1332
1333         cl_lock_lockdep_acquire(env, lock, enqflags);
1334         cl_lock_mutex_get(env, lock);
1335         result = cl_enqueue_locked(env, lock, io, enqflags);
1336         cl_lock_mutex_put(env, lock);
1337         if (result != 0)
1338                 cl_lock_lockdep_release(env, lock);
1339         LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1340                      lock->cll_state == CLS_HELD));
1341         RETURN(result);
1342 }
1343 EXPORT_SYMBOL(cl_enqueue);
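
/*
 * Illustrative sketch (not part of the original file): a synchronous
 * caller pairs cl_enqueue() with cl_wait() to obtain a granted lock.
 * Acquiring the hold beforehand (e.g. via cl_lock_hold()) is assumed to
 * happen elsewhere; on cl_wait() failure the lock is already unused
 * internally, so only the success path needs cl_unuse():
 */
#if 0
        rc = cl_enqueue(env, lock, io, enqflags);
        if (rc == 0) {
                rc = cl_wait(env, lock);        /* CLS_ENQUEUED -> CLS_HELD */
                if (rc == 0) {
                        /* ... perform I/O under the granted lock ... */
                        cl_unuse(env, lock);
                }
        }
#endif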
1344
1345 /**
1346  * Tries to unlock a lock.
1347  *
1348  * This function is called to release the underlying resource:
1349  * 1. for a top-lock, the resource is the sub-locks it holds;
1350  * 2. for a sub-lock, the resource is the reference to the dlmlock.
1351  *
1352  * cl_unuse_try is a one-shot operation, so it must NOT return CLO_WAIT.
1353  *
1354  * \see cl_unuse() cl_lock_operations::clo_unuse()
1355  * \see cl_lock_state::CLS_CACHED
1356  */
1357 int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
1358 {
1359         int                         result;
1360         enum cl_lock_state          state = CLS_NEW;
1361
1362         ENTRY;
1363         cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);
1364
1365         if (lock->cll_users > 1) {
1366                 cl_lock_user_del(env, lock);
1367                 RETURN(0);
1368         }
1369
1370         /* Only if the lock is in CLS_HELD or CLS_ENQUEUED state, it can hold
1371          * underlying resources. */
1372         if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) {
1373                 cl_lock_user_del(env, lock);
1374                 RETURN(0);
1375         }
1376
1377         /*
1378          * New lock users (->cll_users) do not prevent unlocking from
1379          * proceeding. From this point, the lock eventually reaches
1380          * CLS_CACHED, is reinitialized to CLS_NEW or fails into
1381          * CLS_FREEING.
1382          */
1383         state = cl_lock_intransit(env, lock);
1384
1385         result = cl_unuse_try_internal(env, lock);
1386         LASSERT(lock->cll_state == CLS_INTRANSIT);
1387         LASSERT(result != CLO_WAIT);
1388         cl_lock_user_del(env, lock);
1389         if (result == 0 || result == -ESTALE) {
1390                 /*
1391                  * Return lock back to the cache. This is the only
1392                  * place where lock is moved into CLS_CACHED state.
1393                  *
1394                  * If one of ->clo_unuse() methods returned -ESTALE, lock
1395                  * cannot be placed into cache and has to be
1396                  * re-initialized. This happens e.g., when a sub-lock was
1397                  * canceled while unlocking was in progress.
1398                  */
1399                 if (state == CLS_HELD && result == 0)
1400                         state = CLS_CACHED;
1401                 else
1402                         state = CLS_NEW;
1403                 cl_lock_extransit(env, lock, state);
1404
1405                 /*
1406                  * Hide -ESTALE error.
1407                  * Suppose the lock is a glimpse lock with multiple
1408                  * stripes, one of its sub-locks returned -ENAVAIL, and
1409                  * the other sub-locks matched write locks. In this case
1410                  * we cannot set this lock to error, because otherwise
1411                  * some of its sub-locks might not be cancelled, and some
1412                  * dirty pages would never be written to the OSTs. -jay
1413                  */
1414                 result = 0;
1415         } else {
1416                 CERROR("result = %d, this is unlikely!\n", result);
1417                 state = CLS_NEW;
1418                 cl_lock_extransit(env, lock, state);
1419         }
1420         RETURN(result ?: lock->cll_error);
1421 }
1422 EXPORT_SYMBOL(cl_unuse_try);
1423
1424 static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
1425 {
1426         int result;
1427         ENTRY;
1428
1429         result = cl_unuse_try(env, lock);
1430         if (result)
1431                 CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result);
1432
1433         EXIT;
1434 }
1435
1436 /**
1437  * Unlocks a lock.
1438  */
1439 void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
1440 {
1441         ENTRY;
1442         cl_lock_mutex_get(env, lock);
1443         cl_unuse_locked(env, lock);
1444         cl_lock_mutex_put(env, lock);
1445         cl_lock_lockdep_release(env, lock);
1446         EXIT;
1447 }
1448 EXPORT_SYMBOL(cl_unuse);
1449
1450 /**
1451  * Tries to wait for a lock.
1452  *
1453  * This function is called repeatedly by cl_wait() until either the lock is
1454  * granted or an error occurs. This function does not block waiting for network
1455  * communication to complete.
1456  *
1457  * \see cl_wait() cl_lock_operations::clo_wait()
1458  * \see cl_lock_state::CLS_HELD
1459  */
1460 int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
1461 {
1462         const struct cl_lock_slice *slice;
1463         int                         result;
1464
1465         ENTRY;
1466         cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock);
1467         do {
1468                 LINVRNT(cl_lock_is_mutexed(lock));
1469                 LINVRNT(cl_lock_invariant(env, lock));
1470                 LASSERTF(lock->cll_state == CLS_QUEUING ||
1471                          lock->cll_state == CLS_ENQUEUED ||
1472                          lock->cll_state == CLS_HELD ||
1473                          lock->cll_state == CLS_INTRANSIT,
1474                          "lock state: %d\n", lock->cll_state);
1475                 LASSERT(lock->cll_users > 0);
1476                 LASSERT(lock->cll_holds > 0);
1477
1478                 result = lock->cll_error;
1479                 if (result != 0)
1480                         break;
1481
1482                 if (cl_lock_is_intransit(lock)) {
1483                         result = CLO_WAIT;
1484                         break;
1485                 }
1486
1487                 if (lock->cll_state == CLS_HELD)
1488                         /* nothing to do */
1489                         break;
1490
1491                 result = -ENOSYS;
1492                 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1493                         if (slice->cls_ops->clo_wait != NULL) {
1494                                 result = slice->cls_ops->clo_wait(env, slice);
1495                                 if (result != 0)
1496                                         break;
1497                         }
1498                 }
1499                 LASSERT(result != -ENOSYS);
1500                 if (result == 0) {
1501                         LASSERT(lock->cll_state != CLS_INTRANSIT);
1502                         cl_lock_state_set(env, lock, CLS_HELD);
1503                 }
1504         } while (result == CLO_REPEAT);
1505         RETURN(result);
1506 }
1507 EXPORT_SYMBOL(cl_wait_try);
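
/*
 * A minimal illustrative sketch of a cl_lock_operations::clo_wait() method
 * for a hypothetical "foo" layer, showing the contract cl_wait_try() relies
 * on: return 0 once this layer considers the lock granted, or CLO_WAIT to
 * make the caller sleep and retry. The struct and field names below are
 * placeholders, not part of any real layer.
 */
#if 0 /* example only */
static int foo_lock_wait(const struct lu_env *env,
                         const struct cl_lock_slice *slice)
{
        struct foo_lock *fl = container_of(slice, struct foo_lock, fl_cl);

        /* granted already: let cl_wait_try() move the lock to CLS_HELD */
        return fl->fl_granted ? 0 : CLO_WAIT;
}
#endif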

/**
 * Waits until an enqueued lock is granted.
 *
 * \pre current thread or io owns a hold on the lock
 * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
 *                        lock->cll_state == CLS_HELD)
 *
 * \post ergo(result == 0, lock->cll_state == CLS_HELD)
 */
int cl_wait(const struct lu_env *env, struct cl_lock *lock)
{
        int result;

        ENTRY;
        cl_lock_mutex_get(env, lock);

        LINVRNT(cl_lock_invariant(env, lock));
        LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
                 "wrong state %d\n", lock->cll_state);
        LASSERT(lock->cll_holds > 0);

        do {
                result = cl_wait_try(env, lock);
                if (result == CLO_WAIT) {
                        result = cl_lock_state_wait(env, lock);
                        if (result == 0)
                                continue;
                }
                break;
        } while (1);
        if (result < 0) {
                cl_unuse_try(env, lock);
                cl_lock_lockdep_release(env, lock);
        }
        cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
        cl_lock_mutex_put(env, lock);
        LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
        RETURN(result);
}
EXPORT_SYMBOL(cl_wait);
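
/*
 * A minimal sketch of the typical caller pattern around cl_wait(): after a
 * successful enqueue, block until the lock is granted, do the work, then
 * return the lock to the cache. "lock" is assumed to be enqueued and held
 * by the surrounding code.
 */
#if 0 /* example only */
{
        int rc;

        rc = cl_wait(env, lock);
        if (rc == 0) {
                /* lock->cll_state == CLS_HELD: I/O under the lock is safe */
                cl_unuse(env, lock);
        }
        /* on error cl_wait() has already called cl_unuse_try() internally */
}
#endif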

/**
 * Executes cl_lock_operations::clo_weigh() on every layer and sums the
 * results to estimate the lock's value.
 */
unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
{
        const struct cl_lock_slice *slice;
        unsigned long pound;
        unsigned long ounce;

        ENTRY;
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        pound = 0;
        cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
                if (slice->cls_ops->clo_weigh != NULL) {
                        ounce = slice->cls_ops->clo_weigh(env, slice);
                        pound += ounce;
                        if (pound < ounce) /* overflow: saturate */
                                pound = ~0UL;
                }
        }
        RETURN(pound);
}
EXPORT_SYMBOL(cl_lock_weigh);
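
/*
 * A minimal sketch of a cl_lock_operations::clo_weigh() method for a
 * hypothetical layer: the weight of an extent lock is estimated as the
 * number of pages it covers; cl_lock_weigh() sums such estimates across
 * layers, saturating on overflow. The "foo" name is a placeholder.
 */
#if 0 /* example only */
static unsigned long foo_lock_weigh(const struct lu_env *env,
                                    const struct cl_lock_slice *slice)
{
        const struct cl_lock_descr *d = &slice->cls_lock->cll_descr;

        return (unsigned long)(d->cld_end - d->cld_start + 1);
}
#endif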

/**
 * Notifies layers that the lock description changed.
 *
 * The server can grant the client a lock different from the one that was
 * requested (e.g., larger in extent). This method is called when the
 * actually granted lock description becomes known, so that layers can
 * accommodate the changed description.
 *
 * \see cl_lock_operations::clo_modify()
 */
int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
                   const struct cl_lock_descr *desc)
{
        const struct cl_lock_slice *slice;
        struct cl_object           *obj = lock->cll_descr.cld_obj;
        struct cl_object_header    *hdr = cl_object_header(obj);
        int result;

        ENTRY;
        cl_lock_trace(D_DLMTRACE, env, "modify lock", lock);
        /* don't allow the object to change */
        LASSERT(obj == desc->cld_obj);
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
                if (slice->cls_ops->clo_modify != NULL) {
                        result = slice->cls_ops->clo_modify(env, slice, desc);
                        if (result != 0)
                                RETURN(result);
                }
        }
        CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n",
                      PDESCR(desc), PFID(lu_object_fid(&desc->cld_obj->co_lu)));
        /*
         * Just replace the description in place. Nothing more is needed
         * for now. If locks were indexed according to their extent and/or
         * mode, that index would have to be updated here.
         */
        spin_lock(&hdr->coh_lock_guard);
        lock->cll_descr = *desc;
        spin_unlock(&hdr->coh_lock_guard);
        RETURN(0);
}
EXPORT_SYMBOL(cl_lock_modify);
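
/*
 * A sketch of the typical caller pattern: when the server grants a wider
 * extent than requested, the completion path builds a new description and
 * installs it with cl_lock_modify(). The widened bounds below are
 * hypothetical; note that the object itself must not change.
 */
#if 0 /* example only */
{
        struct cl_lock_descr descr = lock->cll_descr;
        int rc;

        descr.cld_start = 0;            /* server widened the extent */
        descr.cld_end   = CL_PAGE_EOF;
        rc = cl_lock_modify(env, lock, &descr);
}
#endif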

/**
 * Initializes a lock closure with a given origin.
 *
 * \see cl_lock_closure
 */
void cl_lock_closure_init(const struct lu_env *env,
                          struct cl_lock_closure *closure,
                          struct cl_lock *origin, int wait)
{
        LINVRNT(cl_lock_is_mutexed(origin));
        LINVRNT(cl_lock_invariant(env, origin));

        CFS_INIT_LIST_HEAD(&closure->clc_list);
        closure->clc_origin = origin;
        closure->clc_wait   = wait;
        closure->clc_nr     = 0;
}
EXPORT_SYMBOL(cl_lock_closure_init);

/**
 * Builds a closure of \a lock.
 *
 * Building a closure consists of adding the initial lock (\a lock) into it
 * and calling the cl_lock_operations::clo_closure() methods of \a lock.
 * These methods might call cl_lock_closure_build() recursively again,
 * adding more locks to the closure, etc.
 *
 * \see cl_lock_closure
 */
int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
                          struct cl_lock_closure *closure)
{
        const struct cl_lock_slice *slice;
        int result;

        ENTRY;
        LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
        LINVRNT(cl_lock_invariant(env, closure->clc_origin));

        result = cl_lock_enclosure(env, lock, closure);
        if (result == 0) {
                cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                        if (slice->cls_ops->clo_closure != NULL) {
                                result = slice->cls_ops->clo_closure(env, slice,
                                                                     closure);
                                if (result != 0)
                                        break;
                        }
                }
        }
        if (result != 0)
                cl_lock_disclosure(env, closure);
        RETURN(result);
}
EXPORT_SYMBOL(cl_lock_closure_build);

/**
 * Adds a new lock to a closure.
 *
 * Try-locks \a lock and, on success, adds it to the closure (never more
 * than once). If the try-lock fails, returns CLO_REPEAT, optionally first
 * waiting until the next try-lock is likely to succeed.
 */
int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
                      struct cl_lock_closure *closure)
{
        int result = 0;
        ENTRY;
        cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock);
        if (!cl_lock_mutex_try(env, lock)) {
                /*
                 * If lock->cll_inclosure is not empty, the lock is already
                 * in this closure.
                 */
                if (cfs_list_empty(&lock->cll_inclosure)) {
                        cl_lock_get_trust(lock);
                        lu_ref_add(&lock->cll_reference, "closure", closure);
                        cfs_list_add(&lock->cll_inclosure, &closure->clc_list);
                        closure->clc_nr++;
                } else
                        cl_lock_mutex_put(env, lock);
                result = 0;
        } else {
                cl_lock_disclosure(env, closure);
                if (closure->clc_wait) {
                        cl_lock_get_trust(lock);
                        lu_ref_add(&lock->cll_reference, "closure-w", closure);
                        cl_lock_mutex_put(env, closure->clc_origin);

                        LASSERT(cl_lock_nr_mutexed(env) == 0);
                        cl_lock_mutex_get(env, lock);
                        cl_lock_mutex_put(env, lock);

                        cl_lock_mutex_get(env, closure->clc_origin);
                        lu_ref_del(&lock->cll_reference, "closure-w", closure);
                        cl_lock_put(env, lock);
                }
                result = CLO_REPEAT;
        }
        RETURN(result);
}
EXPORT_SYMBOL(cl_lock_enclosure);

/** Releases the mutexes of the enclosed locks. */
void cl_lock_disclosure(const struct lu_env *env,
                        struct cl_lock_closure *closure)
{
        struct cl_lock *scan;
        struct cl_lock *temp;

        cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
        cfs_list_for_each_entry_safe(scan, temp, &closure->clc_list,
                                     cll_inclosure) {
                cfs_list_del_init(&scan->cll_inclosure);
                cl_lock_mutex_put(env, scan);
                lu_ref_del(&scan->cll_reference, "closure", closure);
                cl_lock_put(env, scan);
                closure->clc_nr--;
        }
        LASSERT(closure->clc_nr == 0);
}
EXPORT_SYMBOL(cl_lock_disclosure);

/** Finalizes a closure. */
void cl_lock_closure_fini(struct cl_lock_closure *closure)
{
        LASSERT(closure->clc_nr == 0);
        LASSERT(cfs_list_empty(&closure->clc_list));
}
EXPORT_SYMBOL(cl_lock_closure_fini);
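
/*
 * A minimal sketch of the closure life-cycle: starting from a mutexed
 * origin lock, collect the related locks, work while all their mutexes are
 * held, then drop everything. A non-zero result from the build step means
 * the closure was already disclosed and the whole operation should be
 * retried. "origin" is assumed to be supplied, and mutexed, by the caller.
 */
#if 0 /* example only */
{
        struct cl_lock_closure closure;
        int rc;

        cl_lock_closure_init(env, &closure, origin, 1 /* wait */);
        rc = cl_lock_closure_build(env, origin, &closure);
        if (rc == 0) {
                /* mutexes of all enclosed locks are held here */
                cl_lock_disclosure(env, &closure);
        }
        cl_lock_closure_fini(&closure);
}
#endif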

/**
 * Destroys this lock. Notifies layers (bottom-to-top) that the lock is
 * being destroyed, then destroys it. If there are holds on the lock,
 * destruction is postponed until all holds are released. This is called
 * when a decision is made to destroy the lock in the future, e.g., when a
 * blocking AST is received for it, or a fatal communication error happens.
 *
 * The caller must have a reference on this lock to prevent a situation
 * where the deleted lock lingers in memory indefinitely because nobody
 * calls cl_lock_put() to finish it off.
 *
 * \pre atomic_read(&lock->cll_ref) > 0
 * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
 *           cl_lock_nr_mutexed(env) == 1)
 *      [i.e., if a top-lock is deleted, mutexes of no other locks can be
 *      held, as deletion of sub-locks might require releasing a top-lock
 *      mutex]
 *
 * \see cl_lock_operations::clo_delete()
 * \see cl_lock::cll_holds
 */
void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
                     cl_lock_nr_mutexed(env) == 1));

        ENTRY;
        cl_lock_trace(D_DLMTRACE, env, "delete lock", lock);
        if (lock->cll_holds == 0)
                cl_lock_delete0(env, lock);
        else
                lock->cll_flags |= CLF_DOOMED;
        EXIT;
}
EXPORT_SYMBOL(cl_lock_delete);

/**
 * Marks the lock as irrecoverably failed, and marks it for destruction.
 * This happens when, e.g., the server fails to grant a lock to us, or a
 * network timeout occurs.
 *
 * \pre atomic_read(&lock->cll_ref) > 0
 *
 * \see cl_lock_delete()
 * \see cl_lock::cll_holds
 */
void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        ENTRY;
        if (lock->cll_error == 0 && error != 0) {
                cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
                lock->cll_error = error;
                cl_lock_signal(env, lock);
                cl_lock_cancel(env, lock);
                cl_lock_delete(env, lock);
        }
        EXIT;
}
EXPORT_SYMBOL(cl_lock_error);

/**
 * Cancels this lock. Notifies layers (bottom-to-top) that the lock is
 * being cancelled, then destroys it. If there are holds on the lock,
 * cancellation is postponed until all holds are released.
 *
 * Cancellation notification is delivered to layers at most once.
 *
 * \see cl_lock_operations::clo_cancel()
 * \see cl_lock::cll_holds
 */
void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        ENTRY;
        cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
        if (lock->cll_holds == 0)
                cl_lock_cancel0(env, lock);
        else
                lock->cll_flags |= CLF_CANCELPEND;
        EXIT;
}
EXPORT_SYMBOL(cl_lock_cancel);

/**
 * Finds an existing lock covering the given page index, optionally
 * different from a given \a except lock.
 */
struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
                                 struct cl_object *obj, pgoff_t index,
                                 struct cl_lock *except,
                                 int pending, int canceld)
{
        struct cl_object_header *head;
        struct cl_lock          *scan;
        struct cl_lock          *lock;
        struct cl_lock_descr    *need;

        ENTRY;

        head = cl_object_header(obj);
        need = &cl_env_info(env)->clt_descr;
        lock = NULL;

        need->cld_mode = CLM_READ; /* CLM_READ matches both READ and WRITE,
                                    * but not PHANTOM */
        need->cld_start = need->cld_end = index;
        need->cld_enq_flags = 0;

        spin_lock(&head->coh_lock_guard);
        /* It is fine to match any group lock, since there can be only one
         * with a unique gid, and it conflicts with all other lock modes
         * anyway. */
        cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
                if (scan != except &&
                    (scan->cll_descr.cld_mode == CLM_GROUP ||
                    cl_lock_ext_match(&scan->cll_descr, need)) &&
                    scan->cll_state >= CLS_HELD &&
                    scan->cll_state < CLS_FREEING &&
                    /*
                     * This check is racy as the lock can be canceled right
                     * after it is done, but this is fine, because the page
                     * already exists.
                     */
                    (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
                    (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
                        /* Don't increase cs_hit here, since this is just a
                         * helper function. */
                        cl_lock_get_trust(scan);
                        lock = scan;
                        break;
                }
        }
        spin_unlock(&head->coh_lock_guard);
        RETURN(lock);
}
EXPORT_SYMBOL(cl_lock_at_pgoff);
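
/*
 * A sketch of a typical lookup: find a granted, non-doomed lock covering
 * page "index" of "obj", and drop the trusted reference when done. The
 * surrounding context is assumed to provide env, obj and index.
 */
#if 0 /* example only */
{
        struct cl_lock *lock;

        lock = cl_lock_at_pgoff(env, obj, index, NULL,
                                0 /* pending */, 0 /* canceld */);
        if (lock != NULL) {
                /* the page at "index" is protected by "lock" */
                cl_lock_put(env, lock);
        }
}
#endif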

/**
 * Calculates the page offset at the layer of \a lock.
 * At the time of this writing, \a page is a top page and \a lock is a
 * sub-lock.
 */
static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock)
{
        struct lu_device_type *dtype;
        const struct cl_page_slice *slice;

        dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type;
        slice = cl_page_at(page, dtype);
        LASSERT(slice != NULL);
        return slice->cpl_page->cp_index;
}

/**
 * Checks whether page \a page is covered by another lock; if it is not,
 * discards the page.
 */
static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
                                struct cl_page *page, void *cbdata)
{
        struct cl_thread_info *info = cl_env_info(env);
        struct cl_lock *lock = cbdata;
        pgoff_t index = pgoff_at_lock(page, lock);

        if (index >= info->clt_fn_index) {
                struct cl_lock *tmp;

                /* refresh the first non-overlapped index */
                tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index,
                                        lock, 1, 0);
                if (tmp != NULL) {
                        /* Cache the first non-overlapped index, so that all
                         * pages within [index, clt_fn_index) can be skipped.
                         * This is safe because if the tmp lock is canceled,
                         * it will discard these pages. */
                        info->clt_fn_index = tmp->cll_descr.cld_end + 1;
                        if (tmp->cll_descr.cld_end == CL_PAGE_EOF)
                                info->clt_fn_index = CL_PAGE_EOF;
                        cl_lock_put(env, tmp);
                } else if (cl_page_own(env, io, page) == 0) {
                        /* discard the page */
                        cl_page_unmap(env, io, page);
                        cl_page_discard(env, io, page);
                        cl_page_disown(env, io, page);
                } else {
                        LASSERT(page->cp_state == CPS_FREEING);
                }
        }

        info->clt_next_index = index + 1;
        return CLP_GANG_OKAY;
}

static int discard_cb(const struct lu_env *env, struct cl_io *io,
                      struct cl_page *page, void *cbdata)
{
        struct cl_thread_info *info = cl_env_info(env);
        struct cl_lock *lock   = cbdata;

        LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
        KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
                      !PageWriteback(cl_page_vmpage(env, page))));
        KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
                      !PageDirty(cl_page_vmpage(env, page))));

        info->clt_next_index = pgoff_at_lock(page, lock) + 1;
        if (cl_page_own(env, io, page) == 0) {
                /* discard the page */
                cl_page_unmap(env, io, page);
                cl_page_discard(env, io, page);
                cl_page_disown(env, io, page);
        } else {
                LASSERT(page->cp_state == CPS_FREEING);
        }

        return CLP_GANG_OKAY;
}

/**
 * Discards pages protected by the given lock. This function traverses the
 * radix tree to find all covered pages and discards them. If a page is
 * covered by another lock, it remains in the cache.
 *
 * If an error happens at any step, the process continues anyway (the
 * reasoning behind this being that lock cancellation cannot be delayed
 * indefinitely).
 */
int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock)
{
        struct cl_thread_info *info  = cl_env_info(env);
        struct cl_io          *io    = &info->clt_io;
        struct cl_lock_descr  *descr = &lock->cll_descr;
        cl_page_gang_cb_t      cb;
        int res;
        int result;

        LINVRNT(cl_lock_invariant(env, lock));
        ENTRY;

        io->ci_obj = cl_object_top(descr->cld_obj);
        io->ci_ignore_layout = 1;
        result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
        if (result != 0)
                GOTO(out, result);

        cb = descr->cld_mode == CLM_READ ? check_and_discard_cb : discard_cb;
        info->clt_fn_index = info->clt_next_index = descr->cld_start;
        do {
                res = cl_page_gang_lookup(env, descr->cld_obj, io,
                                          info->clt_next_index, descr->cld_end,
                                          cb, (void *)lock);
                if (info->clt_next_index > descr->cld_end)
                        break;

                if (res == CLP_GANG_RESCHED)
                        cfs_cond_resched();
        } while (res != CLP_GANG_OKAY);
out:
        cl_io_fini(env, io);
        RETURN(result);
}
EXPORT_SYMBOL(cl_lock_discard_pages);
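
/*
 * A sketch of where page discarding typically fits: a hypothetical layer's
 * cl_lock_operations::clo_cancel() method flushes the pages under the lock
 * before the underlying DLM lock goes away. The "foo" name is a
 * placeholder.
 */
#if 0 /* example only */
static void foo_lock_cancel(const struct lu_env *env,
                            const struct cl_lock_slice *slice)
{
        /* errors are intentionally ignored: cancellation must proceed */
        (void)cl_lock_discard_pages(env, slice->cls_lock);
}
#endif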

/**
 * Eliminates all locks for a given object.
 *
 * The caller has to guarantee that no lock is in active use.
 *
 * \param cancel when this is set, cl_locks_prune() cancels locks before
 *               destroying them.
 */
void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
{
        struct cl_object_header *head;
        struct cl_lock          *lock;

        ENTRY;
        head = cl_object_header(obj);
        /*
         * If locks are destroyed without cancellation, all pages must be
         * already destroyed (as otherwise they will be left unprotected).
         */
        LASSERT(ergo(!cancel,
                     head->coh_tree.rnode == NULL && head->coh_pages == 0));

        spin_lock(&head->coh_lock_guard);
        while (!cfs_list_empty(&head->coh_locks)) {
                lock = container_of(head->coh_locks.next,
                                    struct cl_lock, cll_linkage);
                cl_lock_get_trust(lock);
                spin_unlock(&head->coh_lock_guard);
                lu_ref_add(&lock->cll_reference, "prune", cfs_current());

again:
                cl_lock_mutex_get(env, lock);
                if (lock->cll_state < CLS_FREEING) {
                        LASSERT(lock->cll_users <= 1);
                        if (unlikely(lock->cll_users == 1)) {
                                struct l_wait_info lwi = { 0 };

                                cl_lock_mutex_put(env, lock);
                                l_wait_event(lock->cll_wq,
                                             lock->cll_users == 0,
                                             &lwi);
                                goto again;
                        }

                        if (cancel)
                                cl_lock_cancel(env, lock);
                        cl_lock_delete(env, lock);
                }
                cl_lock_mutex_put(env, lock);
                lu_ref_del(&lock->cll_reference, "prune", cfs_current());
                cl_lock_put(env, lock);
                spin_lock(&head->coh_lock_guard);
        }
        spin_unlock(&head->coh_lock_guard);
        EXIT;
}
EXPORT_SYMBOL(cl_locks_prune);

static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
                                          const struct cl_io *io,
                                          const struct cl_lock_descr *need,
                                          const char *scope, const void *source)
{
        struct cl_lock *lock;

        ENTRY;

        while (1) {
                lock = cl_lock_find(env, io, need);
                if (IS_ERR(lock))
                        break;
                cl_lock_mutex_get(env, lock);
                if (lock->cll_state < CLS_FREEING &&
                    !(lock->cll_flags & CLF_CANCELLED)) {
                        cl_lock_hold_mod(env, lock, +1);
                        lu_ref_add(&lock->cll_holders, scope, source);
                        lu_ref_add(&lock->cll_reference, scope, source);
                        break;
                }
                cl_lock_mutex_put(env, lock);
                cl_lock_put(env, lock);
        }
        RETURN(lock);
}

/**
 * Returns a lock matching the \a need description, with a reference and a
 * hold on it.
 *
 * This is much like cl_lock_find(), except that cl_lock_hold() additionally
 * guarantees that the lock is not in the CLS_FREEING state on return.
 */
struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
                             const struct cl_lock_descr *need,
                             const char *scope, const void *source)
{
        struct cl_lock *lock;

        ENTRY;

        lock = cl_lock_hold_mutex(env, io, need, scope, source);
        if (!IS_ERR(lock))
                cl_lock_mutex_put(env, lock);
        RETURN(lock);
}
EXPORT_SYMBOL(cl_lock_hold);

/**
 * Main high-level entry point of the cl_lock interface: finds an existing
 * lock, or enqueues a new one, matching the given description.
 */
struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
                                const struct cl_lock_descr *need,
                                const char *scope, const void *source)
{
        struct cl_lock       *lock;
        int                   rc;
        __u32                 enqflags = need->cld_enq_flags;

        ENTRY;
        do {
                lock = cl_lock_hold_mutex(env, io, need, scope, source);
                if (IS_ERR(lock))
                        break;

                rc = cl_enqueue_locked(env, lock, io, enqflags);
                if (rc == 0) {
                        if (cl_lock_fits_into(env, lock, need, io)) {
                                if (!(enqflags & CEF_AGL)) {
                                        cl_lock_mutex_put(env, lock);
                                        cl_lock_lockdep_acquire(env, lock,
                                                                enqflags);
                                        break;
                                }
                                rc = 1;
                        }
                        cl_unuse_locked(env, lock);
                }
                cl_lock_trace(D_DLMTRACE, env,
                              rc <= 0 ? "enqueue failed" : "agl succeeded",
                              lock);
                cl_lock_hold_release(env, lock, scope, source);
                cl_lock_mutex_put(env, lock);
                lu_ref_del(&lock->cll_reference, scope, source);
                cl_lock_put(env, lock);
                if (rc > 0) {
                        LASSERT(enqflags & CEF_AGL);
                        lock = NULL;
                } else if (rc != 0) {
                        lock = ERR_PTR(rc);
                }
        } while (rc == 0);
        RETURN(lock);
}
EXPORT_SYMBOL(cl_lock_request);
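
/*
 * A minimal sketch of the entry point in use: request a write lock on an
 * extent, perform I/O under it, then return the lock to the cache and drop
 * the hold. "io", "obj", "start" and "end" are assumed to come from the
 * caller's context; the scope string is arbitrary.
 */
#if 0 /* example only */
{
        struct cl_lock_descr *descr = &cl_env_info(env)->clt_descr;
        struct cl_lock       *lock;

        descr->cld_obj       = obj;
        descr->cld_mode      = CLM_WRITE;
        descr->cld_start     = start;
        descr->cld_end       = end;
        descr->cld_enq_flags = 0;

        lock = cl_lock_request(env, io, descr, "example", cfs_current());
        if (!IS_ERR(lock) && lock != NULL) {
                /* ... I/O covered by the lock ... */
                cl_unuse(env, lock);
                cl_lock_release(env, lock, "example", cfs_current());
        }
}
#endif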

/**
 * Adds a hold to a known lock.
 */
void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
                      const char *scope, const void *source)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        LASSERT(lock->cll_state != CLS_FREEING);

        ENTRY;
        cl_lock_hold_mod(env, lock, +1);
        cl_lock_get(lock);
        lu_ref_add(&lock->cll_holders, scope, source);
        lu_ref_add(&lock->cll_reference, scope, source);
        EXIT;
}
EXPORT_SYMBOL(cl_lock_hold_add);

/**
 * Releases a hold and a reference on a lock on which the caller already
 * holds the mutex.
 */
void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
                    const char *scope, const void *source)
{
        LINVRNT(cl_lock_invariant(env, lock));
        ENTRY;
        cl_lock_hold_release(env, lock, scope, source);
        lu_ref_del(&lock->cll_reference, scope, source);
        cl_lock_put(env, lock);
        EXIT;
}
EXPORT_SYMBOL(cl_lock_unhold);

/**
 * Releases a hold and a reference on a lock, obtained by cl_lock_hold().
 */
void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
                     const char *scope, const void *source)
{
        LINVRNT(cl_lock_invariant(env, lock));
        ENTRY;
        cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
        cl_lock_mutex_get(env, lock);
        cl_lock_hold_release(env, lock, scope, source);
        cl_lock_mutex_put(env, lock);
        lu_ref_del(&lock->cll_reference, scope, source);
        cl_lock_put(env, lock);
        EXIT;
}
EXPORT_SYMBOL(cl_lock_release);

void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        ENTRY;
        cl_lock_used_mod(env, lock, +1);
        EXIT;
}
EXPORT_SYMBOL(cl_lock_user_add);

void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        LASSERT(lock->cll_users > 0);

        ENTRY;
        cl_lock_used_mod(env, lock, -1);
        if (lock->cll_users == 0)
                cfs_waitq_broadcast(&lock->cll_wq);
        EXIT;
}
EXPORT_SYMBOL(cl_lock_user_del);

const char *cl_lock_mode_name(const enum cl_lock_mode mode)
{
        static const char *names[] = {
                [CLM_PHANTOM] = "P",
                [CLM_READ]    = "R",
                [CLM_WRITE]   = "W",
                [CLM_GROUP]   = "G"
        };
        if (0 <= mode && mode < ARRAY_SIZE(names))
                return names[mode];
        else
                return "U";
}
EXPORT_SYMBOL(cl_lock_mode_name);

/**
 * Prints a human-readable representation of a lock description.
 */
void cl_lock_descr_print(const struct lu_env *env, void *cookie,
                         lu_printer_t printer,
                         const struct cl_lock_descr *descr)
{
        const struct lu_fid  *fid;

        fid = lu_object_fid(&descr->cld_obj->co_lu);
        (*printer)(env, cookie, DDESCR"@"DFID, PDESCR(descr), PFID(fid));
}
EXPORT_SYMBOL(cl_lock_descr_print);

/**
 * Prints a human-readable representation of \a lock through \a printer.
 */
void cl_lock_print(const struct lu_env *env, void *cookie,
                   lu_printer_t printer, const struct cl_lock *lock)
{
        const struct cl_lock_slice *slice;

        (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
                   lock, cfs_atomic_read(&lock->cll_ref),
                   lock->cll_state, lock->cll_error, lock->cll_holds,
                   lock->cll_users, lock->cll_flags);
        cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
        (*printer)(env, cookie, " {\n");

        cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                (*printer)(env, cookie, "    %s@%p: ",
                           slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
                           slice);
                if (slice->cls_ops->clo_print != NULL)
                        slice->cls_ops->clo_print(env, cookie, printer, slice);
                (*printer)(env, cookie, "\n");
        }
        (*printer)(env, cookie, "} lock@%p\n", lock);
}
EXPORT_SYMBOL(cl_lock_print);

int cl_lock_init(void)
{
        return lu_kmem_init(cl_lock_caches);
}

void cl_lock_fini(void)
{
        lu_kmem_fini(cl_lock_caches);
}