lustre/obdclass/cl_lock.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Whamcloud, Inc.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Client Extent Lock.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_CLASS
42
43 #include <obd_class.h>
44 #include <obd_support.h>
45 #include <lustre_fid.h>
46 #include <libcfs/list.h>
47 /* lu_time_global_{init,fini}() */
48 #include <lu_time.h>
49
50 #include <cl_object.h>
51 #include "cl_internal.h"
52
53 /** Lock class of cl_lock::cll_guard */
54 static cfs_lock_class_key_t cl_lock_guard_class;
55 static cfs_mem_cache_t *cl_lock_kmem;
56
57 static struct lu_kmem_descr cl_lock_caches[] = {
58         {
59                 .ckd_cache = &cl_lock_kmem,
60                 .ckd_name  = "cl_lock_kmem",
61                 .ckd_size  = sizeof (struct cl_lock)
62         },
63         {
64                 .ckd_cache = NULL
65         }
66 };
67
68 /**
69  * Basic lock invariant that is maintained at all times. Caller either has a
70  * reference to \a lock, or somehow assures that \a lock cannot be freed.
71  *
72  * \see cl_lock_invariant()
73  */
74 static int cl_lock_invariant_trusted(const struct lu_env *env,
75                                      const struct cl_lock *lock)
76 {
77         return  ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
78                 cfs_atomic_read(&lock->cll_ref) >= lock->cll_holds &&
79                 lock->cll_holds >= lock->cll_users &&
80                 lock->cll_holds >= 0 &&
81                 lock->cll_users >= 0 &&
82                 lock->cll_depth >= 0;
83 }
84
85 /**
86  * Stronger lock invariant, checking that caller has a reference on a lock.
87  *
88  * \see cl_lock_invariant_trusted()
89  */
90 static int cl_lock_invariant(const struct lu_env *env,
91                              const struct cl_lock *lock)
92 {
93         int result;
94
95         result = cfs_atomic_read(&lock->cll_ref) > 0 &&
96                 cl_lock_invariant_trusted(env, lock);
97         if (!result && env != NULL)
98                 CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
99         return result;
100 }
101
102 /**
103  * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock.
104  */
105 static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
106 {
107         return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
108 }
109
110 /**
111  * Returns a set of counters for this lock, depending on a lock nesting.
112  */
113 static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
114                                                    const struct cl_lock *lock)
115 {
116         struct cl_thread_info *info;
117         enum clt_nesting_level nesting;
118
119         info = cl_env_info(env);
120         nesting = cl_lock_nesting(lock);
121         LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
122         return &info->clt_counters[nesting];
123 }
124
125 static void cl_lock_trace0(int level, const struct lu_env *env,
126                            const char *prefix, const struct cl_lock *lock,
127                            const char *func, const int line)
128 {
129         struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
130         CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)"
131                       "(%p/%d/%d) at %s():%d\n",
132                prefix, lock, cfs_atomic_read(&lock->cll_ref),
133                lock->cll_guarder, lock->cll_depth,
134                lock->cll_state, lock->cll_error, lock->cll_holds,
135                lock->cll_users, lock->cll_flags,
136                env, h->coh_nesting, cl_lock_nr_mutexed(env),
137                func, line);
138 }
139 #define cl_lock_trace(level, env, prefix, lock)                         \
140         cl_lock_trace0(level, env, prefix, lock, __FUNCTION__, __LINE__)
141
142 #define RETIP ((unsigned long)__builtin_return_address(0))
143
144 #ifdef CONFIG_LOCKDEP
145 static cfs_lock_class_key_t cl_lock_key;
146
147 static void cl_lock_lockdep_init(struct cl_lock *lock)
148 {
149         lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
150 }
151
152 static void cl_lock_lockdep_acquire(const struct lu_env *env,
153                                     struct cl_lock *lock, __u32 enqflags)
154 {
155         cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
156 #ifdef HAVE_LOCK_MAP_ACQUIRE
157         lock_map_acquire(&lock->dep_map);
158 #else  /* HAVE_LOCK_MAP_ACQUIRE */
159         lock_acquire(&lock->dep_map, !!(enqflags & CEF_ASYNC),
160                      /* try: */ 0, lock->cll_descr.cld_mode <= CLM_READ,
161                      /* check: */ 2, RETIP);
162 #endif /* HAVE_LOCK_MAP_ACQUIRE */
163 }
164
165 static void cl_lock_lockdep_release(const struct lu_env *env,
166                                     struct cl_lock *lock)
167 {
168         cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
169         lock_release(&lock->dep_map, 0, RETIP);
170 }
171
172 #else /* !CONFIG_LOCKDEP */
173
174 static void cl_lock_lockdep_init(struct cl_lock *lock)
175 {}
176 static void cl_lock_lockdep_acquire(const struct lu_env *env,
177                                     struct cl_lock *lock, __u32 enqflags)
178 {}
179 static void cl_lock_lockdep_release(const struct lu_env *env,
180                                     struct cl_lock *lock)
181 {}
182
183 #endif /* !CONFIG_LOCKDEP */
184
185 /**
186  * Adds lock slice to the compound lock.
187  *
188  * This is called by cl_object_operations::coo_lock_init() methods to add a
189  * per-layer state to the lock. New state is added at the end of
190  * cl_lock::cll_layers list, that is, it is at the bottom of the stack.
191  *
192  * \see cl_req_slice_add(), cl_page_slice_add(), cl_io_slice_add()
193  */
194 void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
195                        struct cl_object *obj,
196                        const struct cl_lock_operations *ops)
197 {
198         ENTRY;
199         slice->cls_lock = lock;
200         cfs_list_add_tail(&slice->cls_linkage, &lock->cll_layers);
201         slice->cls_obj = obj;
202         slice->cls_ops = ops;
203         EXIT;
204 }
205 EXPORT_SYMBOL(cl_lock_slice_add);
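/*
 * Illustrative sketch, not part of the original source: how a layer's
 * cl_object_operations::coo_lock_init() method might allocate its private
 * slice and register it with cl_lock_slice_add().  The names my_lock_init(),
 * struct my_lock_slice, its mls_cl member and my_lock_ops are hypothetical
 * placeholders; real layers typically allocate from a slab or the env.
 *
 * \code
 * static int my_lock_init(const struct lu_env *env, struct cl_object *obj,
 *                         struct cl_lock *lock, const struct cl_io *io)
 * {
 *         struct my_lock_slice *mls;
 *
 *         OBD_ALLOC_PTR(mls);
 *         if (mls == NULL)
 *                 return -ENOMEM;
 *         // the new slice goes to the tail of cll_layers (bottom of stack)
 *         cl_lock_slice_add(lock, &mls->mls_cl, obj, &my_lock_ops);
 *         return 0;
 * }
 * \endcode
 */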
206
207 /**
208  * Returns true iff a lock with the mode \a has provides at least the same
209  * guarantees as a lock with the mode \a need.
210  */
211 int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
212 {
213         LINVRNT(need == CLM_READ || need == CLM_WRITE ||
214                 need == CLM_PHANTOM || need == CLM_GROUP);
215         LINVRNT(has == CLM_READ || has == CLM_WRITE ||
216                 has == CLM_PHANTOM || has == CLM_GROUP);
217         CLASSERT(CLM_PHANTOM < CLM_READ);
218         CLASSERT(CLM_READ < CLM_WRITE);
219         CLASSERT(CLM_WRITE < CLM_GROUP);
220
221         if (has != CLM_GROUP)
222                 return need <= has;
223         else
224                 return need == has;
225 }
226 EXPORT_SYMBOL(cl_lock_mode_match);
227
228 /**
229  * Returns true iff extent portions of lock descriptions match.
230  */
231 int cl_lock_ext_match(const struct cl_lock_descr *has,
232                       const struct cl_lock_descr *need)
233 {
234         return
235                 has->cld_start <= need->cld_start &&
236                 has->cld_end >= need->cld_end &&
237                 cl_lock_mode_match(has->cld_mode, need->cld_mode) &&
238                 (has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid);
239 }
240 EXPORT_SYMBOL(cl_lock_ext_match);
241
242 /**
243  * Returns true iff a lock with the description \a has provides at least the
244  * same guarantees as a lock with the description \a need.
245  */
246 int cl_lock_descr_match(const struct cl_lock_descr *has,
247                         const struct cl_lock_descr *need)
248 {
249         return
250                 cl_object_same(has->cld_obj, need->cld_obj) &&
251                 cl_lock_ext_match(has, need);
252 }
253 EXPORT_SYMBOL(cl_lock_descr_match);
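/*
 * Illustrative sketch, not part of the original source: the matching helpers
 * above in action.  A cached whole-file CLM_WRITE lock satisfies a CLM_READ
 * request for any sub-extent of the same object, while CLM_GROUP locks only
 * match locks with the same group id.  CL_PAGE_EOF is assumed here to be the
 * "end of file" page index used in lock descriptors.
 *
 * \code
 *      struct cl_lock_descr has  = {
 *              .cld_obj   = obj,
 *              .cld_start = 0,
 *              .cld_end   = CL_PAGE_EOF,
 *              .cld_mode  = CLM_WRITE
 *      };
 *      struct cl_lock_descr need = {
 *              .cld_obj   = obj,
 *              .cld_start = 10,
 *              .cld_end   = 20,
 *              .cld_mode  = CLM_READ
 *      };
 *
 *      // non-zero: extent is covered and CLM_WRITE implies CLM_READ
 *      cl_lock_descr_match(&has, &need);
 * \endcode
 */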
254
255 static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
256 {
257         struct cl_object *obj = lock->cll_descr.cld_obj;
258
259         LINVRNT(!cl_lock_is_mutexed(lock));
260
261         ENTRY;
262         cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
263         cfs_might_sleep();
264         while (!cfs_list_empty(&lock->cll_layers)) {
265                 struct cl_lock_slice *slice;
266
267                 slice = cfs_list_entry(lock->cll_layers.next,
268                                        struct cl_lock_slice, cls_linkage);
269                 cfs_list_del_init(lock->cll_layers.next);
270                 slice->cls_ops->clo_fini(env, slice);
271         }
272         cfs_atomic_dec(&cl_object_site(obj)->cs_locks.cs_total);
273         cfs_atomic_dec(&cl_object_site(obj)->cs_locks_state[lock->cll_state]);
274         lu_object_ref_del_at(&obj->co_lu, lock->cll_obj_ref, "cl_lock", lock);
275         cl_object_put(env, obj);
276         lu_ref_fini(&lock->cll_reference);
277         lu_ref_fini(&lock->cll_holders);
278         cfs_mutex_destroy(&lock->cll_guard);
279         OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
280         EXIT;
281 }
282
283 /**
284  * Releases a reference on a lock.
285  *
286  * When last reference is released, lock is returned to the cache, unless it
287  * is in cl_lock_state::CLS_FREEING state, in which case it is destroyed
288  * immediately.
289  *
290  * \see cl_object_put(), cl_page_put()
291  */
292 void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
293 {
294         struct cl_object        *obj;
295         struct cl_site          *site;
296
297         LINVRNT(cl_lock_invariant(env, lock));
298         ENTRY;
299         obj = lock->cll_descr.cld_obj;
300         LINVRNT(obj != NULL);
301         site = cl_object_site(obj);
302
303         CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
304                cfs_atomic_read(&lock->cll_ref), lock, RETIP);
305
306         if (cfs_atomic_dec_and_test(&lock->cll_ref)) {
307                 if (lock->cll_state == CLS_FREEING) {
308                         LASSERT(cfs_list_empty(&lock->cll_linkage));
309                         cl_lock_free(env, lock);
310                 }
311                 cfs_atomic_dec(&site->cs_locks.cs_busy);
312         }
313         EXIT;
314 }
315 EXPORT_SYMBOL(cl_lock_put);
316
317 /**
318  * Acquires an additional reference to a lock.
319  *
320  * This can be called only by caller already possessing a reference to \a
321  * lock.
322  *
323  * \see cl_object_get(), cl_page_get()
324  */
325 void cl_lock_get(struct cl_lock *lock)
326 {
327         LINVRNT(cl_lock_invariant(NULL, lock));
328         CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
329                cfs_atomic_read(&lock->cll_ref), lock, RETIP);
330         cfs_atomic_inc(&lock->cll_ref);
331 }
332 EXPORT_SYMBOL(cl_lock_get);
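/*
 * Illustrative sketch, not part of the original source: the usual reference
 * pairing.  A caller that already owns a reference may take an extra one with
 * cl_lock_get() and must drop it with cl_lock_put(); the last put of a lock
 * in CLS_FREEING state frees it.
 *
 * \code
 *      cl_lock_get(lock);              // caller already holds a reference
 *      // ... use the lock ...
 *      cl_lock_put(env, lock);         // drop the extra reference
 * \endcode
 */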
333
334 /**
335  * Acquires a reference to a lock.
336  *
337  * This is much like cl_lock_get(), except that this function can be used to
338  * acquire an initial reference to a cached lock. Caller has to deal with all
339  * possible races. Use with care!
340  *
341  * \see cl_page_get_trust()
342  */
343 void cl_lock_get_trust(struct cl_lock *lock)
344 {
345         struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
346
347         CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
348                cfs_atomic_read(&lock->cll_ref), lock, RETIP);
349         if (cfs_atomic_inc_return(&lock->cll_ref) == 1)
350                 cfs_atomic_inc(&site->cs_locks.cs_busy);
351 }
352 EXPORT_SYMBOL(cl_lock_get_trust);
353
354 /**
355  * Helper function that destroys a lock that was not completely initialized.
356  *
357  * Other threads can acquire references to the top-lock through its
358  * sub-locks. Hence, it cannot be cl_lock_free()-ed immediately.
359  */
360 static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
361 {
362         cl_lock_mutex_get(env, lock);
363         cl_lock_cancel(env, lock);
364         cl_lock_delete(env, lock);
365         cl_lock_mutex_put(env, lock);
366         cl_lock_put(env, lock);
367 }
368
369 static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
370                                      struct cl_object *obj,
371                                      const struct cl_io *io,
372                                      const struct cl_lock_descr *descr)
373 {
374         struct cl_lock          *lock;
375         struct lu_object_header *head;
376         struct cl_site          *site = cl_object_site(obj);
377
378         ENTRY;
379         OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, CFS_ALLOC_IO);
380         if (lock != NULL) {
381                 cfs_atomic_set(&lock->cll_ref, 1);
382                 lock->cll_descr = *descr;
383                 lock->cll_state = CLS_NEW;
384                 cl_object_get(obj);
385                 lock->cll_obj_ref = lu_object_ref_add(&obj->co_lu,
386                                                       "cl_lock", lock);
387                 CFS_INIT_LIST_HEAD(&lock->cll_layers);
388                 CFS_INIT_LIST_HEAD(&lock->cll_linkage);
389                 CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
390                 lu_ref_init(&lock->cll_reference);
391                 lu_ref_init(&lock->cll_holders);
392                 cfs_mutex_init(&lock->cll_guard);
393                 cfs_lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
394                 cfs_waitq_init(&lock->cll_wq);
395                 head = obj->co_lu.lo_header;
396                 cfs_atomic_inc(&site->cs_locks_state[CLS_NEW]);
397                 cfs_atomic_inc(&site->cs_locks.cs_total);
398                 cfs_atomic_inc(&site->cs_locks.cs_created);
399                 cl_lock_lockdep_init(lock);
400                 cfs_list_for_each_entry(obj, &head->loh_layers,
401                                         co_lu.lo_linkage) {
402                         int err;
403
404                         err = obj->co_ops->coo_lock_init(env, obj, lock, io);
405                         if (err != 0) {
406                                 cl_lock_finish(env, lock);
407                                 lock = ERR_PTR(err);
408                                 break;
409                         }
410                 }
411         } else
412                 lock = ERR_PTR(-ENOMEM);
413         RETURN(lock);
414 }
415
416 /**
417  * Transfer the lock into INTRANSIT state and return the original state.
418  *
419  * \pre  state: CLS_CACHED, CLS_HELD or CLS_ENQUEUED
420  * \post state: CLS_INTRANSIT
421  * \see CLS_INTRANSIT
422  */
423 enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
424                                      struct cl_lock *lock)
425 {
426         enum cl_lock_state state = lock->cll_state;
427
428         LASSERT(cl_lock_is_mutexed(lock));
429         LASSERT(state != CLS_INTRANSIT);
430         LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
431                  "Malformed lock state %d.\n", state);
432
433         cl_lock_state_set(env, lock, CLS_INTRANSIT);
434         lock->cll_intransit_owner = cfs_current();
435         cl_lock_hold_add(env, lock, "intransit", cfs_current());
436         return state;
437 }
438 EXPORT_SYMBOL(cl_lock_intransit);
439
440 /**
441  * Exits the INTRANSIT state and restores the lock to its original state.
442  */
443 void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
444                        enum cl_lock_state state)
445 {
446         LASSERT(cl_lock_is_mutexed(lock));
447         LASSERT(lock->cll_state == CLS_INTRANSIT);
448         LASSERT(state != CLS_INTRANSIT);
449         LASSERT(lock->cll_intransit_owner == cfs_current());
450
451         lock->cll_intransit_owner = NULL;
452         cl_lock_state_set(env, lock, state);
453         cl_lock_unhold(env, lock, "intransit", cfs_current());
454 }
455 EXPORT_SYMBOL(cl_lock_extransit);
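/*
 * Illustrative sketch, not part of the original source: the INTRANSIT
 * protocol as used by cl_use_try() and cl_unuse_try() below.  The owner
 * parks the lock in CLS_INTRANSIT around a blocking transition and then
 * restores a regular state with cl_lock_extransit().
 *
 * \code
 *      enum cl_lock_state state;
 *
 *      state = cl_lock_intransit(env, lock);   // lock mutex must be held
 *      // ... perform the (possibly blocking) state transition ...
 *      cl_lock_extransit(env, lock, state);    // restore the saved state
 * \endcode
 */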
456
457 /**
458  * Returns true iff the lock is in the INTRANSIT state and owned by another thread.
459  */
460 int cl_lock_is_intransit(struct cl_lock *lock)
461 {
462         LASSERT(cl_lock_is_mutexed(lock));
463         return lock->cll_state == CLS_INTRANSIT &&
464                lock->cll_intransit_owner != cfs_current();
465 }
466 EXPORT_SYMBOL(cl_lock_is_intransit);
467 /**
468  * Returns true iff lock is "suitable" for given io. E.g., locks acquired by
469  * truncate and O_APPEND cannot be reused for read/non-append-write, as they
470  * cover multiple stripes and can trigger cascading timeouts.
471  */
472 static int cl_lock_fits_into(const struct lu_env *env,
473                              const struct cl_lock *lock,
474                              const struct cl_lock_descr *need,
475                              const struct cl_io *io)
476 {
477         const struct cl_lock_slice *slice;
478
479         LINVRNT(cl_lock_invariant_trusted(env, lock));
480         ENTRY;
481         cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
482                 if (slice->cls_ops->clo_fits_into != NULL &&
483                     !slice->cls_ops->clo_fits_into(env, slice, need, io))
484                         RETURN(0);
485         }
486         RETURN(1);
487 }
488
489 static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
490                                       struct cl_object *obj,
491                                       const struct cl_io *io,
492                                       const struct cl_lock_descr *need)
493 {
494         struct cl_lock          *lock;
495         struct cl_object_header *head;
496         struct cl_site          *site;
497
498         ENTRY;
499
500         head = cl_object_header(obj);
501         site = cl_object_site(obj);
502         LINVRNT_SPIN_LOCKED(&head->coh_lock_guard);
503         cfs_atomic_inc(&site->cs_locks.cs_lookup);
504         cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
505                 int matched;
506
507                 matched = cl_lock_ext_match(&lock->cll_descr, need) &&
508                           lock->cll_state < CLS_FREEING &&
509                           lock->cll_error == 0 &&
510                           !(lock->cll_flags & CLF_CANCELLED) &&
511                           cl_lock_fits_into(env, lock, need, io);
512                 CDEBUG(D_DLMTRACE, "has: "DDESCR"(%d) need: "DDESCR": %d\n",
513                        PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
514                        matched);
515                 if (matched) {
516                         cl_lock_get_trust(lock);
517                         cfs_atomic_inc(&cl_object_site(obj)->cs_locks.cs_hit);
518                         RETURN(lock);
519                 }
520         }
521         RETURN(NULL);
522 }
523
524 /**
525  * Returns a lock matching description \a need.
526  *
527  * This is the main entry point into the cl_lock caching interface. First, a
528  * cache (implemented as a per-object linked list) is consulted. If lock is
529  * found there, it is returned immediately. Otherwise new lock is allocated
530  * and returned. In any case, additional reference to lock is acquired.
531  *
532  * \see cl_object_find(), cl_page_find()
533  */
534 static struct cl_lock *cl_lock_find(const struct lu_env *env,
535                                     const struct cl_io *io,
536                                     const struct cl_lock_descr *need)
537 {
538         struct cl_object_header *head;
539         struct cl_object        *obj;
540         struct cl_lock          *lock;
541         struct cl_site          *site;
542
543         ENTRY;
544
545         obj  = need->cld_obj;
546         head = cl_object_header(obj);
547         site = cl_object_site(obj);
548
549         cfs_spin_lock(&head->coh_lock_guard);
550         lock = cl_lock_lookup(env, obj, io, need);
551         cfs_spin_unlock(&head->coh_lock_guard);
552
553         if (lock == NULL) {
554                 lock = cl_lock_alloc(env, obj, io, need);
555                 if (!IS_ERR(lock)) {
556                         struct cl_lock *ghost;
557
558                         cfs_spin_lock(&head->coh_lock_guard);
559                         ghost = cl_lock_lookup(env, obj, io, need);
560                         if (ghost == NULL) {
561                                 cfs_list_add_tail(&lock->cll_linkage,
562                                                   &head->coh_locks);
563                                 cfs_spin_unlock(&head->coh_lock_guard);
564                                 cfs_atomic_inc(&site->cs_locks.cs_busy);
565                         } else {
566                                 cfs_spin_unlock(&head->coh_lock_guard);
567                                 /*
568                                  * Other threads can acquire references to the
569                                  * top-lock through its sub-locks. Hence, it
570                                  * cannot be cl_lock_free()-ed immediately.
571                                  */
572                                 cl_lock_finish(env, lock);
573                                 lock = ghost;
574                         }
575                 }
576         }
577         RETURN(lock);
578 }
579
580 /**
581  * Returns existing lock matching given description. This is similar to
582  * cl_lock_find() except that no new lock is created, and returned lock is
583  * guaranteed to be in enum cl_lock_state::CLS_HELD state.
584  */
585 struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
586                              const struct cl_lock_descr *need,
587                              const char *scope, const void *source)
588 {
589         struct cl_object_header *head;
590         struct cl_object        *obj;
591         struct cl_lock          *lock;
592
593         obj  = need->cld_obj;
594         head = cl_object_header(obj);
595
596         cfs_spin_lock(&head->coh_lock_guard);
597         lock = cl_lock_lookup(env, obj, io, need);
598         cfs_spin_unlock(&head->coh_lock_guard);
599
600         if (lock == NULL)
601                 return NULL;
602
603         cl_lock_mutex_get(env, lock);
604         if (lock->cll_state == CLS_INTRANSIT)
605                 cl_lock_state_wait(env, lock); /* Don't care return value. */
606         cl_lock_hold_add(env, lock, scope, source);
607         cl_lock_user_add(env, lock);
608         if (lock->cll_state == CLS_CACHED)
609                 cl_use_try(env, lock, 1);
610         if (lock->cll_state == CLS_HELD) {
611                 cl_lock_mutex_put(env, lock);
612                 cl_lock_lockdep_acquire(env, lock, 0);
613                 cl_lock_put(env, lock);
614         } else {
615                 cl_unuse_try(env, lock);
616                 cl_lock_unhold(env, lock, scope, source);
617                 cl_lock_mutex_put(env, lock);
618                 cl_lock_put(env, lock);
619                 lock = NULL;
620         }
621
622         return lock;
623 }
624 EXPORT_SYMBOL(cl_lock_peek);
625
626 /**
627  * Returns a slice within a lock, corresponding to the given layer in the
628  * device stack.
629  *
630  * \see cl_page_at()
631  */
632 const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
633                                        const struct lu_device_type *dtype)
634 {
635         const struct cl_lock_slice *slice;
636
637         LINVRNT(cl_lock_invariant_trusted(NULL, lock));
638         ENTRY;
639
640         cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
641                 if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
642                         RETURN(slice);
643         }
644         RETURN(NULL);
645 }
646 EXPORT_SYMBOL(cl_lock_at);
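/*
 * Illustrative sketch, not part of the original source: a layer fetching its
 * own slice from a compound lock.  The names my_device_type, struct
 * my_lock_slice and cl2my_slice() are hypothetical placeholders for the
 * layer's device type and its container_of()-style conversion helper.
 *
 * \code
 *      const struct cl_lock_slice *slice;
 *
 *      slice = cl_lock_at(lock, &my_device_type);
 *      if (slice != NULL) {
 *              struct my_lock_slice *mls = cl2my_slice(slice);
 *              // ... access layer-private state in mls ...
 *      }
 * \endcode
 */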
647
648 static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
649 {
650         struct cl_thread_counters *counters;
651
652         counters = cl_lock_counters(env, lock);
653         lock->cll_depth++;
654         counters->ctc_nr_locks_locked++;
655         lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
656         cl_lock_trace(D_TRACE, env, "got mutex", lock);
657 }
658
659 /**
660  * Locks cl_lock object.
661  *
662  * This is used to manipulate cl_lock fields, and to serialize state
663  * transitions in the lock state machine.
664  *
665  * \post cl_lock_is_mutexed(lock)
666  *
667  * \see cl_lock_mutex_put()
668  */
669 void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
670 {
671         LINVRNT(cl_lock_invariant(env, lock));
672
673         if (lock->cll_guarder == cfs_current()) {
674                 LINVRNT(cl_lock_is_mutexed(lock));
675                 LINVRNT(lock->cll_depth > 0);
676         } else {
677                 struct cl_object_header *hdr;
678                 struct cl_thread_info   *info;
679                 int i;
680
681                 LINVRNT(lock->cll_guarder != cfs_current());
682                 hdr = cl_object_header(lock->cll_descr.cld_obj);
683                 /*
684                  * Check that mutices are taken in the bottom-to-top order.
685                  */
686                 info = cl_env_info(env);
687                 for (i = 0; i < hdr->coh_nesting; ++i)
688                         LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
689                 cfs_mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
690                 lock->cll_guarder = cfs_current();
691                 LINVRNT(lock->cll_depth == 0);
692         }
693         cl_lock_mutex_tail(env, lock);
694 }
695 EXPORT_SYMBOL(cl_lock_mutex_get);
696
697 /**
698  * Try-locks cl_lock object.
699  *
700  * \retval 0 \a lock was successfully locked
701  *
702  * \retval -EBUSY \a lock cannot be locked right now
703  *
704  * \post ergo(result == 0, cl_lock_is_mutexed(lock))
705  *
706  * \see cl_lock_mutex_get()
707  */
708 int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
709 {
710         int result;
711
712         LINVRNT(cl_lock_invariant_trusted(env, lock));
713         ENTRY;
714
715         result = 0;
716         if (lock->cll_guarder == cfs_current()) {
717                 LINVRNT(lock->cll_depth > 0);
718                 cl_lock_mutex_tail(env, lock);
719         } else if (cfs_mutex_trylock(&lock->cll_guard)) {
720                 LINVRNT(lock->cll_depth == 0);
721                 lock->cll_guarder = cfs_current();
722                 cl_lock_mutex_tail(env, lock);
723         } else
724                 result = -EBUSY;
725         RETURN(result);
726 }
727 EXPORT_SYMBOL(cl_lock_mutex_try);
728
729 /**
730  * Unlocks cl_lock object.
731  *
732  * \pre cl_lock_is_mutexed(lock)
733  *
734  * \see cl_lock_mutex_get()
735  */
736 void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
737 {
738         struct cl_thread_counters *counters;
739
740         LINVRNT(cl_lock_invariant(env, lock));
741         LINVRNT(cl_lock_is_mutexed(lock));
742         LINVRNT(lock->cll_guarder == cfs_current());
743         LINVRNT(lock->cll_depth > 0);
744
745         counters = cl_lock_counters(env, lock);
746         LINVRNT(counters->ctc_nr_locks_locked > 0);
747
748         cl_lock_trace(D_TRACE, env, "put mutex", lock);
749         lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
750         counters->ctc_nr_locks_locked--;
751         if (--lock->cll_depth == 0) {
752                 lock->cll_guarder = NULL;
753                 cfs_mutex_unlock(&lock->cll_guard);
754         }
755 }
756 EXPORT_SYMBOL(cl_lock_mutex_put);
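/*
 * Illustrative sketch, not part of the original source: serializing access to
 * cl_lock fields.  The mutex is recursive for the owning thread, so a nested
 * cl_lock_mutex_get() from the same thread only increments cll_depth.
 *
 * \code
 *      cl_lock_mutex_get(env, lock);
 *      LASSERT(cl_lock_is_mutexed(lock));
 *      // ... inspect or modify the lock state machine ...
 *      cl_lock_mutex_put(env, lock);
 * \endcode
 */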
757
758 /**
759  * Returns true iff lock's mutex is owned by the current thread.
760  */
761 int cl_lock_is_mutexed(struct cl_lock *lock)
762 {
763         return lock->cll_guarder == cfs_current();
764 }
765 EXPORT_SYMBOL(cl_lock_is_mutexed);
766
767 /**
768  * Returns number of cl_lock mutices held by the current thread (environment).
769  */
770 int cl_lock_nr_mutexed(const struct lu_env *env)
771 {
772         struct cl_thread_info *info;
773         int i;
774         int locked;
775
776         /*
777          * NOTE: if summation across all nesting levels (currently 2) proves
778          *       too expensive, a summary counter can be added to
779          *       struct cl_thread_info.
780          */
781         info = cl_env_info(env);
782         for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
783                 locked += info->clt_counters[i].ctc_nr_locks_locked;
784         return locked;
785 }
786 EXPORT_SYMBOL(cl_lock_nr_mutexed);
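/*
 * Illustrative sketch, not part of the original source: cl_lock_nr_mutexed()
 * is mainly a debugging aid.  Code that is about to block asserts that the
 * current thread holds no cl_lock mutex, exactly as cl_lock_state_wait() and
 * cl_lock_enqueue_wait() do below.
 *
 * \code
 *      LASSERT(cl_lock_nr_mutexed(env) == 0);  // safe to sleep now
 * \endcode
 */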
787
788 static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
789 {
790         LINVRNT(cl_lock_is_mutexed(lock));
791         LINVRNT(cl_lock_invariant(env, lock));
792         ENTRY;
793         if (!(lock->cll_flags & CLF_CANCELLED)) {
794                 const struct cl_lock_slice *slice;
795
796                 lock->cll_flags |= CLF_CANCELLED;
797                 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
798                                                 cls_linkage) {
799                         if (slice->cls_ops->clo_cancel != NULL)
800                                 slice->cls_ops->clo_cancel(env, slice);
801                 }
802         }
803         EXIT;
804 }
805
806 static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
807 {
808         struct cl_object_header    *head;
809         const struct cl_lock_slice *slice;
810
811         LINVRNT(cl_lock_is_mutexed(lock));
812         LINVRNT(cl_lock_invariant(env, lock));
813
814         ENTRY;
815         if (lock->cll_state < CLS_FREEING) {
816                 LASSERT(lock->cll_state != CLS_INTRANSIT);
817                 cl_lock_state_set(env, lock, CLS_FREEING);
818
819                 head = cl_object_header(lock->cll_descr.cld_obj);
820
821                 cfs_spin_lock(&head->coh_lock_guard);
822                 cfs_list_del_init(&lock->cll_linkage);
823
824                 cfs_spin_unlock(&head->coh_lock_guard);
825                 /*
826                  * From now on, no new references to this lock can be acquired
827                  * by cl_lock_lookup().
828                  */
829                 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
830                                                 cls_linkage) {
831                         if (slice->cls_ops->clo_delete != NULL)
832                                 slice->cls_ops->clo_delete(env, slice);
833                 }
834                 /*
835                  * From now on, no new references to this lock can be acquired
836                  * by layer-specific means (like a pointer from struct
837                  * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
838                  * lov).
839                  *
840                  * Lock will be finally freed in cl_lock_put() when last of
841                  * existing references goes away.
842                  */
843         }
844         EXIT;
845 }
846
847 /**
848  * Mod(ifie)s cl_lock::cll_holds counter for a given lock. Also, for a
849  * top-lock (nesting == 0) accounts for this modification in the per-thread
850  * debugging counters. Sub-lock holds can be released by a thread different
851  * from one that acquired it.
852  */
853 static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
854                              int delta)
855 {
856         struct cl_thread_counters *counters;
857         enum clt_nesting_level     nesting;
858
859         lock->cll_holds += delta;
860         nesting = cl_lock_nesting(lock);
861         if (nesting == CNL_TOP) {
862                 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
863                 counters->ctc_nr_held += delta;
864                 LASSERT(counters->ctc_nr_held >= 0);
865         }
866 }
867
868 /**
869  * Mod(ifie)s cl_lock::cll_users counter for a given lock. See
870  * cl_lock_hold_mod() for the explanation of the debugging code.
871  */
872 static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
873                              int delta)
874 {
875         struct cl_thread_counters *counters;
876         enum clt_nesting_level     nesting;
877
878         lock->cll_users += delta;
879         nesting = cl_lock_nesting(lock);
880         if (nesting == CNL_TOP) {
881                 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
882                 counters->ctc_nr_used += delta;
883                 LASSERT(counters->ctc_nr_used >= 0);
884         }
885 }
886
887 static void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
888                                  const char *scope, const void *source)
889 {
890         LINVRNT(cl_lock_is_mutexed(lock));
891         LINVRNT(cl_lock_invariant(env, lock));
892         LASSERT(lock->cll_holds > 0);
893
894         ENTRY;
895         cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock);
896         lu_ref_del(&lock->cll_holders, scope, source);
897         cl_lock_hold_mod(env, lock, -1);
898         if (lock->cll_holds == 0) {
899                 CL_LOCK_ASSERT(lock->cll_state != CLS_HELD, env, lock);
900                 if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
901                     lock->cll_descr.cld_mode == CLM_GROUP ||
902                     lock->cll_state != CLS_CACHED)
903                         /*
904                          * If lock is still phantom or grouplock when user is
905                          * done with it---destroy the lock.
906                          */
907                         lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
908                 if (lock->cll_flags & CLF_CANCELPEND) {
909                         lock->cll_flags &= ~CLF_CANCELPEND;
910                         cl_lock_cancel0(env, lock);
911                 }
912                 if (lock->cll_flags & CLF_DOOMED) {
913                         /* no longer doomed: it's dead... Jim. */
914                         lock->cll_flags &= ~CLF_DOOMED;
915                         cl_lock_delete0(env, lock);
916                 }
917         }
918         EXIT;
919 }
920
921 /**
922  * Waits until lock state is changed.
923  *
924  * This function is called with cl_lock mutex locked, atomically releases
925  * mutex and goes to sleep, waiting for a lock state change (signaled by
926  * cl_lock_signal()), and re-acquires the mutex before return.
927  *
928  * This function is used to wait until lock state machine makes some progress
929  * and to emulate synchronous operations on top of asynchronous lock
930  * interface.
931  *
932  * \retval -EINTR wait was interrupted
933  *
934  * \retval 0 wait wasn't interrupted
935  *
936  * \pre cl_lock_is_mutexed(lock)
937  *
938  * \see cl_lock_signal()
939  */
940 int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
941 {
942         cfs_waitlink_t waiter;
943         cfs_sigset_t blocked;
944         int result;
945
946         ENTRY;
947         LINVRNT(cl_lock_is_mutexed(lock));
948         LINVRNT(cl_lock_invariant(env, lock));
949         LASSERT(lock->cll_depth == 1);
950         LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */
951
952         cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
953         result = lock->cll_error;
954         if (result == 0) {
955                 /* To avoid being interrupted by the 'non-fatal' signals
956                  * (SIGCHLD, for instance), we'd block them temporarily.
957                  * LU-305 */
958                 blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
959
960                 cfs_waitlink_init(&waiter);
961                 cfs_waitq_add(&lock->cll_wq, &waiter);
962                 cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
963                 cl_lock_mutex_put(env, lock);
964
965                 LASSERT(cl_lock_nr_mutexed(env) == 0);
966
967                 result = -EINTR;
968                 if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
969                         cfs_waitq_wait(&waiter, CFS_TASK_INTERRUPTIBLE);
970                         if (!cfs_signal_pending())
971                                 result = 0;
972                 }
973
974                 cl_lock_mutex_get(env, lock);
975                 cfs_set_current_state(CFS_TASK_RUNNING);
976                 cfs_waitq_del(&lock->cll_wq, &waiter);
977
978                 /* Restore old blocked signals */
979                 cfs_restore_sigs(blocked);
980         }
981         RETURN(result);
982 }
983 EXPORT_SYMBOL(cl_lock_state_wait);
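/*
 * Illustrative sketch, not part of the original source: the retry pattern
 * used throughout this file (see cl_enqueue_locked() and cl_wait()).  When a
 * *_try() step returns CLO_WAIT, the caller sleeps in cl_lock_state_wait()
 * under the lock mutex and retries once the state changes.
 *
 * \code
 *      do {
 *              result = cl_wait_try(env, lock);
 *              if (result == CLO_WAIT) {
 *                      result = cl_lock_state_wait(env, lock);
 *                      if (result == 0)
 *                              continue;       // state changed, retry
 *              }
 *              break;                          // granted or -EINTR
 *      } while (1);
 * \endcode
 */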
984
985 static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
986                                  enum cl_lock_state state)
987 {
988         const struct cl_lock_slice *slice;
989
990         ENTRY;
991         LINVRNT(cl_lock_is_mutexed(lock));
992         LINVRNT(cl_lock_invariant(env, lock));
993
994         cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
995                 if (slice->cls_ops->clo_state != NULL)
996                         slice->cls_ops->clo_state(env, slice, state);
997         cfs_waitq_broadcast(&lock->cll_wq);
998         EXIT;
999 }
1000
1001 /**
1002  * Notifies waiters that lock state changed.
1003  *
1004  * Wakes up all waiters sleeping in cl_lock_state_wait(), also notifies all
1005  * layers about state change by calling cl_lock_operations::clo_state()
1006  * top-to-bottom.
1007  */
1008 void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
1009 {
1010         ENTRY;
1011         cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock);
1012         cl_lock_state_signal(env, lock, lock->cll_state);
1013         EXIT;
1014 }
1015 EXPORT_SYMBOL(cl_lock_signal);
1016
1017 /**
1018  * Changes lock state.
1019  *
1020  * This function is invoked to notify layers that lock state changed, possibly
1021  * as a result of an asynchronous event such as call-back reception.
1022  *
1023  * \post lock->cll_state == state
1024  *
1025  * \see cl_lock_operations::clo_state()
1026  */
1027 void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
1028                        enum cl_lock_state state)
1029 {
1030         struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
1031
1032         ENTRY;
1033         LASSERT(lock->cll_state <= state ||
1034                 (lock->cll_state == CLS_CACHED &&
1035                  (state == CLS_HELD || /* lock found in cache */
1036                   state == CLS_NEW  ||   /* sub-lock canceled */
1037                   state == CLS_INTRANSIT)) ||
1038                 /* lock is in transit state */
1039                 lock->cll_state == CLS_INTRANSIT);
1040
1041         if (lock->cll_state != state) {
1042                 cfs_atomic_dec(&site->cs_locks_state[lock->cll_state]);
1043                 cfs_atomic_inc(&site->cs_locks_state[state]);
1044
1045                 cl_lock_state_signal(env, lock, state);
1046                 lock->cll_state = state;
1047         }
1048         EXIT;
1049 }
1050 EXPORT_SYMBOL(cl_lock_state_set);
1051
1052 static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
1053 {
1054         const struct cl_lock_slice *slice;
1055         int result;
1056
1057         do {
1058                 result = 0;
1059
1060                 LINVRNT(cl_lock_is_mutexed(lock));
1061                 LINVRNT(cl_lock_invariant(env, lock));
1062                 LASSERT(lock->cll_state == CLS_INTRANSIT);
1063
1064                 result = -ENOSYS;
1065                 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
1066                                                 cls_linkage) {
1067                         if (slice->cls_ops->clo_unuse != NULL) {
1068                                 result = slice->cls_ops->clo_unuse(env, slice);
1069                                 if (result != 0)
1070                                         break;
1071                         }
1072                 }
1073                 LASSERT(result != -ENOSYS);
1074         } while (result == CLO_REPEAT);
1075
1076         return result;
1077 }
1078
1079 /**
1080  * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling
1081  * cl_lock_operations::clo_use() top-to-bottom to notify layers.
1082  * If \a atomic is set and a layer's clo_use() method fails part-way, the lock
1083  * is unused again so that the whole use operation remains atomic.
1084  */
1085 int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
1086 {
1087         const struct cl_lock_slice *slice;
1088         int result;
1089         enum cl_lock_state state;
1090
1091         ENTRY;
1092         cl_lock_trace(D_DLMTRACE, env, "use lock", lock);
1093
1094         LASSERT(lock->cll_state == CLS_CACHED);
1095         if (lock->cll_error)
1096                 RETURN(lock->cll_error);
1097
1098         result = -ENOSYS;
1099         state = cl_lock_intransit(env, lock);
1100         cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1101                 if (slice->cls_ops->clo_use != NULL) {
1102                         result = slice->cls_ops->clo_use(env, slice);
1103                         if (result != 0)
1104                                 break;
1105                 }
1106         }
1107         LASSERT(result != -ENOSYS);
1108
1109         LASSERTF(lock->cll_state == CLS_INTRANSIT, "Wrong state %d.\n",
1110                  lock->cll_state);
1111
1112         if (result == 0) {
1113                 state = CLS_HELD;
1114         } else {
1115                 if (result == -ESTALE) {
1116                         /*
1117                          * -ESTALE means the sublock is being cancelled
1118                          * at this moment; set the lock state back to
1119                          * CLS_NEW and ask the caller to repeat.
1120                          */
1121                         state = CLS_NEW;
1122                         result = CLO_REPEAT;
1123                 }
1124
1125                 /* @atomic means back-off-on-failure. */
1126                 if (atomic) {
1127                         int rc;
1128                         rc = cl_unuse_try_internal(env, lock);
1129                         /* Vet the results. */
1130                         if (rc < 0 && result > 0)
1131                                 result = rc;
1132                 }
1133
1134         }
1135         cl_lock_extransit(env, lock, state);
1136         RETURN(result);
1137 }
1138 EXPORT_SYMBOL(cl_use_try);
1139
1140 /**
1141  * Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers
1142  * top-to-bottom.
1143  */
1144 static int cl_enqueue_kick(const struct lu_env *env,
1145                            struct cl_lock *lock,
1146                            struct cl_io *io, __u32 flags)
1147 {
1148         int result;
1149         const struct cl_lock_slice *slice;
1150
1151         ENTRY;
1152         result = -ENOSYS;
1153         cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1154                 if (slice->cls_ops->clo_enqueue != NULL) {
1155                         result = slice->cls_ops->clo_enqueue(env,
1156                                                              slice, io, flags);
1157                         if (result != 0)
1158                                 break;
1159                 }
1160         }
1161         LASSERT(result != -ENOSYS);
1162         RETURN(result);
1163 }
1164
1165 /**
1166  * Tries to enqueue a lock.
1167  *
1168  * This function is called repeatedly by cl_enqueue() until either lock is
1169  * enqueued, or error occurs. This function does not block waiting for
1170  * networking communication to complete.
1171  *
1172  * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1173  *                         lock->cll_state == CLS_HELD)
1174  *
1175  * \see cl_enqueue() cl_lock_operations::clo_enqueue()
1176  * \see cl_lock_state::CLS_ENQUEUED
1177  */
1178 int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
1179                    struct cl_io *io, __u32 flags)
1180 {
1181         int result;
1182
1183         ENTRY;
1184         cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
1185         do {
1186                 LINVRNT(cl_lock_is_mutexed(lock));
1187
1188                 result = lock->cll_error;
1189                 if (result != 0)
1190                         break;
1191
1192                 switch (lock->cll_state) {
1193                 case CLS_NEW:
1194                         cl_lock_state_set(env, lock, CLS_QUEUING);
1195                         /* fall-through */
1196                 case CLS_QUEUING:
1197                         /* kick layers. */
1198                         result = cl_enqueue_kick(env, lock, io, flags);
1199                         if (result == 0)
1200                                 cl_lock_state_set(env, lock, CLS_ENQUEUED);
1201                         break;
1202                 case CLS_INTRANSIT:
1203                         LASSERT(cl_lock_is_intransit(lock));
1204                         result = CLO_WAIT;
1205                         break;
1206                 case CLS_CACHED:
1207                         /* yank lock from the cache. */
1208                         result = cl_use_try(env, lock, 0);
1209                         break;
1210                 case CLS_ENQUEUED:
1211                 case CLS_HELD:
1212                         result = 0;
1213                         break;
1214                 default:
1215                 case CLS_FREEING:
1216                         /*
1217                          * impossible, only held locks with increased
1218                          * ->cll_holds can be enqueued, and they cannot be
1219                          * freed.
1220                          */
1221                         LBUG();
1222                 }
1223         } while (result == CLO_REPEAT);
1224         RETURN(result);
1225 }
1226 EXPORT_SYMBOL(cl_enqueue_try);
1227
1228 /**
1229  * Cancel the conflicting lock found during previous enqueue.
1230  *
1231  * \retval 0 conflicting lock has been canceled.
1232  * \retval -ve error code.
1233  */
1234 int cl_lock_enqueue_wait(const struct lu_env *env,
1235                          struct cl_lock *lock,
1236                          int keep_mutex)
1237 {
1238         struct cl_lock  *conflict;
1239         int              rc = 0;
1240         ENTRY;
1241
1242         LASSERT(cl_lock_is_mutexed(lock));
1243         LASSERT(lock->cll_state == CLS_QUEUING);
1244         LASSERT(lock->cll_conflict != NULL);
1245
1246         conflict = lock->cll_conflict;
1247         lock->cll_conflict = NULL;
1248
1249         cl_lock_mutex_put(env, lock);
1250         LASSERT(cl_lock_nr_mutexed(env) == 0);
1251
1252         cl_lock_mutex_get(env, conflict);
1253         cl_lock_trace(D_DLMTRACE, env, "enqueue wait", conflict);
1254         cl_lock_cancel(env, conflict);
1255         cl_lock_delete(env, conflict);
1256
1257         while (conflict->cll_state != CLS_FREEING) {
1258                 rc = cl_lock_state_wait(env, conflict);
1259                 if (rc != 0)
1260                         break;
1261         }
1262         cl_lock_mutex_put(env, conflict);
1263         lu_ref_del(&conflict->cll_reference, "cancel-wait", lock);
1264         cl_lock_put(env, conflict);
1265
1266         if (keep_mutex)
1267                 cl_lock_mutex_get(env, lock);
1268
1269         LASSERT(rc <= 0);
1270         RETURN(rc);
1271 }
1272 EXPORT_SYMBOL(cl_lock_enqueue_wait);
1273
1274 static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
1275                              struct cl_io *io, __u32 enqflags)
1276 {
1277         int result;
1278
1279         ENTRY;
1280
1281         LINVRNT(cl_lock_is_mutexed(lock));
1282         LINVRNT(cl_lock_invariant(env, lock));
1283         LASSERT(lock->cll_holds > 0);
1284
1285         cl_lock_user_add(env, lock);
1286         do {
1287                 result = cl_enqueue_try(env, lock, io, enqflags);
1288                 if (result == CLO_WAIT) {
1289                         if (lock->cll_conflict != NULL)
1290                                 result = cl_lock_enqueue_wait(env, lock, 1);
1291                         else
1292                                 result = cl_lock_state_wait(env, lock);
1293                         if (result == 0)
1294                                 continue;
1295                 }
1296                 break;
1297         } while (1);
1298         if (result != 0)
1299                 cl_unuse_try(env, lock);
1300         LASSERT(ergo(result == 0 && !(enqflags & CEF_AGL),
1301                      lock->cll_state == CLS_ENQUEUED ||
1302                      lock->cll_state == CLS_HELD));
1303         RETURN(result);
1304 }
1305
1306 /**
1307  * Enqueues a lock.
1308  *
1309  * \pre current thread or io owns a hold on lock.
1310  *
1311  * \post ergo(result == 0, lock->users increased)
1312  * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1313  *                         lock->cll_state == CLS_HELD)
1314  */
1315 int cl_enqueue(const struct lu_env *env, struct cl_lock *lock,
1316                struct cl_io *io, __u32 enqflags)
1317 {
1318         int result;
1319
1320         ENTRY;
1321
1322         cl_lock_lockdep_acquire(env, lock, enqflags);
1323         cl_lock_mutex_get(env, lock);
1324         result = cl_enqueue_locked(env, lock, io, enqflags);
1325         cl_lock_mutex_put(env, lock);
1326         if (result != 0)
1327                 cl_lock_lockdep_release(env, lock);
1328         LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1329                      lock->cll_state == CLS_HELD));
1330         RETURN(result);
1331 }
1332 EXPORT_SYMBOL(cl_enqueue);
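/*
 * Illustrative sketch, not part of the original source: a synchronous
 * take-and-release sequence built from the exported entry points, assuming
 * the caller already owns a hold on the lock as required by the precondition
 * above.  The env, lock, io and enqflags variables are assumed to come from
 * the caller's context.
 *
 * \code
 *      rc = cl_enqueue(env, lock, io, enqflags);
 *      if (rc == 0) {
 *              rc = cl_wait(env, lock);        // block until CLS_HELD
 *              if (rc == 0) {
 *                      // ... do I/O under the granted lock ...
 *                      cl_unuse(env, lock);    // drop the user reference
 *              }
 *      }
 * \endcode
 */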
1333
1334 /**
1335  * Tries to unlock a lock.
1336  *
1337  * This function is called to release the underlying resource:
1338  * 1. for a top-lock, the resource is the sub-locks it holds;
1339  * 2. for a sub-lock, the resource is its reference to the dlmlock.
1340  *
1341  * cl_unuse_try is a one-shot operation, so it must NOT return CLO_WAIT.
1342  *
1343  * \see cl_unuse() cl_lock_operations::clo_unuse()
1344  * \see cl_lock_state::CLS_CACHED
1345  */
1346 int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
1347 {
1348         int                         result;
1349         enum cl_lock_state          state = CLS_NEW;
1350
1351         ENTRY;
1352         cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);
1353
1354         if (lock->cll_users > 1) {
1355                 cl_lock_user_del(env, lock);
1356                 RETURN(0);
1357         }
1358
1359         /* Only if the lock is in CLS_HELD or CLS_ENQUEUED state, it can hold
1360          * underlying resources. */
1361         if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) {
1362                 cl_lock_user_del(env, lock);
1363                 RETURN(0);
1364         }
1365
1366         /*
1367          * New lock users (->cll_users) do not prevent the unlock
1368          * from proceeding. From this point, lock eventually reaches
1369          * CLS_CACHED, is reinitialized to CLS_NEW or fails into
1370          * CLS_FREEING.
1371          */
1372         state = cl_lock_intransit(env, lock);
1373
1374         result = cl_unuse_try_internal(env, lock);
1375         LASSERT(lock->cll_state == CLS_INTRANSIT);
1376         LASSERT(result != CLO_WAIT);
1377         cl_lock_user_del(env, lock);
1378         if (result == 0 || result == -ESTALE) {
1379                 /*
1380                  * Return lock back to the cache. This is the only
1381                  * place where lock is moved into CLS_CACHED state.
1382                  *
1383                  * If one of ->clo_unuse() methods returned -ESTALE, lock
1384                  * cannot be placed into cache and has to be
1385                  * re-initialized. This happens e.g., when a sub-lock was
1386                  * canceled while unlocking was in progress.
1387                  */
1388                 if (state == CLS_HELD && result == 0)
1389                         state = CLS_CACHED;
1390                 else
1391                         state = CLS_NEW;
1392                 cl_lock_extransit(env, lock, state);
1393
1394                 /*
1395                  * Hide the -ESTALE error.
1396                  * If the lock is a glimpse lock covering multiple stripes,
1397                  * one of its sublocks may have returned -ENAVAIL while the
1398                  * other sublocks matched existing write locks. In that case
1399                  * we cannot mark the whole lock as failed, because otherwise
1400                  * some of its sublocks might never be canceled and the dirty
1401                  * pages they cover would never be written to the OSTs. -jay
1402                  */
1403                 result = 0;
1404         } else {
1405                 CERROR("result = %d, this is unlikely!\n", result);
1406                 state = CLS_NEW;
1407                 cl_lock_extransit(env, lock, state);
1408         }
1409         RETURN(result ?: lock->cll_error);
1410 }
1411 EXPORT_SYMBOL(cl_unuse_try);
1412
1413 static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
1414 {
1415         int result;
1416         ENTRY;
1417
1418         result = cl_unuse_try(env, lock);
1419         if (result)
1420                 CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result);
1421
1422         EXIT;
1423 }
1424
1425 /**
1426  * Unlocks a lock.
1427  */
1428 void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
1429 {
1430         ENTRY;
1431         cl_lock_mutex_get(env, lock);
1432         cl_unuse_locked(env, lock);
1433         cl_lock_mutex_put(env, lock);
1434         cl_lock_lockdep_release(env, lock);
1435         EXIT;
1436 }
1437 EXPORT_SYMBOL(cl_unuse);
1438
1439 /**
1440  * Tries to wait for a lock.
1441  *
1442  * This function is called repeatedly by cl_wait() until either lock is
1443  * granted, or error occurs. This function does not block waiting for network
1444  * communication to complete.
1445  *
1446  * \see cl_wait() cl_lock_operations::clo_wait()
1447  * \see cl_lock_state::CLS_HELD
1448  */
1449 int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
1450 {
1451         const struct cl_lock_slice *slice;
1452         int                         result;
1453
1454         ENTRY;
1455         cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock);
1456         do {
1457                 LINVRNT(cl_lock_is_mutexed(lock));
1458                 LINVRNT(cl_lock_invariant(env, lock));
1459                 LASSERT(lock->cll_state == CLS_ENQUEUED ||
1460                         lock->cll_state == CLS_HELD ||
1461                         lock->cll_state == CLS_INTRANSIT);
1462                 LASSERT(lock->cll_users > 0);
1463                 LASSERT(lock->cll_holds > 0);
1464
1465                 result = lock->cll_error;
1466                 if (result != 0)
1467                         break;
1468
1469                 if (cl_lock_is_intransit(lock)) {
1470                         result = CLO_WAIT;
1471                         break;
1472                 }
1473
1474                 if (lock->cll_state == CLS_HELD)
1475                         /* nothing to do */
1476                         break;
1477
1478                 result = -ENOSYS;
1479                 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1480                         if (slice->cls_ops->clo_wait != NULL) {
1481                                 result = slice->cls_ops->clo_wait(env, slice);
1482                                 if (result != 0)
1483                                         break;
1484                         }
1485                 }
1486                 LASSERT(result != -ENOSYS);
1487                 if (result == 0) {
1488                         LASSERT(lock->cll_state != CLS_INTRANSIT);
1489                         cl_lock_state_set(env, lock, CLS_HELD);
1490                 }
1491         } while (result == CLO_REPEAT);
1492         RETURN(result);
1493 }
1494 EXPORT_SYMBOL(cl_wait_try);
1495
1496 /**
1497  * Waits until enqueued lock is granted.
1498  *
1499  * \pre current thread or io owns a hold on the lock
1500  * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1501  *                        lock->cll_state == CLS_HELD)
1502  *
1503  * \post ergo(result == 0, lock->cll_state == CLS_HELD)
1504  */
1505 int cl_wait(const struct lu_env *env, struct cl_lock *lock)
1506 {
1507         int result;
1508
1509         ENTRY;
1510         cl_lock_mutex_get(env, lock);
1511
1512         LINVRNT(cl_lock_invariant(env, lock));
1513         LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
1514                  "Wrong state %d \n", lock->cll_state);
1515         LASSERT(lock->cll_holds > 0);
1516
1517         do {
1518                 result = cl_wait_try(env, lock);
1519                 if (result == CLO_WAIT) {
1520                         result = cl_lock_state_wait(env, lock);
1521                         if (result == 0)
1522                                 continue;
1523                 }
1524                 break;
1525         } while (1);
1526         if (result < 0) {
1527                 cl_unuse_try(env, lock);
1528                 cl_lock_lockdep_release(env, lock);
1529         }
1530         cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
1531         cl_lock_mutex_put(env, lock);
1532         LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
1533         RETURN(result);
1534 }
1535 EXPORT_SYMBOL(cl_wait);
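/*
 * A hedged usage sketch: after a successful enqueue the caller waits for the
 * grant. On failure cl_wait() has already dropped the use (see the error path
 * above), so only the hold and reference remain to be released; "scope" and
 * "source" are placeholders for the caller's identifiers.
 *
 *      rc = cl_wait(env, lock);
 *      if (rc == 0) {
 *              ... the lock is in CLS_HELD, perform the IO ...
 *              cl_unuse(env, lock);
 *      }
 *      cl_lock_release(env, lock, scope, source);
 */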
1536
1537 /**
1538  * Executes cl_lock_operations::clo_weigh() on every layer and sums the
1539  * results to estimate the lock's weight.
1540  */
1541 unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
1542 {
1543         const struct cl_lock_slice *slice;
1544         unsigned long pound;
1545         unsigned long ounce;
1546
1547         ENTRY;
1548         LINVRNT(cl_lock_is_mutexed(lock));
1549         LINVRNT(cl_lock_invariant(env, lock));
1550
1551         pound = 0;
1552         cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1553                 if (slice->cls_ops->clo_weigh != NULL) {
1554                         ounce = slice->cls_ops->clo_weigh(env, slice);
1555                         pound += ounce;
1556                         if (pound < ounce) /* over-weight^Wflow */
1557                                 pound = ~0UL;
1558                 }
1559         }
1560         RETURN(pound);
1561 }
1562 EXPORT_SYMBOL(cl_lock_weigh);
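/*
 * A hypothetical layer method of the kind cl_lock_weigh() sums up; the layer
 * prefix "foo" and the helper foo_lock_pages_covered() are assumptions, not
 * part of the real code. Returning a larger value makes the lock "heavier".
 *
 *      static unsigned long foo_lock_weigh(const struct lu_env *env,
 *                                          const struct cl_lock_slice *slice)
 *      {
 *              return foo_lock_pages_covered(slice);
 *      }
 *
 * registered in the layer's struct cl_lock_operations as:
 *
 *              .clo_weigh = foo_lock_weigh,
 */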
1563
1564 /**
1565  * Notifies layers that lock description changed.
1566  *
1567  * The server can grant the client a lock different from the one that was
1568  * requested (e.g., larger in extent). This method is called when the
1569  * actually granted lock description becomes known, to let layers accommodate
1570  * the changed lock description.
1571  *
1572  * \see cl_lock_operations::clo_modify()
1573  */
1574 int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
1575                    const struct cl_lock_descr *desc)
1576 {
1577         const struct cl_lock_slice *slice;
1578         struct cl_object           *obj = lock->cll_descr.cld_obj;
1579         struct cl_object_header    *hdr = cl_object_header(obj);
1580         int result;
1581
1582         ENTRY;
1583         cl_lock_trace(D_DLMTRACE, env, "modify lock", lock);
1584         /* don't allow object to change */
1585         LASSERT(obj == desc->cld_obj);
1586         LINVRNT(cl_lock_is_mutexed(lock));
1587         LINVRNT(cl_lock_invariant(env, lock));
1588
1589         cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1590                 if (slice->cls_ops->clo_modify != NULL) {
1591                         result = slice->cls_ops->clo_modify(env, slice, desc);
1592                         if (result != 0)
1593                                 RETURN(result);
1594                 }
1595         }
1596         CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n",
1597                       PDESCR(desc), PFID(lu_object_fid(&desc->cld_obj->co_lu)));
1598         /*
1599          * Just replace description in place. Nothing more is needed for
1600          * now. If locks were indexed according to their extent and/or mode,
1601          * that index would have to be updated here.
1602          */
1603         cfs_spin_lock(&hdr->coh_lock_guard);
1604         lock->cll_descr = *desc;
1605         cfs_spin_unlock(&hdr->coh_lock_guard);
1606         RETURN(0);
1607 }
1608 EXPORT_SYMBOL(cl_lock_modify);
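/*
 * Caller-side sketch (names and the widened extent are illustrative): the
 * lock mutex must be held, and the object in the new description must match
 * the old one.
 *
 *      struct cl_lock_descr granted = lock->cll_descr;
 *
 *      granted.cld_start = 0;
 *      granted.cld_end   = CL_PAGE_EOF;
 *
 *      cl_lock_mutex_get(env, lock);
 *      rc = cl_lock_modify(env, lock, &granted);
 *      cl_lock_mutex_put(env, lock);
 */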
1609
1610 /**
1611  * Initializes lock closure with a given origin.
1612  *
1613  * \see cl_lock_closure
1614  */
1615 void cl_lock_closure_init(const struct lu_env *env,
1616                           struct cl_lock_closure *closure,
1617                           struct cl_lock *origin, int wait)
1618 {
1619         LINVRNT(cl_lock_is_mutexed(origin));
1620         LINVRNT(cl_lock_invariant(env, origin));
1621
1622         CFS_INIT_LIST_HEAD(&closure->clc_list);
1623         closure->clc_origin = origin;
1624         closure->clc_wait   = wait;
1625         closure->clc_nr     = 0;
1626 }
1627 EXPORT_SYMBOL(cl_lock_closure_init);
1628
1629 /**
1630  * Builds a closure of \a lock.
1631  *
1632  * Building a closure consists of adding the initial lock (\a lock) to it,
1633  * and calling cl_lock_operations::clo_closure() methods of \a lock. These
1634  * methods might call cl_lock_closure_build() recursively, adding more
1635  * locks to the closure, etc.
1636  *
1637  * \see cl_lock_closure
1638  */
1639 int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
1640                           struct cl_lock_closure *closure)
1641 {
1642         const struct cl_lock_slice *slice;
1643         int result;
1644
1645         ENTRY;
1646         LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
1647         LINVRNT(cl_lock_invariant(env, closure->clc_origin));
1648
1649         result = cl_lock_enclosure(env, lock, closure);
1650         if (result == 0) {
1651                 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1652                         if (slice->cls_ops->clo_closure != NULL) {
1653                                 result = slice->cls_ops->clo_closure(env, slice,
1654                                                                      closure);
1655                                 if (result != 0)
1656                                         break;
1657                         }
1658                 }
1659         }
1660         if (result != 0)
1661                 cl_lock_disclosure(env, closure);
1662         RETURN(result);
1663 }
1664 EXPORT_SYMBOL(cl_lock_closure_build);
1665
1666 /**
1667  * Adds new lock to a closure.
1668  *
1669  * Try-locks \a lock and, if successful, adds it to the closure (never more
1670  * than once). If the try-lock fails, returns CLO_REPEAT, after optionally
1671  * waiting until the next try-lock is likely to succeed.
1672  */
1673 int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
1674                       struct cl_lock_closure *closure)
1675 {
1676         int result = 0;
1677         ENTRY;
1678         cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock);
1679         if (!cl_lock_mutex_try(env, lock)) {
1680                 /*
1681                  * If lock->cll_inclosure is not empty, lock is already in
1682                  * this closure.
1683                  */
1684                 if (cfs_list_empty(&lock->cll_inclosure)) {
1685                         cl_lock_get_trust(lock);
1686                         lu_ref_add(&lock->cll_reference, "closure", closure);
1687                         cfs_list_add(&lock->cll_inclosure, &closure->clc_list);
1688                         closure->clc_nr++;
1689                 } else
1690                         cl_lock_mutex_put(env, lock);
1691                 result = 0;
1692         } else {
1693                 cl_lock_disclosure(env, closure);
1694                 if (closure->clc_wait) {
1695                         cl_lock_get_trust(lock);
1696                         lu_ref_add(&lock->cll_reference, "closure-w", closure);
1697                         cl_lock_mutex_put(env, closure->clc_origin);
1698
1699                         LASSERT(cl_lock_nr_mutexed(env) == 0);
1700                         cl_lock_mutex_get(env, lock);
1701                         cl_lock_mutex_put(env, lock);
1702
1703                         cl_lock_mutex_get(env, closure->clc_origin);
1704                         lu_ref_del(&lock->cll_reference, "closure-w", closure);
1705                         cl_lock_put(env, lock);
1706                 }
1707                 result = CLO_REPEAT;
1708         }
1709         RETURN(result);
1710 }
1711 EXPORT_SYMBOL(cl_lock_enclosure);
1712
1713 /** Releases mutices of enclosed locks. */
1714 void cl_lock_disclosure(const struct lu_env *env,
1715                         struct cl_lock_closure *closure)
1716 {
1717         struct cl_lock *scan;
1718         struct cl_lock *temp;
1719
1720         cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
1721         cfs_list_for_each_entry_safe(scan, temp, &closure->clc_list,
1722                                      cll_inclosure){
1723                 cfs_list_del_init(&scan->cll_inclosure);
1724                 cl_lock_mutex_put(env, scan);
1725                 lu_ref_del(&scan->cll_reference, "closure", closure);
1726                 cl_lock_put(env, scan);
1727                 closure->clc_nr--;
1728         }
1729         LASSERT(closure->clc_nr == 0);
1730 }
1731 EXPORT_SYMBOL(cl_lock_disclosure);
1732
1733 /** Finalizes a closure. */
1734 void cl_lock_closure_fini(struct cl_lock_closure *closure)
1735 {
1736         LASSERT(closure->clc_nr == 0);
1737         LASSERT(cfs_list_empty(&closure->clc_list));
1738 }
1739 EXPORT_SYMBOL(cl_lock_closure_fini);
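/*
 * Closure life-cycle sketch (illustrative; assumes \a origin is already
 * mutexed by the caller, as cl_lock_closure_build() requires):
 *
 *      struct cl_lock_closure closure;
 *      int rc;
 *
 *      cl_lock_closure_init(env, &closure, origin, 1);
 *      rc = cl_lock_closure_build(env, lock, &closure);
 *      if (rc == 0) {
 *              ... every enclosed lock is now mutexed, operate on them ...
 *              cl_lock_disclosure(env, &closure);
 *      }
 *      cl_lock_closure_fini(&closure);
 *
 * On failure cl_lock_closure_build() has already disclosed the closure; with
 * wait == 1 a CLO_REPEAT result means the attempt can simply be retried.
 */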
1740
1741 /**
1742  * Destroys this lock. Notifies layers (bottom-to-top) that the lock is being
1743  * destroyed, then destroys the lock. If there are holds on the lock,
1744  * destruction is postponed until all holds are released. This is called when
1745  * a decision is made to destroy the lock in the future, e.g., when a blocking
1746  * AST is received on it, or a fatal communication error happens.
1747  *
1748  * The caller must have a reference on this lock to prevent a situation where
1749  * the deleted lock lingers in memory indefinitely because nobody calls
1750  * cl_lock_put() to finish it.
1751  *
1752  * \pre atomic_read(&lock->cll_ref) > 0
1753  * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
1754  *           cl_lock_nr_mutexed(env) == 1)
1755  *      [i.e., if a top-lock is deleted, mutices of no other locks can be
1756  *      held, as deletion of sub-locks might require releasing a top-lock
1757  *      mutex]
1758  *
1759  * \see cl_lock_operations::clo_delete()
1760  * \see cl_lock::cll_holds
1761  */
1762 void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock)
1763 {
1764         LINVRNT(cl_lock_is_mutexed(lock));
1765         LINVRNT(cl_lock_invariant(env, lock));
1766         LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
1767                      cl_lock_nr_mutexed(env) == 1));
1768
1769         ENTRY;
1770         cl_lock_trace(D_DLMTRACE, env, "delete lock", lock);
1771         if (lock->cll_holds == 0)
1772                 cl_lock_delete0(env, lock);
1773         else
1774                 lock->cll_flags |= CLF_DOOMED;
1775         EXIT;
1776 }
1777 EXPORT_SYMBOL(cl_lock_delete);
1778
1779 /**
1780  * Marks the lock as irrecoverably failed, and marks it for destruction. This
1781  * happens when, e.g., the server fails to grant a lock to us, or a network
1782  * time-out happens.
1783  *
1784  * \pre atomic_read(&lock->cll_ref) > 0
1785  *
1786  * \see cl_lock_delete()
1787  * \see cl_lock::cll_holds
1788  */
1789 void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
1790 {
1791         LINVRNT(cl_lock_is_mutexed(lock));
1792         LINVRNT(cl_lock_invariant(env, lock));
1793
1794         ENTRY;
1795         if (lock->cll_error == 0 && error != 0) {
1796                 cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
1797                 lock->cll_error = error;
1798                 cl_lock_signal(env, lock);
1799                 cl_lock_cancel(env, lock);
1800                 cl_lock_delete(env, lock);
1801         }
1802         EXIT;
1803 }
1804 EXPORT_SYMBOL(cl_lock_error);
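/*
 * Typical call-site sketch (variable names assumed): when an enqueue upcall
 * or a server reply reports a failure, the owner marks the lock as failed
 * while holding its mutex.
 *
 *      cl_lock_mutex_get(env, lock);
 *      if (rc < 0)
 *              cl_lock_error(env, lock, rc);
 *      cl_lock_mutex_put(env, lock);
 */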
1805
1806 /**
1807  * Cancels this lock. Notifies layers (bottom-to-top) that the lock is
1808  * being cancelled, then cancels the lock. If there are holds on the
1809  * lock, cancellation is postponed until all of the holds have been
1810  * released.
1811  *
1812  * Cancellation notification is delivered to layers at most once.
1813  *
1814  * \see cl_lock_operations::clo_cancel()
1815  * \see cl_lock::cll_holds
1816  */
1817 void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
1818 {
1819         LINVRNT(cl_lock_is_mutexed(lock));
1820         LINVRNT(cl_lock_invariant(env, lock));
1821
1822         ENTRY;
1823         cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
1824         if (lock->cll_holds == 0)
1825                 cl_lock_cancel0(env, lock);
1826         else
1827                 lock->cll_flags |= CLF_CANCELPEND;
1828         EXIT;
1829 }
1830 EXPORT_SYMBOL(cl_lock_cancel);
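/*
 * Sketch of the cancel-then-delete sequence used, e.g., by cl_lock_error()
 * above when a lock becomes unusable; the lock mutex is assumed to be held.
 *
 *      cl_lock_cancel(env, lock);
 *      cl_lock_delete(env, lock);
 */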
1831
1832 /**
1833  * Finds an existing lock covering the given index, optionally different from
1834  * a given \a except lock.
1835  */
1836 struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
1837                                  struct cl_object *obj, pgoff_t index,
1838                                  struct cl_lock *except,
1839                                  int pending, int canceld)
1840 {
1841         struct cl_object_header *head;
1842         struct cl_lock          *scan;
1843         struct cl_lock          *lock;
1844         struct cl_lock_descr    *need;
1845
1846         ENTRY;
1847
1848         head = cl_object_header(obj);
1849         need = &cl_env_info(env)->clt_descr;
1850         lock = NULL;
1851
1852         need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but
1853                                     * not PHANTOM */
1854         need->cld_start = need->cld_end = index;
1855         need->cld_enq_flags = 0;
1856
1857         cfs_spin_lock(&head->coh_lock_guard);
1858         /* It is fine to match any group lock since there can be only one
1859          * with a unique gid, and it conflicts with all other lock modes too */
1860         cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
1861                 if (scan != except &&
1862                     (scan->cll_descr.cld_mode == CLM_GROUP ||
1863                     cl_lock_ext_match(&scan->cll_descr, need)) &&
1864                     scan->cll_state >= CLS_HELD &&
1865                     scan->cll_state < CLS_FREEING &&
1866                     /*
1867                      * This check is racy as the lock can be canceled right
1868                      * after it is done, but this is fine, because page exists
1869                      * already.
1870                      */
1871                     (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
1872                     (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
1873                         /* Don't increase cs_hit here since this
1874                          * is just a helper function. */
1875                         cl_lock_get_trust(scan);
1876                         lock = scan;
1877                         break;
1878                 }
1879         }
1880         cfs_spin_unlock(&head->coh_lock_guard);
1881         RETURN(lock);
1882 }
1883 EXPORT_SYMBOL(cl_lock_at_pgoff);
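/*
 * Lookup sketch (illustrative): check whether the page at \a index is still
 * covered by some other granted lock; the pending/canceld choices here are
 * only an example.
 *
 *      struct cl_lock *covering;
 *
 *      covering = cl_lock_at_pgoff(env, obj, index, lock, 1, 0);
 *      if (covering != NULL) {
 *              ... the page is still protected by "covering" ...
 *              cl_lock_put(env, covering);
 *      }
 */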
1884
1885 /**
1886  * Calculates the page offset at the layer of @lock.
1887  * At the time of this writing, @page is a top page and @lock is a sub-lock.
1888  */
1889 static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock)
1890 {
1891         struct lu_device_type *dtype;
1892         const struct cl_page_slice *slice;
1893
1894         dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type;
1895         slice = cl_page_at(page, dtype);
1896         LASSERT(slice != NULL);
1897         return slice->cpl_page->cp_index;
1898 }
1899
1900 /**
1901  * Check whether page @page is covered by another lock; if not, discard it.
1902  */
1903 static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
1904                                 struct cl_page *page, void *cbdata)
1905 {
1906         struct cl_thread_info *info = cl_env_info(env);
1907         struct cl_lock *lock = cbdata;
1908         pgoff_t index = pgoff_at_lock(page, lock);
1909
1910         if (index >= info->clt_fn_index) {
1911                 struct cl_lock *tmp;
1912
1913                 /* refresh non-overlapped index */
1914                 tmp = cl_lock_at_page(env, lock->cll_descr.cld_obj, page, lock,
1915                                       1, 0);
1916                 if (tmp != NULL) {
1917                         /* Cache the first-non-overlapped index so as to skip
1918                          * all pages within [index, clt_fn_index). This
1919                          * is safe because if tmp lock is canceled, it will
1920                          * discard these pages. */
1921                         info->clt_fn_index = tmp->cll_descr.cld_end + 1;
1922                         if (tmp->cll_descr.cld_end == CL_PAGE_EOF)
1923                                 info->clt_fn_index = CL_PAGE_EOF;
1924                         cl_lock_put(env, tmp);
1925                 } else if (cl_page_own(env, io, page) == 0) {
1926                         /* discard the page */
1927                         cl_page_unmap(env, io, page);
1928                         cl_page_discard(env, io, page);
1929                         cl_page_disown(env, io, page);
1930                 } else {
1931                         LASSERT(page->cp_state == CPS_FREEING);
1932                 }
1933         }
1934
1935         info->clt_next_index = index + 1;
1936         return CLP_GANG_OKAY;
1937 }
1938
1939 static int discard_cb(const struct lu_env *env, struct cl_io *io,
1940                       struct cl_page *page, void *cbdata)
1941 {
1942         struct cl_thread_info *info = cl_env_info(env);
1943         struct cl_lock *lock   = cbdata;
1944
1945         LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
1946         KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
1947                       !PageWriteback(cl_page_vmpage(env, page))));
1948         KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
1949                       !PageDirty(cl_page_vmpage(env, page))));
1950
1951         info->clt_next_index = pgoff_at_lock(page, lock) + 1;
1952         if (cl_page_own(env, io, page) == 0) {
1953                 /* discard the page */
1954                 cl_page_unmap(env, io, page);
1955                 cl_page_discard(env, io, page);
1956                 cl_page_disown(env, io, page);
1957         } else {
1958                 LASSERT(page->cp_state == CPS_FREEING);
1959         }
1960
1961         return CLP_GANG_OKAY;
1962 }
1963
1964 /**
1965  * Discards pages protected by the given lock. This function traverses the
1966  * radix tree to find all covering pages and discards them. If a page is
1967  * covered by other locks, it remains in the cache.
1968  *
1969  * If an error happens at any step, the process continues anyway (the
1970  * reasoning being that lock cancellation cannot be delayed indefinitely).
1971  */
1972 int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock)
1973 {
1974         struct cl_thread_info *info  = cl_env_info(env);
1975         struct cl_io          *io    = &info->clt_io;
1976         struct cl_lock_descr  *descr = &lock->cll_descr;
1977         cl_page_gang_cb_t      cb;
1978         int res;
1979         int result;
1980
1981         LINVRNT(cl_lock_invariant(env, lock));
1982         ENTRY;
1983
1984         io->ci_obj = cl_object_top(descr->cld_obj);
1985         io->ci_ignore_layout = 1;
1986         result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
1987         if (result != 0)
1988                 GOTO(out, result);
1989
1990         cb = descr->cld_mode == CLM_READ ? check_and_discard_cb : discard_cb;
1991         info->clt_fn_index = info->clt_next_index = descr->cld_start;
1992         do {
1993                 res = cl_page_gang_lookup(env, descr->cld_obj, io,
1994                                           info->clt_next_index, descr->cld_end,
1995                                           cb, (void *)lock);
1996                 if (info->clt_next_index > descr->cld_end)
1997                         break;
1998
1999                 if (res == CLP_GANG_RESCHED)
2000                         cfs_cond_resched();
2001         } while (res != CLP_GANG_OKAY);
2002 out:
2003         cl_io_fini(env, io);
2004         RETURN(result);
2005 }
2006 EXPORT_SYMBOL(cl_lock_discard_pages);
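/*
 * Sketch: a cancellation path would typically discard covered pages before
 * the underlying DLM lock goes away; error handling here is intentionally
 * minimal and the surrounding context is assumed.
 *
 *      rc = cl_lock_discard_pages(env, lock);
 *      if (rc != 0)
 *              CL_LOCK_DEBUG(D_ERROR, env, lock, "discard failed: %d\n", rc);
 */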
2007
2008 /**
2009  * Eliminate all locks for a given object.
2010  *
2011  * The caller has to guarantee that no lock is in active use.
2012  *
2013  * \param cancel when this is set, cl_locks_prune() cancels locks before
2014  *               destroying them.
2015  */
2016 void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
2017 {
2018         struct cl_object_header *head;
2019         struct cl_lock          *lock;
2020
2021         ENTRY;
2022         head = cl_object_header(obj);
2023         /*
2024          * If locks are destroyed without cancellation, all pages must be
2025          * already destroyed (as otherwise they will be left unprotected).
2026          */
2027         LASSERT(ergo(!cancel,
2028                      head->coh_tree.rnode == NULL && head->coh_pages == 0));
2029
2030         cfs_spin_lock(&head->coh_lock_guard);
2031         while (!cfs_list_empty(&head->coh_locks)) {
2032                 lock = container_of(head->coh_locks.next,
2033                                     struct cl_lock, cll_linkage);
2034                 cl_lock_get_trust(lock);
2035                 cfs_spin_unlock(&head->coh_lock_guard);
2036                 lu_ref_add(&lock->cll_reference, "prune", cfs_current());
2037
2038 again:
2039                 cl_lock_mutex_get(env, lock);
2040                 if (lock->cll_state < CLS_FREEING) {
2041                         LASSERT(lock->cll_holds == 0);
2042                         LASSERT(lock->cll_users <= 1);
2043                         if (unlikely(lock->cll_users == 1)) {
2044                                 struct l_wait_info lwi = { 0 };
2045
2046                                 cl_lock_mutex_put(env, lock);
2047                                 l_wait_event(lock->cll_wq,
2048                                              lock->cll_users == 0,
2049                                              &lwi);
2050                                 goto again;
2051                         }
2052
2053                         if (cancel)
2054                                 cl_lock_cancel(env, lock);
2055                         cl_lock_delete(env, lock);
2056                 }
2057                 cl_lock_mutex_put(env, lock);
2058                 lu_ref_del(&lock->cll_reference, "prune", cfs_current());
2059                 cl_lock_put(env, lock);
2060                 cfs_spin_lock(&head->coh_lock_guard);
2061         }
2062         cfs_spin_unlock(&head->coh_lock_guard);
2063         EXIT;
2064 }
2065 EXPORT_SYMBOL(cl_locks_prune);
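/*
 * Sketch: object tear-down is expected to cancel and destroy every remaining
 * lock. The non-cancelling form (cancel == 0) is only valid once all pages of
 * the object are gone, as asserted above.
 *
 *      cl_locks_prune(env, obj, 1);
 */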
2066
2067 static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
2068                                           const struct cl_io *io,
2069                                           const struct cl_lock_descr *need,
2070                                           const char *scope, const void *source)
2071 {
2072         struct cl_lock *lock;
2073
2074         ENTRY;
2075
2076         while (1) {
2077                 lock = cl_lock_find(env, io, need);
2078                 if (IS_ERR(lock))
2079                         break;
2080                 cl_lock_mutex_get(env, lock);
2081                 if (lock->cll_state < CLS_FREEING &&
2082                     !(lock->cll_flags & CLF_CANCELLED)) {
2083                         cl_lock_hold_mod(env, lock, +1);
2084                         lu_ref_add(&lock->cll_holders, scope, source);
2085                         lu_ref_add(&lock->cll_reference, scope, source);
2086                         break;
2087                 }
2088                 cl_lock_mutex_put(env, lock);
2089                 cl_lock_put(env, lock);
2090         }
2091         RETURN(lock);
2092 }
2093
2094 /**
2095  * Returns a lock matching \a need description with a reference and a hold on
2096  * it.
2097  *
2098  * This is much like cl_lock_find(), except that cl_lock_hold() additionally
2099  * guarantees that the lock is not in the CLS_FREEING state on return.
2100  */
2101 struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
2102                              const struct cl_lock_descr *need,
2103                              const char *scope, const void *source)
2104 {
2105         struct cl_lock *lock;
2106
2107         ENTRY;
2108
2109         lock = cl_lock_hold_mutex(env, io, need, scope, source);
2110         if (!IS_ERR(lock))
2111                 cl_lock_mutex_put(env, lock);
2112         RETURN(lock);
2113 }
2114 EXPORT_SYMBOL(cl_lock_hold);
2115
2116 /**
2117  * Main high-level entry point of cl_lock interface that finds existing or
2118  * enqueues new lock matching given description.
2119  */
2120 struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
2121                                 const struct cl_lock_descr *need,
2122                                 const char *scope, const void *source)
2123 {
2124         struct cl_lock       *lock;
2125         int                   rc;
2126         __u32                 enqflags = need->cld_enq_flags;
2127
2128         ENTRY;
2129         do {
2130                 lock = cl_lock_hold_mutex(env, io, need, scope, source);
2131                 if (IS_ERR(lock))
2132                         break;
2133
2134                 rc = cl_enqueue_locked(env, lock, io, enqflags);
2135                 if (rc == 0) {
2136                         if (cl_lock_fits_into(env, lock, need, io)) {
2137                                 if (!(enqflags & CEF_AGL)) {
2138                                         cl_lock_mutex_put(env, lock);
2139                                         cl_lock_lockdep_acquire(env, lock,
2140                                                                 enqflags);
2141                                         break;
2142                                 }
2143                                 rc = 1;
2144                         }
2145                         cl_unuse_locked(env, lock);
2146                 }
2147                 cl_lock_trace(D_DLMTRACE, env,
2148                               rc <= 0 ? "enqueue failed" : "agl succeeded", lock);
2149                 cl_lock_hold_release(env, lock, scope, source);
2150                 cl_lock_mutex_put(env, lock);
2151                 lu_ref_del(&lock->cll_reference, scope, source);
2152                 cl_lock_put(env, lock);
2153                 if (rc > 0) {
2154                         LASSERT(enqflags & CEF_AGL);
2155                         lock = NULL;
2156                 } else if (rc != 0) {
2157                         lock = ERR_PTR(rc);
2158                 }
2159         } while (rc == 0);
2160         RETURN(lock);
2161 }
2162 EXPORT_SYMBOL(cl_lock_request);
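/*
 * End-to-end sketch (illustrative, not a real caller): request a write lock
 * on a range of page indices, do the IO, then release it. The descriptor
 * fields are the ones used throughout this file; "start" and "end" are
 * supplied by the caller.
 *
 *      struct cl_lock_descr need = {
 *              .cld_obj       = obj,
 *              .cld_mode      = CLM_WRITE,
 *              .cld_start     = start,
 *              .cld_end       = end,
 *              .cld_enq_flags = 0,
 *      };
 *      struct cl_lock *lock;
 *
 *      lock = cl_lock_request(env, io, &need, "sketch", cfs_current());
 *      if (!IS_ERR(lock) && lock != NULL) {
 *              ... the lock is held, used and lockdep-acquired, do the IO ...
 *              cl_unuse(env, lock);
 *              cl_lock_release(env, lock, "sketch", cfs_current());
 *      }
 *
 * A NULL return is only possible for CEF_AGL requests (the rc > 0 branch
 * above).
 */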
2163
2164 /**
2165  * Adds a hold to a known lock.
2166  */
2167 void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
2168                       const char *scope, const void *source)
2169 {
2170         LINVRNT(cl_lock_is_mutexed(lock));
2171         LINVRNT(cl_lock_invariant(env, lock));
2172         LASSERT(lock->cll_state != CLS_FREEING);
2173
2174         ENTRY;
2175         cl_lock_hold_mod(env, lock, +1);
2176         cl_lock_get(lock);
2177         lu_ref_add(&lock->cll_holders, scope, source);
2178         lu_ref_add(&lock->cll_reference, scope, source);
2179         EXIT;
2180 }
2181 EXPORT_SYMBOL(cl_lock_hold_add);
2182
2183 /**
2184  * Releases a hold and a reference on a lock, on which caller acquired a
2185  * mutex.
2186  */
2187 void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
2188                     const char *scope, const void *source)
2189 {
2190         LINVRNT(cl_lock_invariant(env, lock));
2191         ENTRY;
2192         cl_lock_hold_release(env, lock, scope, source);
2193         lu_ref_del(&lock->cll_reference, scope, source);
2194         cl_lock_put(env, lock);
2195         EXIT;
2196 }
2197 EXPORT_SYMBOL(cl_lock_unhold);
2198
2199 /**
2200  * Releases a hold and a reference on a lock, obtained by cl_lock_hold().
2201  */
2202 void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
2203                      const char *scope, const void *source)
2204 {
2205         LINVRNT(cl_lock_invariant(env, lock));
2206         ENTRY;
2207         cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
2208         cl_lock_mutex_get(env, lock);
2209         cl_lock_hold_release(env, lock, scope, source);
2210         cl_lock_mutex_put(env, lock);
2211         lu_ref_del(&lock->cll_reference, scope, source);
2212         cl_lock_put(env, lock);
2213         EXIT;
2214 }
2215 EXPORT_SYMBOL(cl_lock_release);
2216
2217 void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
2218 {
2219         LINVRNT(cl_lock_is_mutexed(lock));
2220         LINVRNT(cl_lock_invariant(env, lock));
2221
2222         ENTRY;
2223         cl_lock_used_mod(env, lock, +1);
2224         EXIT;
2225 }
2226 EXPORT_SYMBOL(cl_lock_user_add);
2227
2228 void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
2229 {
2230         LINVRNT(cl_lock_is_mutexed(lock));
2231         LINVRNT(cl_lock_invariant(env, lock));
2232         LASSERT(lock->cll_users > 0);
2233
2234         ENTRY;
2235         cl_lock_used_mod(env, lock, -1);
2236         if (lock->cll_users == 0)
2237                 cfs_waitq_broadcast(&lock->cll_wq);
2238         EXIT;
2239 }
2240 EXPORT_SYMBOL(cl_lock_user_del);
2241
2242 const char *cl_lock_mode_name(const enum cl_lock_mode mode)
2243 {
2244         static const char *names[] = {
2245                 [CLM_PHANTOM] = "P",
2246                 [CLM_READ]    = "R",
2247                 [CLM_WRITE]   = "W",
2248                 [CLM_GROUP]   = "G"
2249         };
2250         if (0 <= mode && mode < ARRAY_SIZE(names))
2251                 return names[mode];
2252         else
2253                 return "U";
2254 }
2255 EXPORT_SYMBOL(cl_lock_mode_name);
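/*
 * Example (sketch): the one-letter mode names are handy in trace messages,
 * e.g.:
 *
 *      CDEBUG(D_DLMTRACE, "mode: %s\n",
 *             cl_lock_mode_name(lock->cll_descr.cld_mode));
 */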
2256
2257 /**
2258  * Prints a human-readable representation of a lock description.
2259  */
2260 void cl_lock_descr_print(const struct lu_env *env, void *cookie,
2261                        lu_printer_t printer,
2262                        const struct cl_lock_descr *descr)
2263 {
2264         const struct lu_fid  *fid;
2265
2266         fid = lu_object_fid(&descr->cld_obj->co_lu);
2267         (*printer)(env, cookie, DDESCR"@"DFID, PDESCR(descr), PFID(fid));
2268 }
2269 EXPORT_SYMBOL(cl_lock_descr_print);
2270
2271 /**
2272  * Prints a human-readable representation of \a lock via \a printer.
2273  */
2274 void cl_lock_print(const struct lu_env *env, void *cookie,
2275                    lu_printer_t printer, const struct cl_lock *lock)
2276 {
2277         const struct cl_lock_slice *slice;
2278         (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
2279                    lock, cfs_atomic_read(&lock->cll_ref),
2280                    lock->cll_state, lock->cll_error, lock->cll_holds,
2281                    lock->cll_users, lock->cll_flags);
2282         cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
2283         (*printer)(env, cookie, " {\n");
2284
2285         cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
2286                 (*printer)(env, cookie, "    %s@%p: ",
2287                            slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
2288                            slice);
2289                 if (slice->cls_ops->clo_print != NULL)
2290                         slice->cls_ops->clo_print(env, cookie, printer, slice);
2291                 (*printer)(env, cookie, "\n");
2292         }
2293         (*printer)(env, cookie, "} lock@%p\n", lock);
2294 }
2295 EXPORT_SYMBOL(cl_lock_print);
2296
2297 int cl_lock_init(void)
2298 {
2299         return lu_kmem_init(cl_lock_caches);
2300 }
2301
2302 void cl_lock_fini(void)
2303 {
2304         lu_kmem_fini(cl_lock_caches);
2305 }