lustre/obdclass/cl_lock.c (fs/lustre-release.git, commit 6535e3ad72ada6b9cc3d45d01b3018e1fcea1d18)
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Client Extent Lock.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_CLASS
42 #ifndef EXPORT_SYMTAB
43 # define EXPORT_SYMTAB
44 #endif
45
46 #include <obd_class.h>
47 #include <obd_support.h>
48 #include <lustre_fid.h>
49 #include <libcfs/list.h>
50 /* lu_time_global_{init,fini}() */
51 #include <lu_time.h>
52
53 #include <cl_object.h>
54 #include "cl_internal.h"
55
56 /** Lock class of cl_lock::cll_guard */
57 static cfs_lock_class_key_t cl_lock_guard_class;
58 static cfs_mem_cache_t *cl_lock_kmem;
59
60 static struct lu_kmem_descr cl_lock_caches[] = {
61         {
62                 .ckd_cache = &cl_lock_kmem,
63                 .ckd_name  = "cl_lock_kmem",
64                 .ckd_size  = sizeof (struct cl_lock)
65         },
66         {
67                 .ckd_cache = NULL
68         }
69 };
70
71 /**
72  * Basic lock invariant that is maintained at all times. Caller either has a
73  * reference to \a lock, or somehow assures that \a lock cannot be freed.
74  *
75  * \see cl_lock_invariant()
76  */
77 static int cl_lock_invariant_trusted(const struct lu_env *env,
78                                      const struct cl_lock *lock)
79 {
80         return
81                 cl_is_lock(lock) &&
82                 ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
83                 cfs_atomic_read(&lock->cll_ref) >= lock->cll_holds &&
84                 lock->cll_holds >= lock->cll_users &&
85                 lock->cll_holds >= 0 &&
86                 lock->cll_users >= 0 &&
87                 lock->cll_depth >= 0;
88 }
89
90 /**
91  * Stronger lock invariant, checking that caller has a reference on a lock.
92  *
93  * \see cl_lock_invariant_trusted()
94  */
95 static int cl_lock_invariant(const struct lu_env *env,
96                              const struct cl_lock *lock)
97 {
98         int result;
99
100         result = cfs_atomic_read(&lock->cll_ref) > 0 &&
101                 cl_lock_invariant_trusted(env, lock);
102         if (!result && env != NULL)
103                 CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
104         return result;
105 }
106
107 /**
108  * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock.
109  */
110 static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
111 {
112         return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
113 }
114
115 /**
116  * Returns a set of counters for this lock, depending on a lock nesting.
117  */
118 static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
119                                                    const struct cl_lock *lock)
120 {
121         struct cl_thread_info *info;
122         enum clt_nesting_level nesting;
123
124         info = cl_env_info(env);
125         nesting = cl_lock_nesting(lock);
126         LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
127         return &info->clt_counters[nesting];
128 }
129
130 static void cl_lock_trace0(int level, const struct lu_env *env,
131                            const char *prefix, const struct cl_lock *lock,
132                            const char *func, const int line)
133 {
134         struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
135         CDEBUG(level, "%s: %p@(%i %p %i %d %d %d %d %lx)"
136                       "(%p/%d/%i) at %s():%d\n",
137                prefix, lock, cfs_atomic_read(&lock->cll_ref),
138                lock->cll_guarder, lock->cll_depth,
139                lock->cll_state, lock->cll_error, lock->cll_holds,
140                lock->cll_users, lock->cll_flags,
141                env, h->coh_nesting, cl_lock_nr_mutexed(env),
142                func, line);
143 }
144 #define cl_lock_trace(level, env, prefix, lock)                         \
145         cl_lock_trace0(level, env, prefix, lock, __FUNCTION__, __LINE__)
146
147 #define RETIP ((unsigned long)__builtin_return_address(0))
148
149 #ifdef CONFIG_LOCKDEP
150 static cfs_lock_class_key_t cl_lock_key;
151
152 static void cl_lock_lockdep_init(struct cl_lock *lock)
153 {
154         lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
155 }
156
157 static void cl_lock_lockdep_acquire(const struct lu_env *env,
158                                     struct cl_lock *lock, __u32 enqflags)
159 {
160         cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
161 #ifdef HAVE_LOCK_MAP_ACQUIRE
162         lock_map_acquire(&lock->dep_map);
163 #else  /* HAVE_LOCK_MAP_ACQUIRE */
164         lock_acquire(&lock->dep_map, !!(enqflags & CEF_ASYNC),
165                      /* try: */ 0, lock->cll_descr.cld_mode <= CLM_READ,
166                      /* check: */ 2, RETIP);
167 #endif /* HAVE_LOCK_MAP_ACQUIRE */
168 }
169
170 static void cl_lock_lockdep_release(const struct lu_env *env,
171                                     struct cl_lock *lock)
172 {
173         cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
174         lock_release(&lock->dep_map, 0, RETIP);
175 }
176
177 #else /* !CONFIG_LOCKDEP */
178
179 static void cl_lock_lockdep_init(struct cl_lock *lock)
180 {}
181 static void cl_lock_lockdep_acquire(const struct lu_env *env,
182                                     struct cl_lock *lock, __u32 enqflags)
183 {}
184 static void cl_lock_lockdep_release(const struct lu_env *env,
185                                     struct cl_lock *lock)
186 {}
187
188 #endif /* !CONFIG_LOCKDEP */
189
190 /**
191  * Adds lock slice to the compound lock.
192  *
193  * This is called by cl_object_operations::coo_lock_init() methods to add a
194  * per-layer state to the lock. New state is added at the end of
195  * cl_lock::cll_layers list, that is, it is at the bottom of the stack.
196  *
197  * \see cl_req_slice_add(), cl_page_slice_add(), cl_io_slice_add()
198  */
199 void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
200                        struct cl_object *obj,
201                        const struct cl_lock_operations *ops)
202 {
203         ENTRY;
204         slice->cls_lock = lock;
205         cfs_list_add_tail(&slice->cls_linkage, &lock->cll_layers);
206         slice->cls_obj = obj;
207         slice->cls_ops = ops;
208         EXIT;
209 }
210 EXPORT_SYMBOL(cl_lock_slice_add);
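
/*
 * Editor's sketch, not part of the original source: a layer registers its
 * slice from its cl_object_operations::coo_lock_init() method. All "my_*"
 * names below are hypothetical; only cl_lock_slice_add() is real:
 *
 *        struct my_lock {
 *                struct cl_lock_slice mlk_cl;
 *        };
 *
 *        cl_lock_slice_add(lock, &mlk->mlk_cl, obj, &my_lock_ops);
 *
 * where "mlk" is the layer-private lock object and "my_lock_ops" is that
 * layer's cl_lock_operations vector.
 */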
211
212 /**
213  * Returns true iff a lock with the mode \a has provides at least the same
214  * guarantees as a lock with the mode \a need.
215  */
216 int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
217 {
218         LINVRNT(need == CLM_READ || need == CLM_WRITE ||
219                 need == CLM_PHANTOM || need == CLM_GROUP);
220         LINVRNT(has == CLM_READ || has == CLM_WRITE ||
221                 has == CLM_PHANTOM || has == CLM_GROUP);
222         CLASSERT(CLM_PHANTOM < CLM_READ);
223         CLASSERT(CLM_READ < CLM_WRITE);
224         CLASSERT(CLM_WRITE < CLM_GROUP);
225
226         if (has != CLM_GROUP)
227                 return need <= has;
228         else
229                 return need == has;
230 }
231 EXPORT_SYMBOL(cl_lock_mode_match);
232
233 /**
234  * Returns true iff extent portions of lock descriptions match.
235  */
236 int cl_lock_ext_match(const struct cl_lock_descr *has,
237                       const struct cl_lock_descr *need)
238 {
239         return
240                 has->cld_start <= need->cld_start &&
241                 has->cld_end >= need->cld_end &&
242                 cl_lock_mode_match(has->cld_mode, need->cld_mode) &&
243                 (has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid);
244 }
245 EXPORT_SYMBOL(cl_lock_ext_match);
246
247 /**
248  * Returns true iff a lock with the description \a has provides at least the
249  * same guarantees as a lock with the description \a need.
250  */
251 int cl_lock_descr_match(const struct cl_lock_descr *has,
252                         const struct cl_lock_descr *need)
253 {
254         return
255                 cl_object_same(has->cld_obj, need->cld_obj) &&
256                 cl_lock_ext_match(has, need);
257 }
258 EXPORT_SYMBOL(cl_lock_descr_match);
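
/*
 * Editor's sketch, illustration only: given two filled-in descriptors, a
 * caller can ask whether a cached lock is strong enough for a request;
 * "has" is assumed to describe the cached lock and "need" the requested one:
 *
 *        if (cl_lock_descr_match(&has, &need))
 *                reuse_cached_lock();
 *
 * The match requires the same object, an enclosing extent and an equal or
 * stronger mode (group locks only match the same gid); "reuse_cached_lock()"
 * stands for whatever the caller does with a matching lock.
 */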
259
260 static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
261 {
262         struct cl_object *obj = lock->cll_descr.cld_obj;
263
264         LASSERT(cl_is_lock(lock));
265         LINVRNT(!cl_lock_is_mutexed(lock));
266
267         ENTRY;
268         cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
269         cfs_might_sleep();
270         while (!cfs_list_empty(&lock->cll_layers)) {
271                 struct cl_lock_slice *slice;
272
273                 slice = cfs_list_entry(lock->cll_layers.next,
274                                        struct cl_lock_slice, cls_linkage);
275                 cfs_list_del_init(lock->cll_layers.next);
276                 slice->cls_ops->clo_fini(env, slice);
277         }
278         cfs_atomic_dec(&cl_object_site(obj)->cs_locks.cs_total);
279         cfs_atomic_dec(&cl_object_site(obj)->cs_locks_state[lock->cll_state]);
280         lu_object_ref_del_at(&obj->co_lu, lock->cll_obj_ref, "cl_lock", lock);
281         cl_object_put(env, obj);
282         lu_ref_fini(&lock->cll_reference);
283         lu_ref_fini(&lock->cll_holders);
284         cfs_mutex_destroy(&lock->cll_guard);
285         OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
286         EXIT;
287 }
288
289 /**
290  * Releases a reference on a lock.
291  *
292  * When last reference is released, lock is returned to the cache, unless it
293  * is in cl_lock_state::CLS_FREEING state, in which case it is destroyed
294  * immediately.
295  *
296  * \see cl_object_put(), cl_page_put()
297  */
298 void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
299 {
300         struct cl_object        *obj;
301         struct cl_object_header *head;
302         struct cl_site          *site;
303
304         LINVRNT(cl_lock_invariant(env, lock));
305         ENTRY;
306         obj = lock->cll_descr.cld_obj;
307         LINVRNT(obj != NULL);
308         head = cl_object_header(obj);
309         site = cl_object_site(obj);
310
311         CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
312                cfs_atomic_read(&lock->cll_ref), lock, RETIP);
313
314         if (cfs_atomic_dec_and_test(&lock->cll_ref)) {
315                 if (lock->cll_state == CLS_FREEING) {
316                         LASSERT(cfs_list_empty(&lock->cll_linkage));
317                         cl_lock_free(env, lock);
318                 }
319                 cfs_atomic_dec(&site->cs_locks.cs_busy);
320         }
321         EXIT;
322 }
323 EXPORT_SYMBOL(cl_lock_put);
324
325 /**
326  * Acquires an additional reference to a lock.
327  *
328  * This can be called only by caller already possessing a reference to \a
329  * lock.
330  *
331  * \see cl_object_get(), cl_page_get()
332  */
333 void cl_lock_get(struct cl_lock *lock)
334 {
335         LINVRNT(cl_lock_invariant(NULL, lock));
336         CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
337                cfs_atomic_read(&lock->cll_ref), lock, RETIP);
338         cfs_atomic_inc(&lock->cll_ref);
339 }
340 EXPORT_SYMBOL(cl_lock_get);
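
/*
 * Editor's sketch, not part of the original code: reference handling is a
 * plain get/put pairing. The caller is assumed to already own one reference
 * before taking another:
 *
 *        cl_lock_get(lock);
 *        ... use the lock ...
 *        cl_lock_put(env, lock);
 *
 * cl_lock_put() only frees the lock when the last reference is dropped and
 * the lock is already in CLS_FREEING; otherwise the lock stays cached.
 */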
341
342 /**
343  * Acquires a reference to a lock.
344  *
345  * This is much like cl_lock_get(), except that this function can be used to
346  * acquire initial reference to the cached lock. Caller has to deal with all
347  * possible races. Use with care!
348  *
349  * \see cl_page_get_trust()
350  */
351 void cl_lock_get_trust(struct cl_lock *lock)
352 {
353         struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
354
355         LASSERT(cl_is_lock(lock));
356         CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
357                cfs_atomic_read(&lock->cll_ref), lock, RETIP);
358         if (cfs_atomic_inc_return(&lock->cll_ref) == 1)
359                 cfs_atomic_inc(&site->cs_locks.cs_busy);
360 }
361 EXPORT_SYMBOL(cl_lock_get_trust);
362
363 /**
364  * Helper function destroying the lock that wasn't completely initialized.
365  *
366  * Other threads can acquire references to the top-lock through its
367  * sub-locks. Hence, it cannot be cl_lock_free()-ed immediately.
368  */
369 static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
370 {
371         cl_lock_mutex_get(env, lock);
372         cl_lock_cancel(env, lock);
373         cl_lock_delete(env, lock);
374         cl_lock_mutex_put(env, lock);
375         cl_lock_put(env, lock);
376 }
377
378 static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
379                                      struct cl_object *obj,
380                                      const struct cl_io *io,
381                                      const struct cl_lock_descr *descr)
382 {
383         struct cl_lock          *lock;
384         struct lu_object_header *head;
385         struct cl_site          *site = cl_object_site(obj);
386
387         ENTRY;
388         OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, CFS_ALLOC_IO);
389         if (lock != NULL) {
390                 cfs_atomic_set(&lock->cll_ref, 1);
391                 lock->cll_descr = *descr;
392                 lock->cll_state = CLS_NEW;
393                 cl_object_get(obj);
394                 lock->cll_obj_ref = lu_object_ref_add(&obj->co_lu,
395                                                       "cl_lock", lock);
396                 CFS_INIT_LIST_HEAD(&lock->cll_layers);
397                 CFS_INIT_LIST_HEAD(&lock->cll_linkage);
398                 CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
399                 lu_ref_init(&lock->cll_reference);
400                 lu_ref_init(&lock->cll_holders);
401                 cfs_mutex_init(&lock->cll_guard);
402                 cfs_lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
403                 cfs_waitq_init(&lock->cll_wq);
404                 head = obj->co_lu.lo_header;
405                 cfs_atomic_inc(&site->cs_locks_state[CLS_NEW]);
406                 cfs_atomic_inc(&site->cs_locks.cs_total);
407                 cfs_atomic_inc(&site->cs_locks.cs_created);
408                 cl_lock_lockdep_init(lock);
409                 cfs_list_for_each_entry(obj, &head->loh_layers,
410                                         co_lu.lo_linkage) {
411                         int err;
412
413                         err = obj->co_ops->coo_lock_init(env, obj, lock, io);
414                         if (err != 0) {
415                                 cl_lock_finish(env, lock);
416                                 lock = ERR_PTR(err);
417                                 break;
418                         }
419                 }
420         } else
421                 lock = ERR_PTR(-ENOMEM);
422         RETURN(lock);
423 }
424
425 /**
426  * Transfers the lock into the INTRANSIT state and returns the original state.
427  *
428  * \pre  state: CLS_CACHED, CLS_HELD or CLS_ENQUEUED
429  * \post state: CLS_INTRANSIT
430  * \see CLS_INTRANSIT
431  */
432 enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
433                                      struct cl_lock *lock)
434 {
435         enum cl_lock_state state = lock->cll_state;
436
437         LASSERT(cl_lock_is_mutexed(lock));
438         LASSERT(state != CLS_INTRANSIT);
439         LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
440                  "Malformed lock state %d.\n", state);
441
442         cl_lock_state_set(env, lock, CLS_INTRANSIT);
443         lock->cll_intransit_owner = cfs_current();
444         cl_lock_hold_add(env, lock, "intransit", cfs_current());
445         return state;
446 }
447 EXPORT_SYMBOL(cl_lock_intransit);
448
449 /**
450  * Exits the INTRANSIT state and restores the lock to its original state.
451  */
452 void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
453                        enum cl_lock_state state)
454 {
455         LASSERT(cl_lock_is_mutexed(lock));
456         LASSERT(lock->cll_state == CLS_INTRANSIT);
457         LASSERT(state != CLS_INTRANSIT);
458         LASSERT(lock->cll_intransit_owner == cfs_current());
459
460         lock->cll_intransit_owner = NULL;
461         cl_lock_state_set(env, lock, state);
462         cl_lock_unhold(env, lock, "intransit", cfs_current());
463 }
464 EXPORT_SYMBOL(cl_lock_extransit);
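
/*
 * Editor's sketch, illustrative only: the INTRANSIT helpers bracket a
 * blocking state transition performed under the lock mutex, which is the
 * pattern cl_use_try() and cl_unuse_try() below follow:
 *
 *        enum cl_lock_state state;
 *
 *        state = cl_lock_intransit(env, lock);
 *        ... call layer methods that may block or drop the mutex ...
 *        cl_lock_extransit(env, lock, state);
 *
 * cl_lock_extransit() may also be given a different state than the one
 * returned by cl_lock_intransit(), e.g. CLS_HELD after a successful use.
 */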
465
466 /**
467  * Checks whether the lock is in the INTRANSIT state owned by another thread.
468  */
469 int cl_lock_is_intransit(struct cl_lock *lock)
470 {
471         LASSERT(cl_lock_is_mutexed(lock));
472         return lock->cll_state == CLS_INTRANSIT &&
473                lock->cll_intransit_owner != cfs_current();
474 }
475 EXPORT_SYMBOL(cl_lock_is_intransit);
476 /**
477  * Returns true iff lock is "suitable" for the given io. E.g., locks acquired
478  * for truncate and O_APPEND writes cannot be reused for reads or non-append
479  * writes, as they cover multiple stripes and can trigger cascading timeouts.
480  */
481 static int cl_lock_fits_into(const struct lu_env *env,
482                              const struct cl_lock *lock,
483                              const struct cl_lock_descr *need,
484                              const struct cl_io *io)
485 {
486         const struct cl_lock_slice *slice;
487
488         LINVRNT(cl_lock_invariant_trusted(env, lock));
489         ENTRY;
490         cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
491                 if (slice->cls_ops->clo_fits_into != NULL &&
492                     !slice->cls_ops->clo_fits_into(env, slice, need, io))
493                         RETURN(0);
494         }
495         RETURN(1);
496 }
497
498 static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
499                                       struct cl_object *obj,
500                                       const struct cl_io *io,
501                                       const struct cl_lock_descr *need)
502 {
503         struct cl_lock          *lock;
504         struct cl_object_header *head;
505         struct cl_site          *site;
506
507         ENTRY;
508
509         head = cl_object_header(obj);
510         site = cl_object_site(obj);
511         LINVRNT_SPIN_LOCKED(&head->coh_lock_guard);
512         cfs_atomic_inc(&site->cs_locks.cs_lookup);
513         cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
514                 int matched;
515
516                 LASSERT(cl_is_lock(lock));
517                 matched = cl_lock_ext_match(&lock->cll_descr, need) &&
518                           lock->cll_state < CLS_FREEING &&
519                           lock->cll_error == 0 &&
520                           !(lock->cll_flags & CLF_CANCELLED) &&
521                           cl_lock_fits_into(env, lock, need, io);
522                 CDEBUG(D_DLMTRACE, "has: "DDESCR"(%i) need: "DDESCR": %d\n",
523                        PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
524                        matched);
525                 if (matched) {
526                         cl_lock_get_trust(lock);
527                         cfs_atomic_inc(&cl_object_site(obj)->cs_locks.cs_hit);
528                         RETURN(lock);
529                 }
530         }
531         RETURN(NULL);
532 }
533
534 /**
535  * Returns a lock matching description \a need.
536  *
537  * This is the main entry point into the cl_lock caching interface. First, a
538  * cache (implemented as a per-object linked list) is consulted. If lock is
539  * found there, it is returned immediately. Otherwise new lock is allocated
540  * and returned. In any case, additional reference to lock is acquired.
541  *
542  * \see cl_object_find(), cl_page_find()
543  */
544 static struct cl_lock *cl_lock_find(const struct lu_env *env,
545                                     const struct cl_io *io,
546                                     const struct cl_lock_descr *need)
547 {
548         struct cl_object_header *head;
549         struct cl_object        *obj;
550         struct cl_lock          *lock;
551         struct cl_site          *site;
552
553         ENTRY;
554
555         obj  = need->cld_obj;
556         head = cl_object_header(obj);
557         site = cl_object_site(obj);
558
559         cfs_spin_lock(&head->coh_lock_guard);
560         lock = cl_lock_lookup(env, obj, io, need);
561         cfs_spin_unlock(&head->coh_lock_guard);
562
563         if (lock == NULL) {
564                 lock = cl_lock_alloc(env, obj, io, need);
565                 if (!IS_ERR(lock)) {
566                         struct cl_lock *ghost;
567
568                         cfs_spin_lock(&head->coh_lock_guard);
569                         ghost = cl_lock_lookup(env, obj, io, need);
570                         if (ghost == NULL) {
571                                 cfs_list_add_tail(&lock->cll_linkage,
572                                                   &head->coh_locks);
573                                 cfs_spin_unlock(&head->coh_lock_guard);
574                                 cfs_atomic_inc(&site->cs_locks.cs_busy);
575                         } else {
576                                 cfs_spin_unlock(&head->coh_lock_guard);
577                                 /*
578                                  * Other threads can acquire references to the
579                                  * top-lock through its sub-locks. Hence, it
580                                  * cannot be cl_lock_free()-ed immediately.
581                                  */
582                                 cl_lock_finish(env, lock);
583                                 lock = ghost;
584                         }
585                 }
586         }
587         RETURN(lock);
588 }
589
590 /**
591  * Returns existing lock matching given description. This is similar to
592  * cl_lock_find() except that no new lock is created, and returned lock is
593  * guaranteed to be in enum cl_lock_state::CLS_HELD state.
594  */
595 struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
596                              const struct cl_lock_descr *need,
597                              const char *scope, const void *source)
598 {
599         struct cl_object_header *head;
600         struct cl_object        *obj;
601         struct cl_lock          *lock;
602         int ok;
603
604         obj  = need->cld_obj;
605         head = cl_object_header(obj);
606
607         cfs_spin_lock(&head->coh_lock_guard);
608         lock = cl_lock_lookup(env, obj, io, need);
609         cfs_spin_unlock(&head->coh_lock_guard);
610
611         if (lock == NULL)
612                 return NULL;
613
614         cl_lock_mutex_get(env, lock);
615         if (lock->cll_state == CLS_INTRANSIT)
616                 cl_lock_state_wait(env, lock); /* Don't care return value. */
617         if (lock->cll_state == CLS_CACHED) {
618                 int result;
619                 result = cl_use_try(env, lock, 1);
620                 if (result < 0)
621                         cl_lock_error(env, lock, result);
622         }
623         ok = lock->cll_state == CLS_HELD;
624         if (ok) {
625                 cl_lock_hold_add(env, lock, scope, source);
626                 cl_lock_user_add(env, lock);
627                 cl_lock_put(env, lock);
628         }
629         cl_lock_mutex_put(env, lock);
630         if (!ok) {
631                 cl_lock_put(env, lock);
632                 lock = NULL;
633         }
634
635         return lock;
636 }
637 EXPORT_SYMBOL(cl_lock_peek);
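
/*
 * Editor's sketch, not part of the original code: a cl_lock_peek() caller
 * checks for NULL and later drops the user and hold references it was given.
 * The release calls below are this sketch's assumption of the symmetric
 * counterparts of the cl_lock_hold_add()/cl_lock_user_add() done above;
 * "my-scope" and "caller" are placeholder scope/source cookies:
 *
 *        lock = cl_lock_peek(env, io, &need, "my-scope", caller);
 *        if (lock != NULL) {
 *                ... the lock is in CLS_HELD ...
 *                cl_unuse(env, lock);
 *                cl_lock_unhold(env, lock, "my-scope", caller);
 *        }
 */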
638
639 /**
640  * Returns a slice within a lock, corresponding to the given layer in the
641  * device stack.
642  *
643  * \see cl_page_at()
644  */
645 const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
646                                        const struct lu_device_type *dtype)
647 {
648         const struct cl_lock_slice *slice;
649
650         LINVRNT(cl_lock_invariant_trusted(NULL, lock));
651         ENTRY;
652
653         cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
654                 if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
655                         RETURN(slice);
656         }
657         RETURN(NULL);
658 }
659 EXPORT_SYMBOL(cl_lock_at);
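
/*
 * Editor's sketch, illustration only: a layer finds its own slice by passing
 * its lu_device_type and converts it to the layer-private lock with a
 * container_of()-style cast. "my_device_type", "my_lock" and "mlk_cl" are
 * hypothetical names:
 *
 *        const struct cl_lock_slice *slice;
 *
 *        slice = cl_lock_at(lock, &my_device_type);
 *        if (slice != NULL)
 *                mlk = container_of(slice, struct my_lock, mlk_cl);
 */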
660
661 static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
662 {
663         struct cl_thread_counters *counters;
664
665         counters = cl_lock_counters(env, lock);
666         lock->cll_depth++;
667         counters->ctc_nr_locks_locked++;
668         lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
669         cl_lock_trace(D_TRACE, env, "got mutex", lock);
670 }
671
672 /**
673  * Locks cl_lock object.
674  *
675  * This is used to manipulate cl_lock fields, and to serialize state
676  * transitions in the lock state machine.
677  *
678  * \post cl_lock_is_mutexed(lock)
679  *
680  * \see cl_lock_mutex_put()
681  */
682 void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
683 {
684         LINVRNT(cl_lock_invariant(env, lock));
685
686         if (lock->cll_guarder == cfs_current()) {
687                 LINVRNT(cl_lock_is_mutexed(lock));
688                 LINVRNT(lock->cll_depth > 0);
689         } else {
690                 struct cl_object_header *hdr;
691                 struct cl_thread_info   *info;
692                 int i;
693
694                 LINVRNT(lock->cll_guarder != cfs_current());
695                 hdr = cl_object_header(lock->cll_descr.cld_obj);
696                 /*
697                  * Check that mutexes are taken in the bottom-to-top order.
698                  */
699                 info = cl_env_info(env);
700                 for (i = 0; i < hdr->coh_nesting; ++i)
701                         LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
702                 cfs_mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
703                 lock->cll_guarder = cfs_current();
704                 LINVRNT(lock->cll_depth == 0);
705         }
706         cl_lock_mutex_tail(env, lock);
707 }
708 EXPORT_SYMBOL(cl_lock_mutex_get);
709
710 /**
711  * Try-locks cl_lock object.
712  *
713  * \retval 0 \a lock was successfully locked
714  *
715  * \retval -EBUSY \a lock cannot be locked right now
716  *
717  * \post ergo(result == 0, cl_lock_is_mutexed(lock))
718  *
719  * \see cl_lock_mutex_get()
720  */
721 int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
722 {
723         int result;
724
725         LINVRNT(cl_lock_invariant_trusted(env, lock));
726         ENTRY;
727
728         result = 0;
729         if (lock->cll_guarder == cfs_current()) {
730                 LINVRNT(lock->cll_depth > 0);
731                 cl_lock_mutex_tail(env, lock);
732         } else if (cfs_mutex_trylock(&lock->cll_guard)) {
733                 LINVRNT(lock->cll_depth == 0);
734                 lock->cll_guarder = cfs_current();
735                 cl_lock_mutex_tail(env, lock);
736         } else
737                 result = -EBUSY;
738         RETURN(result);
739 }
740 EXPORT_SYMBOL(cl_lock_mutex_try);
741
742 /**
743  * Unlocks cl_lock object.
744  *
745  * \pre cl_lock_is_mutexed(lock)
746  *
747  * \see cl_lock_mutex_get()
748  */
749 void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
750 {
751         struct cl_thread_counters *counters;
752
753         LINVRNT(cl_lock_invariant(env, lock));
754         LINVRNT(cl_lock_is_mutexed(lock));
755         LINVRNT(lock->cll_guarder == cfs_current());
756         LINVRNT(lock->cll_depth > 0);
757
758         counters = cl_lock_counters(env, lock);
759         LINVRNT(counters->ctc_nr_locks_locked > 0);
760
761         cl_lock_trace(D_TRACE, env, "put mutex", lock);
762         lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
763         counters->ctc_nr_locks_locked--;
764         if (--lock->cll_depth == 0) {
765                 lock->cll_guarder = NULL;
766                 cfs_mutex_unlock(&lock->cll_guard);
767         }
768 }
769 EXPORT_SYMBOL(cl_lock_mutex_put);
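
/*
 * Editor's sketch, not part of the original code: the lock mutex is
 * recursive within a single thread, so nested critical sections simply pair
 * cl_lock_mutex_get() with cl_lock_mutex_put():
 *
 *        cl_lock_mutex_get(env, lock);
 *        ... inspect or update cll_state, cll_flags, cll_descr ...
 *        cl_lock_mutex_put(env, lock);
 *
 * The underlying mutex is released only when the outermost
 * cl_lock_mutex_put() brings cll_depth back to zero.
 */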
770
771 /**
772  * Returns true iff lock's mutex is owned by the current thread.
773  */
774 int cl_lock_is_mutexed(struct cl_lock *lock)
775 {
776         return lock->cll_guarder == cfs_current();
777 }
778 EXPORT_SYMBOL(cl_lock_is_mutexed);
779
780 /**
781  * Returns the number of cl_lock mutexes held by the current thread (environment).
782  */
783 int cl_lock_nr_mutexed(const struct lu_env *env)
784 {
785         struct cl_thread_info *info;
786         int i;
787         int locked;
788
789         /*
790          * NOTE: if summation across all nesting levels (currently 2) proves
791          *       too expensive, a summary counter can be added to
792          *       struct cl_thread_info.
793          */
794         info = cl_env_info(env);
795         for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
796                 locked += info->clt_counters[i].ctc_nr_locks_locked;
797         return locked;
798 }
799 EXPORT_SYMBOL(cl_lock_nr_mutexed);
800
801 static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
802 {
803         LINVRNT(cl_lock_is_mutexed(lock));
804         LINVRNT(cl_lock_invariant(env, lock));
805         ENTRY;
806         if (!(lock->cll_flags & CLF_CANCELLED)) {
807                 const struct cl_lock_slice *slice;
808
809                 lock->cll_flags |= CLF_CANCELLED;
810                 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
811                                                 cls_linkage) {
812                         if (slice->cls_ops->clo_cancel != NULL)
813                                 slice->cls_ops->clo_cancel(env, slice);
814                 }
815         }
816         EXIT;
817 }
818
819 static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
820 {
821         struct cl_object_header    *head;
822         const struct cl_lock_slice *slice;
823
824         LINVRNT(cl_lock_is_mutexed(lock));
825         LINVRNT(cl_lock_invariant(env, lock));
826
827         ENTRY;
828         if (lock->cll_state < CLS_FREEING) {
829                 LASSERT(lock->cll_state != CLS_INTRANSIT);
830                 cl_lock_state_set(env, lock, CLS_FREEING);
831
832                 head = cl_object_header(lock->cll_descr.cld_obj);
833
834                 cfs_spin_lock(&head->coh_lock_guard);
835                 cfs_list_del_init(&lock->cll_linkage);
836
837                 cfs_spin_unlock(&head->coh_lock_guard);
838                 /*
839                  * From now on, no new references to this lock can be acquired
840                  * by cl_lock_lookup().
841                  */
842                 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
843                                                 cls_linkage) {
844                         if (slice->cls_ops->clo_delete != NULL)
845                                 slice->cls_ops->clo_delete(env, slice);
846                 }
847                 /*
848                  * From now on, no new references to this lock can be acquired
849                  * by layer-specific means (like a pointer from struct
850                  * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
851                  * lov).
852                  *
853                  * Lock will be finally freed in cl_lock_put() when last of
854                  * existing references goes away.
855                  */
856         }
857         EXIT;
858 }
859
860 /**
861  * Mod(ifie)s cl_lock::cll_holds counter for a given lock. Also, for a
862  * top-lock (nesting == 0) accounts for this modification in the per-thread
863  * debugging counters. Sub-lock holds can be released by a thread different
864  * from one that acquired it.
865  */
866 static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
867                              int delta)
868 {
869         struct cl_thread_counters *counters;
870         enum clt_nesting_level     nesting;
871
872         lock->cll_holds += delta;
873         nesting = cl_lock_nesting(lock);
874         if (nesting == CNL_TOP) {
875                 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
876                 counters->ctc_nr_held += delta;
877                 LASSERT(counters->ctc_nr_held >= 0);
878         }
879 }
880
881 /**
882  * Mod(ifie)s cl_lock::cll_users counter for a given lock. See
883  * cl_lock_hold_mod() for the explanation of the debugging code.
884  */
885 static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
886                              int delta)
887 {
888         struct cl_thread_counters *counters;
889         enum clt_nesting_level     nesting;
890
891         lock->cll_users += delta;
892         nesting = cl_lock_nesting(lock);
893         if (nesting == CNL_TOP) {
894                 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
895                 counters->ctc_nr_used += delta;
896                 LASSERT(counters->ctc_nr_used >= 0);
897         }
898 }
899
900 static void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
901                                  const char *scope, const void *source)
902 {
903         LINVRNT(cl_lock_is_mutexed(lock));
904         LINVRNT(cl_lock_invariant(env, lock));
905         LASSERT(lock->cll_holds > 0);
906
907         ENTRY;
908         cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock);
909         lu_ref_del(&lock->cll_holders, scope, source);
910         cl_lock_hold_mod(env, lock, -1);
911         if (lock->cll_holds == 0) {
912                 if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
913                     lock->cll_descr.cld_mode == CLM_GROUP)
914                         /*
915                          * If lock is still phantom or grouplock when user is
916                          * done with it---destroy the lock.
917                          */
918                         lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
919                 if (lock->cll_flags & CLF_CANCELPEND) {
920                         lock->cll_flags &= ~CLF_CANCELPEND;
921                         cl_lock_cancel0(env, lock);
922                 }
923                 if (lock->cll_flags & CLF_DOOMED) {
924                         /* no longer doomed: it's dead... Jim. */
925                         lock->cll_flags &= ~CLF_DOOMED;
926                         cl_lock_delete0(env, lock);
927                 }
928         }
929         EXIT;
930 }
931
932
933 /**
934  * Waits until lock state is changed.
935  *
936  * This function is called with cl_lock mutex locked, atomically releases
937  * mutex and goes to sleep, waiting for a lock state change (signaled by
938  * cl_lock_signal()), and re-acquires the mutex before return.
939  *
940  * This function is used to wait until lock state machine makes some progress
941  * and to emulate synchronous operations on top of asynchronous lock
942  * interface.
943  *
944  * \retval -EINTR wait was interrupted
945  *
946  * \retval 0 wait wasn't interrupted
947  *
948  * \pre cl_lock_is_mutexed(lock)
949  *
950  * \see cl_lock_signal()
951  */
952 int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
953 {
954         cfs_waitlink_t waiter;
955         int result;
956
957         ENTRY;
958         LINVRNT(cl_lock_is_mutexed(lock));
959         LINVRNT(cl_lock_invariant(env, lock));
960         LASSERT(lock->cll_depth == 1);
961         LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */
962
963         cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
964         result = lock->cll_error;
965         if (result == 0) {
966                 cfs_waitlink_init(&waiter);
967                 cfs_waitq_add(&lock->cll_wq, &waiter);
968                 cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
969                 cl_lock_mutex_put(env, lock);
970
971                 LASSERT(cl_lock_nr_mutexed(env) == 0);
972                 cfs_waitq_wait(&waiter, CFS_TASK_INTERRUPTIBLE);
973
974                 cl_lock_mutex_get(env, lock);
975                 cfs_set_current_state(CFS_TASK_RUNNING);
976                 cfs_waitq_del(&lock->cll_wq, &waiter);
977                 result = cfs_signal_pending() ? -EINTR : 0;
978         }
979         RETURN(result);
980 }
981 EXPORT_SYMBOL(cl_lock_state_wait);
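
/*
 * Editor's sketch, illustrative only: the usual way to drive the
 * asynchronous state machine synchronously is a try/CLO_WAIT loop around
 * cl_lock_state_wait(); this mirrors cl_enqueue_locked() and cl_wait()
 * below. The caller holds the lock mutex and is assumed to have added a
 * hold and a user already:
 *
 *        do {
 *                rc = cl_enqueue_try(env, lock, io, enqflags);
 *                if (rc == CLO_WAIT) {
 *                        rc = cl_lock_state_wait(env, lock);
 *                        if (rc == 0)
 *                                continue;
 *                }
 *                break;
 *        } while (1);
 *
 * cl_lock_state_wait() drops and re-acquires the mutex around the sleep and
 * returns -EINTR if the wait was interrupted.
 */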
982
983 static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
984                                  enum cl_lock_state state)
985 {
986         const struct cl_lock_slice *slice;
987
988         ENTRY;
989         LINVRNT(cl_lock_is_mutexed(lock));
990         LINVRNT(cl_lock_invariant(env, lock));
991
992         cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
993                 if (slice->cls_ops->clo_state != NULL)
994                         slice->cls_ops->clo_state(env, slice, state);
995         cfs_waitq_broadcast(&lock->cll_wq);
996         EXIT;
997 }
998
999 /**
1000  * Notifies waiters that lock state changed.
1001  *
1002  * Wakes up all waiters sleeping in cl_lock_state_wait(), also notifies all
1003  * layers about state change by calling cl_lock_operations::clo_state()
1004  * top-to-bottom.
1005  */
1006 void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
1007 {
1008         ENTRY;
1009         cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock);
1010         cl_lock_state_signal(env, lock, lock->cll_state);
1011         EXIT;
1012 }
1013 EXPORT_SYMBOL(cl_lock_signal);
1014
1015 /**
1016  * Changes lock state.
1017  *
1018  * This function is invoked to notify layers that the lock state changed, possibly
1019  * as a result of an asynchronous event such as call-back reception.
1020  *
1021  * \post lock->cll_state == state
1022  *
1023  * \see cl_lock_operations::clo_state()
1024  */
1025 void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
1026                        enum cl_lock_state state)
1027 {
1028         struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
1029
1030         ENTRY;
1031         LASSERT(lock->cll_state <= state ||
1032                 (lock->cll_state == CLS_CACHED &&
1033                  (state == CLS_HELD || /* lock found in cache */
1034                   state == CLS_NEW  ||   /* sub-lock canceled */
1035                   state == CLS_INTRANSIT)) ||
1036                 /* lock is in transit state */
1037                 lock->cll_state == CLS_INTRANSIT);
1038
1039         if (lock->cll_state != state) {
1040                 cfs_atomic_dec(&site->cs_locks_state[lock->cll_state]);
1041                 cfs_atomic_inc(&site->cs_locks_state[state]);
1042
1043                 cl_lock_state_signal(env, lock, state);
1044                 lock->cll_state = state;
1045         }
1046         EXIT;
1047 }
1048 EXPORT_SYMBOL(cl_lock_state_set);
1049
1050 static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
1051 {
1052         const struct cl_lock_slice *slice;
1053         int result;
1054
1055         do {
1056                 result = 0;
1057
1058                 LINVRNT(cl_lock_is_mutexed(lock));
1059                 LINVRNT(cl_lock_invariant(env, lock));
1060                 LASSERT(lock->cll_state == CLS_INTRANSIT);
1061
1062                 result = -ENOSYS;
1063                 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
1064                                                 cls_linkage) {
1065                         if (slice->cls_ops->clo_unuse != NULL) {
1066                                 result = slice->cls_ops->clo_unuse(env, slice);
1067                                 if (result != 0)
1068                                         break;
1069                         }
1070                 }
1071                 LASSERT(result != -ENOSYS);
1072         } while (result == CLO_REPEAT);
1073
1074         return result;
1075 }
1076
1077 /**
1078  * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling
1079  * cl_lock_operations::clo_use() top-to-bottom to notify layers.
1080  * If \a atomic is 1 and the use fails, the lock is unused again so that the
1081  * whole use process stays atomic.
1082  */
1083 int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
1084 {
1085         const struct cl_lock_slice *slice;
1086         int result;
1087         enum cl_lock_state state;
1088
1089         ENTRY;
1090         cl_lock_trace(D_DLMTRACE, env, "use lock", lock);
1091
1092         LASSERT(lock->cll_state == CLS_CACHED);
1093         if (lock->cll_error)
1094                 RETURN(lock->cll_error);
1095
1096         result = -ENOSYS;
1097         state = cl_lock_intransit(env, lock);
1098         cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1099                 if (slice->cls_ops->clo_use != NULL) {
1100                         result = slice->cls_ops->clo_use(env, slice);
1101                         if (result != 0)
1102                                 break;
1103                 }
1104         }
1105         LASSERT(result != -ENOSYS);
1106
1107         LASSERTF(lock->cll_state == CLS_INTRANSIT, "Wrong state %d.\n",
1108                  lock->cll_state);
1109
1110         if (result == 0) {
1111                 state = CLS_HELD;
1112         } else {
1113                 if (result == -ESTALE) {
1114                         /*
1115                          * -ESTALE means a sublock is being cancelled
1116                          * at this moment; set the lock state back to
1117                          * CLS_NEW and ask the caller to repeat.
1118                          */
1119                         state = CLS_NEW;
1120                         result = CLO_REPEAT;
1121                 }
1122
1123                 /* @atomic means back-off-on-failure. */
1124                 if (atomic) {
1125                         int rc;
1126                         rc = cl_unuse_try_internal(env, lock);
1127                         /* Vet the results. */
1128                         if (rc < 0 && result > 0)
1129                                 result = rc;
1130                 }
1131
1132         }
1133         cl_lock_extransit(env, lock, state);
1134         RETURN(result);
1135 }
1136 EXPORT_SYMBOL(cl_use_try);
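
/*
 * Editor's sketch, not part of the original file: cl_lock_peek() above is a
 * typical caller; under the lock mutex it yanks a cached lock back into use,
 * with atomic == 1 so a failed use backs off cleanly:
 *
 *        if (lock->cll_state == CLS_CACHED) {
 *                rc = cl_use_try(env, lock, 1);
 *                if (rc < 0)
 *                        cl_lock_error(env, lock, rc);
 *        }
 */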
1137
1138 /**
1139  * Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers
1140  * top-to-bottom.
1141  */
1142 static int cl_enqueue_kick(const struct lu_env *env,
1143                            struct cl_lock *lock,
1144                            struct cl_io *io, __u32 flags)
1145 {
1146         int result;
1147         const struct cl_lock_slice *slice;
1148
1149         ENTRY;
1150         result = -ENOSYS;
1151         cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1152                 if (slice->cls_ops->clo_enqueue != NULL) {
1153                         result = slice->cls_ops->clo_enqueue(env,
1154                                                              slice, io, flags);
1155                         if (result != 0)
1156                                 break;
1157                 }
1158         }
1159         LASSERT(result != -ENOSYS);
1160         RETURN(result);
1161 }
1162
1163 /**
1164  * Tries to enqueue a lock.
1165  *
1166  * This function is called repeatedly by cl_enqueue() until either the lock is
1167  * enqueued, or an error occurs. This function does not block waiting for
1168  * network communication to complete.
1169  *
1170  * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1171  *                         lock->cll_state == CLS_HELD)
1172  *
1173  * \see cl_enqueue() cl_lock_operations::clo_enqueue()
1174  * \see cl_lock_state::CLS_ENQUEUED
1175  */
1176 int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
1177                    struct cl_io *io, __u32 flags)
1178 {
1179         int result;
1180
1181         ENTRY;
1182         cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
1183         do {
1184                 result = 0;
1185
1186                 LINVRNT(cl_lock_is_mutexed(lock));
1187
1188                 if (lock->cll_error != 0)
1189                         break;
1190                 switch (lock->cll_state) {
1191                 case CLS_NEW:
1192                         cl_lock_state_set(env, lock, CLS_QUEUING);
1193                         /* fall-through */
1194                 case CLS_QUEUING:
1195                         /* kick layers. */
1196                         result = cl_enqueue_kick(env, lock, io, flags);
1197                         if (result == 0)
1198                                 cl_lock_state_set(env, lock, CLS_ENQUEUED);
1199                         break;
1200                 case CLS_INTRANSIT:
1201                         LASSERT(cl_lock_is_intransit(lock));
1202                         result = CLO_WAIT;
1203                         break;
1204                 case CLS_CACHED:
1205                         /* yank lock from the cache. */
1206                         result = cl_use_try(env, lock, 0);
1207                         break;
1208                 case CLS_ENQUEUED:
1209                 case CLS_HELD:
1210                         result = 0;
1211                         break;
1212                 default:
1213                 case CLS_FREEING:
1214                         /*
1215                          * impossible, only held locks with increased
1216                          * ->cll_holds can be enqueued, and they cannot be
1217                          * freed.
1218                          */
1219                         LBUG();
1220                 }
1221         } while (result == CLO_REPEAT);
1222         if (result < 0)
1223                 cl_lock_error(env, lock, result);
1224         RETURN(result ?: lock->cll_error);
1225 }
1226 EXPORT_SYMBOL(cl_enqueue_try);
1227
1228 static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
1229                              struct cl_io *io, __u32 enqflags)
1230 {
1231         int result;
1232
1233         ENTRY;
1234
1235         LINVRNT(cl_lock_is_mutexed(lock));
1236         LINVRNT(cl_lock_invariant(env, lock));
1237         LASSERT(lock->cll_holds > 0);
1238
1239         cl_lock_user_add(env, lock);
1240         do {
1241                 result = cl_enqueue_try(env, lock, io, enqflags);
1242                 if (result == CLO_WAIT) {
1243                         result = cl_lock_state_wait(env, lock);
1244                         if (result == 0)
1245                                 continue;
1246                 }
1247                 break;
1248         } while (1);
1249         if (result != 0) {
1250                 cl_lock_user_del(env, lock);
1251                 cl_lock_error(env, lock, result);
1252         }
1253         LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1254                      lock->cll_state == CLS_HELD));
1255         RETURN(result);
1256 }
1257
1258 /**
1259  * Enqueues a lock.
1260  *
1261  * \pre current thread or io owns a hold on lock.
1262  *
1263  * \post ergo(result == 0, lock->users increased)
1264  * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1265  *                         lock->cll_state == CLS_HELD)
1266  */
1267 int cl_enqueue(const struct lu_env *env, struct cl_lock *lock,
1268                struct cl_io *io, __u32 enqflags)
1269 {
1270         int result;
1271
1272         ENTRY;
1273
1274         cl_lock_lockdep_acquire(env, lock, enqflags);
1275         cl_lock_mutex_get(env, lock);
1276         result = cl_enqueue_locked(env, lock, io, enqflags);
1277         cl_lock_mutex_put(env, lock);
1278         if (result != 0)
1279                 cl_lock_lockdep_release(env, lock);
1280         LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1281                      lock->cll_state == CLS_HELD));
1282         RETURN(result);
1283 }
1284 EXPORT_SYMBOL(cl_enqueue);
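
/*
 * Editor's sketch, illustration only: the synchronous life cycle built from
 * the exported calls in this file. The caller is assumed to already own a
 * hold on the lock and to have obtained "env", "io" and "enqflags"
 * elsewhere:
 *
 *        rc = cl_enqueue(env, lock, io, enqflags);
 *        if (rc == 0) {
 *                rc = cl_wait(env, lock);
 *                if (rc == 0) {
 *                        ... perform I/O under the CLS_HELD lock ...
 *                        cl_unuse(env, lock);
 *                }
 *        }
 *
 * cl_wait() blocks until the lock is granted and cl_unuse() eventually
 * returns it to the cache (CLS_CACHED) or reinitializes it to CLS_NEW.
 */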
1285
1286 /**
1287  * Tries to unlock a lock.
1288  *
1289  * This function is called repeatedly by cl_unuse() until either the lock is
1290  * unlocked, or an error occurs.
1291  * cl_unuse_try is a one-shot operation, so it must NOT return CLO_WAIT.
1292  *
1293  * \pre  lock->cll_state == CLS_HELD
1294  *
1295  * \post ergo(result == 0, lock->cll_state == CLS_CACHED)
1296  *
1297  * \see cl_unuse() cl_lock_operations::clo_unuse()
1298  * \see cl_lock_state::CLS_CACHED
1299  */
1300 int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
1301 {
1302         int                         result;
1303         enum cl_lock_state          state = CLS_NEW;
1304
1305         ENTRY;
1306         cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);
1307
1308         LASSERT(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED);
1309         if (lock->cll_users > 1) {
1310                 cl_lock_user_del(env, lock);
1311                 RETURN(0);
1312         }
1313
1314         /*
1315          * New lock users (->cll_users) do not prevent unlocking
1316          * from proceeding. From this point, the lock eventually reaches
1317          * CLS_CACHED, is reinitialized to CLS_NEW, or falls into
1318          * CLS_FREEING.
1319          */
1320         state = cl_lock_intransit(env, lock);
1321
1322         result = cl_unuse_try_internal(env, lock);
1323         LASSERT(lock->cll_state == CLS_INTRANSIT);
1324         LASSERT(result != CLO_WAIT);
1325         cl_lock_user_del(env, lock);
1326         if (result == 0 || result == -ESTALE) {
1327                 /*
1328                  * Return lock back to the cache. This is the only
1329                  * place where lock is moved into CLS_CACHED state.
1330                  *
1331                  * If one of ->clo_unuse() methods returned -ESTALE, lock
1332                  * cannot be placed into cache and has to be
1333                  * re-initialized. This happens e.g., when a sub-lock was
1334                  * canceled while unlocking was in progress.
1335                  */
1336                 if (state == CLS_HELD && result == 0)
1337                         state = CLS_CACHED;
1338                 else
1339                         state = CLS_NEW;
1340                 cl_lock_extransit(env, lock, state);
1341
1342                 /*
1343                  * Hide -ESTALE error.
1344                  * Consider a glimpse lock covering multiple stripes where
1345                  * one sublock returned -ENAVAIL while the other sublocks
1346                  * matched existing write locks. We must not mark the whole
1347                  * lock as erroneous, because then some of its sublocks might
1348                  * never be canceled, and the dirty pages they cover would
1349                  * never be written back to the OSTs. -jay
1350                  */
1351                 result = 0;
1352         } else {
1353                 CERROR("result = %d, this is unlikely!\n", result);
1354                 cl_lock_extransit(env, lock, state);
1355         }
1356
1357         result = result ?: lock->cll_error;
1358         if (result < 0)
1359                 cl_lock_error(env, lock, result);
1360         RETURN(result);
1361 }
1362 EXPORT_SYMBOL(cl_unuse_try);
1363
1364 static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
1365 {
1366         int result;
1367         ENTRY;
1368
1369         result = cl_unuse_try(env, lock);
1370         if (result)
1371                 CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result);
1372
1373         EXIT;
1374 }
1375
1376 /**
1377  * Unlocks a lock.
1378  */
1379 void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
1380 {
1381         ENTRY;
1382         cl_lock_mutex_get(env, lock);
1383         cl_unuse_locked(env, lock);
1384         cl_lock_mutex_put(env, lock);
1385         cl_lock_lockdep_release(env, lock);
1386         EXIT;
1387 }
1388 EXPORT_SYMBOL(cl_unuse);
1389
1390 /**
1391  * Tries to wait for a lock.
1392  *
1393  * This function is called repeatedly by cl_wait() until either the lock is
1394  * granted, or an error occurs. This function does not block waiting for
1395  * network communication to complete.
1396  *
1397  * \see cl_wait() cl_lock_operations::clo_wait()
1398  * \see cl_lock_state::CLS_HELD
1399  */
1400 int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
1401 {
1402         const struct cl_lock_slice *slice;
1403         int                         result;
1404
1405         ENTRY;
1406         cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock);
1407         do {
1408                 LINVRNT(cl_lock_is_mutexed(lock));
1409                 LINVRNT(cl_lock_invariant(env, lock));
1410                 LASSERT(lock->cll_state == CLS_ENQUEUED ||
1411                         lock->cll_state == CLS_HELD ||
1412                         lock->cll_state == CLS_INTRANSIT);
1413                 LASSERT(lock->cll_users > 0);
1414                 LASSERT(lock->cll_holds > 0);
1415
1416                 result = 0;
1417                 if (lock->cll_error != 0)
1418                         break;
1419
1420                 if (cl_lock_is_intransit(lock)) {
1421                         result = CLO_WAIT;
1422                         break;
1423                 }
1424
1425                 if (lock->cll_state == CLS_HELD)
1426                         /* nothing to do */
1427                         break;
1428
1429                 result = -ENOSYS;
1430                 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1431                         if (slice->cls_ops->clo_wait != NULL) {
1432                                 result = slice->cls_ops->clo_wait(env, slice);
1433                                 if (result != 0)
1434                                         break;
1435                         }
1436                 }
1437                 LASSERT(result != -ENOSYS);
1438                 if (result == 0) {
1439                         LASSERT(lock->cll_state != CLS_INTRANSIT);
1440                         cl_lock_state_set(env, lock, CLS_HELD);
1441                 }
1442         } while (result == CLO_REPEAT);
1443         RETURN(result ?: lock->cll_error);
1444 }
1445 EXPORT_SYMBOL(cl_wait_try);
1446
1447 /**
1448  * Waits until enqueued lock is granted.
1449  *
1450  * \pre current thread or io owns a hold on the lock
1451  * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1452  *                        lock->cll_state == CLS_HELD)
1453  *
1454  * \post ergo(result == 0, lock->cll_state == CLS_HELD)
1455  */
1456 int cl_wait(const struct lu_env *env, struct cl_lock *lock)
1457 {
1458         int result;
1459
1460         ENTRY;
1461         cl_lock_mutex_get(env, lock);
1462
1463         LINVRNT(cl_lock_invariant(env, lock));
1464         LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
1465                  "Wrong state %d \n", lock->cll_state);
1466         LASSERT(lock->cll_holds > 0);
1467         cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
1468
1469         do {
1470                 result = cl_wait_try(env, lock);
1471                 if (result == CLO_WAIT) {
1472                         result = cl_lock_state_wait(env, lock);
1473                         if (result == 0)
1474                                 continue;
1475                 }
1476                 break;
1477         } while (1);
1478         if (result < 0) {
1479                 cl_lock_user_del(env, lock);
1480                 cl_lock_error(env, lock, result);
1481                 cl_lock_lockdep_release(env, lock);
1482         }
1483         cl_lock_mutex_put(env, lock);
1484         LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
1485         RETURN(result);
1486 }
1487 EXPORT_SYMBOL(cl_wait);
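
/*
 * Illustration (not part of the original source): a minimal sketch of how a
 * caller might combine cl_enqueue() with cl_wait(), under the assumption that
 * the lock already carries a hold and that cl_enqueue() is the public enqueue
 * entry point defined earlier in this file. The helper name
 * example_enqueue_and_wait() is hypothetical.
 *
 * \code
 * static int example_enqueue_and_wait(const struct lu_env *env,
 *                                     struct cl_lock *lock,
 *                                     struct cl_io *io, __u32 enqflags)
 * {
 *         int rc;
 *
 *         rc = cl_enqueue(env, lock, io, enqflags);
 *         if (rc == 0)
 *                 rc = cl_wait(env, lock);
 *         return rc;
 * }
 * \endcode
 *
 * On success the lock is in CLS_HELD; on failure cl_wait() has already
 * dropped the lock user and recorded the error, as shown above.
 */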
1488
1489 /**
1490  * Executes cl_lock_operations::clo_weigh(), and sums results to estimate lock
1491  * value.
1492  */
1493 unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
1494 {
1495         const struct cl_lock_slice *slice;
1496         unsigned long pound;
1497         unsigned long ounce;
1498
1499         ENTRY;
1500         LINVRNT(cl_lock_is_mutexed(lock));
1501         LINVRNT(cl_lock_invariant(env, lock));
1502
1503         pound = 0;
1504         cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1505                 if (slice->cls_ops->clo_weigh != NULL) {
1506                         ounce = slice->cls_ops->clo_weigh(env, slice);
1507                         pound += ounce;
1508                         if (pound < ounce) /* over-weight^Wflow */
1509                                 pound = ~0UL;
1510                 }
1511         }
1512         RETURN(pound);
1513 }
1514 EXPORT_SYMBOL(cl_lock_weigh);
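
/*
 * Illustration (not part of the original source): a hypothetical layer method
 * of the kind cl_lock_weigh() sums up, returning the number of pages the lock
 * extent covers. The name example_lock_weigh() and the use of the top-level
 * extent in cl_lock::cll_descr are assumptions made only for this sketch;
 * real layers weigh their own, layer-private state.
 *
 * \code
 * static unsigned long example_lock_weigh(const struct lu_env *env,
 *                                         const struct cl_lock_slice *slice)
 * {
 *         const struct cl_lock_descr *d = &slice->cls_lock->cll_descr;
 *
 *         return d->cld_end - d->cld_start + 1;
 * }
 * \endcode
 *
 * Note that cl_lock_weigh() above saturates the sum at ~0UL instead of
 * wrapping around, so individual layers do not need to worry about overflow.
 */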
1515
1516 /**
1517  * Notifies layers that the lock description has changed.
1518  *
1519  * The server can grant the client a lock different from the one that was
1520  * requested (e.g., larger in extent). This method is called when the actually
1521  * granted lock description becomes known, to let layers accommodate the
1522  * changed description.
1523  *
1524  * \see cl_lock_operations::clo_modify()
1525  */
1526 int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
1527                    const struct cl_lock_descr *desc)
1528 {
1529         const struct cl_lock_slice *slice;
1530         struct cl_object           *obj = lock->cll_descr.cld_obj;
1531         struct cl_object_header    *hdr = cl_object_header(obj);
1532         int result;
1533
1534         ENTRY;
1535         cl_lock_trace(D_DLMTRACE, env, "modify lock", lock);
1536         /* don't allow object to change */
1537         LASSERT(obj == desc->cld_obj);
1538         LINVRNT(cl_lock_is_mutexed(lock));
1539         LINVRNT(cl_lock_invariant(env, lock));
1540
1541         cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1542                 if (slice->cls_ops->clo_modify != NULL) {
1543                         result = slice->cls_ops->clo_modify(env, slice, desc);
1544                         if (result != 0)
1545                                 RETURN(result);
1546                 }
1547         }
1548         CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n",
1549                       PDESCR(desc), PFID(lu_object_fid(&desc->cld_obj->co_lu)));
1550         /*
1551          * Just replace description in place. Nothing more is needed for
1552          * now. If locks were indexed according to their extent and/or mode,
1553          * that index would have to be updated here.
1554          */
1555         cfs_spin_lock(&hdr->coh_lock_guard);
1556         lock->cll_descr = *desc;
1557         cfs_spin_unlock(&hdr->coh_lock_guard);
1558         RETURN(0);
1559 }
1560 EXPORT_SYMBOL(cl_lock_modify);
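
/*
 * Illustration (not part of the original source): a minimal sketch of a
 * caller that, once the actually granted extent is known, builds an updated
 * description and propagates it with cl_lock_modify(). The helper name and
 * the way the granted range is obtained are assumptions; as asserted above,
 * the lock mutex must be held and the object may not change.
 *
 * \code
 * static int example_apply_granted_extent(const struct lu_env *env,
 *                                         struct cl_lock *lock,
 *                                         pgoff_t start, pgoff_t end)
 * {
 *         struct cl_lock_descr desc = lock->cll_descr;
 *
 *         desc.cld_start = start;
 *         desc.cld_end   = end;
 *         return cl_lock_modify(env, lock, &desc);
 * }
 * \endcode
 */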
1561
1562 /**
1563  * Initializes lock closure with a given origin.
1564  *
1565  * \see cl_lock_closure
1566  */
1567 void cl_lock_closure_init(const struct lu_env *env,
1568                           struct cl_lock_closure *closure,
1569                           struct cl_lock *origin, int wait)
1570 {
1571         LINVRNT(cl_lock_is_mutexed(origin));
1572         LINVRNT(cl_lock_invariant(env, origin));
1573
1574         CFS_INIT_LIST_HEAD(&closure->clc_list);
1575         closure->clc_origin = origin;
1576         closure->clc_wait   = wait;
1577         closure->clc_nr     = 0;
1578 }
1579 EXPORT_SYMBOL(cl_lock_closure_init);
1580
1581 /**
1582  * Builds a closure of \a lock.
1583  *
1584  * Building of a closure consists of adding initial lock (\a lock) into it,
1585  * and calling cl_lock_operations::clo_closure() methods of \a lock. These
1586  * methods might call cl_lock_closure_build() recursively again, adding more
1587  * locks to the closure, etc.
1588  *
1589  * \see cl_lock_closure
1590  */
1591 int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
1592                           struct cl_lock_closure *closure)
1593 {
1594         const struct cl_lock_slice *slice;
1595         int result;
1596
1597         ENTRY;
1598         LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
1599         LINVRNT(cl_lock_invariant(env, closure->clc_origin));
1600
1601         result = cl_lock_enclosure(env, lock, closure);
1602         if (result == 0) {
1603                 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1604                         if (slice->cls_ops->clo_closure != NULL) {
1605                                 result = slice->cls_ops->clo_closure(env, slice,
1606                                                                      closure);
1607                                 if (result != 0)
1608                                         break;
1609                         }
1610                 }
1611         }
1612         if (result != 0)
1613                 cl_lock_disclosure(env, closure);
1614         RETURN(result);
1615 }
1616 EXPORT_SYMBOL(cl_lock_closure_build);
1617
1618 /**
1619  * Adds new lock to a closure.
1620  *
1621  * Try-locks \a lock and, if this succeeds, adds it to the closure (never more
1622  * than once). If the try-lock fails, returns CLO_REPEAT, after optionally
1623  * waiting until the next try-lock is likely to succeed.
1624  */
1625 int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
1626                       struct cl_lock_closure *closure)
1627 {
1628         int result = 0;
1629         ENTRY;
1630         cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock);
1631         if (!cl_lock_mutex_try(env, lock)) {
1632                 /*
1633                  * If lock->cll_inclosure is not empty, lock is already in
1634                  * this closure.
1635                  */
1636                 if (cfs_list_empty(&lock->cll_inclosure)) {
1637                         cl_lock_get_trust(lock);
1638                         lu_ref_add(&lock->cll_reference, "closure", closure);
1639                         cfs_list_add(&lock->cll_inclosure, &closure->clc_list);
1640                         closure->clc_nr++;
1641                 } else
1642                         cl_lock_mutex_put(env, lock);
1643                 result = 0;
1644         } else {
1645                 cl_lock_disclosure(env, closure);
1646                 if (closure->clc_wait) {
1647                         cl_lock_get_trust(lock);
1648                         lu_ref_add(&lock->cll_reference, "closure-w", closure);
1649                         cl_lock_mutex_put(env, closure->clc_origin);
1650
1651                         LASSERT(cl_lock_nr_mutexed(env) == 0);
1652                         cl_lock_mutex_get(env, lock);
1653                         cl_lock_mutex_put(env, lock);
1654
1655                         cl_lock_mutex_get(env, closure->clc_origin);
1656                         lu_ref_del(&lock->cll_reference, "closure-w", closure);
1657                         cl_lock_put(env, lock);
1658                 }
1659                 result = CLO_REPEAT;
1660         }
1661         RETURN(result);
1662 }
1663 EXPORT_SYMBOL(cl_lock_enclosure);
1664
1665 /** Releases mutices of enclosed locks. */
1666 void cl_lock_disclosure(const struct lu_env *env,
1667                         struct cl_lock_closure *closure)
1668 {
1669         struct cl_lock *scan;
1670         struct cl_lock *temp;
1671
1672         cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
1673         cfs_list_for_each_entry_safe(scan, temp, &closure->clc_list,
1674                                      cll_inclosure) {
1675                 cfs_list_del_init(&scan->cll_inclosure);
1676                 cl_lock_mutex_put(env, scan);
1677                 lu_ref_del(&scan->cll_reference, "closure", closure);
1678                 cl_lock_put(env, scan);
1679                 closure->clc_nr--;
1680         }
1681         LASSERT(closure->clc_nr == 0);
1682 }
1683 EXPORT_SYMBOL(cl_lock_disclosure);
1684
1685 /** Finalizes a closure. */
1686 void cl_lock_closure_fini(struct cl_lock_closure *closure)
1687 {
1688         LASSERT(closure->clc_nr == 0);
1689         LASSERT(cfs_list_empty(&closure->clc_list));
1690 }
1691 EXPORT_SYMBOL(cl_lock_closure_fini);
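
/*
 * Illustration (not part of the original source): the typical closure life
 * cycle suggested by the functions above -- init, build, use the enclosed
 * (mutexed) locks, disclose, fini. The helper name is hypothetical, the
 * origin lock is assumed to be mutexed by the caller, and retrying on
 * CLO_REPEAT is omitted for brevity.
 *
 * \code
 * static int example_count_closure(const struct lu_env *env,
 *                                  struct cl_lock *origin,
 *                                  struct cl_lock *other)
 * {
 *         struct cl_lock_closure closure;
 *         int rc;
 *
 *         cl_lock_closure_init(env, &closure, origin, 1);
 *         rc = cl_lock_closure_build(env, other, &closure);
 *         if (rc == 0) {
 *                 rc = closure.clc_nr;
 *                 cl_lock_disclosure(env, &closure);
 *         }
 *         cl_lock_closure_fini(&closure);
 *         return rc;
 * }
 * \endcode
 */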
1692
1693 /**
1694  * Destroys this lock. Notifies layers (bottom-to-top) that the lock is being
1695  * destroyed, then destroys the lock. If there are holds on the lock,
1696  * destruction is postponed until all holds are released. This is called when
1697  * a decision is made to destroy the lock in the future, e.g., when a blocking
1698  * AST is received on it, or a fatal communication error happens.
1699  *
1700  * The caller must have a reference on this lock to prevent a situation where
1701  * the deleted lock lingers in memory indefinitely because nobody calls
1702  * cl_lock_put() to finalize it.
1703  *
1704  * \pre atomic_read(&lock->cll_ref) > 0
1705  * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
1706  *           cl_lock_nr_mutexed(env) == 1)
1707  *      [i.e., if a top-lock is deleted, mutices of no other locks can be
1708  *      held, as deletion of sub-locks might require releasing a top-lock
1709  *      mutex]
1710  *
1711  * \see cl_lock_operations::clo_delete()
1712  * \see cl_lock::cll_holds
1713  */
1714 void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock)
1715 {
1716         LINVRNT(cl_lock_is_mutexed(lock));
1717         LINVRNT(cl_lock_invariant(env, lock));
1718         LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
1719                      cl_lock_nr_mutexed(env) == 1));
1720
1721         ENTRY;
1722         cl_lock_trace(D_DLMTRACE, env, "delete lock", lock);
1723         if (lock->cll_holds == 0)
1724                 cl_lock_delete0(env, lock);
1725         else
1726                 lock->cll_flags |= CLF_DOOMED;
1727         EXIT;
1728 }
1729 EXPORT_SYMBOL(cl_lock_delete);
1730
1731 /**
1732  * Marks the lock as irrecoverably failed, and marks it for destruction. This
1733  * happens when, e.g., the server fails to grant a lock to us, or a network
1734  * time-out occurs.
1735  *
1736  * \pre atomic_read(&lock->cll_ref) > 0
1737  *
1738  * \see cl_lock_delete()
1739  * \see cl_lock::cll_holds
1740  */
1741 void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
1742 {
1743         LINVRNT(cl_lock_is_mutexed(lock));
1744         LINVRNT(cl_lock_invariant(env, lock));
1745
1746         ENTRY;
1747         cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
1748         if (lock->cll_error == 0 && error != 0) {
1749                 lock->cll_error = error;
1750                 cl_lock_signal(env, lock);
1751                 cl_lock_cancel(env, lock);
1752                 cl_lock_delete(env, lock);
1753         }
1754         EXIT;
1755 }
1756 EXPORT_SYMBOL(cl_lock_error);
1757
1758 /**
1759  * Cancels this lock. Notifies layers (bottom-to-top) that the lock is being
1760  * cancelled, then cancels the lock. If there are holds on the lock,
1761  * cancellation is postponed until all holds are released.
1763  *
1764  * Cancellation notification is delivered to layers at most once.
1765  *
1766  * \see cl_lock_operations::clo_cancel()
1767  * \see cl_lock::cll_holds
1768  */
1769 void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
1770 {
1771         LINVRNT(cl_lock_is_mutexed(lock));
1772         LINVRNT(cl_lock_invariant(env, lock));
1773
1774         ENTRY;
1775         cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
1776         if (lock->cll_holds == 0)
1777                 cl_lock_cancel0(env, lock);
1778         else
1779                 lock->cll_flags |= CLF_CANCELPEND;
1780         EXIT;
1781 }
1782 EXPORT_SYMBOL(cl_lock_cancel);
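
/*
 * Illustration (not part of the original source): a minimal sketch of the
 * cancel-then-delete sequence used, e.g., when a blocking AST arrives for a
 * lock. It mirrors the combination used by cl_lock_error() above and by
 * cl_locks_prune() below; the helper name is hypothetical and the caller is
 * assumed to hold a reference on the lock.
 *
 * \code
 * static void example_blocking_ast(const struct lu_env *env,
 *                                  struct cl_lock *lock)
 * {
 *         cl_lock_mutex_get(env, lock);
 *         cl_lock_cancel(env, lock);
 *         cl_lock_delete(env, lock);
 *         cl_lock_mutex_put(env, lock);
 * }
 * \endcode
 */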
1783
1784 /**
1785  * Finds an existing lock covering the given page, optionally different from
1786  * a given \a except lock.
1787  */
1788 struct cl_lock *cl_lock_at_page(const struct lu_env *env, struct cl_object *obj,
1789                                 struct cl_page *page, struct cl_lock *except,
1790                                 int pending, int canceld)
1791 {
1792         struct cl_object_header *head;
1793         struct cl_lock          *scan;
1794         struct cl_lock          *lock;
1795         struct cl_lock_descr    *need;
1796
1797         ENTRY;
1798
1799         head = cl_object_header(obj);
1800         need = &cl_env_info(env)->clt_descr;
1801         lock = NULL;
1802
1803         need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but
1804                                     * not PHANTOM */
1805         need->cld_start = need->cld_end = page->cp_index;
1806         need->cld_enq_flags = 0;
1807
1808         cfs_spin_lock(&head->coh_lock_guard);
1809         /* It is fine to match any group lock since there can be only one
1810          * with a unique gid, and it conflicts with all other lock modes too */
1811         cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
1812                 if (scan != except &&
1813                     (scan->cll_descr.cld_mode == CLM_GROUP ||
1814                     cl_lock_ext_match(&scan->cll_descr, need)) &&
1815                     scan->cll_state >= CLS_HELD &&
1816                     scan->cll_state < CLS_FREEING &&
1817                     /*
1818                      * This check is racy as the lock can be canceled right
1819                      * after it is done, but this is fine, because page exists
1820                      * already.
1821                      */
1822                     (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
1823                     (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
1824                         /* Don't increase cs_hit here since this
1825                          * is just a helper function. */
1826                         cl_lock_get_trust(scan);
1827                         lock = scan;
1828                         break;
1829                 }
1830         }
1831         cfs_spin_unlock(&head->coh_lock_guard);
1832         RETURN(lock);
1833 }
1834 EXPORT_SYMBOL(cl_lock_at_page);
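
/*
 * Illustration (not part of the original source): a hypothetical check for
 * whether some granted, non-cancelled lock other than \a except still covers
 * a page. The reference returned by cl_lock_at_page() must be dropped with
 * cl_lock_put(); the helper name is an assumption for this sketch.
 *
 * \code
 * static int example_page_is_covered(const struct lu_env *env,
 *                                    struct cl_object *obj,
 *                                    struct cl_page *page,
 *                                    struct cl_lock *except)
 * {
 *         struct cl_lock *lock;
 *
 *         lock = cl_lock_at_page(env, obj, page, except, 0, 0);
 *         if (lock == NULL)
 *                 return 0;
 *         cl_lock_put(env, lock);
 *         return 1;
 * }
 * \endcode
 */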
1835
1836 /**
1837  * Returns a list of pages protected (only) by a given lock.
1838  *
1839  * Scans the extent of the page radix tree corresponding to \a lock and leaves
1840  * in \a queue only those pages that are protected by no lock other than \a lock.
1841  */
1842 void cl_lock_page_list_fixup(const struct lu_env *env,
1843                              struct cl_io *io, struct cl_lock *lock,
1844                              struct cl_page_list *queue)
1845 {
1846         struct cl_page        *page;
1847         struct cl_page        *temp;
1848         struct cl_page_list   *plist = &cl_env_info(env)->clt_list;
1849
1850         LINVRNT(cl_lock_invariant(env, lock));
1851         ENTRY;
1852
1853         /* Now that we have a list of cl_pages under \a lock, we need to
1854          * check whether some of the pages are covered by another ldlm lock.
1855          * If that is the case, they do not need to be written out this time.
1856          *
1857          * For example, the client holds PW locks A:[0,200] and B:[100,300],
1858          * and the latter is to be cancelled. This means another client is
1859          * reading/writing [200,300], since A will not be cancelled. So we
1860          * only need to write the pages covered by [200,300]. This is safe,
1861          * since [100,200] is still protected by lock A.
1862          */
1863
1864         cl_page_list_init(plist);
1865         cl_page_list_for_each_safe(page, temp, queue) {
1866                 pgoff_t                idx = page->cp_index;
1867                 struct cl_lock        *found;
1868                 struct cl_lock_descr  *descr;
1869
1870                 /* The algorithm relies on the pages being in ascending index order. */
1871                 LASSERT(ergo(&temp->cp_batch != &queue->pl_pages,
1872                         page->cp_index < temp->cp_index));
1873
1874                 found = cl_lock_at_page(env, lock->cll_descr.cld_obj,
1875                                         page, lock, 0, 0);
1876                 if (found == NULL)
1877                         continue;
1878
1879                 descr = &found->cll_descr;
1880                 cfs_list_for_each_entry_safe_from(page, temp, &queue->pl_pages,
1881                                                   cp_batch) {
1882                         idx = page->cp_index;
1883                         if (descr->cld_start > idx || descr->cld_end < idx)
1884                                 break;
1885                         cl_page_list_move(plist, queue, page);
1886                 }
1887                 cl_lock_put(env, found);
1888         }
1889
1890         /* The pages in plist are covered by other locks; don't handle them
1891          * this time.
1892          */
1893         if (io != NULL)
1894                 cl_page_list_disown(env, io, plist);
1895         cl_page_list_fini(env, plist);
1896         EXIT;
1897 }
1898 EXPORT_SYMBOL(cl_lock_page_list_fixup);
1899
1900 /**
1901  * Invalidate pages protected by the given lock, sending them out to the
1902  * server first, if necessary.
1903  *
1904  * This function does the following:
1905  *
1906  *     - collects a list of pages to be invalidated,
1907  *
1908  *     - unmaps them from the user virtual memory,
1909  *
1910  *     - sends dirty pages to the server,
1911  *
1912  *     - waits for transfer completion,
1913  *
1914  *     - discards pages, and throws them out of memory.
1915  *
1916  * If \a discard is set, pages are discarded without sending them to the
1917  * server.
1918  *
1919  * If an error happens at any step, the process continues anyway (the
1920  * reasoning being that lock cancellation cannot be delayed indefinitely).
1921  */
1922 int cl_lock_page_out(const struct lu_env *env, struct cl_lock *lock,
1923                      int discard)
1924 {
1925         struct cl_thread_info *info  = cl_env_info(env);
1926         struct cl_io          *io    = &info->clt_io;
1927         struct cl_2queue      *queue = &info->clt_queue;
1928         struct cl_lock_descr  *descr = &lock->cll_descr;
1929         long page_count;
1930         int nonblock = 1, resched;
1931         int result;
1932
1933         LINVRNT(cl_lock_invariant(env, lock));
1934         ENTRY;
1935
1936         io->ci_obj = cl_object_top(descr->cld_obj);
1937         result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
1938         if (result != 0)
1939                 GOTO(out, result);
1940
1941         do {
1942                 cl_2queue_init(queue);
1943                 cl_page_gang_lookup(env, descr->cld_obj, io, descr->cld_start,
1944                                     descr->cld_end, &queue->c2_qin, nonblock,
1945                                     &resched);
1946                 page_count = queue->c2_qin.pl_nr;
1947                 if (page_count > 0) {
1948                         result = cl_page_list_unmap(env, io, &queue->c2_qin);
1949                         if (!discard) {
1950                                 long timeout = 600; /* 10 minutes. */
1951                                 /* For debugging: if this request cannot
1952                                  * finish within 10 minutes, we want to be
1953                                  * notified of it.
1954                                  */
1955                                 result = cl_io_submit_sync(env, io, CRT_WRITE,
1956                                                            queue, CRP_CANCEL,
1957                                                            timeout);
1958                                 if (result)
1959                                         CWARN("Writing %lu pages error: %d\n",
1960                                               page_count, result);
1961                         }
1962                         cl_lock_page_list_fixup(env, io, lock, &queue->c2_qout);
1963                         cl_2queue_discard(env, io, queue);
1964                         cl_2queue_disown(env, io, queue);
1965                 }
1966                 cl_2queue_fini(env, queue);
1967
1968                 if (resched)
1969                         cfs_cond_resched();
1970         } while (resched || nonblock--);
1971 out:
1972         cl_io_fini(env, io);
1973         RETURN(result);
1974 }
1975 EXPORT_SYMBOL(cl_lock_page_out);
1976
1977 /**
1978  * Eliminates all locks for a given object.
1979  *
1980  * The caller has to guarantee that no lock is in active use.
1981  *
1982  * \param cancel when set, cl_locks_prune() cancels locks before destroying
1983  *               them.
1984  */
1985 void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
1986 {
1987         struct cl_object_header *head;
1988         struct cl_lock          *lock;
1989
1990         ENTRY;
1991         head = cl_object_header(obj);
1992         /*
1993          * If locks are destroyed without cancellation, all pages must be
1994          * already destroyed (as otherwise they will be left unprotected).
1995          */
1996         LASSERT(ergo(!cancel,
1997                      head->coh_tree.rnode == NULL && head->coh_pages == 0));
1998
1999         cfs_spin_lock(&head->coh_lock_guard);
2000         while (!cfs_list_empty(&head->coh_locks)) {
2001                 lock = container_of(head->coh_locks.next,
2002                                     struct cl_lock, cll_linkage);
2003                 cl_lock_get_trust(lock);
2004                 cfs_spin_unlock(&head->coh_lock_guard);
2005                 lu_ref_add(&lock->cll_reference, "prune", cfs_current());
2006                 cl_lock_mutex_get(env, lock);
2007                 if (lock->cll_state < CLS_FREEING) {
2008                         LASSERT(lock->cll_holds == 0);
2009                         LASSERT(lock->cll_users == 0);
2010                         if (cancel)
2011                                 cl_lock_cancel(env, lock);
2012                         cl_lock_delete(env, lock);
2013                 }
2014                 cl_lock_mutex_put(env, lock);
2015                 lu_ref_del(&lock->cll_reference, "prune", cfs_current());
2016                 cl_lock_put(env, lock);
2017                 cfs_spin_lock(&head->coh_lock_guard);
2018         }
2019         cfs_spin_unlock(&head->coh_lock_guard);
2020         EXIT;
2021 }
2022 EXPORT_SYMBOL(cl_locks_prune);
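
/*
 * Illustration (not part of the original source): a minimal sketch of how
 * object tear-down might drive cl_locks_prune(). Passing cancel = 1 is the
 * safe default when pages may still exist under the locks; cancel = 0 is
 * only valid once the page tree is already empty, per the assertion above.
 * The helper name and the pages_gone flag are hypothetical.
 *
 * \code
 * static void example_object_prune(const struct lu_env *env,
 *                                  struct cl_object *obj, int pages_gone)
 * {
 *         cl_locks_prune(env, obj, !pages_gone);
 * }
 * \endcode
 */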
2023
2024 /**
2025  * Returns true if \a addr is the address of an allocated cl_lock. Used in
2026  * assertions. This check is optimistically imprecise, i.e., it occasionally
2027  * returns true for incorrect addresses, but if it returns false, then the
2028  * address is guaranteed to be incorrect. (Should be named cl_lockp().)
2029  *
2030  * \see cl_is_page()
2031  */
2032 int cl_is_lock(const void *addr)
2033 {
2034         return cfs_mem_is_in_cache(addr, cl_lock_kmem);
2035 }
2036 EXPORT_SYMBOL(cl_is_lock);
2037
2038 static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
2039                                           const struct cl_io *io,
2040                                           const struct cl_lock_descr *need,
2041                                           const char *scope, const void *source)
2042 {
2043         struct cl_lock *lock;
2044
2045         ENTRY;
2046
2047         while (1) {
2048                 lock = cl_lock_find(env, io, need);
2049                 if (IS_ERR(lock))
2050                         break;
2051                 cl_lock_mutex_get(env, lock);
2052                 if (lock->cll_state < CLS_FREEING &&
2053                     !(lock->cll_flags & CLF_CANCELLED)) {
2054                         cl_lock_hold_mod(env, lock, +1);
2055                         lu_ref_add(&lock->cll_holders, scope, source);
2056                         lu_ref_add(&lock->cll_reference, scope, source);
2057                         break;
2058                 }
2059                 cl_lock_mutex_put(env, lock);
2060                 cl_lock_put(env, lock);
2061         }
2062         RETURN(lock);
2063 }
2064
2065 /**
2066  * Returns a lock matching \a need description with a reference and a hold on
2067  * it.
2068  *
2069  * This is much like cl_lock_find(), except that cl_lock_hold() additionally
2070  * guarantees that lock is not in the CLS_FREEING state on return.
2071  */
2072 struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
2073                              const struct cl_lock_descr *need,
2074                              const char *scope, const void *source)
2075 {
2076         struct cl_lock *lock;
2077
2078         ENTRY;
2079
2080         lock = cl_lock_hold_mutex(env, io, need, scope, source);
2081         if (!IS_ERR(lock))
2082                 cl_lock_mutex_put(env, lock);
2083         RETURN(lock);
2084 }
2085 EXPORT_SYMBOL(cl_lock_hold);
2086
2087 /**
2088  * Main high-level entry point of the cl_lock interface: finds an existing
2089  * lock or enqueues a new lock matching the given description.
2090  */
2091 struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
2092                                 const struct cl_lock_descr *need,
2093                                 const char *scope, const void *source)
2094 {
2095         struct cl_lock       *lock;
2096         const struct lu_fid  *fid;
2097         int                   rc;
2098         int                   iter;
2099         __u32                 enqflags = need->cld_enq_flags;
2100
2101         ENTRY;
2102         fid = lu_object_fid(&io->ci_obj->co_lu);
2103         iter = 0;
2104         do {
2105                 lock = cl_lock_hold_mutex(env, io, need, scope, source);
2106                 if (!IS_ERR(lock)) {
2107                         rc = cl_enqueue_locked(env, lock, io, enqflags);
2108                         if (rc == 0) {
2109                                 if (cl_lock_fits_into(env, lock, need, io)) {
2110                                         cl_lock_mutex_put(env, lock);
2111                                         cl_lock_lockdep_acquire(env,
2112                                                                 lock, enqflags);
2113                                         break;
2114                                 }
2115                                 cl_unuse_locked(env, lock);
2116                         }
2117                         cl_lock_trace(D_DLMTRACE, env, "enqueue failed", lock);
2118                         cl_lock_hold_release(env, lock, scope, source);
2119                         cl_lock_mutex_put(env, lock);
2120                         lu_ref_del(&lock->cll_reference, scope, source);
2121                         cl_lock_put(env, lock);
2122                         lock = ERR_PTR(rc);
2123                 } else
2124                         rc = PTR_ERR(lock);
2125                 iter++;
2126         } while (rc == 0);
2127         RETURN(lock);
2128 }
2129 EXPORT_SYMBOL(cl_lock_request);
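
/*
 * Illustration (not part of the original source): the usual pairing of
 * cl_lock_request() with cl_lock_release() (defined below). The helper name,
 * the scope string and the way the descriptor is filled in are assumptions
 * for this sketch; real callers also balance the lock's user count (see
 * cl_unuse() earlier in this file) before releasing, which is elided here.
 *
 * \code
 * static int example_lock_range(const struct lu_env *env, struct cl_io *io,
 *                               struct cl_object *obj,
 *                               pgoff_t start, pgoff_t end)
 * {
 *         struct cl_lock_descr descr;
 *         struct cl_lock      *lock;
 *
 *         memset(&descr, 0, sizeof descr);
 *         descr.cld_obj       = obj;
 *         descr.cld_mode      = CLM_READ;
 *         descr.cld_start     = start;
 *         descr.cld_end       = end;
 *         descr.cld_enq_flags = 0;
 *
 *         lock = cl_lock_request(env, io, &descr, "example", io);
 *         if (IS_ERR(lock))
 *                 return PTR_ERR(lock);
 *         cl_lock_release(env, lock, "example", io);
 *         return 0;
 * }
 * \endcode
 */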
2130
2131 /**
2132  * Adds a hold to a known lock.
2133  */
2134 void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
2135                       const char *scope, const void *source)
2136 {
2137         LINVRNT(cl_lock_is_mutexed(lock));
2138         LINVRNT(cl_lock_invariant(env, lock));
2139         LASSERT(lock->cll_state != CLS_FREEING);
2140
2141         ENTRY;
2142         cl_lock_hold_mod(env, lock, +1);
2143         cl_lock_get(lock);
2144         lu_ref_add(&lock->cll_holders, scope, source);
2145         lu_ref_add(&lock->cll_reference, scope, source);
2146         EXIT;
2147 }
2148 EXPORT_SYMBOL(cl_lock_hold_add);
2149
2150 /**
2151  * Releases a hold and a reference on a lock on which the caller has
2152  * acquired the mutex.
2153  */
2154 void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
2155                     const char *scope, const void *source)
2156 {
2157         LINVRNT(cl_lock_invariant(env, lock));
2158         ENTRY;
2159         cl_lock_hold_release(env, lock, scope, source);
2160         lu_ref_del(&lock->cll_reference, scope, source);
2161         cl_lock_put(env, lock);
2162         EXIT;
2163 }
2164 EXPORT_SYMBOL(cl_lock_unhold);
2165
2166 /**
2167  * Releases a hold and a reference on a lock, obtained by cl_lock_hold().
2168  */
2169 void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
2170                      const char *scope, const void *source)
2171 {
2172         LINVRNT(cl_lock_invariant(env, lock));
2173         ENTRY;
2174         cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
2175         cl_lock_mutex_get(env, lock);
2176         cl_lock_hold_release(env, lock, scope, source);
2177         cl_lock_mutex_put(env, lock);
2178         lu_ref_del(&lock->cll_reference, scope, source);
2179         cl_lock_put(env, lock);
2180         EXIT;
2181 }
2182 EXPORT_SYMBOL(cl_lock_release);
2183
2184 void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
2185 {
2186         LINVRNT(cl_lock_is_mutexed(lock));
2187         LINVRNT(cl_lock_invariant(env, lock));
2188
2189         ENTRY;
2190         cl_lock_used_mod(env, lock, +1);
2191         EXIT;
2192 }
2193 EXPORT_SYMBOL(cl_lock_user_add);
2194
2195 int cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
2196 {
2197         LINVRNT(cl_lock_is_mutexed(lock));
2198         LINVRNT(cl_lock_invariant(env, lock));
2199         LASSERT(lock->cll_users > 0);
2200
2201         ENTRY;
2202         cl_lock_used_mod(env, lock, -1);
2203         RETURN(lock->cll_users == 0);
2204 }
2205 EXPORT_SYMBOL(cl_lock_user_del);
2206
2207 const char *cl_lock_mode_name(const enum cl_lock_mode mode)
2208 {
2209         static const char *names[] = {
2210                 [CLM_PHANTOM] = "P",
2211                 [CLM_READ]    = "R",
2212                 [CLM_WRITE]   = "W",
2213                 [CLM_GROUP]   = "G"
2214         };
2215         if (0 <= mode && mode < ARRAY_SIZE(names))
2216                 return names[mode];
2217         else
2218                 return "U";
2219 }
2220 EXPORT_SYMBOL(cl_lock_mode_name);
2221
2222 /**
2223  * Prints a human-readable representation of a lock description.
2224  */
2225 void cl_lock_descr_print(const struct lu_env *env, void *cookie,
2226                        lu_printer_t printer,
2227                        const struct cl_lock_descr *descr)
2228 {
2229         const struct lu_fid  *fid;
2230
2231         fid = lu_object_fid(&descr->cld_obj->co_lu);
2232         (*printer)(env, cookie, DDESCR"@"DFID, PDESCR(descr), PFID(fid));
2233 }
2234 EXPORT_SYMBOL(cl_lock_descr_print);
2235
2236 /**
2237  * Prints a human-readable representation of \a lock via \a printer.
2238  */
2239 void cl_lock_print(const struct lu_env *env, void *cookie,
2240                    lu_printer_t printer, const struct cl_lock *lock)
2241 {
2242         const struct cl_lock_slice *slice;
2243         (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
2244                    lock, cfs_atomic_read(&lock->cll_ref),
2245                    lock->cll_state, lock->cll_error, lock->cll_holds,
2246                    lock->cll_users, lock->cll_flags);
2247         cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
2248         (*printer)(env, cookie, " {\n");
2249
2250         cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
2251                 (*printer)(env, cookie, "    %s@%p: ",
2252                            slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
2253                            slice);
2254                 if (slice->cls_ops->clo_print != NULL)
2255                         slice->cls_ops->clo_print(env, cookie, printer, slice);
2256                 (*printer)(env, cookie, "\n");
2257         }
2258         (*printer)(env, cookie, "} lock@%p\n", lock);
2259 }
2260 EXPORT_SYMBOL(cl_lock_print);
2261
2262 int cl_lock_init(void)
2263 {
2264         return lu_kmem_init(cl_lock_caches);
2265 }
2266
2267 void cl_lock_fini(void)
2268 {
2269         lu_kmem_fini(cl_lock_caches);
2270 }