/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Client Extent Lock.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif

#include <obd_class.h>
#include <obd_support.h>
#include <lustre_fid.h>
#include <libcfs/list.h>
/* lu_time_global_{init,fini}() */
#include <lu_time.h>

#include <cl_object.h>
#include "cl_internal.h"

/** Lock class of cl_lock::cll_guard */
static cfs_lock_class_key_t cl_lock_guard_class;
static cfs_mem_cache_t *cl_lock_kmem;

static struct lu_kmem_descr cl_lock_caches[] = {
        {
                .ckd_cache = &cl_lock_kmem,
                .ckd_name  = "cl_lock_kmem",
                .ckd_size  = sizeof (struct cl_lock)
        },
        {
                .ckd_cache = NULL
        }
};

/**
 * Basic lock invariant that is maintained at all times. Caller either has a
 * reference to \a lock, or somehow assures that \a lock cannot be freed.
 *
 * \see cl_lock_invariant()
 */
static int cl_lock_invariant_trusted(const struct lu_env *env,
                                     const struct cl_lock *lock)
{
        return
                cl_is_lock(lock) &&
                ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
                cfs_atomic_read(&lock->cll_ref) >= lock->cll_holds &&
                lock->cll_holds >= lock->cll_users &&
                lock->cll_holds >= 0 &&
                lock->cll_users >= 0 &&
                lock->cll_depth >= 0;
}

/**
 * Stronger lock invariant, checking that caller has a reference on a lock.
 *
 * \see cl_lock_invariant_trusted()
 */
static int cl_lock_invariant(const struct lu_env *env,
                             const struct cl_lock *lock)
{
        int result;

        result = cfs_atomic_read(&lock->cll_ref) > 0 &&
                cl_lock_invariant_trusted(env, lock);
        if (!result && env != NULL)
                CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
        return result;
}

/**
 * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock.
 */
static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
{
        return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
}

/**
 * Returns a set of counters for this lock, depending on a lock nesting.
 */
static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
                                                   const struct cl_lock *lock)
{
        struct cl_thread_info *info;
        enum clt_nesting_level nesting;

        info = cl_env_info(env);
        nesting = cl_lock_nesting(lock);
        LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
        return &info->clt_counters[nesting];
}

static void cl_lock_trace0(int level, const struct lu_env *env,
                           const char *prefix, const struct cl_lock *lock,
                           const char *func, const int line)
{
        struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
        CDEBUG(level, "%s: %p@(%i %p %i %d %d %d %d %lx)"
                      "(%p/%d/%i) at %s():%d\n",
               prefix, lock, cfs_atomic_read(&lock->cll_ref),
               lock->cll_guarder, lock->cll_depth,
               lock->cll_state, lock->cll_error, lock->cll_holds,
               lock->cll_users, lock->cll_flags,
               env, h->coh_nesting, cl_lock_nr_mutexed(env),
               func, line);
}
#define cl_lock_trace(level, env, prefix, lock)                         \
        cl_lock_trace0(level, env, prefix, lock, __FUNCTION__, __LINE__)

#define RETIP ((unsigned long)__builtin_return_address(0))

#ifdef CONFIG_LOCKDEP
static cfs_lock_class_key_t cl_lock_key;

static void cl_lock_lockdep_init(struct cl_lock *lock)
{
        lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
}

static void cl_lock_lockdep_acquire(const struct lu_env *env,
                                    struct cl_lock *lock, __u32 enqflags)
{
        cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
        lock_acquire(&lock->dep_map, !!(enqflags & CEF_ASYNC),
                     /* try: */ 0, lock->cll_descr.cld_mode <= CLM_READ,
                     /* check: */ 2, RETIP);
}

static void cl_lock_lockdep_release(const struct lu_env *env,
                                    struct cl_lock *lock)
{
        cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
        lock_release(&lock->dep_map, 0, RETIP);
}

#else /* !CONFIG_LOCKDEP */

static void cl_lock_lockdep_init(struct cl_lock *lock)
{}
static void cl_lock_lockdep_acquire(const struct lu_env *env,
                                    struct cl_lock *lock, __u32 enqflags)
{}
static void cl_lock_lockdep_release(const struct lu_env *env,
                                    struct cl_lock *lock)
{}

#endif /* !CONFIG_LOCKDEP */

/**
 * Adds lock slice to the compound lock.
 *
 * This is called by cl_object_operations::coo_lock_init() methods to add a
 * per-layer state to the lock. New state is added at the end of
 * cl_lock::cll_layers list, that is, it is at the bottom of the stack.
 *
 * \see cl_req_slice_add(), cl_page_slice_add(), cl_io_slice_add()
 */
void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
                       struct cl_object *obj,
                       const struct cl_lock_operations *ops)
{
        ENTRY;
        slice->cls_lock = lock;
        cfs_list_add_tail(&slice->cls_linkage, &lock->cll_layers);
        slice->cls_obj = obj;
        slice->cls_ops = ops;
        EXIT;
}
EXPORT_SYMBOL(cl_lock_slice_add);

/**
 * Returns true iff a lock with the mode \a has provides at least the same
 * guarantees as a lock with the mode \a need.
 */
int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
{
        LINVRNT(need == CLM_READ || need == CLM_WRITE ||
                need == CLM_PHANTOM || need == CLM_GROUP);
        LINVRNT(has == CLM_READ || has == CLM_WRITE ||
                has == CLM_PHANTOM || has == CLM_GROUP);
        CLASSERT(CLM_PHANTOM < CLM_READ);
        CLASSERT(CLM_READ < CLM_WRITE);
        CLASSERT(CLM_WRITE < CLM_GROUP);

        if (has != CLM_GROUP)
                return need <= has;
        else
                return need == has;
}
EXPORT_SYMBOL(cl_lock_mode_match);
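
/*
 * An illustrative truth table for the compatibility rule above (not part of
 * the original interface): a GROUP lock only satisfies another GROUP
 * request, while the ordered modes satisfy any weaker-or-equal request:
 *
 *     cl_lock_mode_match(CLM_WRITE, CLM_READ)  == 1
 *     cl_lock_mode_match(CLM_READ,  CLM_WRITE) == 0
 *     cl_lock_mode_match(CLM_GROUP, CLM_READ)  == 0
 *     cl_lock_mode_match(CLM_GROUP, CLM_GROUP) == 1
 */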

/**
 * Returns true iff extent portions of lock descriptions match.
 */
int cl_lock_ext_match(const struct cl_lock_descr *has,
                      const struct cl_lock_descr *need)
{
        return
                has->cld_start <= need->cld_start &&
                has->cld_end >= need->cld_end &&
                cl_lock_mode_match(has->cld_mode, need->cld_mode) &&
                (has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid);
}
EXPORT_SYMBOL(cl_lock_ext_match);

/**
 * Returns true iff a lock with the description \a has provides at least the
 * same guarantees as a lock with the description \a need.
 */
int cl_lock_descr_match(const struct cl_lock_descr *has,
                        const struct cl_lock_descr *need)
{
        return
                cl_object_same(has->cld_obj, need->cld_obj) &&
                cl_lock_ext_match(has, need);
}
EXPORT_SYMBOL(cl_lock_descr_match);

static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
{
        struct cl_object *obj = lock->cll_descr.cld_obj;

        LASSERT(cl_is_lock(lock));
        LINVRNT(!cl_lock_is_mutexed(lock));

        ENTRY;
        cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
        cfs_might_sleep();
        while (!cfs_list_empty(&lock->cll_layers)) {
                struct cl_lock_slice *slice;

                slice = cfs_list_entry(lock->cll_layers.next,
                                       struct cl_lock_slice, cls_linkage);
                cfs_list_del_init(lock->cll_layers.next);
                slice->cls_ops->clo_fini(env, slice);
        }
        cfs_atomic_dec(&cl_object_site(obj)->cs_locks.cs_total);
        cfs_atomic_dec(&cl_object_site(obj)->cs_locks_state[lock->cll_state]);
        lu_object_ref_del_at(&obj->co_lu, lock->cll_obj_ref, "cl_lock", lock);
        cl_object_put(env, obj);
        lu_ref_fini(&lock->cll_reference);
        lu_ref_fini(&lock->cll_holders);
        cfs_mutex_destroy(&lock->cll_guard);
        OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
        EXIT;
}

/**
 * Releases a reference on a lock.
 *
 * When last reference is released, lock is returned to the cache, unless it
 * is in cl_lock_state::CLS_FREEING state, in which case it is destroyed
 * immediately.
 *
 * \see cl_object_put(), cl_page_put()
 */
void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
{
        struct cl_object        *obj;
        struct cl_object_header *head;
        struct cl_site          *site;

        LINVRNT(cl_lock_invariant(env, lock));
        ENTRY;
        obj = lock->cll_descr.cld_obj;
        LINVRNT(obj != NULL);
        head = cl_object_header(obj);
        site = cl_object_site(obj);

        CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
               cfs_atomic_read(&lock->cll_ref), lock, RETIP);

        if (cfs_atomic_dec_and_test(&lock->cll_ref)) {
                if (lock->cll_state == CLS_FREEING) {
                        LASSERT(cfs_list_empty(&lock->cll_linkage));
                        cl_lock_free(env, lock);
                }
                cfs_atomic_dec(&site->cs_locks.cs_busy);
        }
        EXIT;
}
EXPORT_SYMBOL(cl_lock_put);
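
/*
 * A minimal reference-counting sketch (illustrative only; assumes the caller
 * already owns a reference to "lock", e.g. one returned by cl_lock_find()):
 *
 *     cl_lock_get(lock);         - take an additional reference
 *     ... use the lock ...
 *     cl_lock_put(env, lock);    - drop it; the lock is actually freed only
 *                                  when the last reference goes away and the
 *                                  lock is in CLS_FREEING state
 */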

/**
 * Acquires an additional reference to a lock.
 *
 * This can be called only by caller already possessing a reference to \a
 * lock.
 *
 * \see cl_object_get(), cl_page_get()
 */
void cl_lock_get(struct cl_lock *lock)
{
        LINVRNT(cl_lock_invariant(NULL, lock));
        CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
               cfs_atomic_read(&lock->cll_ref), lock, RETIP);
        cfs_atomic_inc(&lock->cll_ref);
}
EXPORT_SYMBOL(cl_lock_get);

/**
 * Acquires a reference to a lock.
 *
 * This is much like cl_lock_get(), except that this function can be used to
 * acquire initial reference to the cached lock. Caller has to deal with all
 * possible races. Use with care!
 *
 * \see cl_page_get_trust()
 */
void cl_lock_get_trust(struct cl_lock *lock)
{
        struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);

        LASSERT(cl_is_lock(lock));
        CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
               cfs_atomic_read(&lock->cll_ref), lock, RETIP);
        if (cfs_atomic_inc_return(&lock->cll_ref) == 1)
                cfs_atomic_inc(&site->cs_locks.cs_busy);
}
EXPORT_SYMBOL(cl_lock_get_trust);

/**
 * Helper function destroying the lock that wasn't completely initialized.
 *
 * Other threads can acquire references to the top-lock through its
 * sub-locks. Hence, it cannot be cl_lock_free()-ed immediately.
 */
static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
{
        cl_lock_mutex_get(env, lock);
        cl_lock_cancel(env, lock);
        cl_lock_delete(env, lock);
        cl_lock_mutex_put(env, lock);
        cl_lock_put(env, lock);
}

static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
                                     struct cl_object *obj,
                                     const struct cl_io *io,
                                     const struct cl_lock_descr *descr)
{
        struct cl_lock          *lock;
        struct lu_object_header *head;
        struct cl_site          *site = cl_object_site(obj);

        ENTRY;
        OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, CFS_ALLOC_IO);
        if (lock != NULL) {
                cfs_atomic_set(&lock->cll_ref, 1);
                lock->cll_descr = *descr;
                lock->cll_state = CLS_NEW;
                cl_object_get(obj);
                lock->cll_obj_ref = lu_object_ref_add(&obj->co_lu,
                                                      "cl_lock", lock);
                CFS_INIT_LIST_HEAD(&lock->cll_layers);
                CFS_INIT_LIST_HEAD(&lock->cll_linkage);
                CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
                lu_ref_init(&lock->cll_reference);
                lu_ref_init(&lock->cll_holders);
                cfs_mutex_init(&lock->cll_guard);
                cfs_lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
                cfs_waitq_init(&lock->cll_wq);
                head = obj->co_lu.lo_header;
                cfs_atomic_inc(&site->cs_locks_state[CLS_NEW]);
                cfs_atomic_inc(&site->cs_locks.cs_total);
                cfs_atomic_inc(&site->cs_locks.cs_created);
                cl_lock_lockdep_init(lock);
                cfs_list_for_each_entry(obj, &head->loh_layers,
                                        co_lu.lo_linkage) {
                        int err;

                        err = obj->co_ops->coo_lock_init(env, obj, lock, io);
                        if (err != 0) {
                                cl_lock_finish(env, lock);
                                lock = ERR_PTR(err);
                                break;
                        }
                }
        } else
                lock = ERR_PTR(-ENOMEM);
        RETURN(lock);
}

/**
 * Transfer the lock into INTRANSIT state and return the original state.
 *
 * \pre  state: CLS_CACHED, CLS_HELD or CLS_ENQUEUED
 * \post state: CLS_INTRANSIT
 * \see CLS_INTRANSIT
 */
enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
                                     struct cl_lock *lock)
{
        enum cl_lock_state state = lock->cll_state;

        LASSERT(cl_lock_is_mutexed(lock));
        LASSERT(state != CLS_INTRANSIT);
        LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
                 "Malformed lock state %d.\n", state);

        cl_lock_state_set(env, lock, CLS_INTRANSIT);
        lock->cll_intransit_owner = cfs_current();
        cl_lock_hold_add(env, lock, "intransit", cfs_current());
        return state;
}
EXPORT_SYMBOL(cl_lock_intransit);

/**
 * Exits the INTRANSIT state, restoring the original lock state \a state (as
 * returned earlier by cl_lock_intransit()).
 */
void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
                       enum cl_lock_state state)
{
        LASSERT(cl_lock_is_mutexed(lock));
        LASSERT(lock->cll_state == CLS_INTRANSIT);
        LASSERT(state != CLS_INTRANSIT);
        LASSERT(lock->cll_intransit_owner == cfs_current());

        lock->cll_intransit_owner = NULL;
        cl_lock_state_set(env, lock, state);
        cl_lock_unhold(env, lock, "intransit", cfs_current());
}
EXPORT_SYMBOL(cl_lock_extransit);
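
/*
 * Typical INTRANSIT usage pattern, as employed by cl_use_try() and
 * cl_unuse_try() below (a sketch; the lock mutex must be held throughout):
 *
 *     enum cl_lock_state state;
 *
 *     state = cl_lock_intransit(env, lock);  - now owned by this thread
 *     ... perform a potentially blocking state transition ...
 *     cl_lock_extransit(env, lock, state);   - restore the saved state
 */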

/**
 * Checks whether the lock is in the INTRANSIT state and is owned by another
 * thread.
 */
int cl_lock_is_intransit(struct cl_lock *lock)
{
        LASSERT(cl_lock_is_mutexed(lock));
        return lock->cll_state == CLS_INTRANSIT &&
               lock->cll_intransit_owner != cfs_current();
}
EXPORT_SYMBOL(cl_lock_is_intransit);
/**
 * Returns true iff lock is "suitable" for given io. E.g., locks acquired by
 * truncate and O_APPEND cannot be reused for read/non-append-write, as they
 * cover multiple stripes and can trigger cascading timeouts.
 */
static int cl_lock_fits_into(const struct lu_env *env,
                             const struct cl_lock *lock,
                             const struct cl_lock_descr *need,
                             const struct cl_io *io)
{
        const struct cl_lock_slice *slice;

        LINVRNT(cl_lock_invariant_trusted(env, lock));
        ENTRY;
        cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                if (slice->cls_ops->clo_fits_into != NULL &&
                    !slice->cls_ops->clo_fits_into(env, slice, need, io))
                        RETURN(0);
        }
        RETURN(1);
}

static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
                                      struct cl_object *obj,
                                      const struct cl_io *io,
                                      const struct cl_lock_descr *need)
{
        struct cl_lock          *lock;
        struct cl_object_header *head;
        struct cl_site          *site;

        ENTRY;

        head = cl_object_header(obj);
        site = cl_object_site(obj);
        LINVRNT_SPIN_LOCKED(&head->coh_lock_guard);
        cfs_atomic_inc(&site->cs_locks.cs_lookup);
        cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
                int matched;

                LASSERT(cl_is_lock(lock));
                matched = cl_lock_ext_match(&lock->cll_descr, need) &&
                          lock->cll_state < CLS_FREEING &&
                          lock->cll_error == 0 &&
                          !(lock->cll_flags & CLF_CANCELLED) &&
                          cl_lock_fits_into(env, lock, need, io);
                CDEBUG(D_DLMTRACE, "has: "DDESCR"(%i) need: "DDESCR": %d\n",
                       PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
                       matched);
                if (matched) {
                        cl_lock_get_trust(lock);
                        cfs_atomic_inc(&cl_object_site(obj)->cs_locks.cs_hit);
                        RETURN(lock);
                }
        }
        RETURN(NULL);
}

/**
 * Returns a lock matching description \a need.
 *
 * This is the main entry point into the cl_lock caching interface. First, a
 * cache (implemented as a per-object linked list) is consulted. If lock is
 * found there, it is returned immediately. Otherwise new lock is allocated
 * and returned. In any case, additional reference to lock is acquired.
 *
 * \see cl_object_find(), cl_page_find()
 */
static struct cl_lock *cl_lock_find(const struct lu_env *env,
                                    const struct cl_io *io,
                                    const struct cl_lock_descr *need)
{
        struct cl_object_header *head;
        struct cl_object        *obj;
        struct cl_lock          *lock;
        struct cl_site          *site;

        ENTRY;

        obj  = need->cld_obj;
        head = cl_object_header(obj);
        site = cl_object_site(obj);

        cfs_spin_lock(&head->coh_lock_guard);
        lock = cl_lock_lookup(env, obj, io, need);
        cfs_spin_unlock(&head->coh_lock_guard);

        if (lock == NULL) {
                lock = cl_lock_alloc(env, obj, io, need);
                if (!IS_ERR(lock)) {
                        struct cl_lock *ghost;

                        cfs_spin_lock(&head->coh_lock_guard);
                        ghost = cl_lock_lookup(env, obj, io, need);
                        if (ghost == NULL) {
                                cfs_list_add_tail(&lock->cll_linkage,
                                                  &head->coh_locks);
                                cfs_spin_unlock(&head->coh_lock_guard);
                                cfs_atomic_inc(&site->cs_locks.cs_busy);
                        } else {
                                cfs_spin_unlock(&head->coh_lock_guard);
                                /*
                                 * Other threads can acquire references to the
                                 * top-lock through its sub-locks. Hence, it
                                 * cannot be cl_lock_free()-ed immediately.
                                 */
                                cl_lock_finish(env, lock);
                                lock = ghost;
                        }
                }
        }
        RETURN(lock);
}

/**
 * Returns existing lock matching given description. This is similar to
 * cl_lock_find() except that no new lock is created, and returned lock is
 * guaranteed to be in enum cl_lock_state::CLS_HELD state.
 */
struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
                             const struct cl_lock_descr *need,
                             const char *scope, const void *source)
{
        struct cl_object_header *head;
        struct cl_object        *obj;
        struct cl_lock          *lock;
        int ok;

        obj  = need->cld_obj;
        head = cl_object_header(obj);

        cfs_spin_lock(&head->coh_lock_guard);
        lock = cl_lock_lookup(env, obj, io, need);
        cfs_spin_unlock(&head->coh_lock_guard);

        if (lock == NULL)
                return NULL;

        cl_lock_mutex_get(env, lock);
        if (lock->cll_state == CLS_INTRANSIT)
                cl_lock_state_wait(env, lock); /* return value ignored */
        if (lock->cll_state == CLS_CACHED) {
                int result;
                result = cl_use_try(env, lock, 1);
                if (result < 0)
                        cl_lock_error(env, lock, result);
        }
        ok = lock->cll_state == CLS_HELD;
        if (ok) {
                cl_lock_hold_add(env, lock, scope, source);
                cl_lock_user_add(env, lock);
                cl_lock_put(env, lock);
        }
        cl_lock_mutex_put(env, lock);
        if (!ok) {
                cl_lock_put(env, lock);
                lock = NULL;
        }

        return lock;
}
EXPORT_SYMBOL(cl_lock_peek);
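
/*
 * Illustrative cl_lock_peek() usage (a sketch with hypothetical "descr",
 * "scope" and "source"; error handling and lockdep accounting omitted):
 *
 *     lock = cl_lock_peek(env, io, &descr, "scope", source);
 *     if (lock != NULL) {
 *             ... caller now owns a hold and a user on a CLS_HELD lock ...
 *             cl_unuse(env, lock);                        - drop the user
 *             cl_lock_unhold(env, lock, "scope", source); - drop the hold
 *     }
 */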

/**
 * Returns a slice within a lock, corresponding to the given layer in the
 * device stack.
 *
 * \see cl_page_at()
 */
const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
                                       const struct lu_device_type *dtype)
{
        const struct cl_lock_slice *slice;

        LINVRNT(cl_lock_invariant_trusted(NULL, lock));
        ENTRY;

        cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
                        RETURN(slice);
        }
        RETURN(NULL);
}
EXPORT_SYMBOL(cl_lock_at);

static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
{
        struct cl_thread_counters *counters;

        counters = cl_lock_counters(env, lock);
        lock->cll_depth++;
        counters->ctc_nr_locks_locked++;
        lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
        cl_lock_trace(D_TRACE, env, "got mutex", lock);
}

/**
 * Locks cl_lock object.
 *
 * This is used to manipulate cl_lock fields, and to serialize state
 * transitions in the lock state machine.
 *
 * \post cl_lock_is_mutexed(lock)
 *
 * \see cl_lock_mutex_put()
 */
void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
{
        LINVRNT(cl_lock_invariant(env, lock));

        if (lock->cll_guarder == cfs_current()) {
                LINVRNT(cl_lock_is_mutexed(lock));
                LINVRNT(lock->cll_depth > 0);
        } else {
                struct cl_object_header *hdr;
                struct cl_thread_info   *info;
                int i;

                LINVRNT(lock->cll_guarder != cfs_current());
                hdr = cl_object_header(lock->cll_descr.cld_obj);
                /*
                 * Check that mutices are taken in the bottom-to-top order.
                 */
                info = cl_env_info(env);
                for (i = 0; i < hdr->coh_nesting; ++i)
                        LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
                cfs_mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
                lock->cll_guarder = cfs_current();
                LINVRNT(lock->cll_depth == 0);
        }
        cl_lock_mutex_tail(env, lock);
}
EXPORT_SYMBOL(cl_lock_mutex_get);
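
/*
 * The mutex is recursive for the owning thread, so the following sketch is
 * legal (illustrative only):
 *
 *     cl_lock_mutex_get(env, lock);    - cll_depth becomes 1
 *     cl_lock_mutex_get(env, lock);    - same owner, cll_depth becomes 2
 *     cl_lock_mutex_put(env, lock);    - cll_depth drops back to 1
 *     cl_lock_mutex_put(env, lock);    - cll_guard is actually released
 */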

/**
 * Try-locks cl_lock object.
 *
 * \retval 0 \a lock was successfully locked
 *
 * \retval -EBUSY \a lock cannot be locked right now
 *
 * \post ergo(result == 0, cl_lock_is_mutexed(lock))
 *
 * \see cl_lock_mutex_get()
 */
int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
{
        int result;

        LINVRNT(cl_lock_invariant_trusted(env, lock));
        ENTRY;

        result = 0;
        if (lock->cll_guarder == cfs_current()) {
                LINVRNT(lock->cll_depth > 0);
                cl_lock_mutex_tail(env, lock);
        } else if (cfs_mutex_trylock(&lock->cll_guard)) {
                LINVRNT(lock->cll_depth == 0);
                lock->cll_guarder = cfs_current();
                cl_lock_mutex_tail(env, lock);
        } else
                result = -EBUSY;
        RETURN(result);
}
EXPORT_SYMBOL(cl_lock_mutex_try);

/**
 * Unlocks cl_lock object.
 *
 * \pre cl_lock_is_mutexed(lock)
 *
 * \see cl_lock_mutex_get()
 */
void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
{
        struct cl_thread_counters *counters;

        LINVRNT(cl_lock_invariant(env, lock));
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(lock->cll_guarder == cfs_current());
        LINVRNT(lock->cll_depth > 0);

        counters = cl_lock_counters(env, lock);
        LINVRNT(counters->ctc_nr_locks_locked > 0);

        cl_lock_trace(D_TRACE, env, "put mutex", lock);
        lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
        counters->ctc_nr_locks_locked--;
        if (--lock->cll_depth == 0) {
                lock->cll_guarder = NULL;
                cfs_mutex_unlock(&lock->cll_guard);
        }
}
EXPORT_SYMBOL(cl_lock_mutex_put);

/**
 * Returns true iff lock's mutex is owned by the current thread.
 */
int cl_lock_is_mutexed(struct cl_lock *lock)
{
        return lock->cll_guarder == cfs_current();
}
EXPORT_SYMBOL(cl_lock_is_mutexed);

/**
 * Returns number of cl_lock mutices held by the current thread (environment).
 */
int cl_lock_nr_mutexed(const struct lu_env *env)
{
        struct cl_thread_info *info;
        int i;
        int locked;

        /*
         * NOTE: if summation across all nesting levels (currently 2) proves
         *       too expensive, a summary counter can be added to
         *       struct cl_thread_info.
         */
        info = cl_env_info(env);
        for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
                locked += info->clt_counters[i].ctc_nr_locks_locked;
        return locked;
}
EXPORT_SYMBOL(cl_lock_nr_mutexed);

static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        ENTRY;
        if (!(lock->cll_flags & CLF_CANCELLED)) {
                const struct cl_lock_slice *slice;

                lock->cll_flags |= CLF_CANCELLED;
                cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
                                                cls_linkage) {
                        if (slice->cls_ops->clo_cancel != NULL)
                                slice->cls_ops->clo_cancel(env, slice);
                }
        }
        EXIT;
}

static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
{
        struct cl_object_header    *head;
        const struct cl_lock_slice *slice;

        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        ENTRY;
        if (lock->cll_state < CLS_FREEING) {
                LASSERT(lock->cll_state != CLS_INTRANSIT);
                cl_lock_state_set(env, lock, CLS_FREEING);

                head = cl_object_header(lock->cll_descr.cld_obj);

                cfs_spin_lock(&head->coh_lock_guard);
                cfs_list_del_init(&lock->cll_linkage);

                cfs_spin_unlock(&head->coh_lock_guard);
                /*
                 * From now on, no new references to this lock can be acquired
                 * by cl_lock_lookup().
                 */
                cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
                                                cls_linkage) {
                        if (slice->cls_ops->clo_delete != NULL)
                                slice->cls_ops->clo_delete(env, slice);
                }
                /*
                 * From now on, no new references to this lock can be acquired
                 * by layer-specific means (like a pointer from struct
                 * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
                 * lov).
                 *
                 * Lock will be finally freed in cl_lock_put() when last of
                 * existing references goes away.
                 */
        }
        EXIT;
}

/**
 * Mod(ifie)s cl_lock::cll_holds counter for a given lock. Also, for a
 * top-lock (nesting == 0) accounts for this modification in the per-thread
 * debugging counters. Sub-lock holds can be released by a thread different
 * from one that acquired it.
 */
static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
                             int delta)
{
        struct cl_thread_counters *counters;
        enum clt_nesting_level     nesting;

        lock->cll_holds += delta;
        nesting = cl_lock_nesting(lock);
        if (nesting == CNL_TOP) {
                counters = &cl_env_info(env)->clt_counters[CNL_TOP];
                counters->ctc_nr_held += delta;
                LASSERT(counters->ctc_nr_held >= 0);
        }
}

/**
 * Mod(ifie)s cl_lock::cll_users counter for a given lock. See
 * cl_lock_hold_mod() for the explanation of the debugging code.
 */
static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
                             int delta)
{
        struct cl_thread_counters *counters;
        enum clt_nesting_level     nesting;

        lock->cll_users += delta;
        nesting = cl_lock_nesting(lock);
        if (nesting == CNL_TOP) {
                counters = &cl_env_info(env)->clt_counters[CNL_TOP];
                counters->ctc_nr_used += delta;
                LASSERT(counters->ctc_nr_used >= 0);
        }
}

static void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
                                 const char *scope, const void *source)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        LASSERT(lock->cll_holds > 0);

        ENTRY;
        cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock);
        lu_ref_del(&lock->cll_holders, scope, source);
        cl_lock_hold_mod(env, lock, -1);
        if (lock->cll_holds == 0) {
                if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
                    lock->cll_descr.cld_mode == CLM_GROUP)
                        /*
                         * If lock is still phantom or grouplock when user is
                         * done with it---destroy the lock.
                         */
                        lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
                if (lock->cll_flags & CLF_CANCELPEND) {
                        lock->cll_flags &= ~CLF_CANCELPEND;
                        cl_lock_cancel0(env, lock);
                }
                if (lock->cll_flags & CLF_DOOMED) {
                        /* no longer doomed: it's dead... Jim. */
                        lock->cll_flags &= ~CLF_DOOMED;
                        cl_lock_delete0(env, lock);
                }
        }
        EXIT;
}

/**
 * Waits until lock state is changed.
 *
 * This function is called with cl_lock mutex locked, atomically releases
 * mutex and goes to sleep, waiting for a lock state change (signaled by
 * cl_lock_signal()), and re-acquires the mutex before return.
 *
 * This function is used to wait until lock state machine makes some progress
 * and to emulate synchronous operations on top of asynchronous lock
 * interface.
 *
 * \retval -EINTR wait was interrupted
 *
 * \retval 0 wait wasn't interrupted
 *
 * \pre cl_lock_is_mutexed(lock)
 *
 * \see cl_lock_signal()
 */
int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
{
        cfs_waitlink_t waiter;
        int result;

        ENTRY;
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        LASSERT(lock->cll_depth == 1);
        LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */

        cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
        result = lock->cll_error;
        if (result == 0) {
                cfs_waitlink_init(&waiter);
                cfs_waitq_add(&lock->cll_wq, &waiter);
                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
                cl_lock_mutex_put(env, lock);

                LASSERT(cl_lock_nr_mutexed(env) == 0);
                cfs_waitq_wait(&waiter, CFS_TASK_INTERRUPTIBLE);

                cl_lock_mutex_get(env, lock);
                cfs_set_current_state(CFS_TASK_RUNNING);
                cfs_waitq_del(&lock->cll_wq, &waiter);
                result = cfs_signal_pending() ? -EINTR : 0;
        }
        RETURN(result);
}
EXPORT_SYMBOL(cl_lock_state_wait);
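
/*
 * The canonical retry loop built on top of cl_lock_state_wait(); this is the
 * pattern used by cl_enqueue_locked() and cl_wait() below:
 *
 *     do {
 *             result = cl_enqueue_try(env, lock, io, enqflags);
 *             if (result == CLO_WAIT) {
 *                     result = cl_lock_state_wait(env, lock);
 *                     if (result == 0)
 *                             continue;   - state changed, try again
 *             }
 *             break;                      - done, or a hard error
 *     } while (1);
 */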

static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
                                 enum cl_lock_state state)
{
        const struct cl_lock_slice *slice;

        ENTRY;
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
                if (slice->cls_ops->clo_state != NULL)
                        slice->cls_ops->clo_state(env, slice, state);
        cfs_waitq_broadcast(&lock->cll_wq);
        EXIT;
}

/**
 * Notifies waiters that lock state changed.
 *
 * Wakes up all waiters sleeping in cl_lock_state_wait(), also notifies all
 * layers about state change by calling cl_lock_operations::clo_state()
 * top-to-bottom.
 */
void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
{
        ENTRY;
        cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock);
        cl_lock_state_signal(env, lock, lock->cll_state);
        EXIT;
}
EXPORT_SYMBOL(cl_lock_signal);

/**
 * Changes lock state.
 *
 * This function is invoked to notify layers that lock state changed, possibly
 * as a result of an asynchronous event such as call-back reception.
 *
 * \post lock->cll_state == state
 *
 * \see cl_lock_operations::clo_state()
 */
void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
                       enum cl_lock_state state)
{
        struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);

        ENTRY;
        LASSERT(lock->cll_state <= state ||
                (lock->cll_state == CLS_CACHED &&
                 (state == CLS_HELD ||      /* lock found in cache */
                  state == CLS_NEW ||       /* sub-lock canceled */
                  state == CLS_INTRANSIT)) ||
                /* lock is in transit state */
                lock->cll_state == CLS_INTRANSIT);

        if (lock->cll_state != state) {
                cfs_atomic_dec(&site->cs_locks_state[lock->cll_state]);
                cfs_atomic_inc(&site->cs_locks_state[state]);

                cl_lock_state_signal(env, lock, state);
                lock->cll_state = state;
        }
        EXIT;
}
EXPORT_SYMBOL(cl_lock_state_set);

static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
{
        const struct cl_lock_slice *slice;
        int result;

        do {
                result = 0;

                LINVRNT(cl_lock_is_mutexed(lock));
                LINVRNT(cl_lock_invariant(env, lock));
                LASSERT(lock->cll_state == CLS_INTRANSIT);

                result = -ENOSYS;
                cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
                                                cls_linkage) {
                        if (slice->cls_ops->clo_unuse != NULL) {
                                result = slice->cls_ops->clo_unuse(env, slice);
                                if (result != 0)
                                        break;
                        }
                }
                LASSERT(result != -ENOSYS);
        } while (result == CLO_REPEAT);

        return result;
}

/**
 * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling
 * cl_lock_operations::clo_use() top-to-bottom to notify layers.
 *
 * If \a atomic is 1 and an error occurs, the lock is unused again to roll
 * the transition back, so that the whole use operation stays atomic.
 */
int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
{
        const struct cl_lock_slice *slice;
        int result;
        enum cl_lock_state state;

        ENTRY;
        cl_lock_trace(D_DLMTRACE, env, "use lock", lock);

        LASSERT(lock->cll_state == CLS_CACHED);
        if (lock->cll_error)
                RETURN(lock->cll_error);

        result = -ENOSYS;
        state = cl_lock_intransit(env, lock);
        cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                if (slice->cls_ops->clo_use != NULL) {
                        result = slice->cls_ops->clo_use(env, slice);
                        if (result != 0)
                                break;
                }
        }
        LASSERT(result != -ENOSYS);

        LASSERTF(lock->cll_state == CLS_INTRANSIT, "Wrong state %d.\n",
                 lock->cll_state);

        if (result == 0) {
                state = CLS_HELD;
        } else {
                if (result == -ESTALE) {
                        /*
                         * -ESTALE means the sublock is being cancelled
                         * at this moment. Set the lock state to CLS_NEW
                         * and ask the caller to repeat.
                         */
                        state = CLS_NEW;
                        result = CLO_REPEAT;
                }

                /* @atomic means back-off-on-failure. */
                if (atomic) {
                        int rc;
                        rc = cl_unuse_try_internal(env, lock);
                        /* Vet the results. */
                        if (rc < 0 && result > 0)
                                result = rc;
                }
        }
        cl_lock_extransit(env, lock, state);
        RETURN(result);
}
EXPORT_SYMBOL(cl_use_try);

/**
 * Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers
 * top-to-bottom.
 */
static int cl_enqueue_kick(const struct lu_env *env,
                           struct cl_lock *lock,
                           struct cl_io *io, __u32 flags)
{
        int result;
        const struct cl_lock_slice *slice;

        ENTRY;
        result = -ENOSYS;
        cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                if (slice->cls_ops->clo_enqueue != NULL) {
                        result = slice->cls_ops->clo_enqueue(env,
                                                             slice, io, flags);
                        if (result != 0)
                                break;
                }
        }
        LASSERT(result != -ENOSYS);
        RETURN(result);
}

/**
 * Tries to enqueue a lock.
 *
 * This function is called repeatedly by cl_enqueue() until either lock is
 * enqueued, or error occurs. This function does not block waiting for
 * networking communication to complete.
 *
 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
 *                         lock->cll_state == CLS_HELD)
 *
 * \see cl_enqueue() cl_lock_operations::clo_enqueue()
 * \see cl_lock_state::CLS_ENQUEUED
 */
int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
                   struct cl_io *io, __u32 flags)
{
        int result;

        ENTRY;
        cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
        do {
                result = 0;

                LINVRNT(cl_lock_is_mutexed(lock));

                if (lock->cll_error != 0)
                        break;
                switch (lock->cll_state) {
                case CLS_NEW:
                        cl_lock_state_set(env, lock, CLS_QUEUING);
                        /* fall-through */
                case CLS_QUEUING:
                        /* kick layers. */
                        result = cl_enqueue_kick(env, lock, io, flags);
                        if (result == 0)
                                cl_lock_state_set(env, lock, CLS_ENQUEUED);
                        break;
                case CLS_INTRANSIT:
                        LASSERT(cl_lock_is_intransit(lock));
                        result = CLO_WAIT;
                        break;
                case CLS_CACHED:
                        /* yank lock from the cache. */
                        result = cl_use_try(env, lock, 0);
                        break;
                case CLS_ENQUEUED:
                case CLS_HELD:
                        result = 0;
                        break;
                default:
                case CLS_FREEING:
                        /*
                         * impossible, only held locks with increased
                         * ->cll_holds can be enqueued, and they cannot be
                         * freed.
                         */
                        LBUG();
                }
        } while (result == CLO_REPEAT);
        if (result < 0)
                cl_lock_error(env, lock, result);
        RETURN(result ?: lock->cll_error);
}
EXPORT_SYMBOL(cl_enqueue_try);

static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
                             struct cl_io *io, __u32 enqflags)
{
        int result;

        ENTRY;

        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        LASSERT(lock->cll_holds > 0);

        cl_lock_user_add(env, lock);
        do {
                result = cl_enqueue_try(env, lock, io, enqflags);
                if (result == CLO_WAIT) {
                        result = cl_lock_state_wait(env, lock);
                        if (result == 0)
                                continue;
                }
                break;
        } while (1);
        if (result != 0) {
                cl_lock_user_del(env, lock);
                cl_lock_error(env, lock, result);
        }
        LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
                     lock->cll_state == CLS_HELD));
        RETURN(result);
}

/**
 * Enqueues a lock.
 *
 * \pre current thread or io owns a hold on lock.
 *
 * \post ergo(result == 0, lock->users increased)
 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
 *                         lock->cll_state == CLS_HELD)
 */
int cl_enqueue(const struct lu_env *env, struct cl_lock *lock,
               struct cl_io *io, __u32 enqflags)
{
        int result;

        ENTRY;

        cl_lock_lockdep_acquire(env, lock, enqflags);
        cl_lock_mutex_get(env, lock);
        result = cl_enqueue_locked(env, lock, io, enqflags);
        cl_lock_mutex_put(env, lock);
        if (result != 0)
                cl_lock_lockdep_release(env, lock);
        LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
                     lock->cll_state == CLS_HELD));
        RETURN(result);
}
EXPORT_SYMBOL(cl_enqueue);

/**
 * Tries to unlock a lock.
 *
 * This function is called repeatedly by cl_unuse() until either lock is
 * unlocked, or error occurs.
 * cl_unuse_try is a one-shot operation, so it must NOT return CLO_WAIT.
 *
 * \pre  lock->cll_state == CLS_HELD
 *
 * \post ergo(result == 0, lock->cll_state == CLS_CACHED)
 *
 * \see cl_unuse() cl_lock_operations::clo_unuse()
 * \see cl_lock_state::CLS_CACHED
 */
int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
{
        int                         result;
        enum cl_lock_state          state = CLS_NEW;

        ENTRY;
        cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);

        LASSERT(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED);
        if (lock->cll_users > 1) {
                cl_lock_user_del(env, lock);
                RETURN(0);
        }

        /*
         * From this point on, new lock users (->cll_users) do not prevent
         * unlocking from proceeding: the lock eventually reaches CLS_CACHED,
         * is reinitialized to CLS_NEW, or falls into CLS_FREEING.
         */
        state = cl_lock_intransit(env, lock);

        result = cl_unuse_try_internal(env, lock);
        LASSERT(lock->cll_state == CLS_INTRANSIT);
        LASSERT(result != CLO_WAIT);
        cl_lock_user_del(env, lock);
        if (result == 0 || result == -ESTALE) {
                /*
                 * Return lock back to the cache. This is the only
                 * place where lock is moved into CLS_CACHED state.
                 *
                 * If one of ->clo_unuse() methods returned -ESTALE, lock
                 * cannot be placed into cache and has to be
                 * re-initialized. This happens e.g., when a sub-lock was
                 * canceled while unlocking was in progress.
                 */
                if (state == CLS_HELD && result == 0)
                        state = CLS_CACHED;
                else
                        state = CLS_NEW;
                cl_lock_extransit(env, lock, state);

                /*
                 * Hide the -ESTALE error.
                 * Suppose the lock is a glimpse lock with multiple stripes,
                 * one of its sublocks returned -ENAVAIL, and the other
                 * sublocks matched write locks. In this case we cannot set
                 * the lock to error, because otherwise some of its sublocks
                 * might never be cancelled, and some dirty pages would never
                 * be written to the OSTs. -jay
                 */
1347                 result = 0;
1348         } else {
1349                 CERROR("result = %d, this is unlikely!\n", result);
1350                 cl_lock_extransit(env, lock, state);
1351         }
1352
1353         result = result ?: lock->cll_error;
1354         if (result < 0)
1355                 cl_lock_error(env, lock, result);
1356         RETURN(result);
1357 }
1358 EXPORT_SYMBOL(cl_unuse_try);
1359
1360 static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
1361 {
1362         int result;
1363         ENTRY;
1364
1365         result = cl_unuse_try(env, lock);
1366         if (result)
1367                 CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result);
1368
1369         EXIT;
1370 }
1371
1372 /**
1373  * Unlocks a lock.
1374  */
1375 void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
1376 {
1377         ENTRY;
1378         cl_lock_mutex_get(env, lock);
1379         cl_unuse_locked(env, lock);
1380         cl_lock_mutex_put(env, lock);
1381         cl_lock_lockdep_release(env, lock);
1382         EXIT;
1383 }
1384 EXPORT_SYMBOL(cl_unuse);
1385
1386 /**
1387  * Tries to wait for a lock.
1388  *
1389  * This function is called repeatedly by cl_wait() until either lock is
1390  * granted, or error occurs. This function does not block waiting for network
1391  * communication to complete.
1392  *
1393  * \see cl_wait() cl_lock_operations::clo_wait()
1394  * \see cl_lock_state::CLS_HELD
1395  */
1396 int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
1397 {
1398         const struct cl_lock_slice *slice;
1399         int                         result;
1400
1401         ENTRY;
1402         cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock);
1403         do {
1404                 LINVRNT(cl_lock_is_mutexed(lock));
1405                 LINVRNT(cl_lock_invariant(env, lock));
1406                 LASSERT(lock->cll_state == CLS_ENQUEUED ||
1407                         lock->cll_state == CLS_HELD ||
1408                         lock->cll_state == CLS_INTRANSIT);
1409                 LASSERT(lock->cll_users > 0);
1410                 LASSERT(lock->cll_holds > 0);
1411
1412                 result = 0;
1413                 if (lock->cll_error != 0)
1414                         break;
1415
1416                 if (cl_lock_is_intransit(lock)) {
1417                         result = CLO_WAIT;
1418                         break;
1419                 }
1420
1421                 if (lock->cll_state == CLS_HELD)
1422                         /* nothing to do */
1423                         break;
1424
1425                 result = -ENOSYS;
1426                 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1427                         if (slice->cls_ops->clo_wait != NULL) {
1428                                 result = slice->cls_ops->clo_wait(env, slice);
1429                                 if (result != 0)
1430                                         break;
1431                         }
1432                 }
1433                 LASSERT(result != -ENOSYS);
1434                 if (result == 0) {
1435                         LASSERT(lock->cll_state != CLS_INTRANSIT);
1436                         cl_lock_state_set(env, lock, CLS_HELD);
1437                 }
1438         } while (result == CLO_REPEAT);
1439         RETURN(result ?: lock->cll_error);
1440 }
1441 EXPORT_SYMBOL(cl_wait_try);
1442
1443 /**
1444  * Waits until enqueued lock is granted.
1445  *
1446  * \pre current thread or io owns a hold on the lock
1447  * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1448  *                        lock->cll_state == CLS_HELD)
1449  *
1450  * \post ergo(result == 0, lock->cll_state == CLS_HELD)
1451  */
1452 int cl_wait(const struct lu_env *env, struct cl_lock *lock)
1453 {
1454         int result;
1455
1456         ENTRY;
1457         cl_lock_mutex_get(env, lock);
1458
1459         LINVRNT(cl_lock_invariant(env, lock));
1460         LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
1461                  "Wrong state %d \n", lock->cll_state);
1462         LASSERT(lock->cll_holds > 0);
1463         cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
1464
1465         do {
1466                 result = cl_wait_try(env, lock);
1467                 if (result == CLO_WAIT) {
1468                         result = cl_lock_state_wait(env, lock);
1469                         if (result == 0)
1470                                 continue;
1471                 }
1472                 break;
1473         } while (1);
1474         if (result < 0) {
1475                 cl_lock_user_del(env, lock);
1476                 cl_lock_error(env, lock, result);
1477                 cl_lock_lockdep_release(env, lock);
1478         }
1479         cl_lock_mutex_put(env, lock);
1480         LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
1481         RETURN(result);
1482 }
1483 EXPORT_SYMBOL(cl_wait);
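/*
 * Illustrative sketch (not part of the original file): a caller that drives
 * enqueue and wait separately would use the primitives above roughly as
 * follows. The scope string and error handling are assumptions made for this
 * example; lockdep annotations and failure corner cases are omitted.
 *
 *      lock = cl_lock_hold(env, io, need, "sample", cfs_current());
 *      if (!IS_ERR(lock)) {
 *              rc = cl_enqueue(env, lock, io, need->cld_enq_flags);
 *              if (rc == 0) {
 *                      rc = cl_wait(env, lock); // block until CLS_HELD
 *                      if (rc == 0) {
 *                              ... perform I/O under the lock ...
 *                              cl_unuse(env, lock);
 *                      }
 *              }
 *              cl_lock_release(env, lock, "sample", cfs_current());
 *      }
 */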
1484
1485 /**
1486  * Executes cl_lock_operations::clo_weigh() across all layers and sums the
1487  * results to estimate the lock's value.
1488  */
1489 unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
1490 {
1491         const struct cl_lock_slice *slice;
1492         unsigned long pound;
1493         unsigned long ounce;
1494
1495         ENTRY;
1496         LINVRNT(cl_lock_is_mutexed(lock));
1497         LINVRNT(cl_lock_invariant(env, lock));
1498
1499         pound = 0;
1500         cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1501                 if (slice->cls_ops->clo_weigh != NULL) {
1502                         ounce = slice->cls_ops->clo_weigh(env, slice);
1503                         pound += ounce;
1504                         if (pound < ounce) /* addition overflowed */
1505                                 pound = ~0UL;
1506                 }
1507         }
1508         RETURN(pound);
1509 }
1510 EXPORT_SYMBOL(cl_lock_weigh);
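/*
 * Illustrative sketch (hypothetical, not taken from any real layer): a
 * layer's cl_lock_operations::clo_weigh() method returns a per-layer estimate
 * of how valuable the lock is, so that heavier locks are kept longer under
 * memory pressure. A slice might, for example, weigh by extent size:
 *
 *      static unsigned long sample_lock_weigh(const struct lu_env *env,
 *                                             const struct cl_lock_slice *slice)
 *      {
 *              const struct cl_lock_descr *d = &slice->cls_lock->cll_descr;
 *
 *              // weight: size of the covered extent, in pages
 *              return (unsigned long)(d->cld_end - d->cld_start + 1);
 *      }
 */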
1511
1512 /**
1513  * Notifies layers that the lock description has changed.
1514  *
1515  * The server can grant the client a lock different from the one that was
1516  * requested (e.g., larger in extent). This method is called when the
1517  * actually granted lock description becomes known, to let layers
1518  * accommodate the change.
1519  *
1520  * \see cl_lock_operations::clo_modify()
1521  */
1522 int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
1523                    const struct cl_lock_descr *desc)
1524 {
1525         const struct cl_lock_slice *slice;
1526         struct cl_object           *obj = lock->cll_descr.cld_obj;
1527         struct cl_object_header    *hdr = cl_object_header(obj);
1528         int result;
1529
1530         ENTRY;
1531         cl_lock_trace(D_DLMTRACE, env, "modify lock", lock);
1532         /* don't allow object to change */
1533         LASSERT(obj == desc->cld_obj);
1534         LINVRNT(cl_lock_is_mutexed(lock));
1535         LINVRNT(cl_lock_invariant(env, lock));
1536
1537         cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1538                 if (slice->cls_ops->clo_modify != NULL) {
1539                         result = slice->cls_ops->clo_modify(env, slice, desc);
1540                         if (result != 0)
1541                                 RETURN(result);
1542                 }
1543         }
1544         CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n",
1545                       PDESCR(desc), PFID(lu_object_fid(&desc->cld_obj->co_lu)));
1546         /*
1547          * Just replace description in place. Nothing more is needed for
1548          * now. If locks were indexed according to their extent and/or mode,
1549          * that index would have to be updated here.
1550          */
1551         cfs_spin_lock(&hdr->coh_lock_guard);
1552         lock->cll_descr = *desc;
1553         cfs_spin_unlock(&hdr->coh_lock_guard);
1554         RETURN(0);
1555 }
1556 EXPORT_SYMBOL(cl_lock_modify);
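/*
 * Illustrative sketch (the values and the CL_PAGE_EOF bound are assumptions
 * for the example): when a layer learns that the server granted a wider
 * extent than requested, it builds a new description and applies it:
 *
 *      struct cl_lock_descr granted = lock->cll_descr;
 *
 *      granted.cld_start = 0;            // server widened the extent
 *      granted.cld_end   = CL_PAGE_EOF;  // e.g., to the whole file
 *      rc = cl_lock_modify(env, lock, &granted);
 */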
1557
1558 /**
1559  * Initializes lock closure with a given origin.
1560  *
1561  * \see cl_lock_closure
1562  */
1563 void cl_lock_closure_init(const struct lu_env *env,
1564                           struct cl_lock_closure *closure,
1565                           struct cl_lock *origin, int wait)
1566 {
1567         LINVRNT(cl_lock_is_mutexed(origin));
1568         LINVRNT(cl_lock_invariant(env, origin));
1569
1570         CFS_INIT_LIST_HEAD(&closure->clc_list);
1571         closure->clc_origin = origin;
1572         closure->clc_wait   = wait;
1573         closure->clc_nr     = 0;
1574 }
1575 EXPORT_SYMBOL(cl_lock_closure_init);
1576
1577 /**
1578  * Builds a closure of \a lock.
1579  *
1580  * Building a closure consists of adding the initial lock (\a lock) to it
1581  * and calling the cl_lock_operations::clo_closure() methods of \a lock.
1582  * These methods may call cl_lock_closure_build() recursively, adding more
1583  * locks to the closure, etc.
1584  *
1585  * \see cl_lock_closure
1586  */
1587 int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
1588                           struct cl_lock_closure *closure)
1589 {
1590         const struct cl_lock_slice *slice;
1591         int result;
1592
1593         ENTRY;
1594         LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
1595         LINVRNT(cl_lock_invariant(env, closure->clc_origin));
1596
1597         result = cl_lock_enclosure(env, lock, closure);
1598         if (result == 0) {
1599                 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1600                         if (slice->cls_ops->clo_closure != NULL) {
1601                                 result = slice->cls_ops->clo_closure(env, slice,
1602                                                                      closure);
1603                                 if (result != 0)
1604                                         break;
1605                         }
1606                 }
1607         }
1608         if (result != 0)
1609                 cl_lock_disclosure(env, closure);
1610         RETURN(result);
1611 }
1612 EXPORT_SYMBOL(cl_lock_closure_build);
1613
1614 /**
1615  * Adds a new lock to a closure.
1616  *
1617  * Try-locks \a lock and, if that succeeds, adds it to the closure (never
1618  * more than once). If the try-lock fails, returns CLO_REPEAT, after
1619  * optionally waiting until the next try-lock is likely to succeed.
1620  */
1621 int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
1622                       struct cl_lock_closure *closure)
1623 {
1624         int result = 0;
1625         ENTRY;
1626         cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock);
1627         if (!cl_lock_mutex_try(env, lock)) {
1628                 /*
1629                  * If lock->cll_inclosure is not empty, lock is already in
1630                  * this closure.
1631                  */
1632                 if (cfs_list_empty(&lock->cll_inclosure)) {
1633                         cl_lock_get_trust(lock);
1634                         lu_ref_add(&lock->cll_reference, "closure", closure);
1635                         cfs_list_add(&lock->cll_inclosure, &closure->clc_list);
1636                         closure->clc_nr++;
1637                 } else
1638                         cl_lock_mutex_put(env, lock);
1639                 result = 0;
1640         } else {
1641                 cl_lock_disclosure(env, closure);
1642                 if (closure->clc_wait) {
1643                         cl_lock_get_trust(lock);
1644                         lu_ref_add(&lock->cll_reference, "closure-w", closure);
1645                         cl_lock_mutex_put(env, closure->clc_origin);
1646
1647                         LASSERT(cl_lock_nr_mutexed(env) == 0);
1648                         cl_lock_mutex_get(env, lock);
1649                         cl_lock_mutex_put(env, lock);
1650
1651                         cl_lock_mutex_get(env, closure->clc_origin);
1652                         lu_ref_del(&lock->cll_reference, "closure-w", closure);
1653                         cl_lock_put(env, lock);
1654                 }
1655                 result = CLO_REPEAT;
1656         }
1657         RETURN(result);
1658 }
1659 EXPORT_SYMBOL(cl_lock_enclosure);
1660
1661 /** Releases mutices of enclosed locks. */
1662 void cl_lock_disclosure(const struct lu_env *env,
1663                         struct cl_lock_closure *closure)
1664 {
1665         struct cl_lock *scan;
1666         struct cl_lock *temp;
1667
1668         cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
1669         cfs_list_for_each_entry_safe(scan, temp, &closure->clc_list,
1670                                      cll_inclosure) {
1671                 cfs_list_del_init(&scan->cll_inclosure);
1672                 cl_lock_mutex_put(env, scan);
1673                 lu_ref_del(&scan->cll_reference, "closure", closure);
1674                 cl_lock_put(env, scan);
1675                 closure->clc_nr--;
1676         }
1677         LASSERT(closure->clc_nr == 0);
1678 }
1679 EXPORT_SYMBOL(cl_lock_disclosure);
1680
1681 /** Finalizes a closure. */
1682 void cl_lock_closure_fini(struct cl_lock_closure *closure)
1683 {
1684         LASSERT(closure->clc_nr == 0);
1685         LASSERT(cfs_list_empty(&closure->clc_list));
1686 }
1687 EXPORT_SYMBOL(cl_lock_closure_fini);
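/*
 * Illustrative sketch (not part of the original file): the closure primitives
 * above are used together. CLO_REPEAT from cl_lock_closure_build() means the
 * try-lock protocol dropped the mutices and the attempt must be restarted:
 *
 *      struct cl_lock_closure closure;
 *      int rc;
 *
 *      // the caller must hold the mutex of `origin' throughout
 *      do {
 *              cl_lock_closure_init(env, &closure, origin, 1); // wait == 1
 *              rc = cl_lock_closure_build(env, lock, &closure);
 *              if (rc == 0) {
 *                      ... all enclosed locks are mutexed here ...
 *                      cl_lock_disclosure(env, &closure);
 *              }
 *              cl_lock_closure_fini(&closure);
 *      } while (rc == CLO_REPEAT);
 */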
1688
1689 /**
1690  * Destroys this lock. Notifies layers (bottom-to-top) that the lock is being
1691  * destroyed, then destroys the lock. If there are holds on the lock,
1692  * destruction is postponed until all holds are released. This is called when
1693  * a decision is made to destroy the lock in the future, e.g., when a blocking
1694  * AST is received on it, or a fatal communication error happens.
1695  *
1696  * The caller must have a reference on this lock to prevent a situation where
1697  * the deleted lock lingers in memory indefinitely because nobody calls
1698  * cl_lock_put() to finish it.
1699  *
1700  * \pre atomic_read(&lock->cll_ref) > 0
1701  * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
1702  *           cl_lock_nr_mutexed(env) == 1)
1703  *      [i.e., if a top-lock is deleted, mutices of no other locks can be
1704  *      held, as deletion of sub-locks might require releasing a top-lock
1705  *      mutex]
1706  *
1707  * \see cl_lock_operations::clo_delete()
1708  * \see cl_lock::cll_holds
1709  */
1710 void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock)
1711 {
1712         LINVRNT(cl_lock_is_mutexed(lock));
1713         LINVRNT(cl_lock_invariant(env, lock));
1714         LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
1715                      cl_lock_nr_mutexed(env) == 1));
1716
1717         ENTRY;
1718         cl_lock_trace(D_DLMTRACE, env, "delete lock", lock);
1719         if (lock->cll_holds == 0)
1720                 cl_lock_delete0(env, lock);
1721         else
1722                 lock->cll_flags |= CLF_DOOMED;
1723         EXIT;
1724 }
1725 EXPORT_SYMBOL(cl_lock_delete);
1726
1727 /**
1728  * Marks the lock as irrecoverably failed, and marks it for destruction.
1729  * This happens when, e.g., the server fails to grant a lock to us, or a
1730  * network time-out occurs.
1731  *
1732  * \pre atomic_read(&lock->cll_ref) > 0
1733  *
1734  * \see cl_lock_delete()
1735  * \see cl_lock::cll_holds
1736  */
1737 void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
1738 {
1739         LINVRNT(cl_lock_is_mutexed(lock));
1740         LINVRNT(cl_lock_invariant(env, lock));
1741
1742         ENTRY;
1743         cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
1744         if (lock->cll_error == 0 && error != 0) {
1745                 lock->cll_error = error;
1746                 cl_lock_signal(env, lock);
1747                 cl_lock_cancel(env, lock);
1748                 cl_lock_delete(env, lock);
1749         }
1750         EXIT;
1751 }
1752 EXPORT_SYMBOL(cl_lock_error);
1753
1754 /**
1755  * Cancels this lock. Notifies layers (bottom-to-top) that the lock is being
1756  * cancelled, then destroys the lock. If there are holds on the lock,
1757  * cancellation is postponed until all holds are released.
1759  *
1760  * Cancellation notification is delivered to layers at most once.
1761  *
1762  * \see cl_lock_operations::clo_cancel()
1763  * \see cl_lock::cll_holds
1764  */
1765 void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
1766 {
1767         LINVRNT(cl_lock_is_mutexed(lock));
1768         LINVRNT(cl_lock_invariant(env, lock));
1769
1770         ENTRY;
1771         cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
1772         if (lock->cll_holds == 0)
1773                 cl_lock_cancel0(env, lock);
1774         else
1775                 lock->cll_flags |= CLF_CANCELPEND;
1776         EXIT;
1777 }
1778 EXPORT_SYMBOL(cl_lock_cancel);
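/*
 * Illustrative sketch (an assumption about the surrounding code): because
 * cancellation is deferred while holds exist, a holder never sees the lock
 * cancelled under it; the pending cancel fires when the last hold goes away:
 *
 *      cl_lock_mutex_get(env, lock);
 *      cl_lock_cancel(env, lock);  // cll_holds > 0: only CLF_CANCELPEND set
 *      cl_lock_mutex_put(env, lock);
 *      ...
 *      cl_lock_unhold(env, lock, scope, source); // last hold released:
 *                                                // deferred cancel runs now
 */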
1779
1780 /**
1781  * Finds an existing lock covering a given page, optionally different from
1782  * the given \a except lock.
1783  */
1784 struct cl_lock *cl_lock_at_page(const struct lu_env *env, struct cl_object *obj,
1785                                 struct cl_page *page, struct cl_lock *except,
1786                                 int pending, int canceld)
1787 {
1788         struct cl_object_header *head;
1789         struct cl_lock          *scan;
1790         struct cl_lock          *lock;
1791         struct cl_lock_descr    *need;
1792
1793         ENTRY;
1794
1795         head = cl_object_header(obj);
1796         need = &cl_env_info(env)->clt_descr;
1797         lock = NULL;
1798
1799         need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but
1800                                     * not PHANTOM */
1801         need->cld_start = need->cld_end = page->cp_index;
1802         need->cld_enq_flags = 0;
1803
1804         cfs_spin_lock(&head->coh_lock_guard);
1805         /* It is fine to match any group lock since there can be only one
1806          * with a unique gid, and it conflicts with all other lock modes too */
1807         cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
1808                 if (scan != except &&
1809                     (scan->cll_descr.cld_mode == CLM_GROUP ||
1810                     cl_lock_ext_match(&scan->cll_descr, need)) &&
1811                     scan->cll_state >= CLS_HELD &&
1812                     scan->cll_state < CLS_FREEING &&
1813                     /*
1814                      * This check is racy as the lock can be canceled right
1815                      * after it is done, but this is fine, because the page
1816                      * already exists.
1817                      */
1818                     (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
1819                     (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
1820                         /* Don't increase cs_hit here since this
1821                          * is just a helper function. */
1822                         cl_lock_get_trust(scan);
1823                         lock = scan;
1824                         break;
1825                 }
1826         }
1827         cfs_spin_unlock(&head->coh_lock_guard);
1828         RETURN(lock);
1829 }
1830 EXPORT_SYMBOL(cl_lock_at_page);
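/*
 * Illustrative sketch (variable names are assumptions): before discarding a
 * page, a caller can check whether some other held lock still covers it,
 * mirroring the pattern in cl_lock_page_list_fixup() below:
 *
 *      struct cl_lock *cover;
 *
 *      cover = cl_lock_at_page(env, obj, page, lock, 0, 0);
 *      if (cover != NULL) {
 *              ... the page is still protected by `cover' ...
 *              cl_lock_put(env, cover); // drop the trusted reference
 *      }
 */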
1831
1832 /**
1833  * Returns a list of pages protected (only) by a given lock.
1834  *
1835  * Scans the extent of the page radix tree corresponding to \a lock, keeping
1836  * in \a queue only pages that are not protected by locks other than \a lock.
1837  */
1838 void cl_lock_page_list_fixup(const struct lu_env *env,
1839                              struct cl_io *io, struct cl_lock *lock,
1840                              struct cl_page_list *queue)
1841 {
1842         struct cl_page        *page;
1843         struct cl_page        *temp;
1844         struct cl_page_list   *plist = &cl_env_info(env)->clt_list;
1845
1846         LINVRNT(cl_lock_invariant(env, lock));
1847         ENTRY;
1848
1849         /* Now we have a list of cl_pages under the \a lock; we need to
1850          * check whether some of the pages are covered by another ldlm lock.
1851          * If so, they do not need to be written out this time.
1852          *
1853          * For example, the client holds PW locks A:[0,200] & B:[100,300],
1854          * and the latter is to be canceled. This means another client is
1855          * reading/writing [200,300], since A won't be canceled. So we only
1856          * need to write the pages covered by [200,300]. This is safe, since
1857          * [100,200] is also protected by lock A.
1858          */
1859
1860         cl_page_list_init(plist);
1861         cl_page_list_for_each_safe(page, temp, queue) {
1862                 pgoff_t                idx = page->cp_index;
1863                 struct cl_lock        *found;
1864                 struct cl_lock_descr  *descr;
1865
1866                 /* The algorithm relies on pages being sorted by ascending index. */
1867                 LASSERT(ergo(&temp->cp_batch != &queue->pl_pages,
1868                         page->cp_index < temp->cp_index));
1869
1870                 found = cl_lock_at_page(env, lock->cll_descr.cld_obj,
1871                                         page, lock, 0, 0);
1872                 if (found == NULL)
1873                         continue;
1874
1875                 descr = &found->cll_descr;
1876                 cfs_list_for_each_entry_safe_from(page, temp, &queue->pl_pages,
1877                                                   cp_batch) {
1878                         idx = page->cp_index;
1879                         if (descr->cld_start > idx || descr->cld_end < idx)
1880                                 break;
1881                         cl_page_list_move(plist, queue, page);
1882                 }
1883                 cl_lock_put(env, found);
1884         }
1885
1886         /* The pages in plist are covered by other locks; don't handle
1887          * them this time.
1888          */
1889         if (io != NULL)
1890                 cl_page_list_disown(env, io, plist);
1891         cl_page_list_fini(env, plist);
1892         EXIT;
1893 }
1894 EXPORT_SYMBOL(cl_lock_page_list_fixup);
1895
1896 /**
1897  * Invalidate pages protected by the given lock, sending them out to the
1898  * server first, if necessary.
1899  *
1900  * This function does the following:
1901  *
1902  *     - collects a list of pages to be invalidated,
1903  *
1904  *     - unmaps them from the user virtual memory,
1905  *
1906  *     - sends dirty pages to the server,
1907  *
1908  *     - waits for transfer completion,
1909  *
1910  *     - discards pages, and throws them out of memory.
1911  *
1912  * If \a discard is set, pages are discarded without sending them to the
1913  * server.
1914  *
1915  * If an error happens at any step, the process continues anyway (the
1916  * reasoning being that lock cancellation cannot be delayed indefinitely).
1917  */
1918 int cl_lock_page_out(const struct lu_env *env, struct cl_lock *lock,
1919                      int discard)
1920 {
1921         struct cl_thread_info *info  = cl_env_info(env);
1922         struct cl_io          *io    = &info->clt_io;
1923         struct cl_2queue      *queue = &info->clt_queue;
1924         struct cl_lock_descr  *descr = &lock->cll_descr;
1925         long page_count;
1926         int result;
1927
1928         LINVRNT(cl_lock_invariant(env, lock));
1929         ENTRY;
1930
1931         io->ci_obj = cl_object_top(descr->cld_obj);
1932         result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
1933         if (result == 0) {
1934                 int nonblock = 1;
1935
1936 restart:
1937                 cl_2queue_init(queue);
1938                 cl_page_gang_lookup(env, descr->cld_obj, io, descr->cld_start,
1939                                     descr->cld_end, &queue->c2_qin, nonblock);
1940                 page_count = queue->c2_qin.pl_nr;
1941                 if (page_count > 0) {
1942                         result = cl_page_list_unmap(env, io, &queue->c2_qin);
1943                         if (!discard) {
1944                                 long timeout = 600; /* 10 minutes. */
1945                                 /* For debugging purposes: if this request
1946                                  * cannot finish within 10 minutes, we want
1947                                  * to be notified about it.
1948                                  */
1949                                 result = cl_io_submit_sync(env, io, CRT_WRITE,
1950                                                            queue, CRP_CANCEL,
1951                                                            timeout);
1952                                 if (result)
1953                                         CWARN("Writing %lu pages failed: %d\n",
1954                                               page_count, result);
1955                         }
1956                         cl_lock_page_list_fixup(env, io, lock, &queue->c2_qout);
1957                         cl_2queue_discard(env, io, queue);
1958                         cl_2queue_disown(env, io, queue);
1959                 }
1960                 cl_2queue_fini(env, queue);
1961
1962                 if (nonblock) {
1963                         nonblock = 0;
1964                         goto restart;
1965                 }
1966         }
1967         cl_io_fini(env, io);
1968         RETURN(result);
1969 }
1970 EXPORT_SYMBOL(cl_lock_page_out);
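/*
 * Illustrative sketch (an assumption about the call site): a layer asked by
 * the server to cancel an extent lock would typically flush the covered
 * pages first, and only then cancel and delete the lock:
 *
 *      rc = cl_lock_page_out(env, lock, discard);
 *      cl_lock_mutex_get(env, lock);
 *      cl_lock_cancel(env, lock);
 *      cl_lock_delete(env, lock);
 *      cl_lock_mutex_put(env, lock);
 */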
1971
1972 /**
1973  * Eliminates all locks for a given object.
1974  *
1975  * The caller has to guarantee that no lock is in active use.
1976  *
1977  * \param cancel when this is set, cl_locks_prune() cancels locks before
1978  *               destroying them.
1979  */
1980 void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
1981 {
1982         struct cl_object_header *head;
1983         struct cl_lock          *lock;
1984
1985         ENTRY;
1986         head = cl_object_header(obj);
1987         /*
1988          * If locks are destroyed without cancellation, all pages must be
1989          * already destroyed (as otherwise they will be left unprotected).
1990          */
1991         LASSERT(ergo(!cancel,
1992                      head->coh_tree.rnode == NULL && head->coh_pages == 0));
1993
1994         cfs_spin_lock(&head->coh_lock_guard);
1995         while (!cfs_list_empty(&head->coh_locks)) {
1996                 lock = container_of(head->coh_locks.next,
1997                                     struct cl_lock, cll_linkage);
1998                 cl_lock_get_trust(lock);
1999                 cfs_spin_unlock(&head->coh_lock_guard);
2000                 lu_ref_add(&lock->cll_reference, "prune", cfs_current());
2001                 cl_lock_mutex_get(env, lock);
2002                 if (lock->cll_state < CLS_FREEING) {
2003                         LASSERT(lock->cll_holds == 0);
2004                         LASSERT(lock->cll_users == 0);
2005                         if (cancel)
2006                                 cl_lock_cancel(env, lock);
2007                         cl_lock_delete(env, lock);
2008                 }
2009                 cl_lock_mutex_put(env, lock);
2010                 lu_ref_del(&lock->cll_reference, "prune", cfs_current());
2011                 cl_lock_put(env, lock);
2012                 cfs_spin_lock(&head->coh_lock_guard);
2013         }
2014         cfs_spin_unlock(&head->coh_lock_guard);
2015         EXIT;
2016 }
2017 EXPORT_SYMBOL(cl_locks_prune);
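/*
 * Illustrative sketch (assumption): object destruction paths are the typical
 * callers. The two modes differ in their preconditions, per the assertion
 * above:
 *
 *      cl_locks_prune(env, obj, 1); // cancel each lock, then destroy it
 *      cl_locks_prune(env, obj, 0); // destroy only; legal just when all
 *                                   // pages of obj are already gone
 */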
2018
2019 /**
2020  * Returns true if \a addr is the address of an allocated cl_lock. Used in
2021  * assertions. This check is optimistically imprecise, i.e., it occasionally
2022  * returns true for an incorrect address, but if it returns false, then the
2023  * address is guaranteed to be incorrect. (Should be named cl_lockp().)
2024  *
2025  * \see cl_is_page()
2026  */
2027 int cl_is_lock(const void *addr)
2028 {
2029         return cfs_mem_is_in_cache(addr, cl_lock_kmem);
2030 }
2031 EXPORT_SYMBOL(cl_is_lock);
2032
2033 static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
2034                                           const struct cl_io *io,
2035                                           const struct cl_lock_descr *need,
2036                                           const char *scope, const void *source)
2037 {
2038         struct cl_lock *lock;
2039
2040         ENTRY;
2041
2042         while (1) {
2043                 lock = cl_lock_find(env, io, need);
2044                 if (IS_ERR(lock))
2045                         break;
2046                 cl_lock_mutex_get(env, lock);
2047                 if (lock->cll_state < CLS_FREEING &&
2048                     !(lock->cll_flags & CLF_CANCELLED)) {
2049                         cl_lock_hold_mod(env, lock, +1);
2050                         lu_ref_add(&lock->cll_holders, scope, source);
2051                         lu_ref_add(&lock->cll_reference, scope, source);
2052                         break;
2053                 }
2054                 cl_lock_mutex_put(env, lock);
2055                 cl_lock_put(env, lock);
2056         }
2057         RETURN(lock);
2058 }
2059
2060 /**
2061  * Returns a lock matching \a need description with a reference and a hold on
2062  * it.
2063  *
2064  * This is much like cl_lock_find(), except that cl_lock_hold() additionally
2065  * guarantees that the lock is not in the CLS_FREEING state on return.
2066  */
2067 struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
2068                              const struct cl_lock_descr *need,
2069                              const char *scope, const void *source)
2070 {
2071         struct cl_lock *lock;
2072
2073         ENTRY;
2074
2075         lock = cl_lock_hold_mutex(env, io, need, scope, source);
2076         if (!IS_ERR(lock))
2077                 cl_lock_mutex_put(env, lock);
2078         RETURN(lock);
2079 }
2080 EXPORT_SYMBOL(cl_lock_hold);
2081
2082 /**
2083  * Main high-level entry point of the cl_lock interface: finds an existing
2084  * lock, or enqueues a new one, matching the given description.
2085  */
2086 struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
2087                                 const struct cl_lock_descr *need,
2088                                 const char *scope, const void *source)
2089 {
2090         struct cl_lock       *lock;
2091         const struct lu_fid  *fid;
2092         int                   rc;
2093         int                   iter;
2094         __u32                 enqflags = need->cld_enq_flags;
2095
2096         ENTRY;
2097         fid = lu_object_fid(&io->ci_obj->co_lu);
2098         iter = 0;
2099         do {
2100                 lock = cl_lock_hold_mutex(env, io, need, scope, source);
2101                 if (!IS_ERR(lock)) {
2102                         rc = cl_enqueue_locked(env, lock, io, enqflags);
2103                         if (rc == 0) {
2104                                 if (cl_lock_fits_into(env, lock, need, io)) {
2105                                         cl_lock_mutex_put(env, lock);
2106                                         cl_lock_lockdep_acquire(env,
2107                                                                 lock, enqflags);
2108                                         break;
2109                                 }
2110                                 cl_unuse_locked(env, lock);
2111                         }
2112                         cl_lock_trace(D_DLMTRACE, env, "enqueue failed", lock);
2113                         cl_lock_hold_release(env, lock, scope, source);
2114                         cl_lock_mutex_put(env, lock);
2115                         lu_ref_del(&lock->cll_reference, scope, source);
2116                         cl_lock_put(env, lock);
2117                         lock = ERR_PTR(rc);
2118                 } else
2119                         rc = PTR_ERR(lock);
2120                 iter++;
2121         } while (rc == 0);
2122         RETURN(lock);
2123 }
2124 EXPORT_SYMBOL(cl_lock_request);
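/*
 * Illustrative sketch (the scope string is an assumption): I/O code pairs
 * cl_lock_request() with cl_unuse() and cl_lock_release() for teardown:
 *
 *      lock = cl_lock_request(env, io, need, "io", io);
 *      if (IS_ERR(lock))
 *              return PTR_ERR(lock);
 *      ... perform I/O under the lock ...
 *      cl_unuse(env, lock);
 *      cl_lock_release(env, lock, "io", io);
 */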
2125
2126 /**
2127  * Adds a hold to a known lock.
2128  */
2129 void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
2130                       const char *scope, const void *source)
2131 {
2132         LINVRNT(cl_lock_is_mutexed(lock));
2133         LINVRNT(cl_lock_invariant(env, lock));
2134         LASSERT(lock->cll_state != CLS_FREEING);
2135
2136         ENTRY;
2137         cl_lock_hold_mod(env, lock, +1);
2138         cl_lock_get(lock);
2139         lu_ref_add(&lock->cll_holders, scope, source);
2140         lu_ref_add(&lock->cll_reference, scope, source);
2141         EXIT;
2142 }
2143 EXPORT_SYMBOL(cl_lock_hold_add);
2144
2145 /**
2146  * Releases a hold and a reference on a lock on which the caller has
2147  * acquired a mutex.
2148  */
2149 void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
2150                     const char *scope, const void *source)
2151 {
2152         LINVRNT(cl_lock_invariant(env, lock));
2153         ENTRY;
2154         cl_lock_hold_release(env, lock, scope, source);
2155         lu_ref_del(&lock->cll_reference, scope, source);
2156         cl_lock_put(env, lock);
2157         EXIT;
2158 }
2159 EXPORT_SYMBOL(cl_lock_unhold);
2160
2161 /**
2162  * Releases a hold and a reference on a lock, obtained by cl_lock_hold().
2163  */
2164 void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
2165                      const char *scope, const void *source)
2166 {
2167         LINVRNT(cl_lock_invariant(env, lock));
2168         ENTRY;
2169         cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
2170         cl_lock_mutex_get(env, lock);
2171         cl_lock_hold_release(env, lock, scope, source);
2172         cl_lock_mutex_put(env, lock);
2173         lu_ref_del(&lock->cll_reference, scope, source);
2174         cl_lock_put(env, lock);
2175         EXIT;
2176 }
2177 EXPORT_SYMBOL(cl_lock_release);
2178
2179 void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
2180 {
2181         LINVRNT(cl_lock_is_mutexed(lock));
2182         LINVRNT(cl_lock_invariant(env, lock));
2183
2184         ENTRY;
2185         cl_lock_used_mod(env, lock, +1);
2186         EXIT;
2187 }
2188 EXPORT_SYMBOL(cl_lock_user_add);
2189
2190 int cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
2191 {
2192         LINVRNT(cl_lock_is_mutexed(lock));
2193         LINVRNT(cl_lock_invariant(env, lock));
2194         LASSERT(lock->cll_users > 0);
2195
2196         ENTRY;
2197         cl_lock_used_mod(env, lock, -1);
2198         RETURN(lock->cll_users == 0);
2199 }
2200 EXPORT_SYMBOL(cl_lock_user_del);
2201
2202 const char *cl_lock_mode_name(const enum cl_lock_mode mode)
2203 {
2204         static const char *names[] = {
2205                 [CLM_PHANTOM] = "P",
2206                 [CLM_READ]    = "R",
2207                 [CLM_WRITE]   = "W",
2208                 [CLM_GROUP]   = "G"
2209         };
2210         if (0 <= mode && mode < ARRAY_SIZE(names))
2211                 return names[mode];
2212         else
2213                 return "U";
2214 }
2215 EXPORT_SYMBOL(cl_lock_mode_name);
2216
2217 /**
2218  * Prints human readable representation of a lock description.
2219  */
2220 void cl_lock_descr_print(const struct lu_env *env, void *cookie,
2221                          lu_printer_t printer,
2222                          const struct cl_lock_descr *descr)
2223 {
2224         const struct lu_fid  *fid;
2225
2226         fid = lu_object_fid(&descr->cld_obj->co_lu);
2227         (*printer)(env, cookie, DDESCR"@"DFID, PDESCR(descr), PFID(fid));
2228 }
2229 EXPORT_SYMBOL(cl_lock_descr_print);
2230
2231 /**
2232  * Prints a human-readable representation of \a lock via \a printer.
2233  */
2234 void cl_lock_print(const struct lu_env *env, void *cookie,
2235                    lu_printer_t printer, const struct cl_lock *lock)
2236 {
2237         const struct cl_lock_slice *slice;
2238         (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
2239                    lock, cfs_atomic_read(&lock->cll_ref),
2240                    lock->cll_state, lock->cll_error, lock->cll_holds,
2241                    lock->cll_users, lock->cll_flags);
2242         cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
2243         (*printer)(env, cookie, " {\n");
2244
2245         cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
2246                 (*printer)(env, cookie, "    %s@%p: ",
2247                            slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
2248                            slice);
2249                 if (slice->cls_ops->clo_print != NULL)
2250                         slice->cls_ops->clo_print(env, cookie, printer, slice);
2251                 (*printer)(env, cookie, "\n");
2252         }
2253         (*printer)(env, cookie, "} lock@%p\n", lock);
2254 }
2255 EXPORT_SYMBOL(cl_lock_print);
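/*
 * Illustrative note: debugging code in this file reaches cl_lock_print()
 * through the CL_LOCK_DEBUG() macro (see cl_lock_modify() above) rather than
 * calling it directly, e.g.:
 *
 *      CL_LOCK_DEBUG(D_DLMTRACE, env, lock, "unexpected state %d\n",
 *                    lock->cll_state);
 */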
2256
2257 int cl_lock_init(void)
2258 {
2259         return lu_kmem_init(cl_lock_caches);
2260 }
2261
2262 void cl_lock_fini(void)
2263 {
2264         lu_kmem_fini(cl_lock_caches);
2265 }