/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_lock.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_LDLM

#include <libcfs/libcfs.h>

#include <lustre_swab.h>
#include <obd_class.h>

#include "ldlm_internal.h"

struct kmem_cache *ldlm_glimpse_work_kmem;
EXPORT_SYMBOL(ldlm_glimpse_work_kmem);

/* lock types */
char *ldlm_lockname[] = {
        [0] = "--",
        [LCK_EX] = "EX",
        [LCK_PW] = "PW",
        [LCK_PR] = "PR",
        [LCK_CW] = "CW",
        [LCK_CR] = "CR",
        [LCK_NL] = "NL",
        [LCK_GROUP] = "GROUP",
        [LCK_COS] = "COS"
};
EXPORT_SYMBOL(ldlm_lockname);

char *ldlm_typename[] = {
        [LDLM_PLAIN] = "PLN",
        [LDLM_EXTENT] = "EXT",
        [LDLM_FLOCK] = "FLK",
        [LDLM_IBITS] = "IBT",
};

static ldlm_policy_wire_to_local_t ldlm_policy_wire_to_local[] = {
        [LDLM_PLAIN - LDLM_MIN_TYPE]  = ldlm_plain_policy_wire_to_local,
        [LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_wire_to_local,
        [LDLM_FLOCK - LDLM_MIN_TYPE]  = ldlm_flock_policy_wire_to_local,
        [LDLM_IBITS - LDLM_MIN_TYPE]  = ldlm_ibits_policy_wire_to_local,
};

static ldlm_policy_local_to_wire_t ldlm_policy_local_to_wire[] = {
        [LDLM_PLAIN - LDLM_MIN_TYPE]  = ldlm_plain_policy_local_to_wire,
        [LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_local_to_wire,
        [LDLM_FLOCK - LDLM_MIN_TYPE]  = ldlm_flock_policy_local_to_wire,
        [LDLM_IBITS - LDLM_MIN_TYPE]  = ldlm_ibits_policy_local_to_wire,
};

/**
 * Converts lock policy from local format to on the wire lock_desc format
 */
void ldlm_convert_policy_to_wire(enum ldlm_type type,
                                 const union ldlm_policy_data *lpolicy,
                                 union ldlm_wire_policy_data *wpolicy)
{
        ldlm_policy_local_to_wire_t convert;

        convert = ldlm_policy_local_to_wire[type - LDLM_MIN_TYPE];

        convert(lpolicy, wpolicy);
}

/**
 * Converts lock policy from on the wire lock_desc format to local format
 */
void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type,
                                  const union ldlm_wire_policy_data *wpolicy,
                                  union ldlm_policy_data *lpolicy)
{
        ldlm_policy_wire_to_local_t convert;

        convert = ldlm_policy_wire_to_local[type - LDLM_MIN_TYPE];

        convert(wpolicy, lpolicy);
}

const char *ldlm_it2str(enum ldlm_intent_flags it)
{
        switch (it) {
        case IT_OPEN:
                return "open";
        case IT_CREAT:
                return "creat";
        case (IT_OPEN | IT_CREAT):
                return "open|creat";
        case IT_READDIR:
                return "readdir";
        case IT_GETATTR:
                return "getattr";
        case IT_LOOKUP:
                return "lookup";
        case IT_GETXATTR:
                return "getxattr";
        case IT_LAYOUT:
                return "layout";
        default:
                CERROR("Unknown intent 0x%08x\n", it);
                return "UNKNOWN";
        }
}
EXPORT_SYMBOL(ldlm_it2str);
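
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * the string form is intended only for debug logging, e.g.:
 *
 *      CDEBUG(D_DLMTRACE, "intent: %s\n", ldlm_it2str(it));
 */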

#ifdef HAVE_SERVER_SUPPORT
static ldlm_processing_policy ldlm_processing_policy_table[] = {
        [LDLM_PLAIN]    = ldlm_process_plain_lock,
        [LDLM_EXTENT]   = ldlm_process_extent_lock,
        [LDLM_FLOCK]    = ldlm_process_flock_lock,
        [LDLM_IBITS]    = ldlm_process_inodebits_lock,
};

ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res)
{
        return ldlm_processing_policy_table[res->lr_type];
}
EXPORT_SYMBOL(ldlm_get_processing_policy);

static ldlm_reprocessing_policy ldlm_reprocessing_policy_table[] = {
        [LDLM_PLAIN]    = ldlm_reprocess_queue,
        [LDLM_EXTENT]   = ldlm_reprocess_queue,
        [LDLM_FLOCK]    = ldlm_reprocess_queue,
        [LDLM_IBITS]    = ldlm_reprocess_inodebits_queue,
};

ldlm_reprocessing_policy ldlm_get_reprocessing_policy(struct ldlm_resource *res)
{
        return ldlm_reprocessing_policy_table[res->lr_type];
}

#endif /* HAVE_SERVER_SUPPORT */

void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg)
{
        ns->ns_policy = arg;
}
EXPORT_SYMBOL(ldlm_register_intent);

/*
 * REFCOUNTED LOCK OBJECTS
 */


/**
 * Get a reference on a lock.
 *
 * Lock refcounts, during creation:
 *   - one special one for allocation, dec'd only once in destroy
 *   - one for being a lock that's in-use
 *   - one for the addref associated with a new lock
 */
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
{
        refcount_inc(&lock->l_handle.h_ref);
        return lock;
}
EXPORT_SYMBOL(ldlm_lock_get);

static void lock_handle_free(struct rcu_head *rcu)
{
        struct ldlm_lock *lock = container_of(rcu, struct ldlm_lock,
                                              l_handle.h_rcu);

        OBD_FREE_PRE(lock, sizeof(*lock), "slab-freed");
        kmem_cache_free(ldlm_lock_slab, lock);
}

/**
 * Release lock reference.
 *
 * Also frees the lock if it was the last reference.
 */
void ldlm_lock_put(struct ldlm_lock *lock)
{
        ENTRY;

        LASSERT(lock->l_resource != LP_POISON);
        LASSERT(refcount_read(&lock->l_handle.h_ref) > 0);
        if (refcount_dec_and_test(&lock->l_handle.h_ref)) {
                struct ldlm_resource *res;

                LDLM_DEBUG(lock,
                           "final lock_put on destroyed lock, freeing it.");

                res = lock->l_resource;
                LASSERT(ldlm_is_destroyed(lock));
                LASSERT(list_empty(&lock->l_exp_list));
                LASSERT(list_empty(&lock->l_res_link));
                LASSERT(list_empty(&lock->l_pending_chain));

                lprocfs_counter_decr(ldlm_res_to_ns(res)->ns_stats,
                                     LDLM_NSS_LOCKS);
                lu_ref_del(&res->lr_reference, "lock", lock);
                if (lock->l_export) {
                        class_export_lock_put(lock->l_export, lock);
                        lock->l_export = NULL;
                }

                if (lock->l_lvb_data != NULL)
                        OBD_FREE_LARGE(lock->l_lvb_data, lock->l_lvb_len);

                if (res->lr_type == LDLM_EXTENT) {
                        ldlm_interval_free(ldlm_interval_detach(lock));
                } else if (res->lr_type == LDLM_IBITS) {
                        if (lock->l_ibits_node != NULL)
                                OBD_SLAB_FREE_PTR(lock->l_ibits_node,
                                                  ldlm_inodebits_slab);
                }
                ldlm_resource_putref(res);
                lock->l_resource = NULL;
                lu_ref_fini(&lock->l_reference);
                call_rcu(&lock->l_handle.h_rcu, lock_handle_free);
        }

        EXIT;
}
EXPORT_SYMBOL(ldlm_lock_put);
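
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * every reference taken with ldlm_lock_get()/LDLM_LOCK_GET() must be
 * balanced by ldlm_lock_put()/LDLM_LOCK_PUT(); "some_lock" below is a
 * placeholder for an already-valid lock pointer:
 *
 *      struct ldlm_lock *lock = LDLM_LOCK_GET(some_lock);
 *
 *      ... use lock; it cannot be freed while the reference is held ...
 *      LDLM_LOCK_PUT(lock);
 */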

/**
 * Removes LDLM lock \a lock from LRU. Assumes LRU is already locked.
 */
int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
{
        int rc = 0;

        if (!list_empty(&lock->l_lru)) {
                struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

                LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
                if (ns->ns_last_pos == &lock->l_lru)
                        ns->ns_last_pos = lock->l_lru.prev;
                list_del_init(&lock->l_lru);
                LASSERT(ns->ns_nr_unused > 0);
                ns->ns_nr_unused--;
                rc = 1;
        }
        return rc;
}

/**
 * Removes LDLM lock \a lock from LRU. Obtains the LRU lock first.
 *
 * If \a last_use is non-zero, it will remove the lock from LRU only if
 * it matches lock's l_last_used.
 *
 * \retval 0 the lock was not in the LRU list, or \a last_use was non-zero
 *           and did not match the lock's l_last_used;
 * \retval 1 the lock was in the LRU list and was removed.
 */
int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, ktime_t last_use)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
        int rc = 0;

        ENTRY;
        if (ldlm_is_ns_srv(lock)) {
                LASSERT(list_empty(&lock->l_lru));
                RETURN(0);
        }

        spin_lock(&ns->ns_lock);
        if (!ktime_compare(last_use, ktime_set(0, 0)) ||
            !ktime_compare(last_use, lock->l_last_used))
                rc = ldlm_lock_remove_from_lru_nolock(lock);
        spin_unlock(&ns->ns_lock);

        RETURN(rc);
}
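
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a caller that sampled l_last_used before dropping its locks can pass
 * the sampled value back, so removal only succeeds if the lock was not
 * touched in the meantime:
 *
 *      ktime_t last_use = lock->l_last_used;
 *
 *      ... drop locks, do work that may race with LRU activity ...
 *      if (ldlm_lock_remove_from_lru_check(lock, last_use))
 *              ... the lock stayed unused and is now off the LRU ...
 */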

/**
 * Adds LDLM lock \a lock to namespace LRU. Assumes LRU is already locked.
 */
void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

        lock->l_last_used = ktime_get();
        LASSERT(list_empty(&lock->l_lru));
        LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
        list_add_tail(&lock->l_lru, &ns->ns_unused_list);
        LASSERT(ns->ns_nr_unused >= 0);
        ns->ns_nr_unused++;
}

/**
 * Adds LDLM lock \a lock to namespace LRU. Obtains necessary LRU locks
 * first.
 */
void ldlm_lock_add_to_lru(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

        ENTRY;
        spin_lock(&ns->ns_lock);
        ldlm_lock_add_to_lru_nolock(lock);
        spin_unlock(&ns->ns_lock);
        EXIT;
}

/**
 * Moves LDLM lock \a lock that is already in namespace LRU to the tail of
 * the LRU. Performs necessary LRU locking.
 */
void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

        ENTRY;
        if (ldlm_is_ns_srv(lock)) {
                LASSERT(list_empty(&lock->l_lru));
                EXIT;
                return;
        }

        spin_lock(&ns->ns_lock);
        if (!list_empty(&lock->l_lru)) {
                ldlm_lock_remove_from_lru_nolock(lock);
                ldlm_lock_add_to_lru_nolock(lock);
        }
        spin_unlock(&ns->ns_lock);
        EXIT;
}

/**
 * Helper to destroy a locked lock.
 *
 * Used by ldlm_lock_destroy and ldlm_lock_destroy_nolock.
 * Must be called with l_lock and lr_lock held.
 *
 * Does not actually free the lock data, but rather marks the lock as
 * destroyed by setting the l_destroyed field in the lock to 1.  Destroys
 * the handle->lock association too, so that the lock can no longer be
 * found, and removes the lock from the LRU list.  Actual lock freeing
 * occurs when the last lock reference goes away.
 *
 * Original comment (of some historical value):
 * This used to have a 'strict' flag, which recovery would use to mark an
 * in-use lock as needing-to-die.  Lest I am ever tempted to put it back, I
 * shall explain why it's gone: with the new hash table scheme, once you call
 * ldlm_lock_destroy, you can never drop your final references on this lock.
 * Because it's not in the hash table anymore.  -phil
 */
static int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
{
        ENTRY;

        if (lock->l_readers || lock->l_writers) {
                LDLM_ERROR(lock, "lock still has references");
                LBUG();
        }

        if (!list_empty(&lock->l_res_link)) {
                LDLM_ERROR(lock, "lock still on resource");
                LBUG();
        }

        if (ldlm_is_destroyed(lock)) {
                LASSERT(list_empty(&lock->l_lru));
                EXIT;
                return 0;
        }
        ldlm_set_destroyed(lock);

        if (lock->l_export && lock->l_export->exp_lock_hash) {
                /* NB: it's safe to call cfs_hash_del() even if the lock
                 * isn't in exp_lock_hash. */
                /* In the function below, .hs_keycmp resolves to
                 * ldlm_export_lock_keycmp() */
                /* coverity[overrun-buffer-val] */
                cfs_hash_del(lock->l_export->exp_lock_hash,
                             &lock->l_remote_handle, &lock->l_exp_hash);
        }

        ldlm_lock_remove_from_lru(lock);
        class_handle_unhash(&lock->l_handle);

        EXIT;
        return 1;
}

/**
 * Destroys a LDLM lock \a lock. Performs necessary locking first.
 */
void ldlm_lock_destroy(struct ldlm_lock *lock)
{
        int first;

        ENTRY;
        lock_res_and_lock(lock);
        first = ldlm_lock_destroy_internal(lock);
        unlock_res_and_lock(lock);

        /* drop reference from hashtable only for first destroy */
        if (first) {
                lu_ref_del(&lock->l_reference, "hash", lock);
                LDLM_LOCK_RELEASE(lock);
        }
        EXIT;
}

/**
 * Destroys a LDLM lock \a lock that is already locked.
 */
void ldlm_lock_destroy_nolock(struct ldlm_lock *lock)
{
        int first;

        ENTRY;
        first = ldlm_lock_destroy_internal(lock);
        /* drop reference from hashtable only for first destroy */
        if (first) {
                lu_ref_del(&lock->l_reference, "hash", lock);
                LDLM_LOCK_RELEASE(lock);
        }
        EXIT;
}

static struct portals_handle_ops lock_handle_ops = {
        .hop_type       = "ldlm",
};

/**
 * Allocate and initialize new lock structure.
 *
 * usage: pass in a resource on which you have done ldlm_resource_get;
 *        the new lock will take over the refcount.
 * returns: lock with refcount 2 - one for the current caller and one for remote
 */
static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
{
        struct ldlm_lock *lock;

        ENTRY;

        if (resource == NULL)
                LBUG();

        OBD_SLAB_ALLOC_PTR_GFP(lock, ldlm_lock_slab, GFP_NOFS);
        if (lock == NULL)
                RETURN(NULL);

        spin_lock_init(&lock->l_lock);
        lock->l_resource = resource;
        lu_ref_add(&resource->lr_reference, "lock", lock);

        refcount_set(&lock->l_handle.h_ref, 2);
        INIT_LIST_HEAD(&lock->l_res_link);
        INIT_LIST_HEAD(&lock->l_lru);
        INIT_LIST_HEAD(&lock->l_pending_chain);
        INIT_LIST_HEAD(&lock->l_bl_ast);
        INIT_LIST_HEAD(&lock->l_cp_ast);
        INIT_LIST_HEAD(&lock->l_rk_ast);
        init_waitqueue_head(&lock->l_waitq);
        lock->l_blocking_lock = NULL;
        INIT_LIST_HEAD(&lock->l_sl_mode);
        INIT_LIST_HEAD(&lock->l_sl_policy);
        INIT_HLIST_NODE(&lock->l_exp_hash);
        INIT_HLIST_NODE(&lock->l_exp_flock_hash);

        lprocfs_counter_incr(ldlm_res_to_ns(resource)->ns_stats,
                             LDLM_NSS_LOCKS);
        INIT_LIST_HEAD_RCU(&lock->l_handle.h_link);
        class_handle_hash(&lock->l_handle, &lock_handle_ops);

        lu_ref_init(&lock->l_reference);
        lu_ref_add(&lock->l_reference, "hash", lock);
        lock->l_callback_timeout = 0;
        lock->l_activity = 0;

#if LUSTRE_TRACKS_LOCK_EXP_REFS
        INIT_LIST_HEAD(&lock->l_exp_refs_link);
        lock->l_exp_refs_nr = 0;
        lock->l_exp_refs_target = NULL;
#endif
        INIT_LIST_HEAD(&lock->l_exp_list);

        RETURN(lock);
}

/**
 * Moves LDLM lock \a lock to another resource.
 * This is used on the client when the server returns some other lock than
 * requested (typically as a result of an intent operation).
 */
int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
                              const struct ldlm_res_id *new_resid)
{
        struct ldlm_resource *oldres = lock->l_resource;
        struct ldlm_resource *newres;
        int type;

        ENTRY;

        LASSERT(ns_is_client(ns));

        lock_res_and_lock(lock);
        if (memcmp(new_resid, &lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) == 0) {
                /* Nothing to do */
                unlock_res_and_lock(lock);
                RETURN(0);
        }

        LASSERT(new_resid->name[0] != 0);

        /* This function assumes that the lock isn't on any lists */
        LASSERT(list_empty(&lock->l_res_link));

        type = oldres->lr_type;
        unlock_res_and_lock(lock);

        newres = ldlm_resource_get(ns, NULL, new_resid, type, 1);
        if (IS_ERR(newres))
                RETURN(PTR_ERR(newres));

        lu_ref_add(&newres->lr_reference, "lock", lock);
        /*
         * To flip the lock from the old to the new resource, lock, oldres and
         * newres have to be locked. Resource spin-locks are nested within
         * lock->l_lock, and are taken in the memory address order to avoid
         * dead-locks.
         */
        spin_lock(&lock->l_lock);
        oldres = lock->l_resource;
        if (oldres < newres) {
                lock_res(oldres);
                lock_res_nested(newres, LRT_NEW);
        } else {
                lock_res(newres);
                lock_res_nested(oldres, LRT_NEW);
        }
        LASSERT(memcmp(new_resid, &oldres->lr_name,
                       sizeof(oldres->lr_name)) != 0);
        lock->l_resource = newres;
        unlock_res(oldres);
        unlock_res_and_lock(lock);

        /* ...and the flowers are still standing! */
        lu_ref_del(&oldres->lr_reference, "lock", lock);
        ldlm_resource_putref(oldres);

        RETURN(0);
}

/** \defgroup ldlm_handles LDLM HANDLES
 * Ways to get hold of locks without any addresses.
 * @{
 */

/**
 * Fills in handle for LDLM lock \a lock into supplied \a lockh.
 * Does not take any references.
 */
void ldlm_lock2handle(const struct ldlm_lock *lock, struct lustre_handle *lockh)
{
        lockh->cookie = lock->l_handle.h_cookie;
}
EXPORT_SYMBOL(ldlm_lock2handle);

/**
 * Obtain a lock reference by handle.
 *
 * if \a flags: atomically get the lock and set the flags.
 *              Return NULL if flag already set.
 */
struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
                                     __u64 flags)
{
        struct ldlm_lock *lock;

        ENTRY;

        LASSERT(handle);

        if (!lustre_handle_is_used(handle))
                RETURN(NULL);

        lock = class_handle2object(handle->cookie, &lock_handle_ops);

        if (lock == NULL)
                RETURN(NULL);

        if (lock->l_export != NULL && lock->l_export->exp_failed) {
                CDEBUG(D_INFO, "lock export failed: lock %p, exp %p\n",
                       lock, lock->l_export);
                LDLM_LOCK_PUT(lock);
                RETURN(NULL);
        }

        /* It's unlikely but possible that someone marked the lock as
         * destroyed after we did handle2object on it */
        if ((flags == 0) && !ldlm_is_destroyed(lock)) {
                lu_ref_add(&lock->l_reference, "handle", current);
                RETURN(lock);
        }

        lock_res_and_lock(lock);

        LASSERT(lock->l_resource != NULL);

        lu_ref_add_atomic(&lock->l_reference, "handle", current);
        if (unlikely(ldlm_is_destroyed(lock))) {
                unlock_res_and_lock(lock);
                CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
                LDLM_LOCK_PUT(lock);
                RETURN(NULL);
        }

        /* If we're setting flags, make sure none of them are already set. */
        if (flags != 0) {
                if ((lock->l_flags & flags) != 0) {
                        unlock_res_and_lock(lock);
                        LDLM_LOCK_PUT(lock);
                        RETURN(NULL);
                }

                lock->l_flags |= flags;
        }

        unlock_res_and_lock(lock);
        RETURN(lock);
}
EXPORT_SYMBOL(__ldlm_handle2lock);
/** @} ldlm_handles */
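
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * the ldlm_handle2lock() wrapper is the common flag-less form; passing a
 * flag makes the lookup-and-mark atomic.  Assuming LDLM_FL_CANCELING as
 * an example flag:
 *
 *      struct ldlm_lock *lock;
 *
 *      lock = __ldlm_handle2lock(lockh, LDLM_FL_CANCELING);
 *      if (lock == NULL)
 *              ... stale handle, or another thread already set the flag ...
 */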

/**
 * Fill in "on the wire" representation for given LDLM lock into supplied
 * lock descriptor \a desc structure.
 */
void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
{
        ldlm_res2desc(lock->l_resource, &desc->l_resource);
        desc->l_req_mode = lock->l_req_mode;
        desc->l_granted_mode = lock->l_granted_mode;
        ldlm_convert_policy_to_wire(lock->l_resource->lr_type,
                                    &lock->l_policy_data,
                                    &desc->l_policy_data);
}

/**
 * Add a lock to the list of conflicting locks to send AST to.
 *
 * Only add if we have not sent a blocking AST to the lock yet.
 */
static void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                                  struct list_head *work_list)
{
        if (!ldlm_is_ast_sent(lock)) {
                LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
                ldlm_set_ast_sent(lock);
                /* If the enqueuing client said so, tell the AST recipient to
                 * discard dirty data, rather than writing back. */
                if (ldlm_is_ast_discard_data(new))
                        ldlm_set_discard_data(lock);

                /* Lock can be converted from a blocking state back to granted
                 * after lock convert or COS downgrade but still be in an
                 * older bl_list because it is controlled only by
                 * ldlm_work_bl_ast_lock(), let it be processed there.
                 */
                if (list_empty(&lock->l_bl_ast)) {
                        list_add(&lock->l_bl_ast, work_list);
                        LDLM_LOCK_GET(lock);
                }
                LASSERT(lock->l_blocking_lock == NULL);
                lock->l_blocking_lock = LDLM_LOCK_GET(new);
        }
}

/**
 * Add a lock to the list of just granted locks to send completion AST to.
 */
static void ldlm_add_cp_work_item(struct ldlm_lock *lock,
                                  struct list_head *work_list)
{
        if (!ldlm_is_cp_reqd(lock)) {
                ldlm_set_cp_reqd(lock);
                LDLM_DEBUG(lock, "lock granted; sending completion AST.");
                LASSERT(list_empty(&lock->l_cp_ast));
                list_add(&lock->l_cp_ast, work_list);
                LDLM_LOCK_GET(lock);
        }
}

/**
 * Aggregator function to add AST work items into a list. Determines
 * what sort of an AST work needs to be done and calls the proper
 * adding function.
 * Must be called with lr_lock held.
 */
void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                            struct list_head *work_list)
{
        ENTRY;
        check_res_locked(lock->l_resource);
        if (new)
                ldlm_add_bl_work_item(lock, new, work_list);
        else
                ldlm_add_cp_work_item(lock, work_list);
        EXIT;
}

/**
 * Add specified reader/writer reference to LDLM lock with handle \a lockh.
 * r/w reference type is determined by \a mode.
 * Calls ldlm_lock_addref_internal.
 */
void ldlm_lock_addref(const struct lustre_handle *lockh, enum ldlm_mode mode)
{
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(lockh);
        LASSERTF(lock != NULL, "Non-existing lock: %#llx\n", lockh->cookie);
        ldlm_lock_addref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}
EXPORT_SYMBOL(ldlm_lock_addref);
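
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * reader and writer references pin the lock in the "used" state and must
 * be balanced mode-for-mode:
 *
 *      ldlm_lock_addref(&lockh, LCK_PR);
 *      ... read under the protection of the lock ...
 *      ldlm_lock_decref(&lockh, LCK_PR);
 */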

/**
 * Helper function.
 * Add specified reader/writer reference to LDLM lock \a lock.
 * r/w reference type is determined by \a mode.
 * Removes lock from LRU if it is there.
 * Assumes the LDLM lock is already locked.
 */
void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock,
                                      enum ldlm_mode mode)
{
        ldlm_lock_remove_from_lru(lock);
        if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
                lock->l_readers++;
                lu_ref_add_atomic(&lock->l_reference, "reader", lock);
        }
        if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) {
                lock->l_writers++;
                lu_ref_add_atomic(&lock->l_reference, "writer", lock);
        }
        LDLM_LOCK_GET(lock);
        lu_ref_add_atomic(&lock->l_reference, "user", lock);
        LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
}

/**
 * Attempts to add reader/writer reference to a lock with handle \a lockh, and
 * fails if lock is already LDLM_FL_CBPENDING or destroyed.
 *
 * \retval 0 success, lock was addref-ed
 *
 * \retval -EAGAIN lock is being canceled.
 */
int ldlm_lock_addref_try(const struct lustre_handle *lockh, enum ldlm_mode mode)
{
        struct ldlm_lock *lock;
        int               result;

        result = -EAGAIN;
        lock = ldlm_handle2lock(lockh);
        if (lock != NULL) {
                lock_res_and_lock(lock);
                if (lock->l_readers != 0 || lock->l_writers != 0 ||
                    !ldlm_is_cbpending(lock)) {
                        ldlm_lock_addref_internal_nolock(lock, mode);
                        result = 0;
                }
                unlock_res_and_lock(lock);
                LDLM_LOCK_PUT(lock);
        }
        return result;
}
EXPORT_SYMBOL(ldlm_lock_addref_try);
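
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * callers that must not resurrect a dying lock check for -EAGAIN:
 *
 *      if (ldlm_lock_addref_try(lockh, mode) < 0)
 *              ... lock is being canceled, fall back to a fresh enqueue ...
 */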

/**
 * Add specified reader/writer reference to LDLM lock \a lock.
 * Locks LDLM lock and calls ldlm_lock_addref_internal_nolock to do the work.
 * Only called for local locks.
 */
void ldlm_lock_addref_internal(struct ldlm_lock *lock, enum ldlm_mode mode)
{
        lock_res_and_lock(lock);
        ldlm_lock_addref_internal_nolock(lock, mode);
        unlock_res_and_lock(lock);
}

/**
 * Removes reader/writer reference for LDLM lock \a lock.
 * Assumes LDLM lock is already locked.
 * Only called in ldlm_flock_destroy and for local locks.
 * Does NOT add lock to LRU if no r/w references are left, in order to
 * accommodate flock locks that cannot be placed in LRU.
 */
void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock,
                                      enum ldlm_mode mode)
{
        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
                LASSERT(lock->l_readers > 0);
                lu_ref_del(&lock->l_reference, "reader", lock);
                lock->l_readers--;
        }
        if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) {
                LASSERT(lock->l_writers > 0);
                lu_ref_del(&lock->l_reference, "writer", lock);
                lock->l_writers--;
        }

        lu_ref_del(&lock->l_reference, "user", lock);
        LDLM_LOCK_RELEASE(lock);    /* matches the LDLM_LOCK_GET() in addref */
}

/**
 * Removes reader/writer reference for LDLM lock \a lock.
 * Locks LDLM lock first.
 * If the lock is determined to be a client lock and the r/w refcount drops
 * to zero and the lock is not blocked, the lock is added to the namespace
 * LRU.
 * For blocked LDLM locks, if the r/w count drops to zero, blocking_ast is
 * called.
 */
void ldlm_lock_decref_internal(struct ldlm_lock *lock, enum ldlm_mode mode)
{
        struct ldlm_namespace *ns;

        ENTRY;

        lock_res_and_lock(lock);

        ns = ldlm_lock_to_ns(lock);

        ldlm_lock_decref_internal_nolock(lock, mode);

        if ((ldlm_is_local(lock) || lock->l_req_mode == LCK_GROUP) &&
            !lock->l_readers && !lock->l_writers) {
                /* If this is a local lock on a server namespace and this was
                 * the last reference, cancel the lock.
                 *
                 * Group locks are special:
                 * They must not go in LRU, but they are not called back
                 * like non-group locks, instead they are manually released.
                 * They have an l_writers reference which they keep until
                 * they are manually released, so we remove them when they have
                 * no more reader or writer references. - LU-6368 */
                ldlm_set_cbpending(lock);
        }

        if (!lock->l_readers && !lock->l_writers && ldlm_is_cbpending(lock)) {
                /* If we received a blocking AST and this was the last
                 * reference, run the callback. */
                if (ldlm_is_ns_srv(lock) && lock->l_export)
                        CERROR("FL_CBPENDING set on non-local lock--just a "
                               "warning\n");

                LDLM_DEBUG(lock, "final decref done on cbpending lock");

                LDLM_LOCK_GET(lock); /* dropped by bl thread */
                ldlm_lock_remove_from_lru(lock);
                unlock_res_and_lock(lock);

                if (ldlm_is_fail_loc(lock))
                        OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);

                if (ldlm_is_atomic_cb(lock) ||
                    ldlm_bl_to_thread_lock(ns, NULL, lock) != 0)
                        ldlm_handle_bl_callback(ns, NULL, lock);
        } else if (ns_is_client(ns) &&
                   !lock->l_readers && !lock->l_writers &&
                   !ldlm_is_no_lru(lock) &&
                   !ldlm_is_bl_ast(lock) &&
                   !ldlm_is_converting(lock)) {

                LDLM_DEBUG(lock, "add lock into lru list");

                /* If this is a client-side namespace and this was the last
                 * reference, put it on the LRU. */
                ldlm_lock_add_to_lru(lock);
                unlock_res_and_lock(lock);

                if (ldlm_is_fail_loc(lock))
                        OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);

                /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE
                 * are not supported by the server, otherwise, it is done on
                 * enqueue. */
                if (!exp_connect_cancelset(lock->l_conn_export) &&
                    !ns_connect_lru_resize(ns))
                        ldlm_cancel_lru(ns, 0, LCF_ASYNC, 0);
        } else {
                LDLM_DEBUG(lock, "do not add lock into lru list");
                unlock_res_and_lock(lock);
        }

        EXIT;
}

/**
 * Decrease reader/writer refcount for LDLM lock with handle \a lockh.
 */
void ldlm_lock_decref(const struct lustre_handle *lockh, enum ldlm_mode mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);

        LASSERTF(lock != NULL, "Non-existing lock: %#llx\n", lockh->cookie);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}
EXPORT_SYMBOL(ldlm_lock_decref);

/**
 * Decrease reader/writer refcount for LDLM lock with handle \a lockh and
 * mark it for subsequent cancellation once r/w refcount drops to zero
 * instead of putting it into LRU.
 */
void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh,
                                 enum ldlm_mode mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);

        ENTRY;

        LASSERT(lock != NULL);

        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        lock_res_and_lock(lock);
        ldlm_set_cbpending(lock);
        unlock_res_and_lock(lock);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}
EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
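
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * this is the "drop and cancel" counterpart of ldlm_lock_decref(); a
 * lease-style user that wants the lock gone once unused would do:
 *
 *      ldlm_lock_addref(&lockh, LCK_EX);
 *      ... exclusive work ...
 *      ldlm_lock_decref_and_cancel(&lockh, LCK_EX);
 */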

struct sl_insert_point {
        struct list_head *res_link;
        struct list_head *mode_link;
        struct list_head *policy_link;
};

/**
 * Finds a position to insert the new lock into granted lock list.
 *
 * Used for locks eligible for skiplist optimization.
 *
 * Parameters:
 *      queue [input]:  the granted list where search acts on;
 *      req [input]:    the lock whose position to be located;
 *      prev [output]:  positions within 3 lists to insert @req to
 * Return Value:
 *      filled @prev
 * NOTE: called by
 *  - ldlm_grant_lock_with_skiplist
 */
static void search_granted_lock(struct list_head *queue,
                                struct ldlm_lock *req,
                                struct sl_insert_point *prev)
{
        struct list_head *tmp;
        struct ldlm_lock *lock, *mode_end, *policy_end;

        ENTRY;

        list_for_each(tmp, queue) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                mode_end = list_entry(lock->l_sl_mode.prev,
                                      struct ldlm_lock, l_sl_mode);

                if (lock->l_req_mode != req->l_req_mode) {
                        /* jump to last lock of mode group */
                        tmp = &mode_end->l_res_link;
                        continue;
                }

                /* suitable mode group is found */
                if (lock->l_resource->lr_type == LDLM_PLAIN) {
                        /* insert point is last lock of the mode group */
                        prev->res_link = &mode_end->l_res_link;
                        prev->mode_link = &mode_end->l_sl_mode;
                        prev->policy_link = &req->l_sl_policy;
                        EXIT;
                        return;
                } else if (lock->l_resource->lr_type == LDLM_IBITS) {
                        for (;;) {
                                policy_end =
                                        list_entry(lock->l_sl_policy.prev,
                                                   struct ldlm_lock,
                                                   l_sl_policy);

                                if (lock->l_policy_data.l_inodebits.bits ==
                                    req->l_policy_data.l_inodebits.bits) {
                                        /* insert point is last lock of
                                         * the policy group */
                                        prev->res_link =
                                                &policy_end->l_res_link;
                                        prev->mode_link =
                                                &policy_end->l_sl_mode;
                                        prev->policy_link =
                                                &policy_end->l_sl_policy;
                                        EXIT;
                                        return;
                                }

                                if (policy_end == mode_end)
                                        /* done with mode group */
                                        break;

                                /* go to next policy group within mode group */
                                tmp = policy_end->l_res_link.next;
                                lock = list_entry(tmp, struct ldlm_lock,
                                                  l_res_link);
                        }  /* loop over policy groups within the mode group */

                        /* insert point is last lock of the mode group,
                         * new policy group is started */
                        prev->res_link = &mode_end->l_res_link;
                        prev->mode_link = &mode_end->l_sl_mode;
                        prev->policy_link = &req->l_sl_policy;
                        EXIT;
                        return;
                } else {
                        LDLM_ERROR(lock, "is not LDLM_PLAIN or LDLM_IBITS lock");
                        LBUG();
                }
        }

        /* insert point is last lock on the queue,
         * new mode group and new policy group are started */
        prev->res_link = queue->prev;
        prev->mode_link = &req->l_sl_mode;
        prev->policy_link = &req->l_sl_policy;
        EXIT;
}
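
/*
 * Illustration (editor's sketch of the structure searched above): the
 * granted list is kept as mode groups, and for IBITS locks each mode
 * group is further split into policy groups with identical bits, e.g.:
 *
 *      lr_granted: [PR b=A] [PR b=A] [PR b=B] | [EX b=C]
 *                  \_ policy group _/
 *                  \_______ PR mode group _____/
 *
 * l_sl_mode links the first and last lock of each mode group, and
 * l_sl_policy the first and last lock of each policy group, so the
 * search can jump group-to-group instead of visiting every lock.
 */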

/**
 * Add a lock into resource granted list after a position described by
 * \a prev.
 */
static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
                                       struct sl_insert_point *prev)
{
        struct ldlm_resource *res = lock->l_resource;

        ENTRY;

        check_res_locked(res);

        ldlm_resource_dump(D_INFO, res);
        LDLM_DEBUG(lock, "About to add lock:");

        if (ldlm_is_destroyed(lock)) {
                CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
                return;
        }

        LASSERT(list_empty(&lock->l_res_link));
        LASSERT(list_empty(&lock->l_sl_mode));
        LASSERT(list_empty(&lock->l_sl_policy));

        /*
         * lock->link == prev->link means lock is first starting the group.
         * Don't re-add to itself to suppress kernel warnings.
         */
        if (&lock->l_res_link != prev->res_link)
                list_add(&lock->l_res_link, prev->res_link);
        if (&lock->l_sl_mode != prev->mode_link)
                list_add(&lock->l_sl_mode, prev->mode_link);
        if (&lock->l_sl_policy != prev->policy_link)
                list_add(&lock->l_sl_policy, prev->policy_link);

        EXIT;
}

/**
 * Add a lock to granted list on a resource maintaining skiplist
 * correctness.
 */
void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock)
{
        struct sl_insert_point prev;

        LASSERT(ldlm_is_granted(lock));

        search_granted_lock(&lock->l_resource->lr_granted, lock, &prev);
        ldlm_granted_list_add_lock(lock, &prev);
}

/**
 * Perform lock granting bookkeeping.
 *
 * Includes putting the lock into granted list and updating lock mode.
 * NOTE: called by
 *  - ldlm_lock_enqueue
 *  - ldlm_reprocess_queue
 *
 * must be called with lr_lock held
 */
void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
{
        struct ldlm_resource *res = lock->l_resource;

        ENTRY;

        check_res_locked(res);

        lock->l_granted_mode = lock->l_req_mode;

        if (work_list && lock->l_completion_ast != NULL)
                ldlm_add_ast_work_item(lock, NULL, work_list);

        if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS)
                ldlm_grant_lock_with_skiplist(lock);
        else if (res->lr_type == LDLM_EXTENT)
                ldlm_extent_add_lock(res, lock);
        else if (res->lr_type == LDLM_FLOCK) {
                /* We should not add locks to granted list in the following
                 * cases:
                 * - this is an UNLOCK but not a real lock;
                 * - this is a TEST lock;
                 * - this is a F_CANCELLK lock (async flock has req_mode == 0);
                 * - this is a deadlock (flock cannot be granted) */
                if (lock->l_req_mode == 0 ||
                    lock->l_req_mode == LCK_NL ||
                    ldlm_is_test_lock(lock) ||
                    ldlm_is_flock_deadlock(lock))
                        RETURN_EXIT;
                ldlm_resource_add_lock(res, &res->lr_granted, lock);
        } else {
                LBUG();
        }

        ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock);
        EXIT;
}

/**
 * Check if the given @lock meets the criteria for a match.
 * A reference on the lock is taken if matched.
 *
 * \param lock     test-against this lock
 * \param data     parameters
 */
static int lock_matches(struct ldlm_lock *lock, struct ldlm_match_data *data)
{
        union ldlm_policy_data *lpol = &lock->l_policy_data;
        enum ldlm_mode match = LCK_MINMODE;

        if (lock == data->lmd_old)
                return INTERVAL_ITER_STOP;

        /* Check if this lock can be matched.
         * Used by LU-2919 (exclusive open) for open lease lock */
        if (ldlm_is_excl(lock))
                return INTERVAL_ITER_CONT;

        /* llite sometimes wants to match locks that will be
         * canceled when their users drop, but we allow it to match
         * if it passes in CBPENDING and the lock still has users.
         * this is generally only going to be used by children
         * whose parents already hold a lock so forward progress
         * can still happen. */
        if (ldlm_is_cbpending(lock) &&
            !(data->lmd_flags & LDLM_FL_CBPENDING))
                return INTERVAL_ITER_CONT;
        if (!data->lmd_unref && ldlm_is_cbpending(lock) &&
            lock->l_readers == 0 && lock->l_writers == 0)
                return INTERVAL_ITER_CONT;

        if (!(lock->l_req_mode & *data->lmd_mode))
                return INTERVAL_ITER_CONT;

        /* When we search for ast_data, we are not doing a traditional match,
         * so we don't worry about IBITS or extent matching.
         */
        if (data->lmd_has_ast_data) {
                if (!lock->l_ast_data)
                        return INTERVAL_ITER_CONT;

                goto matched;
        }

        match = lock->l_req_mode;

        switch (lock->l_resource->lr_type) {
        case LDLM_EXTENT:
                if (lpol->l_extent.start > data->lmd_policy->l_extent.start ||
                    lpol->l_extent.end < data->lmd_policy->l_extent.end)
                        return INTERVAL_ITER_CONT;

                if (unlikely(match == LCK_GROUP) &&
                    data->lmd_policy->l_extent.gid != LDLM_GID_ANY &&
                    lpol->l_extent.gid != data->lmd_policy->l_extent.gid)
                        return INTERVAL_ITER_CONT;
                break;
        case LDLM_IBITS:
                /* We match if we have an existing lock with the same or a
                 * wider set of bits. */
                if ((lpol->l_inodebits.bits &
                     data->lmd_policy->l_inodebits.bits) !=
                    data->lmd_policy->l_inodebits.bits)
                        return INTERVAL_ITER_CONT;
                break;
        default:
                ;
        }

        if (!data->lmd_unref && LDLM_HAVE_MASK(lock, GONE))
                return INTERVAL_ITER_CONT;

        if (!equi(data->lmd_flags & LDLM_FL_LOCAL_ONLY, ldlm_is_local(lock)))
                return INTERVAL_ITER_CONT;

        /* Filter locks by skipping flags */
        if (data->lmd_skip_flags & lock->l_flags)
                return INTERVAL_ITER_CONT;

matched:
        if (data->lmd_flags & LDLM_FL_TEST_LOCK) {
                LDLM_LOCK_GET(lock);
                ldlm_lock_touch_in_lru(lock);
        } else {
                ldlm_lock_addref_internal_nolock(lock, match);
        }

        *data->lmd_mode = match;
        data->lmd_lock = lock;

        return INTERVAL_ITER_STOP;
}

static unsigned int itree_overlap_cb(struct interval_node *in, void *args)
{
        struct ldlm_interval *node = to_ldlm_interval(in);
        struct ldlm_match_data *data = args;
        struct ldlm_lock *lock;
        int rc;

        list_for_each_entry(lock, &node->li_group, l_sl_policy) {
                rc = lock_matches(lock, data);
                if (rc == INTERVAL_ITER_STOP)
                        return INTERVAL_ITER_STOP;
        }
        return INTERVAL_ITER_CONT;
}

/**
 * Search for a lock with given parameters in interval trees.
 *
 * \param res      search for a lock in this resource
 * \param data     parameters
 *
 * \retval a referenced lock or NULL.
 */
struct ldlm_lock *search_itree(struct ldlm_resource *res,
                               struct ldlm_match_data *data)
{
        struct interval_node_extent ext = {
                .start     = data->lmd_policy->l_extent.start,
                .end       = data->lmd_policy->l_extent.end
        };
        int idx;

        data->lmd_lock = NULL;

        for (idx = 0; idx < LCK_MODE_NUM; idx++) {
                struct ldlm_interval_tree *tree = &res->lr_itree[idx];

                if (tree->lit_root == NULL)
                        continue;

                if (!(tree->lit_mode & *data->lmd_mode))
                        continue;

                interval_search(tree->lit_root, &ext,
                                itree_overlap_cb, data);
                if (data->lmd_lock)
                        return data->lmd_lock;
        }

        return NULL;
}
EXPORT_SYMBOL(search_itree);


/**
 * Search for a lock with given properties in a queue.
 *
 * \param queue    search for a lock in this queue
 * \param data     parameters
 *
 * \retval a referenced lock or NULL.
 */
static struct ldlm_lock *search_queue(struct list_head *queue,
                                      struct ldlm_match_data *data)
{
        struct ldlm_lock *lock;
        int rc;

        data->lmd_lock = NULL;

        list_for_each_entry(lock, queue, l_res_link) {
                rc = lock_matches(lock, data);
                if (rc == INTERVAL_ITER_STOP)
                        return data->lmd_lock;
        }

        return NULL;
}

void ldlm_lock_fail_match_locked(struct ldlm_lock *lock)
{
        if ((lock->l_flags & LDLM_FL_FAIL_NOTIFIED) == 0) {
                lock->l_flags |= LDLM_FL_FAIL_NOTIFIED;
                wake_up_all(&lock->l_waitq);
        }
}
EXPORT_SYMBOL(ldlm_lock_fail_match_locked);

void ldlm_lock_fail_match(struct ldlm_lock *lock)
{
        lock_res_and_lock(lock);
        ldlm_lock_fail_match_locked(lock);
        unlock_res_and_lock(lock);
}

/**
 * Mark lock as "matchable" by OST.
 *
 * Used to prevent certain races in LOV/OSC where the lock is granted, but LVB
 * is not yet valid.
 * Assumes LDLM lock is already locked.
 */
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock)
{
        ldlm_set_lvb_ready(lock);
        wake_up_all(&lock->l_waitq);
}
EXPORT_SYMBOL(ldlm_lock_allow_match_locked);

/**
 * Mark lock as "matchable" by OST.
 * Locks the lock and then \see ldlm_lock_allow_match_locked
 */
void ldlm_lock_allow_match(struct ldlm_lock *lock)
{
        lock_res_and_lock(lock);
        ldlm_lock_allow_match_locked(lock);
        unlock_res_and_lock(lock);
}
EXPORT_SYMBOL(ldlm_lock_allow_match);
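
/*
 * Interplay sketch (editor's illustration, not part of the original file):
 * the producer side fills in the LVB and then calls ldlm_lock_allow_match(),
 * which wakes the wait_event_idle_timeout() in ldlm_lock_match_with_skip()
 * below, where matchers block until LDLM_FL_LVB_READY is set:
 *
 *      memcpy(lock->l_lvb_data, lvb, lock->l_lvb_len);
 *      ldlm_lock_allow_match(lock);
 */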
1373
1374 /**
1375  * Attempt to find a lock with specified properties.
1376  *
1377  * Typically returns a reference to matched lock unless LDLM_FL_TEST_LOCK is
1378  * set in \a flags
1379  *
1380  * Can be called in two ways:
1381  *
1382  * If 'ns' is NULL, then lockh describes an existing lock that we want to look
1383  * for a duplicate of.
1384  *
1385  * Otherwise, all of the fields must be filled in, to match against.
1386  *
1387  * If 'flags' contains LDLM_FL_LOCAL_ONLY, then only match local locks on the
1388  *     server (ie, connh is NULL)
1389  * If 'flags' contains LDLM_FL_BLOCK_GRANTED, then only locks on the granted
1390  *     list will be considered
1391  * If 'flags' contains LDLM_FL_CBPENDING, then locks that have been marked
1392  *     to be canceled can still be matched as long as they still have reader
1393  *     or writer refernces
1394  * If 'flags' contains LDLM_FL_TEST_LOCK, then don't actually reference a lock,
1395  *     just tell us if we would have matched.
1396  *
1397  * \retval 1 if it finds an already-existing lock that is compatible; in this
1398  * case, lockh is filled in with a addref()ed lock
1399  *
1400  * We also check security context, and if that fails we simply return 0 (to
1401  * keep caller code unchanged), the context failure will be discovered by
1402  * caller sometime later.
1403  */
1404 enum ldlm_mode ldlm_lock_match_with_skip(struct ldlm_namespace *ns,
1405                                          __u64 flags, __u64 skip_flags,
1406                                          const struct ldlm_res_id *res_id,
1407                                          enum ldlm_type type,
1408                                          union ldlm_policy_data *policy,
1409                                          enum ldlm_mode mode,
1410                                          struct lustre_handle *lockh, int unref)
1411 {
1412         struct ldlm_match_data data = {
1413                 .lmd_old = NULL,
1414                 .lmd_lock = NULL,
1415                 .lmd_mode = &mode,
1416                 .lmd_policy = policy,
1417                 .lmd_flags = flags,
1418                 .lmd_skip_flags = skip_flags,
1419                 .lmd_unref = unref,
1420                 .lmd_has_ast_data = false,
1421         };
1422         struct ldlm_resource *res;
1423         struct ldlm_lock *lock;
1424         int matched;
1425
1426         ENTRY;
1427
1428         if (ns == NULL) {
1429                 data.lmd_old = ldlm_handle2lock(lockh);
1430                 LASSERT(data.lmd_old != NULL);
1431
1432                 ns = ldlm_lock_to_ns(data.lmd_old);
1433                 res_id = &data.lmd_old->l_resource->lr_name;
1434                 type = data.lmd_old->l_resource->lr_type;
1435                 *data.lmd_mode = data.lmd_old->l_req_mode;
1436         }
1437
1438         res = ldlm_resource_get(ns, NULL, res_id, type, 0);
1439         if (IS_ERR(res)) {
1440                 LASSERT(data.lmd_old == NULL);
1441                 RETURN(0);
1442         }
1443
1444         LDLM_RESOURCE_ADDREF(res);
1445         lock_res(res);
1446         if (res->lr_type == LDLM_EXTENT)
1447                 lock = search_itree(res, &data);
1448         else
1449                 lock = search_queue(&res->lr_granted, &data);
1450         if (!lock && !(flags & LDLM_FL_BLOCK_GRANTED))
1451                 lock = search_queue(&res->lr_waiting, &data);
1452         matched = lock ? mode : 0;
1453         unlock_res(res);
1454         LDLM_RESOURCE_DELREF(res);
1455         ldlm_resource_putref(res);
1456
1457         if (lock) {
1458                 ldlm_lock2handle(lock, lockh);
1459                 if ((flags & LDLM_FL_LVB_READY) &&
1460                     (!ldlm_is_lvb_ready(lock))) {
1461                         __u64 wait_flags = LDLM_FL_LVB_READY |
1462                                 LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED;
1463
1464                         if (lock->l_completion_ast) {
1465                                 int err = lock->l_completion_ast(lock,
1466                                                         LDLM_FL_WAIT_NOREPROC,
1467                                                         NULL);
1468                                 if (err)
1469                                         GOTO(out_fail_match, matched = 0);
1470                         }
1471
1472                         wait_event_idle_timeout(
1473                                 lock->l_waitq,
1474                                 lock->l_flags & wait_flags,
1475                                 cfs_time_seconds(obd_timeout));
1476
1477                         if (!ldlm_is_lvb_ready(lock))
1478                                 GOTO(out_fail_match, matched = 0);
1479                 }
1480
1481                 /* check user's security context */
1482                 if (lock->l_conn_export &&
1483                     sptlrpc_import_check_ctx(
1484                                 class_exp2cliimp(lock->l_conn_export)))
1485                         GOTO(out_fail_match, matched = 0);
1486
1487                 LDLM_DEBUG(lock, "matched (%llu %llu)",
1488                            (type == LDLM_PLAIN || type == LDLM_IBITS) ?
1489                            res_id->name[2] : policy->l_extent.start,
1490                            (type == LDLM_PLAIN || type == LDLM_IBITS) ?
1491                            res_id->name[3] : policy->l_extent.end);
1492
1493 out_fail_match:
1494                 if (flags & LDLM_FL_TEST_LOCK)
1495                         LDLM_LOCK_RELEASE(lock);
1496                 else if (!matched)
1497                         ldlm_lock_decref_internal(lock, mode);
1498         }
1499
1500         /* less verbose for test-only */
1501         if (!matched && !(flags & LDLM_FL_TEST_LOCK)) {
1502                 LDLM_DEBUG_NOLOCK("not matched ns %p type %u mode %u res "
1503                                   "%llu/%llu (%llu %llu)", ns,
1504                                   type, mode, res_id->name[0], res_id->name[1],
1505                                   (type == LDLM_PLAIN || type == LDLM_IBITS) ?
1506                                   res_id->name[2] : policy->l_extent.start,
1507                                   (type == LDLM_PLAIN || type == LDLM_IBITS) ?
1508                                   res_id->name[3] : policy->l_extent.end);
1509         }
1510         if (data.lmd_old != NULL)
1511                 LDLM_LOCK_PUT(data.lmd_old);
1512
1513         return matched;
1514 }
1515 EXPORT_SYMBOL(ldlm_lock_match_with_skip);
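
/*
 * A minimal caller sketch (illustrative only, compiled out; "ns" and
 * "res_id" are assumed to be in scope): probe the namespace for a granted,
 * LVB-ready extent lock in PR or PW mode, dropping the reference once done.
 */
#if 0
	union ldlm_policy_data policy = {
		.l_extent = { .start = 0, .end = OBD_OBJECT_EOF },
	};
	struct lustre_handle lockh;
	enum ldlm_mode mode;

	mode = ldlm_lock_match_with_skip(ns, LDLM_FL_LVB_READY, 0, res_id,
					 LDLM_EXTENT, &policy,
					 LCK_PR | LCK_PW, &lockh, 0);
	if (mode != 0) {
		/* matched: lockh now holds a mode reference on the lock */
		ldlm_lock_decref(&lockh, mode);
	}
#endif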
1516
1517 enum ldlm_mode ldlm_revalidate_lock_handle(const struct lustre_handle *lockh,
1518                                            __u64 *bits)
1519 {
1520         struct ldlm_lock *lock;
1521         enum ldlm_mode mode = 0;
1522         ENTRY;
1523
1524         lock = ldlm_handle2lock(lockh);
1525         if (lock != NULL) {
1526                 lock_res_and_lock(lock);
1527                 if (LDLM_HAVE_MASK(lock, GONE))
1528                         GOTO(out, mode);
1529
1530                 if (ldlm_is_cbpending(lock) &&
1531                     lock->l_readers == 0 && lock->l_writers == 0)
1532                         GOTO(out, mode);
1533
1534                 if (bits)
1535                         *bits = lock->l_policy_data.l_inodebits.bits;
1536                 mode = lock->l_granted_mode;
1537                 ldlm_lock_addref_internal_nolock(lock, mode);
1538         }
1539
1540         EXIT;
1541
1542 out:
1543         if (lock != NULL) {
1544                 unlock_res_and_lock(lock);
1545                 LDLM_LOCK_PUT(lock);
1546         }
1547         return mode;
1548 }
1549 EXPORT_SYMBOL(ldlm_revalidate_lock_handle);
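
/*
 * A minimal caller sketch (illustrative only, compiled out; "lockh" is
 * assumed in scope): re-check that a cached handle still maps to a usable
 * lock and learn its inodebits; the reference taken on success must be
 * dropped with ldlm_lock_decref().
 */
#if 0
	__u64 bits = 0;
	enum ldlm_mode mode;

	mode = ldlm_revalidate_lock_handle(lockh, &bits);
	if (mode != 0) {
		/* the lock is usable in "mode"; "bits" describe coverage */
		ldlm_lock_decref(lockh, mode);
	}
#endif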
1550
1551 /** The caller must guarantee that the buffer is large enough. */
1552 int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
1553                   enum req_location loc, void *data, int size)
1554 {
1555         void *lvb;
1556         ENTRY;
1557
1558         LASSERT(data != NULL);
1559         LASSERT(size >= 0);
1560
1561         switch (lock->l_lvb_type) {
1562         case LVB_T_OST:
1563                 if (size == sizeof(struct ost_lvb)) {
1564                         if (loc == RCL_CLIENT)
1565                                 lvb = req_capsule_client_swab_get(pill,
1566                                                 &RMF_DLM_LVB,
1567                                                 lustre_swab_ost_lvb);
1568                         else
1569                                 lvb = req_capsule_server_swab_get(pill,
1570                                                 &RMF_DLM_LVB,
1571                                                 lustre_swab_ost_lvb);
1572                         if (unlikely(lvb == NULL)) {
1573                                 LDLM_ERROR(lock, "no LVB");
1574                                 RETURN(-EPROTO);
1575                         }
1576
1577                         memcpy(data, lvb, size);
1578                 } else if (size == sizeof(struct ost_lvb_v1)) {
1579                         struct ost_lvb *olvb = data;
1580
1581                         if (loc == RCL_CLIENT)
1582                                 lvb = req_capsule_client_swab_get(pill,
1583                                                 &RMF_DLM_LVB,
1584                                                 lustre_swab_ost_lvb_v1);
1585                         else
1586                                 lvb = req_capsule_server_sized_swab_get(pill,
1587                                                 &RMF_DLM_LVB, size,
1588                                                 lustre_swab_ost_lvb_v1);
1589                         if (unlikely(lvb == NULL)) {
1590                                 LDLM_ERROR(lock, "no LVB");
1591                                 RETURN(-EPROTO);
1592                         }
1593
1594                         memcpy(data, lvb, size);
1595                         olvb->lvb_mtime_ns = 0;
1596                         olvb->lvb_atime_ns = 0;
1597                         olvb->lvb_ctime_ns = 0;
1598                 } else {
1599                         LDLM_ERROR(lock, "Replied unexpected ost LVB size %d",
1600                                    size);
1601                         RETURN(-EINVAL);
1602                 }
1603                 break;
1604         case LVB_T_LQUOTA:
1605                 if (size == sizeof(struct lquota_lvb)) {
1606                         if (loc == RCL_CLIENT)
1607                                 lvb = req_capsule_client_swab_get(pill,
1608                                                 &RMF_DLM_LVB,
1609                                                 lustre_swab_lquota_lvb);
1610                         else
1611                                 lvb = req_capsule_server_swab_get(pill,
1612                                                 &RMF_DLM_LVB,
1613                                                 lustre_swab_lquota_lvb);
1614                         if (unlikely(lvb == NULL)) {
1615                                 LDLM_ERROR(lock, "no LVB");
1616                                 RETURN(-EPROTO);
1617                         }
1618
1619                         memcpy(data, lvb, size);
1620                 } else {
1621                         LDLM_ERROR(lock, "Replied unexpected lquota LVB size %d",
1622                                    size);
1623                         RETURN(-EINVAL);
1624                 }
1625                 break;
1626         case LVB_T_LAYOUT:
1627                 if (size == 0)
1628                         break;
1629
1630                 if (loc == RCL_CLIENT)
1631                         lvb = req_capsule_client_get(pill, &RMF_DLM_LVB);
1632                 else
1633                         lvb = req_capsule_server_get(pill, &RMF_DLM_LVB);
1634                 if (unlikely(lvb == NULL)) {
1635                         LDLM_ERROR(lock, "no LVB");
1636                         RETURN(-EPROTO);
1637                 }
1638
1639                 memcpy(data, lvb, size);
1640                 break;
1641         default:
1642                 LDLM_ERROR(lock, "Unknown LVB type: %d", lock->l_lvb_type);
1643                 libcfs_debug_dumpstack(NULL);
1644                 RETURN(-EINVAL);
1645         }
1646
1647         RETURN(0);
1648 }
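
/*
 * A client-side sketch (illustrative only, compiled out; "lock" and "req"
 * are assumed in scope): copy the ost_lvb carried in a server reply into a
 * local buffer.
 */
#if 0
	struct ost_lvb lvb;
	int rc;

	rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
			   &lvb, sizeof(lvb));
	if (rc != 0)
		/* the reply carried no LVB, or one of unexpected size */
		RETURN(rc);
#endif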
1649
1650 /**
1651  * Create and fill in a new LDLM lock with the specified properties.
1652  * Returns a referenced lock.
1653  */
1654 struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
1655                                    const struct ldlm_res_id *res_id,
1656                                    enum ldlm_type type,
1657                                    enum ldlm_mode mode,
1658                                    const struct ldlm_callback_suite *cbs,
1659                                    void *data, __u32 lvb_len,
1660                                    enum lvb_type lvb_type)
1661 {
1662         struct ldlm_lock        *lock;
1663         struct ldlm_resource    *res;
1664         int                     rc;
1665         ENTRY;
1666
1667         res = ldlm_resource_get(ns, NULL, res_id, type, 1);
1668         if (IS_ERR(res))
1669                 RETURN(ERR_CAST(res));
1670
1671         lock = ldlm_lock_new(res);
1672         if (lock == NULL)
1673                 RETURN(ERR_PTR(-ENOMEM));
1674
1675         lock->l_req_mode = mode;
1676         lock->l_ast_data = data;
1677         lock->l_pid = current_pid();
1678         if (ns_is_server(ns))
1679                 ldlm_set_ns_srv(lock);
1680         if (cbs) {
1681                 lock->l_blocking_ast = cbs->lcs_blocking;
1682                 lock->l_completion_ast = cbs->lcs_completion;
1683                 lock->l_glimpse_ast = cbs->lcs_glimpse;
1684         }
1685
1686         switch (type) {
1687         case LDLM_EXTENT:
1688                 rc = ldlm_extent_alloc_lock(lock);
1689                 break;
1690         case LDLM_IBITS:
1691                 rc = ldlm_inodebits_alloc_lock(lock);
1692                 break;
1693         default:
1694                 rc = 0;
1695         }
1696         if (rc)
1697                 GOTO(out, rc);
1698
1699         if (lvb_len) {
1700                 lock->l_lvb_len = lvb_len;
1701                 OBD_ALLOC_LARGE(lock->l_lvb_data, lvb_len);
1702                 if (lock->l_lvb_data == NULL)
1703                         GOTO(out, rc = -ENOMEM);
1704         }
1705
1706         lock->l_lvb_type = lvb_type;
1707         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_NEW_LOCK))
1708                 GOTO(out, rc = -ENOENT);
1709
1710         RETURN(lock);
1711
1712 out:
1713         ldlm_lock_destroy(lock);
1714         LDLM_LOCK_RELEASE(lock);
1715         RETURN(ERR_PTR(rc));
1716 }
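
/*
 * A minimal sketch (illustrative only, compiled out; "ns", "res_id" and the
 * blocking callback "my_blocking_ast" are assumptions of this example):
 * instantiate a referenced IBITS lock with a completion AST.  The lock is
 * only created here, not yet enqueued.
 */
#if 0
	const struct ldlm_callback_suite cbs = {
		.lcs_completion = ldlm_completion_ast,
		.lcs_blocking   = my_blocking_ast,
	};
	struct ldlm_lock *lock;

	lock = ldlm_lock_create(ns, res_id, LDLM_IBITS, LCK_PR, &cbs,
				NULL, 0, LVB_T_NONE);
	if (IS_ERR(lock))
		RETURN(PTR_ERR(lock));
#endif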
1717
1718 #ifdef HAVE_SERVER_SUPPORT
1719 static enum ldlm_error ldlm_lock_enqueue_helper(struct ldlm_lock *lock,
1720                                              __u64 *flags)
1721 {
1722         struct ldlm_resource *res = lock->l_resource;
1723         enum ldlm_error rc = ELDLM_OK;
1724         LIST_HEAD(rpc_list);
1725         ldlm_processing_policy policy;
1726
1727         ENTRY;
1728
1729         policy = ldlm_get_processing_policy(res);
1730 restart:
1731         policy(lock, flags, LDLM_PROCESS_ENQUEUE, &rc, &rpc_list);
1732         if (rc == ELDLM_OK && lock->l_granted_mode != lock->l_req_mode &&
1733             res->lr_type != LDLM_FLOCK) {
1734                 rc = ldlm_handle_conflict_lock(lock, flags, &rpc_list);
1735                 if (rc == -ERESTART)
1736                         GOTO(restart, rc);
1737         }
1738
1739         if (!list_empty(&rpc_list))
1740                 ldlm_discard_bl_list(&rpc_list);
1741
1742         RETURN(rc);
1743 }
1744 #endif
1745
1746 /**
1747  * Enqueue (request) a lock.
1748  *
1749  * Does not block. As a result of the enqueue, the lock is put on the
1750  * granted or waiting list.
1751  *
1752  * If the namespace has an intent policy set and the lock has the
1753  * LDLM_FL_HAS_INTENT flag set, skip all enqueueing and delegate lock
1754  * processing to the intent policy function.
1755  */
1756 enum ldlm_error ldlm_lock_enqueue(const struct lu_env *env,
1757                                   struct ldlm_namespace *ns,
1758                                   struct ldlm_lock **lockp,
1759                                   void *cookie, __u64 *flags)
1760 {
1761         struct ldlm_lock *lock = *lockp;
1762         struct ldlm_resource *res = lock->l_resource;
1763         int local = ns_is_client(ldlm_res_to_ns(res));
1764         enum ldlm_error rc = ELDLM_OK;
1765         struct ldlm_interval *node = NULL;
1766 #ifdef HAVE_SERVER_SUPPORT
1767         bool reconstruct = false;
1768 #endif
1769         ENTRY;
1770
1771         /* policies are not executed on the client or during replay */
1772         if ((*flags & (LDLM_FL_HAS_INTENT|LDLM_FL_REPLAY)) == LDLM_FL_HAS_INTENT
1773             && !local && ns->ns_policy) {
1774                 rc = ns->ns_policy(env, ns, lockp, cookie, lock->l_req_mode,
1775                                    *flags, NULL);
1776                 if (rc == ELDLM_LOCK_REPLACED) {
1777                         /* The lock that was returned has already been granted,
1778                          * and placed into lockp.  If it's not the same as the
1779                          * one we passed in, then destroy the old one and our
1780                          * work here is done. */
1781                         if (lock != *lockp) {
1782                                 ldlm_lock_destroy(lock);
1783                                 LDLM_LOCK_RELEASE(lock);
1784                         }
1785                         *flags |= LDLM_FL_LOCK_CHANGED;
1786                         RETURN(0);
1787                 } else if (rc != ELDLM_OK &&
1788                            ldlm_is_granted(lock)) {
1789                         LASSERT(*flags & LDLM_FL_RESENT);
1790                         /* ns_policy may return an error in the resend
1791                          * case: the object may be unlinked, or some other
1792                          * error may occur. It is unclear whether the lock
1793                          * reached the client in the original reply, so
1794                          * leave the lock on the server rather than return
1795                          * it again. Due to LU-6529, the server will not OOM. */
1796                         RETURN(rc);
1797                 } else if (rc != ELDLM_OK ||
1798                            (rc == ELDLM_OK && (*flags & LDLM_FL_INTENT_ONLY))) {
1799                         ldlm_lock_destroy(lock);
1800                         RETURN(rc);
1801                 }
1802         }
1803
1804         if (*flags & LDLM_FL_RESENT) {
1805                 /* Reconstruct LDLM_FL_SRV_ENQ_MASK @flags for reply.
1806                  * Set LOCK_CHANGED always.
1807                  * Check if the lock is granted for BLOCK_GRANTED.
1808                  * Take NO_TIMEOUT from the lock as it is inherited through
1809                  * LDLM_FL_INHERIT_MASK */
1810                 *flags |= LDLM_FL_LOCK_CHANGED;
1811                 if (!ldlm_is_granted(lock))
1812                         *flags |= LDLM_FL_BLOCK_GRANTED;
1813                 *flags |= lock->l_flags & LDLM_FL_NO_TIMEOUT;
1814                 RETURN(ELDLM_OK);
1815         }
1816
1817         /* A replaying lock might already be on the granted list, so
1818          * unlinking it would free its interval node. Allocate the
1819          * interval node early, otherwise we cannot regrant this lock
1820          * in the future. - jay */
1821         if (!local && (*flags & LDLM_FL_REPLAY) && res->lr_type == LDLM_EXTENT)
1822                 OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
1823
1824 #ifdef HAVE_SERVER_SUPPORT
1825         reconstruct = !local && res->lr_type == LDLM_FLOCK &&
1826                       !(*flags & LDLM_FL_TEST_LOCK);
1827         if (reconstruct) {
1828                 rc = req_can_reconstruct(cookie, NULL);
1829                 if (rc != 0) {
1830                         if (rc == 1)
1831                                 rc = 0;
1832                         RETURN(rc);
1833                 }
1834         }
1835 #endif
1836
1837         lock_res_and_lock(lock);
1838         if (local && ldlm_is_granted(lock)) {
1839                 /* The server returned a blocked lock, but it was granted
1840                  * before we got a chance to actually enqueue it.  We don't
1841                  * need to do anything else. */
1842                 *flags &= ~LDLM_FL_BLOCKED_MASK;
1843                 GOTO(out, rc = ELDLM_OK);
1844         }
1845
1846         ldlm_resource_unlink_lock(lock);
1847         if (res->lr_type == LDLM_EXTENT && lock->l_tree_node == NULL) {
1848                 if (node == NULL) {
1849                         ldlm_lock_destroy_nolock(lock);
1850                         GOTO(out, rc = -ENOMEM);
1851                 }
1852
1853                 INIT_LIST_HEAD(&node->li_group);
1854                 ldlm_interval_attach(node, lock);
1855                 node = NULL;
1856         }
1857
1858         /* Some flags from the enqueue want to make it into the AST, via the
1859          * lock's l_flags. */
1860         if (*flags & LDLM_FL_AST_DISCARD_DATA)
1861                 ldlm_set_ast_discard_data(lock);
1862         if (*flags & LDLM_FL_TEST_LOCK)
1863                 ldlm_set_test_lock(lock);
1864         if (*flags & LDLM_FL_COS_INCOMPAT)
1865                 ldlm_set_cos_incompat(lock);
1866         if (*flags & LDLM_FL_COS_ENABLED)
1867                 ldlm_set_cos_enabled(lock);
1868
1869         /* This distinction between local lock trees is very important; a client
1870          * namespace only has information about locks taken by that client, and
1871          * thus doesn't have enough information to decide for itself if it can
1872          * be granted (below).  In this case, we do exactly what the server
1873          * tells us to do, as dictated by the 'flags'.
1874          *
1875          * We do exactly the same thing during recovery, when the server is
1876          * more or less trusting the clients not to lie.
1877          *
1878          * FIXME (bug 268): Detect obvious lies by checking compatibility in
1879          * granted queue. */
1880         if (local) {
1881                 if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED))
1882                         ldlm_resource_add_lock(res, &res->lr_waiting, lock);
1883                 else
1884                         ldlm_grant_lock(lock, NULL);
1885                 GOTO(out, rc = ELDLM_OK);
1886 #ifdef HAVE_SERVER_SUPPORT
1887         } else if (*flags & LDLM_FL_REPLAY) {
1888                 if (*flags & LDLM_FL_BLOCK_WAIT) {
1889                         ldlm_resource_add_lock(res, &res->lr_waiting, lock);
1890                         GOTO(out, rc = ELDLM_OK);
1891                 } else if (*flags & LDLM_FL_BLOCK_GRANTED) {
1892                         ldlm_grant_lock(lock, NULL);
1893                         GOTO(out, rc = ELDLM_OK);
1894                 }
1895                 /* If no flags, fall through to normal enqueue path. */
1896         }
1897
1898         rc = ldlm_lock_enqueue_helper(lock, flags);
1899         GOTO(out, rc);
1900 #else
1901         } else {
1902                 CERROR("This is client-side-only module, cannot handle "
1903                        "LDLM_NAMESPACE_SERVER resource type lock.\n");
1904                 LBUG();
1905         }
1906 #endif
1907
1908 out:
1909         unlock_res_and_lock(lock);
1910
1911 #ifdef HAVE_SERVER_SUPPORT
1912         if (reconstruct) {
1913                 struct ptlrpc_request *req = cookie;
1914
1915                 tgt_mk_reply_data(NULL, NULL,
1916                                   &req->rq_export->exp_target_data,
1917                                   req, 0, NULL, false, 0);
1918         }
1919 #endif
1920         if (node)
1921                 OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
1922         return rc;
1923 }
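
/*
 * A minimal sketch (illustrative only, compiled out; "env", "ns" and "lock"
 * are assumed in scope): enqueue a freshly created lock.
 */
#if 0
	__u64 flags = 0;
	enum ldlm_error err;

	err = ldlm_lock_enqueue(env, ns, &lock, NULL, &flags);
	if (err != ELDLM_OK)
		RETURN(err);
	/* if flags now carry LDLM_FL_BLOCK_GRANTED, the lock is on the
	 * waiting list and will be granted via the completion AST */
#endif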
1924
1925 #ifdef HAVE_SERVER_SUPPORT
1926 /**
1927  * Iterate through all waiting locks on a given resource queue and attempt to
1928  * grant them.
1929  *
1930  * Must be called with resource lock held.
1931  */
1932 int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue,
1933                          struct list_head *work_list,
1934                          enum ldlm_process_intention intention,
1935                          struct ldlm_lock *hint)
1936 {
1937         struct list_head *tmp, *pos;
1938         ldlm_processing_policy policy;
1939         __u64 flags;
1940         int rc = LDLM_ITER_CONTINUE;
1941         enum ldlm_error err;
1942         LIST_HEAD(bl_ast_list);
1943
1944         ENTRY;
1945
1946         check_res_locked(res);
1947
1948         policy = ldlm_get_processing_policy(res);
1949         LASSERT(policy);
1950         LASSERT(intention == LDLM_PROCESS_RESCAN ||
1951                 intention == LDLM_PROCESS_RECOVERY);
1952
1953 restart:
1954         list_for_each_safe(tmp, pos, queue) {
1955                 struct ldlm_lock *pending;
1956                 LIST_HEAD(rpc_list);
1957
1958                 pending = list_entry(tmp, struct ldlm_lock, l_res_link);
1959
1960                 CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);
1961
1962                 flags = 0;
1963                 rc = policy(pending, &flags, intention, &err, &rpc_list);
1964                 if (pending->l_granted_mode == pending->l_req_mode ||
1965                     res->lr_type == LDLM_FLOCK) {
1966                         list_splice(&rpc_list, work_list);
1967                 } else {
1968                         list_splice(&rpc_list, &bl_ast_list);
1969                 }
1970                 /*
1971                  * When this is called after recovery is done, we always want
1972                  * to scan the whole list no matter what 'rc' is returned.
1973                  */
1974                 if (rc != LDLM_ITER_CONTINUE &&
1975                     intention == LDLM_PROCESS_RESCAN)
1976                         break;
1977         }
1978
1979         if (!list_empty(&bl_ast_list)) {
1980                 unlock_res(res);
1981
1982                 rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &bl_ast_list,
1983                                        LDLM_WORK_BL_AST);
1984
1985                 lock_res(res);
1986                 if (rc == -ERESTART)
1987                         GOTO(restart, rc);
1988         }
1989
1990         if (!list_empty(&bl_ast_list))
1991                 ldlm_discard_bl_list(&bl_ast_list);
1992
1993         RETURN(intention == LDLM_PROCESS_RESCAN ? rc : LDLM_ITER_CONTINUE);
1994 }
1995
1996 /**
1997  * Conflicting locks were detected for a lock to be enqueued; add the lock
1998  * to the waiting list and send blocking ASTs to the conflicting locks.
1999  *
2000  * \param[in] lock              The lock to be enqueued.
2001  * \param[out] flags            Lock flags for the lock to be enqueued.
2002  * \param[in] rpc_list          Conflicting locks list.
2003  *
2004  * \retval -ERESTART:   Some lock was instantly canceled while sending
2005  *                      blocking ASTs; the caller needs to re-check
2006  *                      conflicting locks.
2007  * \retval -EAGAIN:     Lock was destroyed; the caller should return the error.
2008  * \retval 0:           Lock was successfully added to the waiting list.
2009  */
2010 int ldlm_handle_conflict_lock(struct ldlm_lock *lock, __u64 *flags,
2011                               struct list_head *rpc_list)
2012 {
2013         struct ldlm_resource *res = lock->l_resource;
2014         int rc;
2015         ENTRY;
2016
2017         check_res_locked(res);
2018
2019         /* If either of the compat_queue()s returned failure, then we
2020          * have ASTs to send and must go onto the waiting list.
2021          *
2022          * bug 2322: we used to unlink and re-add here, which was a
2023          * terrible folly -- if we goto restart, we could get
2024          * re-ordered!  Causes deadlock, because ASTs aren't sent! */
2025         if (list_empty(&lock->l_res_link))
2026                 ldlm_resource_add_lock(res, &res->lr_waiting, lock);
2027         unlock_res(res);
2028
2029         rc = ldlm_run_ast_work(ldlm_res_to_ns(res), rpc_list,
2030                                LDLM_WORK_BL_AST);
2031
2032         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_OST_FAIL_RACE) &&
2033             !ns_is_client(ldlm_res_to_ns(res)))
2034                 class_fail_export(lock->l_export);
2035
2036         lock_res(res);
2037         if (rc == -ERESTART) {
2038                 /* 15715: The lock was granted and destroyed after
2039                  * resource lock was dropped. Interval node was freed
2040                  * in ldlm_lock_destroy. Anyway, this always happens
2041                  * when a client is being evicted. So it would be
2042                  * ok to return an error. -jay */
2043                 if (ldlm_is_destroyed(lock))
2044                         RETURN(-EAGAIN);
2045
2046                 /* lock was granted while resource was unlocked. */
2047                 if (ldlm_is_granted(lock)) {
2048                         /* bug 11300: if the lock has been granted,
2049                          * break earlier because otherwise, we will go
2050                          * to restart and ldlm_resource_unlink will be
2051                          * called and it causes the interval node to be
2052                          * freed. Then we will fail at
2053                          * ldlm_extent_add_lock() */
2054                         *flags &= ~LDLM_FL_BLOCKED_MASK;
2055                         RETURN(0);
2056                 }
2057
2058                 RETURN(rc);
2059         }
2060         *flags |= LDLM_FL_BLOCK_GRANTED;
2061
2062         RETURN(0);
2063 }
2064
2065 /**
2066  * Discard all AST work items from list.
2067  *
2068  * If for whatever reason we do not want to send ASTs to conflicting locks
2069  * anymore, disassemble the list with this function.
2070  */
2071 void ldlm_discard_bl_list(struct list_head *bl_list)
2072 {
2073         struct ldlm_lock *lock, *tmp;
2074
2075         ENTRY;
2076
2077         list_for_each_entry_safe(lock, tmp, bl_list, l_bl_ast) {
2078                 LASSERT(!list_empty(&lock->l_bl_ast));
2079                 list_del_init(&lock->l_bl_ast);
2080                 ldlm_clear_ast_sent(lock);
2081                 LASSERT(lock->l_bl_ast_run == 0);
2082                 ldlm_clear_blocking_lock(lock);
2083                 LDLM_LOCK_RELEASE(lock);
2084         }
2085         EXIT;
2086 }
2087
2088 /**
2089  * Process a call to blocking AST callback for a lock in ast_work list
2090  */
2091 static int
2092 ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
2093 {
2094         struct ldlm_cb_set_arg *arg = opaq;
2095         struct ldlm_lock *lock;
2096         struct ldlm_lock_desc d;
2097         struct ldlm_bl_desc bld;
2098         int rc;
2099
2100         ENTRY;
2101
2102         if (list_empty(arg->list))
2103                 RETURN(-ENOENT);
2104
2105         lock = list_entry(arg->list->next, struct ldlm_lock, l_bl_ast);
2106
2107         /* Nobody should touch l_bl_ast, but some locks in the list may
2108          * become granted after a lock convert or COS downgrade; such
2109          * locks should simply be skipped here and removed from the list.
2110          */
2111         lock_res_and_lock(lock);
2112         list_del_init(&lock->l_bl_ast);
2113
2114         /* The lock is no longer a blocking lock, but it was kept on the
2115          * list because it can be managed only here.
2116          */
2117         if (!ldlm_is_ast_sent(lock)) {
2118                 unlock_res_and_lock(lock);
2119                 LDLM_LOCK_RELEASE(lock);
2120                 RETURN(0);
2121         }
2122
2123         LASSERT(lock->l_blocking_lock);
2124         ldlm_lock2desc(lock->l_blocking_lock, &d);
2125         /* Copy the blocking lock's ibits into cancel_bits as well;
2126          * a new client may use them for lock convert, and it is
2127          * important to use the new field so that locks are converted
2128          * from new servers only.
2129          */
2130         d.l_policy_data.l_inodebits.cancel_bits =
2131                 lock->l_blocking_lock->l_policy_data.l_inodebits.bits;
2132
2133         /* The blocking lock is being destroyed here, but some information
2134          * about it may be needed inside the l_blocking_ast() call below,
2135          * e.g. in mdt_blocking_ast(). So save the needed data in bl_desc.
2136          */
2137         bld.bl_same_client = lock->l_client_cookie ==
2138                              lock->l_blocking_lock->l_client_cookie;
2139         bld.bl_cos_incompat = ldlm_is_cos_incompat(lock->l_blocking_lock);
2140         arg->bl_desc = &bld;
2141
2142         LASSERT(ldlm_is_ast_sent(lock));
2143         LASSERT(lock->l_bl_ast_run == 0);
2144         lock->l_bl_ast_run++;
2145         ldlm_clear_blocking_lock(lock);
2146         unlock_res_and_lock(lock);
2147
2148         rc = lock->l_blocking_ast(lock, &d, (void *)arg, LDLM_CB_BLOCKING);
2149
2150         LDLM_LOCK_RELEASE(lock);
2151
2152         RETURN(rc);
2153 }
2154
2155 /**
2156  * Process a call to revocation AST callback for a lock in ast_work list
2157  */
2158 static int
2159 ldlm_work_revoke_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
2160 {
2161         struct ldlm_cb_set_arg *arg = opaq;
2162         struct ldlm_lock_desc   desc;
2163         int                     rc;
2164         struct ldlm_lock       *lock;
2165         ENTRY;
2166
2167         if (list_empty(arg->list))
2168                 RETURN(-ENOENT);
2169
2170         lock = list_entry(arg->list->next, struct ldlm_lock, l_rk_ast);
2171         list_del_init(&lock->l_rk_ast);
2172
2173         /* the desc just pretends to be exclusive */
2174         ldlm_lock2desc(lock, &desc);
2175         desc.l_req_mode = LCK_EX;
2176         desc.l_granted_mode = 0;
2177
2178         rc = lock->l_blocking_ast(lock, &desc, (void*)arg, LDLM_CB_BLOCKING);
2179         LDLM_LOCK_RELEASE(lock);
2180
2181         RETURN(rc);
2182 }
2183
2184 /**
2185  * Process a call to glimpse AST callback for a lock in ast_work list
2186  */
2187 int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
2188 {
2189         struct ldlm_cb_set_arg          *arg = opaq;
2190         struct ldlm_glimpse_work        *gl_work;
2191         struct ldlm_lock                *lock;
2192         int                              rc = 0;
2193         ENTRY;
2194
2195         if (list_empty(arg->list))
2196                 RETURN(-ENOENT);
2197
2198         gl_work = list_entry(arg->list->next, struct ldlm_glimpse_work,
2199                                  gl_list);
2200         list_del_init(&gl_work->gl_list);
2201
2202         lock = gl_work->gl_lock;
2203
2204         /* transfer the glimpse descriptor to ldlm_cb_set_arg */
2205         arg->gl_desc = gl_work->gl_desc;
2206         arg->gl_interpret_reply = gl_work->gl_interpret_reply;
2207         arg->gl_interpret_data = gl_work->gl_interpret_data;
2208
2209         /* invoke the actual glimpse callback */
2210         if (lock->l_glimpse_ast(lock, (void*)arg) == 0)
2211                 rc = 1;
2212
2213         LDLM_LOCK_RELEASE(lock);
2214         if (gl_work->gl_flags & LDLM_GL_WORK_SLAB_ALLOCATED)
2215                 OBD_SLAB_FREE_PTR(gl_work, ldlm_glimpse_work_kmem);
2216         else
2217                 OBD_FREE_PTR(gl_work);
2218
2219         RETURN(rc);
2220 }
2221 #endif
2222
2223 /**
2224  * Process a call to completion AST callback for a lock in ast_work list
2225  */
2226 static int
2227 ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
2228 {
2229         struct ldlm_cb_set_arg *arg = opaq;
2230         struct ldlm_lock *lock;
2231         ldlm_completion_callback completion_callback;
2232         int rc = 0;
2233
2234         ENTRY;
2235
2236         if (list_empty(arg->list))
2237                 RETURN(-ENOENT);
2238
2239         lock = list_entry(arg->list->next, struct ldlm_lock, l_cp_ast);
2240
2241         /* It's possible to receive a completion AST before we've set
2242          * the l_completion_ast pointer: either because the AST arrived
2243          * before the reply, or simply because there's a small race
2244          * window between receiving the reply and finishing the local
2245          * enqueue. (bug 842)
2246          *
2247          * This can't happen with the blocking_ast, however, because we
2248          * will never call the local blocking_ast until we drop our
2249          * reader/writer reference, which we won't do until we get the
2250          * reply and finish enqueueing. */
2251
2252         /* nobody should touch l_cp_ast */
2253         lock_res_and_lock(lock);
2254         list_del_init(&lock->l_cp_ast);
2255         LASSERT(ldlm_is_cp_reqd(lock));
2256         /* save l_completion_ast since it can be changed by
2257          * mds_intent_policy(), see bug 14225 */
2258         completion_callback = lock->l_completion_ast;
2259         ldlm_clear_cp_reqd(lock);
2260         unlock_res_and_lock(lock);
2261
2262         if (completion_callback != NULL)
2263                 rc = completion_callback(lock, 0, (void *)arg);
2264         LDLM_LOCK_RELEASE(lock);
2265
2266         RETURN(rc);
2267 }
2268
2269 /**
2270  * Process a list of locks for which ASTs need to be sent.
2271  *
2272  * Used on the server to send multiple ASTs together instead of sending
2273  * them one by one.
2274  */
2275 int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
2276                       ldlm_desc_ast_t ast_type)
2277 {
2278         struct ldlm_cb_set_arg *arg;
2279         set_producer_func work_ast_lock;
2280         int rc;
2281
2282         if (list_empty(rpc_list))
2283                 RETURN(0);
2284
2285         OBD_ALLOC_PTR(arg);
2286         if (arg == NULL)
2287                 RETURN(-ENOMEM);
2288
2289         atomic_set(&arg->restart, 0);
2290         arg->list = rpc_list;
2291
2292         switch (ast_type) {
2293         case LDLM_WORK_CP_AST:
2294                 arg->type = LDLM_CP_CALLBACK;
2295                 work_ast_lock = ldlm_work_cp_ast_lock;
2296                 break;
2297 #ifdef HAVE_SERVER_SUPPORT
2298         case LDLM_WORK_BL_AST:
2299                 arg->type = LDLM_BL_CALLBACK;
2300                 work_ast_lock = ldlm_work_bl_ast_lock;
2301                 break;
2302         case LDLM_WORK_REVOKE_AST:
2303                 arg->type = LDLM_BL_CALLBACK;
2304                 work_ast_lock = ldlm_work_revoke_ast_lock;
2305                 break;
2306         case LDLM_WORK_GL_AST:
2307                 arg->type = LDLM_GL_CALLBACK;
2308                 work_ast_lock = ldlm_work_gl_ast_lock;
2309                 break;
2310 #endif
2311         default:
2312                 LBUG();
2313         }
2314
2315         /* We create a ptlrpc request set with a flow control extension.
2316          * This request set will use the work_ast_lock function to produce new
2317          * requests and will send a new request each time one completes, in
2318          * order to cap the number of requests in flight at ns_max_parallel_ast */
2319         arg->set = ptlrpc_prep_fcset(ns->ns_max_parallel_ast ? : UINT_MAX,
2320                                      work_ast_lock, arg);
2321         if (arg->set == NULL)
2322                 GOTO(out, rc = -ENOMEM);
2323
2324         ptlrpc_set_wait(NULL, arg->set);
2325         ptlrpc_set_destroy(arg->set);
2326
2327         rc = atomic_read(&arg->restart) ? -ERESTART : 0;
2328         GOTO(out, rc);
2329 out:
2330         OBD_FREE_PTR(arg);
2331         return rc;
2332 }
2333
2334 /**
2335  * Try to grant all waiting locks on a resource.
2336  *
2337  * Calls ldlm_reprocess_queue on waiting queue.
2338  *
2339  * Typically called after some resource locks are cancelled to see
2340  * if anything could be granted as a result of the cancellation.
2341  */
2342 static void __ldlm_reprocess_all(struct ldlm_resource *res,
2343                                  enum ldlm_process_intention intention,
2344                                  struct ldlm_lock *hint)
2345 {
2346         struct list_head rpc_list;
2347 #ifdef HAVE_SERVER_SUPPORT
2348         ldlm_reprocessing_policy reprocess;
2349         struct obd_device *obd;
2350         int rc;
2351
2352         ENTRY;
2353
2354         INIT_LIST_HEAD(&rpc_list);
2355         /* Local lock trees don't get reprocessed. */
2356         if (ns_is_client(ldlm_res_to_ns(res))) {
2357                 EXIT;
2358                 return;
2359         }
2360
2361         /* Disable reprocess during lock replay stage but allow during
2362          * request replay stage.
2363          */
2364         obd = ldlm_res_to_ns(res)->ns_obd;
2365         if (obd->obd_recovering &&
2366             atomic_read(&obd->obd_req_replay_clients) == 0)
2367                 RETURN_EXIT;
2368 restart:
2369         lock_res(res);
2370         reprocess = ldlm_get_reprocessing_policy(res);
2371         reprocess(res, &res->lr_waiting, &rpc_list, intention, hint);
2372         unlock_res(res);
2373
2374         rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
2375                                LDLM_WORK_CP_AST);
2376         if (rc == -ERESTART) {
2377                 LASSERT(list_empty(&rpc_list));
2378                 goto restart;
2379         }
2380 #else
2381         ENTRY;
2382
2383         INIT_LIST_HEAD(&rpc_list);
2384         if (!ns_is_client(ldlm_res_to_ns(res))) {
2385                 CERROR("This is client-side-only module, cannot handle "
2386                        "LDLM_NAMESPACE_SERVER resource type lock.\n");
2387                 LBUG();
2388         }
2389 #endif
2390         EXIT;
2391 }
2392
2393 void ldlm_reprocess_all(struct ldlm_resource *res, struct ldlm_lock *hint)
2394 {
2395         __ldlm_reprocess_all(res, LDLM_PROCESS_RESCAN, hint);
2396 }
2397 EXPORT_SYMBOL(ldlm_reprocess_all);
2398
2399 static int ldlm_reprocess_res(struct cfs_hash *hs, struct cfs_hash_bd *bd,
2400                               struct hlist_node *hnode, void *arg)
2401 {
2402         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
2403
2404         /* This is only called once after recovery is done. LU-8306. */
2405         __ldlm_reprocess_all(res, LDLM_PROCESS_RECOVERY, NULL);
2406         return 0;
2407 }
2408
2409 /**
2410  * Iterate through all resources on a namespace attempting to grant waiting
2411  * locks.
2412  */
2413 void ldlm_reprocess_recovery_done(struct ldlm_namespace *ns)
2414 {
2415         ENTRY;
2416
2417         if (ns != NULL) {
2418                 cfs_hash_for_each_nolock(ns->ns_rs_hash,
2419                                          ldlm_reprocess_res, NULL, 0);
2420         }
2421         EXIT;
2422 }
2423
2424 /**
2425  * Helper function to call blocking AST for LDLM lock \a lock in a
2426  * "cancelling" mode.
2427  */
2428 void ldlm_cancel_callback(struct ldlm_lock *lock)
2429 {
2430         check_res_locked(lock->l_resource);
2431         if (!ldlm_is_cancel(lock)) {
2432                 ldlm_set_cancel(lock);
2433                 if (lock->l_blocking_ast) {
2434                         unlock_res_and_lock(lock);
2435                         lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
2436                                              LDLM_CB_CANCELING);
2437                         lock_res_and_lock(lock);
2438                 } else {
2439                         LDLM_DEBUG(lock, "no blocking ast");
2440                 }
2441
2442                 /* only canceller can set bl_done bit */
2443                 ldlm_set_bl_done(lock);
2444                 wake_up_all(&lock->l_waitq);
2445         } else if (!ldlm_is_bl_done(lock)) {
2446                 /* The lock is guaranteed to have been canceled by the
2447                  * time this function returns. */
2448                 unlock_res_and_lock(lock);
2449                 wait_event_idle(lock->l_waitq, is_bl_done(lock));
2450                 lock_res_and_lock(lock);
2451         }
2452 }
2453
2454 /**
2455  * Remove the skiplist-enabled LDLM lock \a req from the granted list.
2456  */
2457 void ldlm_unlink_lock_skiplist(struct ldlm_lock *req)
2458 {
2459         if (req->l_resource->lr_type != LDLM_PLAIN &&
2460             req->l_resource->lr_type != LDLM_IBITS)
2461                 return;
2462
2463         list_del_init(&req->l_sl_policy);
2464         list_del_init(&req->l_sl_mode);
2465 }
2466
2467 /**
2468  * Attempts to cancel LDLM lock \a lock that has no reader/writer references.
2469  */
2470 void ldlm_lock_cancel(struct ldlm_lock *lock)
2471 {
2472         struct ldlm_resource *res;
2473         struct ldlm_namespace *ns;
2474         ENTRY;
2475
2476         lock_res_and_lock(lock);
2477
2478         res = lock->l_resource;
2479         ns  = ldlm_res_to_ns(res);
2480
2481         /* Please do not, no matter how tempting, remove this LBUG without
2482          * talking to me first. -phik */
2483         if (lock->l_readers || lock->l_writers) {
2484                 LDLM_ERROR(lock, "lock still has references");
2485                 unlock_res_and_lock(lock);
2486                 LBUG();
2487         }
2488
2489         if (ldlm_is_waited(lock))
2490                 ldlm_del_waiting_lock(lock);
2491
2492         /* Invokes the cancel callback. */
2493         ldlm_cancel_callback(lock);
2494
2495         /* Yes, second time, just in case it was added again while we were
2496          * running with no res lock in ldlm_cancel_callback */
2497         if (ldlm_is_waited(lock))
2498                 ldlm_del_waiting_lock(lock);
2499
2500         ldlm_resource_unlink_lock(lock);
2501         ldlm_lock_destroy_nolock(lock);
2502
2503         if (ldlm_is_granted(lock))
2504                 ldlm_pool_del(&ns->ns_pool, lock);
2505
2506         /* Make sure we will not be called again for the same lock, which
2507          * is possible unless lock->l_granted_mode is zeroed out */
2508         lock->l_granted_mode = LCK_MINMODE;
2509         unlock_res_and_lock(lock);
2510
2511         EXIT;
2512 }
2513 EXPORT_SYMBOL(ldlm_lock_cancel);
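
/*
 * A minimal sketch (illustrative only, compiled out; "lockh" is assumed in
 * scope): cancel a lock located through a handle once all reader/writer
 * references have been dropped.
 */
#if 0
	struct ldlm_lock *lock = ldlm_handle2lock(lockh);

	if (lock != NULL) {
		ldlm_lock_cancel(lock);
		LDLM_LOCK_PUT(lock);
	}
#endif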
2514
2515 /**
2516  * Set opaque data in the lock that only makes sense to the upper layer.
2517  */
2518 int ldlm_lock_set_data(const struct lustre_handle *lockh, void *data)
2519 {
2520         struct ldlm_lock *lock = ldlm_handle2lock(lockh);
2521         int rc = -EINVAL;
2522         ENTRY;
2523
2524         if (lock) {
2525                 if (lock->l_ast_data == NULL)
2526                         lock->l_ast_data = data;
2527                 if (lock->l_ast_data == data)
2528                         rc = 0;
2529                 LDLM_LOCK_PUT(lock);
2530         }
2531         RETURN(rc);
2532 }
2533 EXPORT_SYMBOL(ldlm_lock_set_data);
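
/*
 * A minimal sketch (illustrative only, compiled out; "lockh" and "inode"
 * are assumed in scope): attach opaque data to a lock.  A repeated call
 * with the same pointer succeeds; a different pointer yields -EINVAL.
 */
#if 0
	if (ldlm_lock_set_data(lockh, inode) != 0)
		CERROR("lock already carries different ast_data\n");
#endif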
2534
2535 struct export_cl_data {
2536         const struct lu_env     *ecl_env;
2537         struct obd_export       *ecl_exp;
2538         int                     ecl_loop;
2539 };
2540
2541 static void ldlm_cancel_lock_for_export(struct obd_export *exp,
2542                                         struct ldlm_lock *lock,
2543                                         struct export_cl_data *ecl)
2544 {
2545         struct ldlm_resource *res;
2546
2547         res = ldlm_resource_getref(lock->l_resource);
2548
2549         ldlm_lvbo_update(res, lock, NULL, 1);
2550         ldlm_lock_cancel(lock);
2551         if (!exp->exp_obd->obd_stopping)
2552                 ldlm_reprocess_all(res, lock);
2553         ldlm_resource_putref(res);
2554
2555         ecl->ecl_loop++;
2556         if ((ecl->ecl_loop & -ecl->ecl_loop) == ecl->ecl_loop) {
2557                 CDEBUG(D_INFO, "Export %p, %d locks cancelled.\n",
2558                        exp, ecl->ecl_loop);
2559         }
2560 }
2561
2562 /**
2563  * Iterator function for ldlm_export_cancel_locks.
2564  * Cancels passed locks.
2565  */
2566 static int
2567 ldlm_cancel_locks_for_export_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
2568                                 struct hlist_node *hnode, void *data)
2569
2570 {
2571         struct export_cl_data   *ecl = (struct export_cl_data *)data;
2572         struct obd_export       *exp  = ecl->ecl_exp;
2573         struct ldlm_lock        *lock = cfs_hash_object(hs, hnode);
2574
2575         LDLM_LOCK_GET(lock);
2576         ldlm_cancel_lock_for_export(exp, lock, ecl);
2577         LDLM_LOCK_RELEASE(lock);
2578
2579         return 0;
2580 }
2581
2582 /**
2583  * Cancel all blocked locks for a given export.
2584  *
2585  * Typically called on client disconnection/eviction.
2586  */
2587 int ldlm_export_cancel_blocked_locks(struct obd_export *exp)
2588 {
2589         struct lu_env env;
2590         struct export_cl_data   ecl = {
2591                 .ecl_exp        = exp,
2592                 .ecl_loop       = 0,
2593         };
2594         int rc;
2595
2596         rc = lu_env_init(&env, LCT_DT_THREAD);
2597         if (rc)
2598                 RETURN(rc);
2599         ecl.ecl_env = &env;
2600
2601         while (!list_empty(&exp->exp_bl_list)) {
2602                 struct ldlm_lock *lock;
2603
2604                 spin_lock_bh(&exp->exp_bl_list_lock);
2605                 if (!list_empty(&exp->exp_bl_list)) {
2606                         lock = list_entry(exp->exp_bl_list.next,
2607                                           struct ldlm_lock, l_exp_list);
2608                         LDLM_LOCK_GET(lock);
2609                         list_del_init(&lock->l_exp_list);
2610                 } else {
2611                         lock = NULL;
2612                 }
2613                 spin_unlock_bh(&exp->exp_bl_list_lock);
2614
2615                 if (lock == NULL)
2616                         break;
2617
2618                 ldlm_cancel_lock_for_export(exp, lock, &ecl);
2619                 LDLM_LOCK_RELEASE(lock);
2620         }
2621
2622         lu_env_fini(&env);
2623
2624         CDEBUG(D_DLMTRACE, "Export %p, canceled %d locks, "
2625                "left on hash table %d.\n", exp, ecl.ecl_loop,
2626                atomic_read(&exp->exp_lock_hash->hs_count));
2627
2628         return ecl.ecl_loop;
2629 }
2630
2631 /**
2632  * Cancel all locks for a given export.
2633  *
2634  * Typically called after client disconnection/eviction.
2635  */
2636 int ldlm_export_cancel_locks(struct obd_export *exp)
2637 {
2638         struct export_cl_data ecl;
2639         struct lu_env env;
2640         int rc;
2641
2642         rc = lu_env_init(&env, LCT_DT_THREAD);
2643         if (rc)
2644                 RETURN(rc);
2645         ecl.ecl_env = &env;
2646         ecl.ecl_exp = exp;
2647         ecl.ecl_loop = 0;
2648
2649         cfs_hash_for_each_empty(exp->exp_lock_hash,
2650                                 ldlm_cancel_locks_for_export_cb, &ecl);
2651
2652         CDEBUG(D_DLMTRACE, "Export %p, canceled %d locks, "
2653                "left on hash table %d.\n", exp, ecl.ecl_loop,
2654                atomic_read(&exp->exp_lock_hash->hs_count));
2655
2656         if (ecl.ecl_loop > 0 &&
2657             atomic_read(&exp->exp_lock_hash->hs_count) == 0 &&
2658             exp->exp_obd->obd_stopping)
2659                 ldlm_reprocess_recovery_done(exp->exp_obd->obd_namespace);
2660
2661         lu_env_fini(&env);
2662
2663         return ecl.ecl_loop;
2664 }
2665
2666 /**
2667  * Downgrade a PW/EX lock to COS or CR mode.
2668  *
2669  * A lock mode conversion from PW/EX mode to a less conflicting mode. The
2670  * conversion may fail if the lock was canceled before the downgrade, but
2671  * that doesn't indicate any problem, because such a lock has no readers or
2672  * writers and will be released soon.
2673  *
2674  * Used by the Commit on Sharing (COS) code to force commit of object changes
2675  * in case of conflict. The converted lock is considered a new lock, and all
2676  * blocking-AST state is cleared, so any pending or new lock blocked on it
2677  * will cause a new call to blocking_ast and force a resource object commit.
2678  *
2679  * Also used by layout change to replace an EX lock with a CR lock.
2680  *
2681  * \param lock A lock to convert
2682  * \param new_mode new lock mode
2683  */
2684 void ldlm_lock_mode_downgrade(struct ldlm_lock *lock, enum ldlm_mode new_mode)
2685 {
2686 #ifdef HAVE_SERVER_SUPPORT
2687         ENTRY;
2688
2689         LASSERT(new_mode == LCK_COS || new_mode == LCK_CR);
2690
2691         lock_res_and_lock(lock);
2692
2693         if (!(lock->l_granted_mode & (LCK_PW | LCK_EX))) {
2694                 unlock_res_and_lock(lock);
2695
2696                 LASSERT(lock->l_granted_mode == LCK_MINMODE);
2697                 LDLM_DEBUG(lock, "lock was canceled before downgrade");
2698                 RETURN_EXIT;
2699         }
2700
2701         ldlm_resource_unlink_lock(lock);
2702         /*
2703          * Remove the lock from pool as it will be added again in
2704          * ldlm_grant_lock() called below.
2705          */
2706         ldlm_pool_del(&ldlm_lock_to_ns(lock)->ns_pool, lock);
2707
2708         /* Consider downgraded lock as a new lock and clear all states
2709          * related to a previous blocking AST processing.
2710          */
2711         ldlm_clear_blocking_data(lock);
2712
2713         lock->l_req_mode = new_mode;
2714         ldlm_grant_lock(lock, NULL);
2715         unlock_res_and_lock(lock);
2716
2717         ldlm_reprocess_all(lock->l_resource, lock);
2718
2719         EXIT;
2720 #endif
2721 }
2722 EXPORT_SYMBOL(ldlm_lock_mode_downgrade);
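
/*
 * A minimal sketch (illustrative only, compiled out; "lock" is assumed to be
 * a granted PW/EX lock): COS code downgrading the lock so that any later
 * conflict triggers a fresh blocking AST.
 */
#if 0
	ldlm_lock_mode_downgrade(lock, LCK_COS);
#endif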
2723
2724 /**
2725  * Print a description of the lock with lock handle \a lockh into the debug log.
2726  *
2727  * Used when printing all locks on a resource for debug purposes.
2728  */
2729 void ldlm_lock_dump_handle(int level, const struct lustre_handle *lockh)
2730 {
2731         struct ldlm_lock *lock;
2732
2733         if (!((libcfs_debug | D_ERROR) & level))
2734                 return;
2735
2736         lock = ldlm_handle2lock(lockh);
2737         if (lock == NULL)
2738                 return;
2739
2740         LDLM_DEBUG_LIMIT(level, lock, "###");
2741
2742         LDLM_LOCK_PUT(lock);
2743 }
2744 EXPORT_SYMBOL(ldlm_lock_dump_handle);
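
/*
 * A minimal sketch (illustrative only, compiled out; "lockh" is assumed in
 * scope): dump one lock at DLM-trace verbosity while debugging.
 */
#if 0
	ldlm_lock_dump_handle(D_DLMTRACE, &lockh);
#endif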
2745
2746 /**
2747  * Print lock information with a custom message into the debug log.
2748  * Helper function.
2749  */
2750 void _ldlm_lock_debug(struct ldlm_lock *lock,
2751                       struct libcfs_debug_msg_data *msgdata,
2752                       const char *fmt, ...)
2753 {
2754         va_list args;
2755         struct obd_export *exp = lock->l_export;
2756         struct ldlm_resource *resource = NULL;
2757         struct va_format vaf;
2758         char *nid = "local";
2759
2760         /* on the server side, the resource of a lock doesn't change */
2761         if ((lock->l_flags & LDLM_FL_NS_SRV) != 0) {
2762                 if (lock->l_resource != NULL)
2763                         resource = ldlm_resource_getref(lock->l_resource);
2764         } else if (spin_trylock(&lock->l_lock)) {
2765                 if (lock->l_resource != NULL)
2766                         resource = ldlm_resource_getref(lock->l_resource);
2767                 spin_unlock(&lock->l_lock);
2768         }
2769
2770         va_start(args, fmt);
2771         vaf.fmt = fmt;
2772         vaf.va = &args;
2773
2774         if (exp && exp->exp_connection) {
2775                 nid = obd_export_nid2str(exp);
2776         } else if (exp && exp->exp_obd != NULL) {
2777                 struct obd_import *imp = exp->exp_obd->u.cli.cl_import;
2778                 nid = obd_import_nid2str(imp);
2779         }
2780
2781         if (resource == NULL) {
2782                 libcfs_debug_msg(msgdata,
2783                                  "%pV ns: \?\? lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: \?\? rrc=\?\? type: \?\?\? flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lld lvb_type: %d\n",
2784                                  &vaf,
2785                                  lock,
2786                                  lock->l_handle.h_cookie,
2787                                  refcount_read(&lock->l_handle.h_ref),
2788                                  lock->l_readers, lock->l_writers,
2789                                  ldlm_lockname[lock->l_granted_mode],
2790                                  ldlm_lockname[lock->l_req_mode],
2791                                  lock->l_flags, nid,
2792                                  lock->l_remote_handle.cookie,
2793                                  exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
2794                                  lock->l_pid, lock->l_callback_timeout,
2795                                  lock->l_lvb_type);
2796                 va_end(args);
2797                 return;
2798         }
2799
2800         switch (resource->lr_type) {
2801         case LDLM_EXTENT:
2802                 libcfs_debug_msg(msgdata,
2803                                  "%pV ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s [%llu->%llu] (req %llu->%llu) flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lld lvb_type: %d\n",
2804                                  &vaf,
2805                                  ldlm_lock_to_ns_name(lock), lock,
2806                                  lock->l_handle.h_cookie,
2807                                  refcount_read(&lock->l_handle.h_ref),
2808                                  lock->l_readers, lock->l_writers,
2809                                  ldlm_lockname[lock->l_granted_mode],
2810                                  ldlm_lockname[lock->l_req_mode],
2811                                  PLDLMRES(resource),
2812                                  atomic_read(&resource->lr_refcount),
2813                                  ldlm_typename[resource->lr_type],
2814                                  lock->l_policy_data.l_extent.start,
2815                                  lock->l_policy_data.l_extent.end,
2816                                  lock->l_req_extent.start, lock->l_req_extent.end,
2817                                  lock->l_flags, nid,
2818                                  lock->l_remote_handle.cookie,
2819                                  exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
2820                                  lock->l_pid, lock->l_callback_timeout,
2821                                  lock->l_lvb_type);
2822                 break;
2823
2824         case LDLM_FLOCK:
2825                 libcfs_debug_msg(msgdata,
2826                                  "%pV ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s pid: %d [%llu->%llu] flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lld\n",
2827                                  &vaf,
2828                                  ldlm_lock_to_ns_name(lock), lock,
2829                                  lock->l_handle.h_cookie,
2830                                  refcount_read(&lock->l_handle.h_ref),
2831                                  lock->l_readers, lock->l_writers,
2832                                  ldlm_lockname[lock->l_granted_mode],
2833                                  ldlm_lockname[lock->l_req_mode],
2834                                  PLDLMRES(resource),
2835                                  atomic_read(&resource->lr_refcount),
2836                                  ldlm_typename[resource->lr_type],
2837                                  lock->l_policy_data.l_flock.pid,
2838                                  lock->l_policy_data.l_flock.start,
2839                                  lock->l_policy_data.l_flock.end,
2840                                  lock->l_flags, nid,
2841                                  lock->l_remote_handle.cookie,
2842                                  exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
2843                                  lock->l_pid, lock->l_callback_timeout);
2844                 break;
2845
2846         case LDLM_IBITS:
2847                 libcfs_debug_msg(msgdata,
2848                                  "%pV ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " bits %#llx/%#llx rrc: %d type: %s flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lld lvb_type: %d\n",
2849                                  &vaf,
2850                                  ldlm_lock_to_ns_name(lock),
2851                                  lock, lock->l_handle.h_cookie,
2852                                  refcount_read(&lock->l_handle.h_ref),
2853                                  lock->l_readers, lock->l_writers,
2854                                  ldlm_lockname[lock->l_granted_mode],
2855                                  ldlm_lockname[lock->l_req_mode],
2856                                  PLDLMRES(resource),
2857                                  lock->l_policy_data.l_inodebits.bits,
2858                                  lock->l_policy_data.l_inodebits.try_bits,
2859                                  atomic_read(&resource->lr_refcount),
2860                                  ldlm_typename[resource->lr_type],
2861                                  lock->l_flags, nid,
2862                                  lock->l_remote_handle.cookie,
2863                                  exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
2864                                  lock->l_pid, lock->l_callback_timeout,
2865                                  lock->l_lvb_type);
2866                 break;
2867
2868         default:
2869                 libcfs_debug_msg(msgdata,
2870                                  "%pV ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lld lvb_type: %d\n",
2871                                  &vaf,
2872                                  ldlm_lock_to_ns_name(lock),
2873                                  lock, lock->l_handle.h_cookie,
2874                                  refcount_read(&lock->l_handle.h_ref),
2875                                  lock->l_readers, lock->l_writers,
2876                                  ldlm_lockname[lock->l_granted_mode],
2877                                  ldlm_lockname[lock->l_req_mode],
2878                                  PLDLMRES(resource),
2879                                  atomic_read(&resource->lr_refcount),
2880                                  ldlm_typename[resource->lr_type],
2881                                  lock->l_flags, nid,
2882                                  lock->l_remote_handle.cookie,
2883                                  exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
2884                                  lock->l_pid, lock->l_callback_timeout,
2885                                  lock->l_lvb_type);
2886                 break;
2887         }
2888         va_end(args);
2889         ldlm_resource_putref(resource);
2890 }
2891 EXPORT_SYMBOL(_ldlm_lock_debug);
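
/*
 * A minimal sketch (illustrative only, compiled out; "lock" is assumed in
 * scope): _ldlm_lock_debug() is rarely called directly; the LDLM_DEBUG()
 * macro supplies the msgdata argument for it.
 */
#if 0
	LDLM_DEBUG(lock, "example debug message");
#endif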