/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_lock.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_LDLM

#include <libcfs/libcfs.h>

#include <lustre_swab.h>
#include <obd_class.h>

#include "ldlm_internal.h"

/* lock types */
char *ldlm_lockname[] = {
        [0] = "--",
        [LCK_EX] = "EX",
        [LCK_PW] = "PW",
        [LCK_PR] = "PR",
        [LCK_CW] = "CW",
        [LCK_CR] = "CR",
        [LCK_NL] = "NL",
        [LCK_GROUP] = "GROUP",
        [LCK_COS] = "COS"
};
EXPORT_SYMBOL(ldlm_lockname);

char *ldlm_typename[] = {
        [LDLM_PLAIN] = "PLN",
        [LDLM_EXTENT] = "EXT",
        [LDLM_FLOCK] = "FLK",
        [LDLM_IBITS] = "IBT",
};

static ldlm_policy_wire_to_local_t ldlm_policy_wire_to_local[] = {
        [LDLM_PLAIN - LDLM_MIN_TYPE]  = ldlm_plain_policy_wire_to_local,
        [LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_wire_to_local,
        [LDLM_FLOCK - LDLM_MIN_TYPE]  = ldlm_flock_policy_wire_to_local,
        [LDLM_IBITS - LDLM_MIN_TYPE]  = ldlm_ibits_policy_wire_to_local,
};

static ldlm_policy_local_to_wire_t ldlm_policy_local_to_wire[] = {
        [LDLM_PLAIN - LDLM_MIN_TYPE]  = ldlm_plain_policy_local_to_wire,
        [LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_local_to_wire,
        [LDLM_FLOCK - LDLM_MIN_TYPE]  = ldlm_flock_policy_local_to_wire,
        [LDLM_IBITS - LDLM_MIN_TYPE]  = ldlm_ibits_policy_local_to_wire,
};

/**
 * Converts lock policy from local format to on the wire lock_desc format
 */
void ldlm_convert_policy_to_wire(enum ldlm_type type,
                                 const union ldlm_policy_data *lpolicy,
                                 union ldlm_wire_policy_data *wpolicy)
{
        ldlm_policy_local_to_wire_t convert;

        convert = ldlm_policy_local_to_wire[type - LDLM_MIN_TYPE];

        convert(lpolicy, wpolicy);
}

/**
 * Converts lock policy from on the wire lock_desc format to local format
 */
void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type,
                                  const union ldlm_wire_policy_data *wpolicy,
                                  union ldlm_policy_data *lpolicy)
{
        ldlm_policy_wire_to_local_t convert;

        convert = ldlm_policy_wire_to_local[type - LDLM_MIN_TYPE];

        convert(wpolicy, lpolicy);
}
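
/*
 * Editor's note (illustrative, not from the original source): both
 * converters dispatch through the tables above, indexed by the lock type
 * offset from LDLM_MIN_TYPE.  For an extent lock, for example,
 *
 *        ldlm_convert_policy_to_wire(LDLM_EXTENT, &lpolicy, &wpolicy);
 *
 * resolves to ldlm_extent_policy_local_to_wire(&lpolicy, &wpolicy).
 */
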
109
110 const char *ldlm_it2str(enum ldlm_intent_flags it)
111 {
112         switch (it) {
113         case IT_OPEN:
114                 return "open";
115         case IT_CREAT:
116                 return "creat";
117         case (IT_OPEN | IT_CREAT):
118                 return "open|creat";
119         case IT_READDIR:
120                 return "readdir";
121         case IT_GETATTR:
122                 return "getattr";
123         case IT_LOOKUP:
124                 return "lookup";
125         case IT_UNLINK:
126                 return "unlink";
127         case IT_GETXATTR:
128                 return "getxattr";
129         case IT_LAYOUT:
130                 return "layout";
131         default:
132                 CERROR("Unknown intent 0x%08x\n", it);
133                 return "UNKNOWN";
134         }
135 }
136 EXPORT_SYMBOL(ldlm_it2str);
137
138 extern struct kmem_cache *ldlm_lock_slab;
139
140 #ifdef HAVE_SERVER_SUPPORT
141 static ldlm_processing_policy ldlm_processing_policy_table[] = {
142         [LDLM_PLAIN]    = ldlm_process_plain_lock,
143         [LDLM_EXTENT]   = ldlm_process_extent_lock,
144         [LDLM_FLOCK]    = ldlm_process_flock_lock,
145         [LDLM_IBITS]    = ldlm_process_inodebits_lock,
146 };
147
148 ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res)
149 {
150         return ldlm_processing_policy_table[res->lr_type];
151 }
152 EXPORT_SYMBOL(ldlm_get_processing_policy);
153 #endif /* HAVE_SERVER_SUPPORT */
154
155 void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg)
156 {
157         ns->ns_policy = arg;
158 }
159 EXPORT_SYMBOL(ldlm_register_intent);
160
161 /*
162  * REFCOUNTED LOCK OBJECTS
163  */
164
165
166 /**
167  * Get a reference on a lock.
168  *
169  * Lock refcounts, during creation:
170  *   - one special one for allocation, dec'd only once in destroy
171  *   - one for being a lock that's in-use
172  *   - one for the addref associated with a new lock
173  */
174 struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
175 {
176         atomic_inc(&lock->l_refc);
177         return lock;
178 }
179 EXPORT_SYMBOL(ldlm_lock_get);
180
/**
 * Release lock reference.
 *
 * Also frees the lock if it was the last reference.
 */
void ldlm_lock_put(struct ldlm_lock *lock)
{
        ENTRY;

        LASSERT(lock->l_resource != LP_POISON);
        LASSERT(atomic_read(&lock->l_refc) > 0);
        if (atomic_dec_and_test(&lock->l_refc)) {
                struct ldlm_resource *res;

                LDLM_DEBUG(lock,
                           "final lock_put on destroyed lock, freeing it.");

                res = lock->l_resource;
                LASSERT(ldlm_is_destroyed(lock));
                LASSERT(list_empty(&lock->l_exp_list));
                LASSERT(list_empty(&lock->l_res_link));
                LASSERT(list_empty(&lock->l_pending_chain));

                lprocfs_counter_decr(ldlm_res_to_ns(res)->ns_stats,
                                     LDLM_NSS_LOCKS);
                lu_ref_del(&res->lr_reference, "lock", lock);
                ldlm_resource_putref(res);
                lock->l_resource = NULL;
                if (lock->l_export) {
                        class_export_lock_put(lock->l_export, lock);
                        lock->l_export = NULL;
                }

                if (lock->l_lvb_data != NULL)
                        OBD_FREE_LARGE(lock->l_lvb_data, lock->l_lvb_len);

                ldlm_interval_free(ldlm_interval_detach(lock));
                lu_ref_fini(&lock->l_reference);
                OBD_FREE_RCU(lock, sizeof(*lock), &lock->l_handle);
        }

        EXIT;
}
EXPORT_SYMBOL(ldlm_lock_put);
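
/*
 * Editor's note (usage sketch, not from the original source): callers
 * take and drop references through the LDLM_LOCK_GET()/LDLM_LOCK_PUT()
 * wrappers around the two functions above.  The lock structure itself is
 * only freed once ldlm_lock_destroy_internal() (below) has marked it
 * destroyed and the final reference is put:
 *
 *        LDLM_LOCK_GET(lock);        // +1 l_refc
 *        ...
 *        LDLM_LOCK_PUT(lock);        // -1 l_refc; freed at zero
 */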

/**
 * Removes LDLM lock \a lock from LRU. Assumes LRU is already locked.
 */
int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
{
        int rc = 0;
        if (!list_empty(&lock->l_lru)) {
                struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

                LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
                list_del_init(&lock->l_lru);
                LASSERT(ns->ns_nr_unused > 0);
                ns->ns_nr_unused--;
                rc = 1;
        }
        return rc;
}

/**
 * Removes LDLM lock \a lock from LRU. Obtains the LRU lock first.
 *
 * If \a last_use is non-zero, it will remove the lock from LRU only if
 * it matches lock's l_last_used.
 *
 * \retval 0 the lock was not in the LRU list, or \a last_use was non-zero
 *           and did not match the lock's l_last_used;
 * \retval 1 the lock was in the LRU list and was removed.
 */
int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, cfs_time_t last_use)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
        int rc = 0;

        ENTRY;
        if (ldlm_is_ns_srv(lock)) {
                LASSERT(list_empty(&lock->l_lru));
                RETURN(0);
        }

        spin_lock(&ns->ns_lock);
        if (last_use == 0 || last_use == lock->l_last_used)
                rc = ldlm_lock_remove_from_lru_nolock(lock);
        spin_unlock(&ns->ns_lock);

        RETURN(rc);
}
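
/*
 * Editor's note (illustrative, not from the original source): the
 * \a last_use argument lets an LRU walker close the race with a lock
 * becoming busy again.  A hypothetical caller samples l_last_used,
 * releases the LRU lock to do other work, then removes the lock only if
 * it was not touched in the meantime:
 *
 *        cfs_time_t last = lock->l_last_used;
 *        ...
 *        if (ldlm_lock_remove_from_lru_check(lock, last))
 *                // still unused: safe to go ahead and cancel it
 */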

/**
 * Adds LDLM lock \a lock to namespace LRU. Assumes LRU is already locked.
 */
void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

        lock->l_last_used = cfs_time_current();
        LASSERT(list_empty(&lock->l_lru));
        LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
        list_add_tail(&lock->l_lru, &ns->ns_unused_list);
        ldlm_clear_skipped(lock);
        LASSERT(ns->ns_nr_unused >= 0);
        ns->ns_nr_unused++;
}

/**
 * Adds LDLM lock \a lock to namespace LRU. Obtains necessary LRU locks
 * first.
 */
void ldlm_lock_add_to_lru(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

        ENTRY;
        spin_lock(&ns->ns_lock);
        ldlm_lock_add_to_lru_nolock(lock);
        spin_unlock(&ns->ns_lock);
        EXIT;
}

/**
 * Moves LDLM lock \a lock that is already in namespace LRU to the tail of
 * the LRU. Performs necessary LRU locking.
 */
void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

        ENTRY;
        if (ldlm_is_ns_srv(lock)) {
                LASSERT(list_empty(&lock->l_lru));
                EXIT;
                return;
        }

        spin_lock(&ns->ns_lock);
        if (!list_empty(&lock->l_lru)) {
                ldlm_lock_remove_from_lru_nolock(lock);
                ldlm_lock_add_to_lru_nolock(lock);
        }
        spin_unlock(&ns->ns_lock);
        EXIT;
}

/**
 * Helper to destroy a locked lock.
 *
 * Used by ldlm_lock_destroy and ldlm_lock_destroy_nolock
 * Must be called with l_lock and lr_lock held.
 *
 * Does not actually free the lock data, but rather marks the lock as
 * destroyed by setting l_destroyed field in the lock to 1.  Destroys a
 * handle->lock association too, so that the lock can no longer be found
 * and removes the lock from LRU list.  Actual lock freeing occurs when
 * last lock reference goes away.
 *
 * Original comment (of some historical value):
 * This used to have a 'strict' flag, which recovery would use to mark an
 * in-use lock as needing-to-die.  Lest I am ever tempted to put it back, I
 * shall explain why it's gone: with the new hash table scheme, once you call
 * ldlm_lock_destroy, you can never drop your final references on this lock.
 * Because it's not in the hash table anymore.  -phil
 */
static int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
{
        ENTRY;

        if (lock->l_readers || lock->l_writers) {
                LDLM_ERROR(lock, "lock still has references");
                LBUG();
        }

        if (!list_empty(&lock->l_res_link)) {
                LDLM_ERROR(lock, "lock still on resource");
                LBUG();
        }

        if (ldlm_is_destroyed(lock)) {
                LASSERT(list_empty(&lock->l_lru));
                EXIT;
                return 0;
        }
        ldlm_set_destroyed(lock);

        if (lock->l_export && lock->l_export->exp_lock_hash) {
                /* NB: it's safe to call cfs_hash_del() even if the lock
                 * isn't in exp_lock_hash. */
                /* In the function below, .hs_keycmp resolves to
                 * ldlm_export_lock_keycmp() */
                /* coverity[overrun-buffer-val] */
                cfs_hash_del(lock->l_export->exp_lock_hash,
                             &lock->l_remote_handle, &lock->l_exp_hash);
        }

        ldlm_lock_remove_from_lru(lock);
        class_handle_unhash(&lock->l_handle);

        EXIT;
        return 1;
}

/**
 * Destroys a LDLM lock \a lock. Performs necessary locking first.
 */
void ldlm_lock_destroy(struct ldlm_lock *lock)
{
        int first;
        ENTRY;
        lock_res_and_lock(lock);
        first = ldlm_lock_destroy_internal(lock);
        unlock_res_and_lock(lock);

        /* drop reference from hashtable only for first destroy */
        if (first) {
                lu_ref_del(&lock->l_reference, "hash", lock);
                LDLM_LOCK_RELEASE(lock);
        }
        EXIT;
}

/**
 * Destroys a LDLM lock \a lock that is already locked.
 */
void ldlm_lock_destroy_nolock(struct ldlm_lock *lock)
{
        int first;
        ENTRY;
        first = ldlm_lock_destroy_internal(lock);
        /* drop reference from hashtable only for first destroy */
        if (first) {
                lu_ref_del(&lock->l_reference, "hash", lock);
                LDLM_LOCK_RELEASE(lock);
        }
        EXIT;
}

/* this is called by portals_handle2object with the handle lock taken */
static void lock_handle_addref(void *lock)
{
        LDLM_LOCK_GET((struct ldlm_lock *)lock);
}

static void lock_handle_free(void *lock, int size)
{
        LASSERT(size == sizeof(struct ldlm_lock));
        OBD_SLAB_FREE(lock, ldlm_lock_slab, size);
}

static struct portals_handle_ops lock_handle_ops = {
        .hop_addref = lock_handle_addref,
        .hop_free   = lock_handle_free,
};

/**
 * Allocate and initialize new lock structure.
 *
 * usage: pass in a resource on which you have done ldlm_resource_get
 *        new lock will take over the refcount.
 * returns: lock with refcount 2 - one for current caller and one for remote
 */
static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
{
        struct ldlm_lock *lock;
        ENTRY;

        if (resource == NULL)
                LBUG();

        OBD_SLAB_ALLOC_PTR_GFP(lock, ldlm_lock_slab, GFP_NOFS);
        if (lock == NULL)
                RETURN(NULL);

        spin_lock_init(&lock->l_lock);
        lock->l_resource = resource;
        lu_ref_add(&resource->lr_reference, "lock", lock);

        atomic_set(&lock->l_refc, 2);
        INIT_LIST_HEAD(&lock->l_res_link);
        INIT_LIST_HEAD(&lock->l_lru);
        INIT_LIST_HEAD(&lock->l_pending_chain);
        INIT_LIST_HEAD(&lock->l_bl_ast);
        INIT_LIST_HEAD(&lock->l_cp_ast);
        INIT_LIST_HEAD(&lock->l_rk_ast);
        init_waitqueue_head(&lock->l_waitq);
        lock->l_blocking_lock = NULL;
        INIT_LIST_HEAD(&lock->l_sl_mode);
        INIT_LIST_HEAD(&lock->l_sl_policy);
        INIT_HLIST_NODE(&lock->l_exp_hash);
        INIT_HLIST_NODE(&lock->l_exp_flock_hash);

        lprocfs_counter_incr(ldlm_res_to_ns(resource)->ns_stats,
                             LDLM_NSS_LOCKS);
        INIT_LIST_HEAD(&lock->l_handle.h_link);
        class_handle_hash(&lock->l_handle, &lock_handle_ops);

        lu_ref_init(&lock->l_reference);
        lu_ref_add(&lock->l_reference, "hash", lock);
        lock->l_callback_timeout = 0;

#if LUSTRE_TRACKS_LOCK_EXP_REFS
        INIT_LIST_HEAD(&lock->l_exp_refs_link);
        lock->l_exp_refs_nr = 0;
        lock->l_exp_refs_target = NULL;
#endif
        INIT_LIST_HEAD(&lock->l_exp_list);

        RETURN(lock);
}
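
/*
 * Editor's note (usage sketch, not from the original source): per the
 * comment above, ldlm_lock_new() takes over the caller's resource
 * reference on success; on failure the caller still owns it:
 *
 *        res = ldlm_resource_get(ns, NULL, res_id, type, 1);
 *        lock = ldlm_lock_new(res);
 *        if (lock == NULL)
 *                ldlm_resource_putref(res);  // only needed on failure
 */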

/**
 * Moves LDLM lock \a lock to another resource.
 * This is used on the client when the server returns a different lock
 * from the one requested (typically as a result of an intent operation).
 */
int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
                              const struct ldlm_res_id *new_resid)
{
        struct ldlm_resource *oldres = lock->l_resource;
        struct ldlm_resource *newres;
        int type;
        ENTRY;

        LASSERT(ns_is_client(ns));

        lock_res_and_lock(lock);
        if (memcmp(new_resid, &lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) == 0) {
                /* Nothing to do */
                unlock_res_and_lock(lock);
                RETURN(0);
        }

        LASSERT(new_resid->name[0] != 0);

        /* This function assumes that the lock isn't on any lists */
        LASSERT(list_empty(&lock->l_res_link));

        type = oldres->lr_type;
        unlock_res_and_lock(lock);

        newres = ldlm_resource_get(ns, NULL, new_resid, type, 1);
        if (IS_ERR(newres))
                RETURN(PTR_ERR(newres));

        lu_ref_add(&newres->lr_reference, "lock", lock);
        /*
         * To flip the lock from the old to the new resource, lock, oldres and
         * newres have to be locked. Resource spin-locks are nested within
         * lock->l_lock, and are taken in the memory address order to avoid
         * dead-locks.
         */
        spin_lock(&lock->l_lock);
        oldres = lock->l_resource;
        if (oldres < newres) {
                lock_res(oldres);
                lock_res_nested(newres, LRT_NEW);
        } else {
                lock_res(newres);
                lock_res_nested(oldres, LRT_NEW);
        }
        LASSERT(memcmp(new_resid, &oldres->lr_name,
                       sizeof oldres->lr_name) != 0);
        lock->l_resource = newres;
        unlock_res(oldres);
        unlock_res_and_lock(lock);

        /* ...and the flowers are still standing! */
        lu_ref_del(&oldres->lr_reference, "lock", lock);
        ldlm_resource_putref(oldres);

        RETURN(0);
}
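
/*
 * Editor's note (not from the original source): taking the two resource
 * spin-locks in memory address order, as done above, is the standard
 * deadlock-avoidance idiom: any two threads moving locks between the
 * same pair of resources acquire them in the same order, so neither can
 * hold one lock while waiting for the other.
 */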

/** \defgroup ldlm_handles LDLM HANDLES
 * Ways to get hold of locks without any addresses.
 * @{
 */

/**
 * Fills in handle for LDLM lock \a lock into supplied \a lockh
 * Does not take any references.
 */
void ldlm_lock2handle(const struct ldlm_lock *lock, struct lustre_handle *lockh)
{
        lockh->cookie = lock->l_handle.h_cookie;
}
EXPORT_SYMBOL(ldlm_lock2handle);

/**
 * Obtain a lock reference by handle.
 *
 * If \a flags is nonzero: atomically get the lock and set the flags;
 * return NULL if any of those flags is already set.
 */
struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
                                     __u64 flags)
{
        struct ldlm_lock *lock;
        ENTRY;

        LASSERT(handle);

        lock = class_handle2object(handle->cookie, NULL);
        if (lock == NULL)
                RETURN(NULL);

        if (lock->l_export != NULL && lock->l_export->exp_failed) {
                CDEBUG(D_INFO, "lock export failed: lock %p, exp %p\n",
                       lock, lock->l_export);
                LDLM_LOCK_PUT(lock);
                RETURN(NULL);
        }

        /* It's unlikely but possible that someone marked the lock as
         * destroyed after we did handle2object on it */
        if ((flags == 0) && !ldlm_is_destroyed(lock)) {
                lu_ref_add(&lock->l_reference, "handle", current);
                RETURN(lock);
        }

        lock_res_and_lock(lock);

        LASSERT(lock->l_resource != NULL);

        lu_ref_add_atomic(&lock->l_reference, "handle", current);
        if (unlikely(ldlm_is_destroyed(lock))) {
                unlock_res_and_lock(lock);
                CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
                LDLM_LOCK_PUT(lock);
                RETURN(NULL);
        }

        /* If we're setting flags, make sure none of them are already set. */
        if (flags != 0) {
                if ((lock->l_flags & flags) != 0) {
                        unlock_res_and_lock(lock);
                        LDLM_LOCK_PUT(lock);
                        RETURN(NULL);
                }

                lock->l_flags |= flags;
        }

        unlock_res_and_lock(lock);
        RETURN(lock);
}
EXPORT_SYMBOL(__ldlm_handle2lock);
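
/*
 * Editor's note (usage sketch, not from the original source): handles
 * round-trip through the h_cookie value, so a lock can be re-found later
 * without holding a pointer (or a reference) to it:
 *
 *        struct lustre_handle lockh;
 *
 *        ldlm_lock2handle(lock, &lockh);
 *        ...
 *        lock = ldlm_handle2lock(&lockh);  // NULL if destroyed meanwhile
 *        if (lock != NULL)
 *                LDLM_LOCK_PUT(lock);      // drop the reference when done
 */
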
/** @} ldlm_handles */

/**
 * Fill in "on the wire" representation for given LDLM lock into supplied
 * lock descriptor \a desc structure.
 */
void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
{
        ldlm_res2desc(lock->l_resource, &desc->l_resource);
        desc->l_req_mode = lock->l_req_mode;
        desc->l_granted_mode = lock->l_granted_mode;
        ldlm_convert_policy_to_wire(lock->l_resource->lr_type,
                                    &lock->l_policy_data,
                                    &desc->l_policy_data);
}

/**
 * Add a lock to list of conflicting locks to send AST to.
 *
 * Only add if we have not sent a blocking AST to the lock yet.
 */
static void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                                  struct list_head *work_list)
{
        if (!ldlm_is_ast_sent(lock)) {
                LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
                ldlm_set_ast_sent(lock);
                /* If the enqueuing client said so, tell the AST recipient to
                 * discard dirty data, rather than writing back. */
                if (ldlm_is_ast_discard_data(new))
                        ldlm_set_discard_data(lock);
                LASSERT(list_empty(&lock->l_bl_ast));
                list_add(&lock->l_bl_ast, work_list);
                LDLM_LOCK_GET(lock);
                LASSERT(lock->l_blocking_lock == NULL);
                lock->l_blocking_lock = LDLM_LOCK_GET(new);
        }
}

/**
 * Add a lock to list of just granted locks to send completion AST to.
 */
static void ldlm_add_cp_work_item(struct ldlm_lock *lock,
                                  struct list_head *work_list)
{
        if (!ldlm_is_cp_reqd(lock)) {
                ldlm_set_cp_reqd(lock);
                LDLM_DEBUG(lock, "lock granted; sending completion AST.");
                LASSERT(list_empty(&lock->l_cp_ast));
                list_add(&lock->l_cp_ast, work_list);
                LDLM_LOCK_GET(lock);
        }
}

/**
 * Aggregator function to add AST work items into a list. Determines
 * what sort of an AST work needs to be done and calls the proper
 * adding function.
 * Must be called with lr_lock held.
 */
void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                            struct list_head *work_list)
{
        ENTRY;
        check_res_locked(lock->l_resource);
        if (new)
                ldlm_add_bl_work_item(lock, new, work_list);
        else
                ldlm_add_cp_work_item(lock, work_list);
        EXIT;
}

/**
 * Add specified reader/writer reference to LDLM lock with handle \a lockh.
 * r/w reference type is determined by \a mode
 * Calls ldlm_lock_addref_internal.
 */
void ldlm_lock_addref(const struct lustre_handle *lockh, enum ldlm_mode mode)
{
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(lockh);
        LASSERTF(lock != NULL, "Non-existing lock: %#llx\n", lockh->cookie);
        ldlm_lock_addref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}
EXPORT_SYMBOL(ldlm_lock_addref);

/**
 * Helper function.
 * Add specified reader/writer reference to LDLM lock \a lock.
 * r/w reference type is determined by \a mode
 * Removes lock from LRU if it is there.
 * Assumes the LDLM lock is already locked.
 */
void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock,
                                      enum ldlm_mode mode)
{
        ldlm_lock_remove_from_lru(lock);
        if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
                lock->l_readers++;
                lu_ref_add_atomic(&lock->l_reference, "reader", lock);
        }
        if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) {
                lock->l_writers++;
                lu_ref_add_atomic(&lock->l_reference, "writer", lock);
        }
        LDLM_LOCK_GET(lock);
        lu_ref_add_atomic(&lock->l_reference, "user", lock);
        LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
}

/**
 * Attempts to add reader/writer reference to a lock with handle \a lockh, and
 * fails if lock is already LDLM_FL_CBPENDING or destroyed.
 *
 * \retval 0 success, lock was addref-ed
 *
 * \retval -EAGAIN lock is being canceled.
 */
int ldlm_lock_addref_try(const struct lustre_handle *lockh, enum ldlm_mode mode)
{
        struct ldlm_lock *lock;
        int               result;

        result = -EAGAIN;
        lock = ldlm_handle2lock(lockh);
        if (lock != NULL) {
                lock_res_and_lock(lock);
                if (lock->l_readers != 0 || lock->l_writers != 0 ||
                    !ldlm_is_cbpending(lock)) {
                        ldlm_lock_addref_internal_nolock(lock, mode);
                        result = 0;
                }
                unlock_res_and_lock(lock);
                LDLM_LOCK_PUT(lock);
        }
        return result;
}
EXPORT_SYMBOL(ldlm_lock_addref_try);
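
/*
 * Editor's note (usage sketch, not from the original source): a
 * hypothetical caller reusing a cached handle that may be racing with
 * cancellation would prefer ldlm_lock_addref_try() over
 * ldlm_lock_addref():
 *
 *        if (ldlm_lock_addref_try(&lockh, mode) == 0) {
 *                // got a usable reference
 *        } else {
 *                // -EAGAIN: lock is being cancelled, enqueue a new one
 *        }
 */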

/**
 * Add specified reader/writer reference to LDLM lock \a lock.
 * Locks LDLM lock and calls ldlm_lock_addref_internal_nolock to do the work.
 * Only called for local locks.
 */
void ldlm_lock_addref_internal(struct ldlm_lock *lock, enum ldlm_mode mode)
{
        lock_res_and_lock(lock);
        ldlm_lock_addref_internal_nolock(lock, mode);
        unlock_res_and_lock(lock);
}
/**
 * Removes reader/writer reference for LDLM lock \a lock.
 * Assumes LDLM lock is already locked.
 * Only called in ldlm_flock_destroy and for local locks.
 * Does NOT add lock to LRU if no r/w references left to accommodate flock
 * locks that cannot be placed in LRU.
 */
void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock,
                                      enum ldlm_mode mode)
{
        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
                LASSERT(lock->l_readers > 0);
                lu_ref_del(&lock->l_reference, "reader", lock);
                lock->l_readers--;
        }
        if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) {
                LASSERT(lock->l_writers > 0);
                lu_ref_del(&lock->l_reference, "writer", lock);
                lock->l_writers--;
        }

        lu_ref_del(&lock->l_reference, "user", lock);
        LDLM_LOCK_RELEASE(lock);    /* matches the LDLM_LOCK_GET() in addref */
}

/**
 * Removes reader/writer reference for LDLM lock \a lock.
 * Locks LDLM lock first.
 * If this is a lock in a client namespace, its r/w refcount drops to zero
 * and it is not blocked, the lock is added to the namespace LRU.
 * For blocked LDLM locks, if the r/w count drops to zero the blocking AST
 * is run.
 */
void ldlm_lock_decref_internal(struct ldlm_lock *lock, enum ldlm_mode mode)
{
        struct ldlm_namespace *ns;
        ENTRY;

        lock_res_and_lock(lock);

        ns = ldlm_lock_to_ns(lock);

        ldlm_lock_decref_internal_nolock(lock, mode);

        if ((ldlm_is_local(lock) || lock->l_req_mode == LCK_GROUP) &&
            !lock->l_readers && !lock->l_writers) {
                /* If this is a local lock on a server namespace and this was
                 * the last reference, cancel the lock.
                 *
                 * Group locks are special:
                 * They must not go in LRU, but they are not called back
                 * like non-group locks, instead they are manually released.
                 * They have an l_writers reference which they keep until
                 * they are manually released, so we remove them when they have
                 * no more reader or writer references. - LU-6368 */
                ldlm_set_cbpending(lock);
        }

        if (!lock->l_readers && !lock->l_writers && ldlm_is_cbpending(lock)) {
                /* If we received a blocked AST and this was the last reference,
                 * run the callback. */
                if (ldlm_is_ns_srv(lock) && lock->l_export)
                        CERROR("FL_CBPENDING set on non-local lock--just a "
                               "warning\n");

                LDLM_DEBUG(lock, "final decref done on cbpending lock");

                LDLM_LOCK_GET(lock); /* dropped by bl thread */
                ldlm_lock_remove_from_lru(lock);
                unlock_res_and_lock(lock);

                if (ldlm_is_fail_loc(lock))
                        OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);

                if (ldlm_is_atomic_cb(lock) ||
                    ldlm_bl_to_thread_lock(ns, NULL, lock) != 0)
                        ldlm_handle_bl_callback(ns, NULL, lock);
        } else if (ns_is_client(ns) &&
                   !lock->l_readers && !lock->l_writers &&
                   !ldlm_is_no_lru(lock) &&
                   !ldlm_is_bl_ast(lock)) {

                LDLM_DEBUG(lock, "add lock into lru list");

                /* If this is a client-side namespace and this was the last
                 * reference, put it on the LRU. */
                ldlm_lock_add_to_lru(lock);
                unlock_res_and_lock(lock);

                if (ldlm_is_fail_loc(lock))
                        OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);

                /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE
                 * are not supported by the server, otherwise, it is done on
                 * enqueue. */
                if (!exp_connect_cancelset(lock->l_conn_export) &&
                    !ns_connect_lru_resize(ns))
                        ldlm_cancel_lru(ns, 0, LCF_ASYNC, 0);
        } else {
                LDLM_DEBUG(lock, "do not add lock into lru list");
                unlock_res_and_lock(lock);
        }

        EXIT;
}

/**
 * Decrease reader/writer refcount for LDLM lock with handle \a lockh
 */
void ldlm_lock_decref(const struct lustre_handle *lockh, enum ldlm_mode mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
        LASSERTF(lock != NULL, "Non-existing lock: %#llx\n", lockh->cookie);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}
EXPORT_SYMBOL(ldlm_lock_decref);

/**
 * Decrease reader/writer refcount for LDLM lock with handle
 * \a lockh and mark it for subsequent cancellation once r/w refcount
 * drops to zero instead of putting into LRU.
 */
void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh,
                                 enum ldlm_mode mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
        ENTRY;

        LASSERT(lock != NULL);

        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        lock_res_and_lock(lock);
        ldlm_set_cbpending(lock);
        unlock_res_and_lock(lock);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}
EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
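
/*
 * Editor's note (usage sketch, not from the original source): reader/
 * writer references pair up across the public API; for example, a
 * reference taken with ldlm_lock_addref() is dropped with
 * ldlm_lock_decref() on the same handle and mode once the user is done,
 * or with ldlm_lock_decref_and_cancel() if the lock should be cancelled
 * rather than parked on the LRU:
 *
 *        ldlm_lock_addref(&lockh, LCK_PR);
 *        ... use the lock ...
 *        ldlm_lock_decref(&lockh, LCK_PR);
 */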

struct sl_insert_point {
        struct list_head *res_link;
        struct list_head *mode_link;
        struct list_head *policy_link;
};

/**
 * Finds a position to insert the new lock into granted lock list.
 *
 * Used for locks eligible for skiplist optimization.
 *
 * Parameters:
 *      queue [input]:  the granted list where search acts on;
 *      req [input]:    the lock whose position to be located;
 *      prev [output]:  positions within 3 lists to insert @req to
 * Return Value:
 *      filled @prev
 * NOTE: called by
 *  - ldlm_grant_lock_with_skiplist
 */
static void search_granted_lock(struct list_head *queue,
                                struct ldlm_lock *req,
                                struct sl_insert_point *prev)
{
        struct list_head *tmp;
        struct ldlm_lock *lock, *mode_end, *policy_end;
        ENTRY;

        list_for_each(tmp, queue) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                mode_end = list_entry(lock->l_sl_mode.prev,
                                          struct ldlm_lock, l_sl_mode);

                if (lock->l_req_mode != req->l_req_mode) {
                        /* jump to last lock of mode group */
                        tmp = &mode_end->l_res_link;
                        continue;
                }

                /* suitable mode group is found */
                if (lock->l_resource->lr_type == LDLM_PLAIN) {
                        /* insert point is last lock of the mode group */
                        prev->res_link = &mode_end->l_res_link;
                        prev->mode_link = &mode_end->l_sl_mode;
                        prev->policy_link = &req->l_sl_policy;
                        EXIT;
                        return;
                } else if (lock->l_resource->lr_type == LDLM_IBITS) {
                        for (;;) {
                                policy_end =
                                        list_entry(lock->l_sl_policy.prev,
                                                       struct ldlm_lock,
                                                       l_sl_policy);

                                if (lock->l_policy_data.l_inodebits.bits ==
                                    req->l_policy_data.l_inodebits.bits) {
                                        /* insert point is last lock of
                                         * the policy group */
                                        prev->res_link =
                                                &policy_end->l_res_link;
                                        prev->mode_link =
                                                &policy_end->l_sl_mode;
                                        prev->policy_link =
                                                &policy_end->l_sl_policy;
                                        EXIT;
                                        return;
                                }

                                if (policy_end == mode_end)
                                        /* done with mode group */
                                        break;

                                /* go to next policy group within mode group */
                                tmp = policy_end->l_res_link.next;
                                lock = list_entry(tmp, struct ldlm_lock,
                                                      l_res_link);
                        }  /* loop over policy groups within the mode group */

                        /* insert point is last lock of the mode group,
                         * new policy group is started */
                        prev->res_link = &mode_end->l_res_link;
                        prev->mode_link = &mode_end->l_sl_mode;
                        prev->policy_link = &req->l_sl_policy;
                        EXIT;
                        return;
                } else {
                        LDLM_ERROR(lock, "is not LDLM_PLAIN or LDLM_IBITS lock");
                        LBUG();
                }
        }

        /* insert point is last lock on the queue,
         * new mode group and new policy group are started */
        prev->res_link = queue->prev;
        prev->mode_link = &req->l_sl_mode;
        prev->policy_link = &req->l_sl_policy;
        EXIT;
        return;
}
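
/*
 * Editor's note (illustrative, not from the original source): the
 * "skiplist" kept by search_granted_lock() groups the granted list by
 * mode and, within an IBITS mode group, by policy bits.  l_sl_mode links
 * the first lock of a mode group to the last one (likewise l_sl_policy
 * for a policy group), which is what lets the search above jump over a
 * whole group in one step instead of visiting every lock:
 *
 *        lr_granted:  [PR|A] [PR|A] [PR|B]   [PW|C] [PW|C]
 *        mode groups: |------- PR -------|   |--- PW ---|
 *        policy grps: |-- A ---|  |- B -|    |--- C ---|
 */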

/**
 * Add a lock into resource granted list after a position described by
 * \a prev.
 */
static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
                                       struct sl_insert_point *prev)
{
        struct ldlm_resource *res = lock->l_resource;
        ENTRY;

        check_res_locked(res);

        ldlm_resource_dump(D_INFO, res);
        LDLM_DEBUG(lock, "About to add lock:");

        if (ldlm_is_destroyed(lock)) {
                CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
                return;
        }

        LASSERT(list_empty(&lock->l_res_link));
        LASSERT(list_empty(&lock->l_sl_mode));
        LASSERT(list_empty(&lock->l_sl_policy));

        /*
         * lock->link == prev->link means lock is first starting the group.
         * Don't re-add to itself to suppress kernel warnings.
         */
        if (&lock->l_res_link != prev->res_link)
                list_add(&lock->l_res_link, prev->res_link);
        if (&lock->l_sl_mode != prev->mode_link)
                list_add(&lock->l_sl_mode, prev->mode_link);
        if (&lock->l_sl_policy != prev->policy_link)
                list_add(&lock->l_sl_policy, prev->policy_link);

        EXIT;
}

/**
 * Add a lock to granted list on a resource maintaining skiplist
 * correctness.
 */
static void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock)
{
        struct sl_insert_point prev;
        ENTRY;

        LASSERT(lock->l_req_mode == lock->l_granted_mode);

        search_granted_lock(&lock->l_resource->lr_granted, lock, &prev);
        ldlm_granted_list_add_lock(lock, &prev);
        EXIT;
}

/**
 * Perform lock granting bookkeeping.
 *
 * Includes putting the lock into granted list and updating lock mode.
 * NOTE: called by
 *  - ldlm_lock_enqueue
 *  - ldlm_reprocess_queue
 *  - ldlm_lock_convert
 *
 * must be called with lr_lock held
 */
void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
{
        struct ldlm_resource *res = lock->l_resource;
        ENTRY;

        check_res_locked(res);

        lock->l_granted_mode = lock->l_req_mode;

        if (work_list && lock->l_completion_ast != NULL)
                ldlm_add_ast_work_item(lock, NULL, work_list);

        if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS)
                ldlm_grant_lock_with_skiplist(lock);
        else if (res->lr_type == LDLM_EXTENT)
                ldlm_extent_add_lock(res, lock);
        else if (res->lr_type == LDLM_FLOCK) {
                /* We should not add locks to granted list in the following
                 * cases:
                 * - this is an UNLOCK but not a real lock;
                 * - this is a TEST lock;
                 * - this is a F_CANCELLK lock (async flock has req_mode == 0)
                 * - this is a deadlock (flock cannot be granted) */
                if (lock->l_req_mode == 0 ||
                    lock->l_req_mode == LCK_NL ||
                    ldlm_is_test_lock(lock) ||
                    ldlm_is_flock_deadlock(lock))
                        RETURN_EXIT;
                ldlm_resource_add_lock(res, &res->lr_granted, lock);
        } else {
                LBUG();
        }

        ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock);
        EXIT;
}

/**
 * Describe the overlap between two locks.  itree_overlap_cb data.
 */
struct lock_match_data {
        struct ldlm_lock        *lmd_old;
        struct ldlm_lock        *lmd_lock;
        enum ldlm_mode          *lmd_mode;
        union ldlm_policy_data  *lmd_policy;
        __u64                    lmd_flags;
        int                      lmd_unref;
};

/**
 * Check if the given @lock meets the criteria for a match.
 * A reference on the lock is taken if matched.
 *
 * \param lock     test-against this lock
 * \param data     parameters
 */
static int lock_matches(struct ldlm_lock *lock, struct lock_match_data *data)
{
        union ldlm_policy_data *lpol = &lock->l_policy_data;
        enum ldlm_mode match;

        if (lock == data->lmd_old)
                return INTERVAL_ITER_STOP;

        /* Check if this lock can be matched.
         * Used by LU-2919(exclusive open) for open lease lock */
        if (ldlm_is_excl(lock))
                return INTERVAL_ITER_CONT;

        /* llite sometimes wants to match locks that will be
         * canceled when their users drop, but we allow it to match
         * if it passes in CBPENDING and the lock still has users.
         * this is generally only going to be used by children
         * whose parents already hold a lock so forward progress
         * can still happen. */
        if (ldlm_is_cbpending(lock) &&
            !(data->lmd_flags & LDLM_FL_CBPENDING))
                return INTERVAL_ITER_CONT;
        if (!data->lmd_unref && ldlm_is_cbpending(lock) &&
            lock->l_readers == 0 && lock->l_writers == 0)
                return INTERVAL_ITER_CONT;

        if (!(lock->l_req_mode & *data->lmd_mode))
                return INTERVAL_ITER_CONT;
        match = lock->l_req_mode;

        switch (lock->l_resource->lr_type) {
        case LDLM_EXTENT:
                if (lpol->l_extent.start > data->lmd_policy->l_extent.start ||
                    lpol->l_extent.end < data->lmd_policy->l_extent.end)
                        return INTERVAL_ITER_CONT;

                if (unlikely(match == LCK_GROUP) &&
                    data->lmd_policy->l_extent.gid != LDLM_GID_ANY &&
                    lpol->l_extent.gid != data->lmd_policy->l_extent.gid)
                        return INTERVAL_ITER_CONT;
                break;
        case LDLM_IBITS:
                /* We match if we have existing lock with same or wider set
                   of bits. */
                if ((lpol->l_inodebits.bits &
                     data->lmd_policy->l_inodebits.bits) !=
                    data->lmd_policy->l_inodebits.bits)
                        return INTERVAL_ITER_CONT;
                break;
        default:
                ;
        }

        /* Skip locks that have already gone away (GONE mask set) unless
         * matching unreferenced locks was explicitly requested. */
        if (!data->lmd_unref && LDLM_HAVE_MASK(lock, GONE))
                return INTERVAL_ITER_CONT;

        if (!equi(data->lmd_flags & LDLM_FL_LOCAL_ONLY, ldlm_is_local(lock)))
                return INTERVAL_ITER_CONT;

        if (data->lmd_flags & LDLM_FL_TEST_LOCK) {
                LDLM_LOCK_GET(lock);
                ldlm_lock_touch_in_lru(lock);
        } else {
                ldlm_lock_addref_internal_nolock(lock, match);
        }

        *data->lmd_mode = match;
        data->lmd_lock = lock;

        return INTERVAL_ITER_STOP;
}

static unsigned int itree_overlap_cb(struct interval_node *in, void *args)
{
        struct ldlm_interval *node = to_ldlm_interval(in);
        struct lock_match_data *data = args;
        struct ldlm_lock *lock;
        int rc;

        list_for_each_entry(lock, &node->li_group, l_sl_policy) {
                rc = lock_matches(lock, data);
                if (rc == INTERVAL_ITER_STOP)
                        return INTERVAL_ITER_STOP;
        }
        return INTERVAL_ITER_CONT;
}

/**
 * Search for a lock with given parameters in interval trees.
 *
 * \param res      search for a lock in this resource
 * \param data     parameters
 *
 * \retval a referenced lock or NULL.
 */
static struct ldlm_lock *search_itree(struct ldlm_resource *res,
                                      struct lock_match_data *data)
{
        struct interval_node_extent ext = {
                .start     = data->lmd_policy->l_extent.start,
                .end       = data->lmd_policy->l_extent.end
        };
        int idx;

        for (idx = 0; idx < LCK_MODE_NUM; idx++) {
                struct ldlm_interval_tree *tree = &res->lr_itree[idx];

                if (tree->lit_root == NULL)
                        continue;

                if (!(tree->lit_mode & *data->lmd_mode))
                        continue;

                interval_search(tree->lit_root, &ext,
                                itree_overlap_cb, data);
        }
        return data->lmd_lock;
}


/**
 * Search for a lock with given properties in a queue.
 *
 * \param queue    search for a lock in this queue
 * \param data     parameters
 *
 * \retval a referenced lock or NULL.
 */
static struct ldlm_lock *search_queue(struct list_head *queue,
                                      struct lock_match_data *data)
{
        struct ldlm_lock *lock;
        int rc;

        list_for_each_entry(lock, queue, l_res_link) {
                rc = lock_matches(lock, data);
                if (rc == INTERVAL_ITER_STOP)
                        return data->lmd_lock;
        }
        return NULL;
}

void ldlm_lock_fail_match_locked(struct ldlm_lock *lock)
{
        if ((lock->l_flags & LDLM_FL_FAIL_NOTIFIED) == 0) {
                lock->l_flags |= LDLM_FL_FAIL_NOTIFIED;
                wake_up_all(&lock->l_waitq);
        }
}
EXPORT_SYMBOL(ldlm_lock_fail_match_locked);

void ldlm_lock_fail_match(struct ldlm_lock *lock)
{
        lock_res_and_lock(lock);
        ldlm_lock_fail_match_locked(lock);
        unlock_res_and_lock(lock);
}

/**
 * Mark lock as "matchable" by OST.
 *
 * Used to prevent certain races in LOV/OSC where the lock is granted, but LVB
 * is not yet valid.
 * Assumes LDLM lock is already locked.
 */
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock)
{
        ldlm_set_lvb_ready(lock);
        wake_up_all(&lock->l_waitq);
}
EXPORT_SYMBOL(ldlm_lock_allow_match_locked);

/**
 * Mark lock as "matchable" by OST.
 * Locks the lock and then \see ldlm_lock_allow_match_locked
 */
void ldlm_lock_allow_match(struct ldlm_lock *lock)
{
        lock_res_and_lock(lock);
        ldlm_lock_allow_match_locked(lock);
        unlock_res_and_lock(lock);
}
EXPORT_SYMBOL(ldlm_lock_allow_match);

/**
 * Attempt to find a lock with specified properties.
 *
 * Typically returns a reference to matched lock unless LDLM_FL_TEST_LOCK is
 * set in \a flags
 *
 * Can be called in two ways:
 *
 * If 'ns' is NULL, then lockh describes an existing lock that we want to look
 * for a duplicate of.
 *
 * Otherwise, all of the fields must be filled in, to match against.
 *
 * If 'flags' contains LDLM_FL_LOCAL_ONLY, then only match local locks on the
 *     server (i.e., connh is NULL)
 * If 'flags' contains LDLM_FL_BLOCK_GRANTED, then only locks on the granted
 *     list will be considered
 * If 'flags' contains LDLM_FL_CBPENDING, then locks that have been marked
 *     to be canceled can still be matched as long as they still have reader
 *     or writer references
 * If 'flags' contains LDLM_FL_TEST_LOCK, then don't actually reference a lock,
 *     just tell us if we would have matched.
 *
 * \retval 1 if it finds an already-existing lock that is compatible; in this
 * case, lockh is filled in with an addref()ed lock
 *
 * We also check security context, and if that fails we simply return 0 (to
 * keep caller code unchanged), the context failure will be discovered by
 * caller sometime later.
 */
1366 enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
1367                                const struct ldlm_res_id *res_id,
1368                                enum ldlm_type type,
1369                                union ldlm_policy_data *policy,
1370                                enum ldlm_mode mode,
1371                                struct lustre_handle *lockh, int unref)
1372 {
1373         struct lock_match_data data = {
1374                 .lmd_old        = NULL,
1375                 .lmd_lock       = NULL,
1376                 .lmd_mode       = &mode,
1377                 .lmd_policy     = policy,
1378                 .lmd_flags      = flags,
1379                 .lmd_unref      = unref,
1380         };
1381         struct ldlm_resource *res;
1382         struct ldlm_lock *lock;
1383         int rc = 0;
1384         ENTRY;
1385
1386         if (ns == NULL) {
1387                 data.lmd_old = ldlm_handle2lock(lockh);
1388                 LASSERT(data.lmd_old != NULL);
1389
1390                 ns = ldlm_lock_to_ns(data.lmd_old);
1391                 res_id = &data.lmd_old->l_resource->lr_name;
1392                 type = data.lmd_old->l_resource->lr_type;
1393                 *data.lmd_mode = data.lmd_old->l_req_mode;
1394         }
1395
1396         res = ldlm_resource_get(ns, NULL, res_id, type, 0);
1397         if (IS_ERR(res)) {
1398                 LASSERT(data.lmd_old == NULL);
1399                 RETURN(0);
1400         }
1401
1402         LDLM_RESOURCE_ADDREF(res);
1403         lock_res(res);
1404
1405         if (res->lr_type == LDLM_EXTENT)
1406                 lock = search_itree(res, &data);
1407         else
1408                 lock = search_queue(&res->lr_granted, &data);
1409         if (lock != NULL)
1410                 GOTO(out, rc = 1);
1411         if (flags & LDLM_FL_BLOCK_GRANTED)
1412                 GOTO(out, rc = 0);
1413         lock = search_queue(&res->lr_converting, &data);
1414         if (lock != NULL)
1415                 GOTO(out, rc = 1);
1416         lock = search_queue(&res->lr_waiting, &data);
1417         if (lock != NULL)
1418                 GOTO(out, rc = 1);
1419
1420         EXIT;
1421  out:
1422         unlock_res(res);
1423         LDLM_RESOURCE_DELREF(res);
1424         ldlm_resource_putref(res);
1425
1426         if (lock) {
1427                 ldlm_lock2handle(lock, lockh);
1428                 if ((flags & LDLM_FL_LVB_READY) &&
1429                     (!ldlm_is_lvb_ready(lock))) {
1430                         __u64 wait_flags = LDLM_FL_LVB_READY |
1431                                 LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED;
1432                         struct l_wait_info lwi;
1433                         if (lock->l_completion_ast) {
1434                                 int err = lock->l_completion_ast(lock,
1435                                                           LDLM_FL_WAIT_NOREPROC,
1436                                                                  NULL);
1437                                 if (err) {
1438                                         if (flags & LDLM_FL_TEST_LOCK)
1439                                                 LDLM_LOCK_RELEASE(lock);
1440                                         else
1441                                                 ldlm_lock_decref_internal(lock,
1442                                                                           mode);
1443                                         rc = 0;
1444                                         goto out2;
1445                                 }
1446                         }
1447
1448                         lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout),
1449                                                NULL, LWI_ON_SIGNAL_NOOP, NULL);
1450
1451                         /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */
1452                         l_wait_event(lock->l_waitq,
1453                                      lock->l_flags & wait_flags,
1454                                      &lwi);
1455                         if (!ldlm_is_lvb_ready(lock)) {
1456                                 if (flags & LDLM_FL_TEST_LOCK)
1457                                         LDLM_LOCK_RELEASE(lock);
1458                                 else
1459                                         ldlm_lock_decref_internal(lock, mode);
1460                                 rc = 0;
1461                         }
1462                 }
1463         }
1464  out2:
1465         if (rc) {
1466                 LDLM_DEBUG(lock, "matched (%llu %llu)",
1467                            (type == LDLM_PLAIN || type == LDLM_IBITS) ?
1468                                 res_id->name[2] : policy->l_extent.start,
1469                            (type == LDLM_PLAIN || type == LDLM_IBITS) ?
1470                                 res_id->name[3] : policy->l_extent.end);
1471
1472                 /* check user's security context */
1473                 if (lock->l_conn_export &&
1474                     sptlrpc_import_check_ctx(
1475                                 class_exp2cliimp(lock->l_conn_export))) {
1476                         if (!(flags & LDLM_FL_TEST_LOCK))
1477                                 ldlm_lock_decref_internal(lock, mode);
1478                         rc = 0;
1479                 }
1480
1481                 if (flags & LDLM_FL_TEST_LOCK)
1482                         LDLM_LOCK_RELEASE(lock);
1483
1484         } else if (!(flags & LDLM_FL_TEST_LOCK)) {/*less verbose for test-only*/
1485                 LDLM_DEBUG_NOLOCK("not matched ns %p type %u mode %u res "
1486                                   "%llu/%llu (%llu %llu)", ns,
1487                                   type, mode, res_id->name[0], res_id->name[1],
1488                                   (type == LDLM_PLAIN || type == LDLM_IBITS) ?
1489                                         res_id->name[2] : policy->l_extent.start,
1490                                   (type == LDLM_PLAIN || type == LDLM_IBITS) ?
1491                                         res_id->name[3] : policy->l_extent.end);
1492         }
1493         if (data.lmd_old != NULL)
1494                 LDLM_LOCK_PUT(data.lmd_old);
1495
1496         return rc ? mode : 0;
1497 }
1498 EXPORT_SYMBOL(ldlm_lock_match);
1499
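/**
 * Check that the lock referenced by \a lockh is still usable: not
 * destroyed or failed, and not cancel-pending without remaining
 * reader/writer references. On success take an extra mode reference,
 * optionally copy the inodebits back through \a bits, and return the
 * granted mode; otherwise return 0.
 */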
1500 enum ldlm_mode ldlm_revalidate_lock_handle(const struct lustre_handle *lockh,
1501                                            __u64 *bits)
1502 {
1503         struct ldlm_lock *lock;
1504         enum ldlm_mode mode = 0;
1505         ENTRY;
1506
1507         lock = ldlm_handle2lock(lockh);
1508         if (lock != NULL) {
1509                 lock_res_and_lock(lock);
1510                 if (LDLM_HAVE_MASK(lock, GONE))
1511                         GOTO(out, mode);
1512
1513                 if (ldlm_is_cbpending(lock) &&
1514                     lock->l_readers == 0 && lock->l_writers == 0)
1515                         GOTO(out, mode);
1516
1517                 if (bits)
1518                         *bits = lock->l_policy_data.l_inodebits.bits;
1519                 mode = lock->l_granted_mode;
1520                 ldlm_lock_addref_internal_nolock(lock, mode);
1521         }
1522
1523         EXIT;
1524
1525 out:
1526         if (lock != NULL) {
1527                 unlock_res_and_lock(lock);
1528                 LDLM_LOCK_PUT(lock);
1529         }
1530         return mode;
1531 }
1532 EXPORT_SYMBOL(ldlm_revalidate_lock_handle);
1533
1534 /** The caller must guarantee that the buffer is large enough. */
1535 int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
1536                   enum req_location loc, void *data, int size)
1537 {
1538         void *lvb;
1539         ENTRY;
1540
1541         LASSERT(data != NULL);
1542         LASSERT(size >= 0);
1543
1544         switch (lock->l_lvb_type) {
1545         case LVB_T_OST:
1546                 if (size == sizeof(struct ost_lvb)) {
1547                         if (loc == RCL_CLIENT)
1548                                 lvb = req_capsule_client_swab_get(pill,
1549                                                 &RMF_DLM_LVB,
1550                                                 lustre_swab_ost_lvb);
1551                         else
1552                                 lvb = req_capsule_server_swab_get(pill,
1553                                                 &RMF_DLM_LVB,
1554                                                 lustre_swab_ost_lvb);
1555                         if (unlikely(lvb == NULL)) {
1556                                 LDLM_ERROR(lock, "no LVB");
1557                                 RETURN(-EPROTO);
1558                         }
1559
1560                         memcpy(data, lvb, size);
1561                 } else if (size == sizeof(struct ost_lvb_v1)) {
1562                         struct ost_lvb *olvb = data;
1563
1564                         if (loc == RCL_CLIENT)
1565                                 lvb = req_capsule_client_swab_get(pill,
1566                                                 &RMF_DLM_LVB,
1567                                                 lustre_swab_ost_lvb_v1);
1568                         else
1569                                 lvb = req_capsule_server_sized_swab_get(pill,
1570                                                 &RMF_DLM_LVB, size,
1571                                                 lustre_swab_ost_lvb_v1);
1572                         if (unlikely(lvb == NULL)) {
1573                                 LDLM_ERROR(lock, "no LVB");
1574                                 RETURN(-EPROTO);
1575                         }
1576
1577                         memcpy(data, lvb, size);
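                        /* The wire LVB was the short ost_lvb_v1 format,
                         * which lacks the nanosecond fields; clear them in
                         * the caller's full-size ost_lvb so stale memory is
                         * not misread as timestamps. */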
1578                         olvb->lvb_mtime_ns = 0;
1579                         olvb->lvb_atime_ns = 0;
1580                         olvb->lvb_ctime_ns = 0;
1581                 } else {
1582                         LDLM_ERROR(lock, "Unexpected OST LVB size %d in reply",
1583                                    size);
1584                         RETURN(-EINVAL);
1585                 }
1586                 break;
1587         case LVB_T_LQUOTA:
1588                 if (size == sizeof(struct lquota_lvb)) {
1589                         if (loc == RCL_CLIENT)
1590                                 lvb = req_capsule_client_swab_get(pill,
1591                                                 &RMF_DLM_LVB,
1592                                                 lustre_swab_lquota_lvb);
1593                         else
1594                                 lvb = req_capsule_server_swab_get(pill,
1595                                                 &RMF_DLM_LVB,
1596                                                 lustre_swab_lquota_lvb);
1597                         if (unlikely(lvb == NULL)) {
1598                                 LDLM_ERROR(lock, "no LVB");
1599                                 RETURN(-EPROTO);
1600                         }
1601
1602                         memcpy(data, lvb, size);
1603                 } else {
1604                         LDLM_ERROR(lock, "Unexpected lquota LVB size %d in reply",
1605                                    size);
1606                         RETURN(-EINVAL);
1607                 }
1608                 break;
1609         case LVB_T_LAYOUT:
1610                 if (size == 0)
1611                         break;
1612
1613                 if (loc == RCL_CLIENT)
1614                         lvb = req_capsule_client_get(pill, &RMF_DLM_LVB);
1615                 else
1616                         lvb = req_capsule_server_get(pill, &RMF_DLM_LVB);
1617                 if (unlikely(lvb == NULL)) {
1618                         LDLM_ERROR(lock, "no LVB");
1619                         RETURN(-EPROTO);
1620                 }
1621
1622                 memcpy(data, lvb, size);
1623                 break;
1624         default:
1625                 LDLM_ERROR(lock, "Unknown LVB type: %d", lock->l_lvb_type);
1626                 libcfs_debug_dumpstack(NULL);
1627                 RETURN(-EINVAL);
1628         }
1629
1630         RETURN(0);
1631 }
1632
1633 /**
1634  * Create and fill in a new LDLM lock with the specified properties.
1635  * Returns a referenced lock on success, or an ERR_PTR() on failure.
1636  */
1637 struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
1638                                    const struct ldlm_res_id *res_id,
1639                                    enum ldlm_type type,
1640                                    enum ldlm_mode mode,
1641                                    const struct ldlm_callback_suite *cbs,
1642                                    void *data, __u32 lvb_len,
1643                                    enum lvb_type lvb_type)
1644 {
1645         struct ldlm_lock        *lock;
1646         struct ldlm_resource    *res;
1647         int                     rc;
1648         ENTRY;
1649
1650         res = ldlm_resource_get(ns, NULL, res_id, type, 1);
1651         if (IS_ERR(res))
1652                 RETURN(ERR_CAST(res));
1653
1654         lock = ldlm_lock_new(res);
1655         if (lock == NULL)
1656                 RETURN(ERR_PTR(-ENOMEM));
1657
1658         lock->l_req_mode = mode;
1659         lock->l_ast_data = data;
1660         lock->l_pid = current_pid();
1661         if (ns_is_server(ns))
1662                 ldlm_set_ns_srv(lock);
1663         if (cbs) {
1664                 lock->l_blocking_ast = cbs->lcs_blocking;
1665                 lock->l_completion_ast = cbs->lcs_completion;
1666                 lock->l_glimpse_ast = cbs->lcs_glimpse;
1667         }
1668
1669         lock->l_tree_node = NULL;
1670         /* if this is an extent lock, allocate the interval tree node */
1671         if (type == LDLM_EXTENT)
1672                 if (ldlm_interval_alloc(lock) == NULL)
1673                         GOTO(out, rc = -ENOMEM);
1674
1675         if (lvb_len) {
1676                 lock->l_lvb_len = lvb_len;
1677                 OBD_ALLOC_LARGE(lock->l_lvb_data, lvb_len);
1678                 if (lock->l_lvb_data == NULL)
1679                         GOTO(out, rc = -ENOMEM);
1680         }
1681
1682         lock->l_lvb_type = lvb_type;
1683         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_NEW_LOCK))
1684                 GOTO(out, rc = -ENOENT);
1685
1686         RETURN(lock);
1687
1688 out:
1689         ldlm_lock_destroy(lock);
1690         LDLM_LOCK_RELEASE(lock);
1691         RETURN(ERR_PTR(rc));
1692 }
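/*
 * Example usage (an illustrative sketch only: the AST handlers, the
 * resource id and the namespace below are hypothetical, not taken from
 * a real caller):
 *
 *	struct ldlm_callback_suite cbs = {
 *		.lcs_blocking	= my_blocking_ast,
 *		.lcs_completion	= my_completion_ast,
 *	};
 *	struct ldlm_lock *lock;
 *
 *	lock = ldlm_lock_create(ns, &res_id, LDLM_PLAIN, LCK_EX, &cbs,
 *				NULL, 0, LVB_T_NONE);
 *	if (IS_ERR(lock))
 *		RETURN(PTR_ERR(lock));
 */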
1693
1694 /**
1695  * Enqueue (request) a lock.
1696  *
1697  * Does not block. As a result of the enqueue, the lock is placed on the
1698  * granted or waiting list.
1699  *
1700  * If the namespace has an intent policy set and the lock has the
1701  * LDLM_FL_HAS_INTENT flag set, skip all enqueueing and delegate lock
1702  * processing to the intent policy function.
1703  */
1704 enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns,
1705                                   struct ldlm_lock **lockp,
1706                                   void *cookie, __u64 *flags)
1707 {
1708         struct ldlm_lock *lock = *lockp;
1709         struct ldlm_resource *res = lock->l_resource;
1710         int local = ns_is_client(ldlm_res_to_ns(res));
1711 #ifdef HAVE_SERVER_SUPPORT
1712         ldlm_processing_policy policy;
1713 #endif
1714         enum ldlm_error rc = ELDLM_OK;
1715         struct ldlm_interval *node = NULL;
1716         ENTRY;
1717
1718         /* policies are not executed on the client or during replay */
1719         if ((*flags & (LDLM_FL_HAS_INTENT|LDLM_FL_REPLAY)) == LDLM_FL_HAS_INTENT
1720             && !local && ns->ns_policy) {
1721                 rc = ns->ns_policy(ns, lockp, cookie, lock->l_req_mode, *flags,
1722                                    NULL);
1723                 if (rc == ELDLM_LOCK_REPLACED) {
1724                         /* The lock that was returned has already been granted,
1725                          * and placed into lockp.  If it's not the same as the
1726                          * one we passed in, then destroy the old one and our
1727                          * work here is done. */
1728                         if (lock != *lockp) {
1729                                 ldlm_lock_destroy(lock);
1730                                 LDLM_LOCK_RELEASE(lock);
1731                         }
1732                         *flags |= LDLM_FL_LOCK_CHANGED;
1733                         RETURN(0);
1734                 } else if (rc != ELDLM_OK &&
1735                            lock->l_req_mode == lock->l_granted_mode) {
1736                         LASSERT(*flags & LDLM_FL_RESENT);
1737                         /* ns_policy may return an error in the resend
1738                          * case: the object may have been unlinked or some
1739                          * other error occurred. It is unclear whether the
1740                          * lock reached the client in the original reply,
1741                          * so leave it on the server rather than returning
1742                          * it again; due to LU-6529 the server will not OOM. */
1743                         RETURN(rc);
1744                 } else if (rc != ELDLM_OK ||
1745                            (rc == ELDLM_OK && (*flags & LDLM_FL_INTENT_ONLY))) {
1746                         ldlm_lock_destroy(lock);
1747                         RETURN(rc);
1748                 }
1749         }
1750
1751         if (*flags & LDLM_FL_RESENT) {
1752                 /* Reconstruct LDLM_FL_SRV_ENQ_MASK @flags for reply.
1753                  * Set LOCK_CHANGED always.
1754                  * Check if the lock is granted for BLOCK_GRANTED.
1755                  * Take NO_TIMEOUT from the lock as it is inherited through
1756                  * LDLM_FL_INHERIT_MASK */
1757                 *flags |= LDLM_FL_LOCK_CHANGED;
1758                 if (lock->l_req_mode != lock->l_granted_mode)
1759                         *flags |= LDLM_FL_BLOCK_GRANTED;
1760                 *flags |= lock->l_flags & LDLM_FL_NO_TIMEOUT;
1761                 RETURN(ELDLM_OK);
1762         }
1763
1764         /* A replaying lock may already be in the granted list, in which
1765          * case unlinking it frees its interval node; allocate the interval
1766          * node early, otherwise we cannot regrant this lock in the
1767          * future. - jay */
1768         if (!local && (*flags & LDLM_FL_REPLAY) && res->lr_type == LDLM_EXTENT)
1769                 OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
1770
1771         lock_res_and_lock(lock);
1772         if (local && lock->l_req_mode == lock->l_granted_mode) {
1773                 /* The server returned a blocked lock, but it was granted
1774                  * before we got a chance to actually enqueue it.  We don't
1775                  * need to do anything else. */
1776                 *flags &= ~LDLM_FL_BLOCKED_MASK;
1777                 GOTO(out, rc = ELDLM_OK);
1778         }
1779
1780         ldlm_resource_unlink_lock(lock);
1781         if (res->lr_type == LDLM_EXTENT && lock->l_tree_node == NULL) {
1782                 if (node == NULL) {
1783                         ldlm_lock_destroy_nolock(lock);
1784                         GOTO(out, rc = -ENOMEM);
1785                 }
1786
1787                 INIT_LIST_HEAD(&node->li_group);
1788                 ldlm_interval_attach(node, lock);
1789                 node = NULL;
1790         }
1791
1792         /* Some flags from the enqueue want to make it into the AST, via the
1793          * lock's l_flags. */
1794         if (*flags & LDLM_FL_AST_DISCARD_DATA)
1795                 ldlm_set_ast_discard_data(lock);
1796         if (*flags & LDLM_FL_TEST_LOCK)
1797                 ldlm_set_test_lock(lock);
1798         if (*flags & LDLM_FL_COS_INCOMPAT)
1799                 ldlm_set_cos_incompat(lock);
1800         if (*flags & LDLM_FL_COS_ENABLED)
1801                 ldlm_set_cos_enabled(lock);
1802
1803         /* This distinction between local lock trees is very important; a client
1804          * namespace only has information about locks taken by that client, and
1805          * thus doesn't have enough information to decide for itself if it can
1806          * be granted (below).  In this case, we do exactly what the server
1807          * tells us to do, as dictated by the 'flags'.
1808          *
1809          * We do exactly the same thing during recovery, when the server is
1810          * more or less trusting the clients not to lie.
1811          *
1812          * FIXME (bug 268): Detect obvious lies by checking compatibility in
1813          * granted/converting queues. */
1814         if (local) {
1815                 if (*flags & LDLM_FL_BLOCK_CONV)
1816                         ldlm_resource_add_lock(res, &res->lr_converting, lock);
1817                 else if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED))
1818                         ldlm_resource_add_lock(res, &res->lr_waiting, lock);
1819                 else
1820                         ldlm_grant_lock(lock, NULL);
1821                 GOTO(out, rc = ELDLM_OK);
1822 #ifdef HAVE_SERVER_SUPPORT
1823         } else if (*flags & LDLM_FL_REPLAY) {
1824                 if (*flags & LDLM_FL_BLOCK_CONV) {
1825                         ldlm_resource_add_lock(res, &res->lr_converting, lock);
1826                         GOTO(out, rc = ELDLM_OK);
1827                 } else if (*flags & LDLM_FL_BLOCK_WAIT) {
1828                         ldlm_resource_add_lock(res, &res->lr_waiting, lock);
1829                         GOTO(out, rc = ELDLM_OK);
1830                 } else if (*flags & LDLM_FL_BLOCK_GRANTED) {
1831                         ldlm_grant_lock(lock, NULL);
1832                         GOTO(out, rc = ELDLM_OK);
1833                 }
1834                 /* If no flags, fall through to normal enqueue path. */
1835         }
1836
1837         policy = ldlm_processing_policy_table[res->lr_type];
1838         policy(lock, flags, 1, &rc, NULL);
1839         GOTO(out, rc);
1840 #else
1841         } else {
1842                 CERROR("This is a client-side-only module, cannot handle "
1843                        "LDLM_NAMESPACE_SERVER resource type lock.\n");
1844                 LBUG();
1845         }
1846 #endif
1847
1848 out:
1849         unlock_res_and_lock(lock);
1850         if (node)
1851                 OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
1852         return rc;
1853 }
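/*
 * Illustrative call sequence (a sketch: the cookie, the error label
 * and lock_was_replaced_by_intent_policy() are hypothetical):
 *
 *	__u64 flags = LDLM_FL_HAS_INTENT;
 *	enum ldlm_error rc;
 *
 *	rc = ldlm_lock_enqueue(ns, &lock, cookie, &flags);
 *	if (rc != ELDLM_OK)
 *		goto failed;
 *	if (flags & LDLM_FL_LOCK_CHANGED)
 *		lock_was_replaced_by_intent_policy(lock);
 *
 * When the intent policy returns ELDLM_LOCK_REPLACED, the original
 * lock has already been destroyed and *lockp points at the granted
 * replacement, which is signalled to the caller via
 * LDLM_FL_LOCK_CHANGED.
 */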
1854
1855 #ifdef HAVE_SERVER_SUPPORT
1856 /**
1857  * Iterate through all waiting locks on a given resource queue and attempt to
1858  * grant them.
1859  *
1860  * Must be called with resource lock held.
1861  */
1862 int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue,
1863                          struct list_head *work_list)
1864 {
1865         struct list_head *tmp, *pos;
1866         ldlm_processing_policy policy;
1867         __u64 flags;
1868         int rc = LDLM_ITER_CONTINUE;
1869         enum ldlm_error err;
1870         ENTRY;
1871
1872         check_res_locked(res);
1873
1874         policy = ldlm_processing_policy_table[res->lr_type];
1875         LASSERT(policy);
1876
1877         list_for_each_safe(tmp, pos, queue) {
1878                 struct ldlm_lock *pending;
1879
1880                 pending = list_entry(tmp, struct ldlm_lock, l_res_link);
1881
1882                 CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);
1883
1884                 flags = 0;
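                /* first_enq=0 here: this is a reprocessing pass over an
                 * already-queued lock, not its first enqueue (assuming the
                 * usual meaning of the policy function's third argument) */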
1885                 rc = policy(pending, &flags, 0, &err, work_list);
1886                 if (rc != LDLM_ITER_CONTINUE)
1887                         break;
1888         }
1889
1890         RETURN(rc);
1891 }
1892 #endif
1893
1894 /**
1895  * Process a call to blocking AST callback for a lock in ast_work list
1896  */
1897 static int
1898 ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
1899 {
1900         struct ldlm_cb_set_arg *arg = opaq;
1901         struct ldlm_lock_desc   d;
1902         int                     rc;
1903         struct ldlm_lock       *lock;
1904         ENTRY;
1905
1906         if (list_empty(arg->list))
1907                 RETURN(-ENOENT);
1908
1909         lock = list_entry(arg->list->next, struct ldlm_lock, l_bl_ast);
1910
1911         /* nobody should touch l_bl_ast */
1912         lock_res_and_lock(lock);
1913         list_del_init(&lock->l_bl_ast);
1914
1915         LASSERT(ldlm_is_ast_sent(lock));
1916         LASSERT(lock->l_bl_ast_run == 0);
1917         LASSERT(lock->l_blocking_lock);
1918         lock->l_bl_ast_run++;
1919         unlock_res_and_lock(lock);
1920
1921         ldlm_lock2desc(lock->l_blocking_lock, &d);
1922
1923         rc = lock->l_blocking_ast(lock, &d, (void *)arg, LDLM_CB_BLOCKING);
1924         LDLM_LOCK_RELEASE(lock->l_blocking_lock);
1925         lock->l_blocking_lock = NULL;
1926         LDLM_LOCK_RELEASE(lock);
1927
1928         RETURN(rc);
1929 }
1930
1931 /**
1932  * Process a call to completion AST callback for a lock in ast_work list
1933  */
1934 static int
1935 ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
1936 {
1937         struct ldlm_cb_set_arg  *arg = opaq;
1938         int                      rc = 0;
1939         struct ldlm_lock        *lock;
1940         ldlm_completion_callback completion_callback;
1941         ENTRY;
1942
1943         if (list_empty(arg->list))
1944                 RETURN(-ENOENT);
1945
1946         lock = list_entry(arg->list->next, struct ldlm_lock, l_cp_ast);
1947
1948         /* It's possible to receive a completion AST before we've set
1949          * the l_completion_ast pointer: either because the AST arrived
1950          * before the reply, or simply because there's a small race
1951          * window between receiving the reply and finishing the local
1952          * enqueue. (bug 842)
1953          *
1954          * This can't happen with the blocking_ast, however, because we
1955          * will never call the local blocking_ast until we drop our
1956          * reader/writer reference, which we won't do until we get the
1957          * reply and finish enqueueing. */
1958
1959         /* nobody should touch l_cp_ast */
1960         lock_res_and_lock(lock);
1961         list_del_init(&lock->l_cp_ast);
1962         LASSERT(ldlm_is_cp_reqd(lock));
1963         /* save l_completion_ast since it can be changed by
1964          * mds_intent_policy(), see bug 14225 */
1965         completion_callback = lock->l_completion_ast;
1966         ldlm_clear_cp_reqd(lock);
1967         unlock_res_and_lock(lock);
1968
1969         if (completion_callback != NULL)
1970                 rc = completion_callback(lock, 0, (void *)arg);
1971         LDLM_LOCK_RELEASE(lock);
1972
1973         RETURN(rc);
1974 }
1975
1976 /**
1977  * Process a call to revocation AST callback for a lock in ast_work list
1978  */
1979 static int
1980 ldlm_work_revoke_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
1981 {
1982         struct ldlm_cb_set_arg *arg = opaq;
1983         struct ldlm_lock_desc   desc;
1984         int                     rc;
1985         struct ldlm_lock       *lock;
1986         ENTRY;
1987
1988         if (list_empty(arg->list))
1989                 RETURN(-ENOENT);
1990
1991         lock = list_entry(arg->list->next, struct ldlm_lock, l_rk_ast);
1992         list_del_init(&lock->l_rk_ast);
1993
1994         /* make the descriptor pretend an exclusive lock is being requested */
1995         ldlm_lock2desc(lock, &desc);
1996         desc.l_req_mode = LCK_EX;
1997         desc.l_granted_mode = 0;
1998
1999         rc = lock->l_blocking_ast(lock, &desc, (void*)arg, LDLM_CB_BLOCKING);
2000         LDLM_LOCK_RELEASE(lock);
2001
2002         RETURN(rc);
2003 }
2004
2005 /**
2006  * Process a call to glimpse AST callback for a lock in ast_work list
2007  */
2008 int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
2009 {
2010         struct ldlm_cb_set_arg          *arg = opaq;
2011         struct ldlm_glimpse_work        *gl_work;
2012         struct ldlm_lock                *lock;
2013         int                              rc = 0;
2014         ENTRY;
2015
2016         if (list_empty(arg->list))
2017                 RETURN(-ENOENT);
2018
2019         gl_work = list_entry(arg->list->next, struct ldlm_glimpse_work,
2020                                  gl_list);
2021         list_del_init(&gl_work->gl_list);
2022
2023         lock = gl_work->gl_lock;
2024
2025         /* transfer the glimpse descriptor to ldlm_cb_set_arg */
2026         arg->gl_desc = gl_work->gl_desc;
2027
2028         /* invoke the actual glimpse callback */
2029         if (lock->l_glimpse_ast(lock, (void*)arg) == 0)
2030                 rc = 1;
2031
2032         LDLM_LOCK_RELEASE(lock);
2033
2034         if ((gl_work->gl_flags & LDLM_GL_WORK_NOFREE) == 0)
2035                 OBD_FREE_PTR(gl_work);
2036
2037         RETURN(rc);
2038 }
2039
2040 /**
2041  * Process a list of locks in need of ASTs being sent.
2042  *
2043  * Used on server to send multiple ASTs together instead of sending one by
2044  * one.
2045  */
2046 int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
2047                       ldlm_desc_ast_t ast_type)
2048 {
2049         struct ldlm_cb_set_arg *arg;
2050         set_producer_func       work_ast_lock;
2051         int                     rc;
2052
2053         if (list_empty(rpc_list))
2054                 RETURN(0);
2055
2056         OBD_ALLOC_PTR(arg);
2057         if (arg == NULL)
2058                 RETURN(-ENOMEM);
2059
2060         atomic_set(&arg->restart, 0);
2061         arg->list = rpc_list;
2062
2063         switch (ast_type) {
2064                 case LDLM_WORK_BL_AST:
2065                         arg->type = LDLM_BL_CALLBACK;
2066                         work_ast_lock = ldlm_work_bl_ast_lock;
2067                         break;
2068                 case LDLM_WORK_CP_AST:
2069                         arg->type = LDLM_CP_CALLBACK;
2070                         work_ast_lock = ldlm_work_cp_ast_lock;
2071                         break;
2072                 case LDLM_WORK_REVOKE_AST:
2073                         arg->type = LDLM_BL_CALLBACK;
2074                         work_ast_lock = ldlm_work_revoke_ast_lock;
2075                         break;
2076                 case LDLM_WORK_GL_AST:
2077                         arg->type = LDLM_GL_CALLBACK;
2078                         work_ast_lock = ldlm_work_gl_ast_lock;
2079                         break;
2080                 default:
2081                         LBUG();
2082         }
2083
2084         /* We create a ptlrpc request set with flow control extension.
2085          * This request set will use the work_ast_lock function to produce new
2086          * requests and will send a new request each time one completes in order
2087          * to keep the number of requests in flight to ns_max_parallel_ast */
2088         arg->set = ptlrpc_prep_fcset(ns->ns_max_parallel_ast ? : UINT_MAX,
2089                                      work_ast_lock, arg);
2090         if (arg->set == NULL)
2091                 GOTO(out, rc = -ENOMEM);
2092
2093         ptlrpc_set_wait(arg->set);
2094         ptlrpc_set_destroy(arg->set);
2095
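        /* arg->restart is raised by the AST callbacks when an AST could not
         * be delivered; report -ERESTART so callers such as
         * ldlm_reprocess_all() can retry the whole pass. */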
2096         rc = atomic_read(&arg->restart) ? -ERESTART : 0;
2097         GOTO(out, rc);
2098 out:
2099         OBD_FREE_PTR(arg);
2100         return rc;
2101 }
2102
2103 static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
2104 {
2105         ldlm_reprocess_all(res);
2106         return LDLM_ITER_CONTINUE;
2107 }
2108
2109 static int ldlm_reprocess_res(struct cfs_hash *hs, struct cfs_hash_bd *bd,
2110                               struct hlist_node *hnode, void *arg)
2111 {
2112         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
2113         int    rc;
2114
2115         rc = reprocess_one_queue(res, arg);
2116
2117         return rc == LDLM_ITER_STOP;
2118 }
2119
2120 /**
2121  * Iterate through all resources on a namespace attempting to grant waiting
2122  * locks.
2123  */
2124 void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
2125 {
2126         ENTRY;
2127
2128         if (ns != NULL) {
2129                 cfs_hash_for_each_nolock(ns->ns_rs_hash,
2130                                          ldlm_reprocess_res, NULL, 0);
2131         }
2132         EXIT;
2133 }
2134
2135 /**
2136  * Try to grant all waiting locks on a resource.
2137  *
2138  * Calls ldlm_reprocess_queue on converting and waiting queues.
2139  *
2140  * Typically called after some resource locks are cancelled to see
2141  * if anything could be granted as a result of the cancellation.
2142  */
2143 void ldlm_reprocess_all(struct ldlm_resource *res)
2144 {
2145         struct list_head rpc_list;
2146 #ifdef HAVE_SERVER_SUPPORT
2147         struct obd_device *obd;
2148         int rc;
2149         ENTRY;
2150
2151         INIT_LIST_HEAD(&rpc_list);
2152         /* Local lock trees don't get reprocessed. */
2153         if (ns_is_client(ldlm_res_to_ns(res))) {
2154                 EXIT;
2155                 return;
2156         }
2157
2158         /* Disable reprocess during lock replay stage but allow during
2159          * request replay stage.
2160          */
2161         obd = ldlm_res_to_ns(res)->ns_obd;
2162         if (obd->obd_recovering &&
2163             atomic_read(&obd->obd_req_replay_clients) == 0)
2164                 RETURN_EXIT;
2165 restart:
2166         lock_res(res);
2167         rc = ldlm_reprocess_queue(res, &res->lr_converting, &rpc_list);
2168         if (rc == LDLM_ITER_CONTINUE)
2169                 ldlm_reprocess_queue(res, &res->lr_waiting, &rpc_list);
2170         unlock_res(res);
2171
2172         rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
2173                                LDLM_WORK_CP_AST);
2174         if (rc == -ERESTART) {
2175                 LASSERT(list_empty(&rpc_list));
2176                 goto restart;
2177         }
2178 #else
2179         ENTRY;
2180
2181         INIT_LIST_HEAD(&rpc_list);
2182         if (!ns_is_client(ldlm_res_to_ns(res))) {
2183                 CERROR("This is a client-side-only module, cannot handle "
2184                        "LDLM_NAMESPACE_SERVER resource type lock.\n");
2185                 LBUG();
2186         }
2187 #endif
2188         EXIT;
2189 }
2190 EXPORT_SYMBOL(ldlm_reprocess_all);
2191
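/**
 * Re-check the BL_DONE flag under the resource lock; used as the
 * l_wait_event() condition in ldlm_cancel_callback() below.
 */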
2192 static bool is_bl_done(struct ldlm_lock *lock)
2193 {
2194         bool bl_done = true;
2195
2196         if (!ldlm_is_bl_done(lock)) {
2197                 lock_res_and_lock(lock);
2198                 bl_done = ldlm_is_bl_done(lock);
2199                 unlock_res_and_lock(lock);
2200         }
2201
2202         return bl_done;
2203 }
2204
2205 /**
2206  * Helper function to call blocking AST for LDLM lock \a lock in a
2207  * "cancelling" mode.
2208  */
2209 void ldlm_cancel_callback(struct ldlm_lock *lock)
2210 {
2211         check_res_locked(lock->l_resource);
2212         if (!ldlm_is_cancel(lock)) {
2213                 ldlm_set_cancel(lock);
2214                 if (lock->l_blocking_ast) {
2215                         unlock_res_and_lock(lock);
2216                         lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
2217                                              LDLM_CB_CANCELING);
2218                         lock_res_and_lock(lock);
2219                 } else {
2220                         LDLM_DEBUG(lock, "no blocking ast");
2221                 }
2222
2223                 /* only canceller can set bl_done bit */
2224                 ldlm_set_bl_done(lock);
2225                 wake_up_all(&lock->l_waitq);
2226         } else if (!ldlm_is_bl_done(lock)) {
2227                 struct l_wait_info lwi = { 0 };
2228
2229                 /* The lock is guaranteed to have been canceled by the
2230                  * time this function returns. */
2231                 unlock_res_and_lock(lock);
2232                 l_wait_event(lock->l_waitq, is_bl_done(lock), &lwi);
2233                 lock_res_and_lock(lock);
2234         }
2235 }
2236
2237 /**
2238  * Remove skiplist-enabled LDLM lock \a req from granted list
2239  */
2240 void ldlm_unlink_lock_skiplist(struct ldlm_lock *req)
2241 {
2242         if (req->l_resource->lr_type != LDLM_PLAIN &&
2243             req->l_resource->lr_type != LDLM_IBITS)
2244                 return;
2245
2246         list_del_init(&req->l_sl_policy);
2247         list_del_init(&req->l_sl_mode);
2248 }
2249
2250 /**
2251  * Attempts to cancel LDLM lock \a lock that has no reader/writer references.
2252  */
2253 void ldlm_lock_cancel(struct ldlm_lock *lock)
2254 {
2255         struct ldlm_resource *res;
2256         struct ldlm_namespace *ns;
2257         ENTRY;
2258
2259         lock_res_and_lock(lock);
2260
2261         res = lock->l_resource;
2262         ns  = ldlm_res_to_ns(res);
2263
2264         /* Please do not, no matter how tempting, remove this LBUG without
2265          * talking to me first. -phik */
2266         if (lock->l_readers || lock->l_writers) {
2267                 LDLM_ERROR(lock, "lock still has references");
2268                 LBUG();
2269         }
2270
2271         if (ldlm_is_waited(lock))
2272                 ldlm_del_waiting_lock(lock);
2273
2274         /* Run the cancel callback; it may drop and retake the res lock. */
2275         ldlm_cancel_callback(lock);
2276
2277         /* Yes, second time, just in case it was added again while we were
2278          * running with no res lock in ldlm_cancel_callback */
2279         if (ldlm_is_waited(lock))
2280                 ldlm_del_waiting_lock(lock);
2281
2282         ldlm_resource_unlink_lock(lock);
2283         ldlm_lock_destroy_nolock(lock);
2284
2285         if (lock->l_granted_mode == lock->l_req_mode)
2286                 ldlm_pool_del(&ns->ns_pool, lock);
2287
2288         /* Make sure we will not be called again for the same lock, which
2289          * is possible if lock->l_granted_mode is not zeroed out */
2290         lock->l_granted_mode = LCK_MINMODE;
2291         unlock_res_and_lock(lock);
2292
2293         EXIT;
2294 }
2295 EXPORT_SYMBOL(ldlm_lock_cancel);
2296
2297 /**
2298  * Attach opaque data to the lock; it is meaningful only to the upper layer.
2299  */
2300 int ldlm_lock_set_data(const struct lustre_handle *lockh, void *data)
2301 {
2302         struct ldlm_lock *lock = ldlm_handle2lock(lockh);
2303         int rc = -EINVAL;
2304         ENTRY;
2305
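        /* l_ast_data is effectively write-once: succeed only if it was
         * unset or already equal to the data being attached. */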
2306         if (lock) {
2307                 if (lock->l_ast_data == NULL)
2308                         lock->l_ast_data = data;
2309                 if (lock->l_ast_data == data)
2310                         rc = 0;
2311                 LDLM_LOCK_PUT(lock);
2312         }
2313         RETURN(rc);
2314 }
2315 EXPORT_SYMBOL(ldlm_lock_set_data);
2316
2317 struct export_cl_data {
2318         struct obd_export       *ecl_exp;
2319         int                     ecl_loop;
2320 };
2321
2322 static void ldlm_cancel_lock_for_export(struct obd_export *exp,
2323                                         struct ldlm_lock *lock,
2324                                         struct export_cl_data *ecl)
2325 {
2326         struct ldlm_resource *res;
2327
2328         res = ldlm_resource_getref(lock->l_resource);
2329
2330         ldlm_res_lvbo_update(res, NULL, 1);
2331         ldlm_lock_cancel(lock);
2332         if (!exp->exp_obd->obd_stopping)
2333                 ldlm_reprocess_all(res);
2334         ldlm_resource_putref(res);
2335
2336         ecl->ecl_loop++;
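        /* (x & -x) == x holds only for powers of two (and zero), so this
         * logs progress after 1, 2, 4, 8, ... cancelled locks. */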
2337         if ((ecl->ecl_loop & -ecl->ecl_loop) == ecl->ecl_loop) {
2338                 CDEBUG(D_INFO, "Export %p, %d locks cancelled.\n",
2339                        exp, ecl->ecl_loop);
2340         }
2341 }
2342
2343 /**
2344  * Iterator function for ldlm_export_cancel_locks.
2345  * Cancels passed locks.
2346  */
2347 static int
2348 ldlm_cancel_locks_for_export_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
2349                                 struct hlist_node *hnode, void *data)
2350
2351 {
2352         struct export_cl_data   *ecl = (struct export_cl_data *)data;
2353         struct obd_export       *exp  = ecl->ecl_exp;
2354         struct ldlm_lock        *lock = cfs_hash_object(hs, hnode);
2355
2356         LDLM_LOCK_GET(lock);
2357         ldlm_cancel_lock_for_export(exp, lock, ecl);
2358         LDLM_LOCK_RELEASE(lock);
2359
2360         return 0;
2361 }
2362
2363 /**
2364  * Cancel all blocked locks for given export.
2365  *
2366  * Typically called on client disconnection/eviction
2367  */
2368 int ldlm_export_cancel_blocked_locks(struct obd_export *exp)
2369 {
2370         struct export_cl_data   ecl = {
2371                 .ecl_exp        = exp,
2372                 .ecl_loop       = 0,
2373         };
2374
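        /* Pop one blocked lock at a time under exp_bl_list_lock, taking a
         * reference so the lock cannot disappear between list removal and
         * the cancel below. */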
2375         while (!list_empty(&exp->exp_bl_list)) {
2376                 struct ldlm_lock *lock;
2377
2378                 spin_lock_bh(&exp->exp_bl_list_lock);
2379                 if (!list_empty(&exp->exp_bl_list)) {
2380                         lock = list_entry(exp->exp_bl_list.next,
2381                                           struct ldlm_lock, l_exp_list);
2382                         LDLM_LOCK_GET(lock);
2383                         list_del_init(&lock->l_exp_list);
2384                 } else {
2385                         lock = NULL;
2386                 }
2387                 spin_unlock_bh(&exp->exp_bl_list_lock);
2388
2389                 if (lock == NULL)
2390                         break;
2391
2392                 ldlm_cancel_lock_for_export(exp, lock, &ecl);
2393                 LDLM_LOCK_RELEASE(lock);
2394         }
2395
2396         CDEBUG(D_DLMTRACE, "Export %p, canceled %d locks, "
2397                "%d left on hash table.\n", exp, ecl.ecl_loop,
2398                atomic_read(&exp->exp_lock_hash->hs_count));
2399
2400         return ecl.ecl_loop;
2401 }
2402
2403 /**
2404  * Cancel all locks for given export.
2405  *
2406  * Typically called after client disconnection/eviction
2407  */
2408 int ldlm_export_cancel_locks(struct obd_export *exp)
2409 {
2410         struct export_cl_data   ecl = {
2411                 .ecl_exp        = exp,
2412                 .ecl_loop       = 0,
2413         };
2414
2415         cfs_hash_for_each_empty(exp->exp_lock_hash,
2416                                 ldlm_cancel_locks_for_export_cb, &ecl);
2417
2418         CDEBUG(D_DLMTRACE, "Export %p, canceled %d locks, "
2419                "%d left on hash table.\n", exp, ecl.ecl_loop,
2420                atomic_read(&exp->exp_lock_hash->hs_count));
2421
2422         return ecl.ecl_loop;
2423 }
2424
2425 /**
2426  * Downgrade an exclusive lock.
2427  *
2428  * A fast variant of ldlm_lock_convert for conversion of exclusive
2429  * locks. The conversion is always successful.
2430  * Used by Commit on Sharing (COS) code.
2431  *
2432  * \param lock A lock to convert
2433  * \param new_mode new lock mode
2434  */
2435 void ldlm_lock_downgrade(struct ldlm_lock *lock, enum ldlm_mode new_mode)
2436 {
2437         ENTRY;
2438
2439         LASSERT(lock->l_granted_mode & (LCK_PW | LCK_EX));
2440         LASSERT(new_mode == LCK_COS);
2441
2442         lock_res_and_lock(lock);
2443         ldlm_resource_unlink_lock(lock);
2444         /*
2445          * Remove the lock from pool as it will be added again in
2446          * ldlm_grant_lock() called below.
2447          */
2448         ldlm_pool_del(&ldlm_lock_to_ns(lock)->ns_pool, lock);
2449
2450         lock->l_req_mode = new_mode;
2451         ldlm_grant_lock(lock, NULL);
2452         unlock_res_and_lock(lock);
2453         ldlm_reprocess_all(lock->l_resource);
2454
2455         EXIT;
2456 }
2457 EXPORT_SYMBOL(ldlm_lock_downgrade);
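/*
 * For example, the Commit-on-Sharing code downgrades a granted PW/EX
 * lock via ldlm_lock_downgrade(lock, LCK_COS); the LASSERTs above
 * enforce exactly this usage.
 */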
2458
2459 /**
2460  * Attempt to convert already granted lock to a different mode.
2461  *
2462  * While lock conversion is not currently used, future client-side
2463  * optimizations could take advantage of it to avoid discarding cached
2464  * pages on a file.
2465  */
2466 struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock,
2467                                         enum ldlm_mode new_mode, __u32 *flags)
2468 {
2469         struct list_head rpc_list;
2470         struct ldlm_resource *res;
2471         struct ldlm_namespace *ns;
2472         int granted = 0;
2473 #ifdef HAVE_SERVER_SUPPORT
2474         int old_mode;
2475         struct sl_insert_point prev;
2476 #endif
2477         struct ldlm_interval *node;
2478         ENTRY;
2479
2480         INIT_LIST_HEAD(&rpc_list);
2481         /* Just return if mode is unchanged. */
2482         if (new_mode == lock->l_granted_mode) {
2483                 *flags |= LDLM_FL_BLOCK_GRANTED;
2484                 RETURN(lock->l_resource);
2485         }
2486
2487         /* I can't check the lock type here because the lock's bitlock
2488          * is not held, so do the allocation blindly. -jay */
2489         OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
2490         if (node == NULL)  /* Actually, this causes EDEADLOCK to be returned */
2491                 RETURN(NULL);
2492
2493         LASSERTF((new_mode == LCK_PW && lock->l_granted_mode == LCK_PR),
2494                  "new_mode %u, granted %u\n", new_mode, lock->l_granted_mode);
2495
2496         lock_res_and_lock(lock);
2497
2498         res = lock->l_resource;
2499         ns  = ldlm_res_to_ns(res);
2500
2501 #ifdef HAVE_SERVER_SUPPORT
2502         old_mode = lock->l_req_mode;
2503 #endif
2504         lock->l_req_mode = new_mode;
2505         if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS) {
2506 #ifdef HAVE_SERVER_SUPPORT
2507                 /* remember the lock position where the lock might be
2508                  * added back to the granted list later and also
2509                  * remember the join mode for skiplist fixing. */
2510                 prev.res_link = lock->l_res_link.prev;
2511                 prev.mode_link = lock->l_sl_mode.prev;
2512                 prev.policy_link = lock->l_sl_policy.prev;
2513 #endif
2514                 ldlm_resource_unlink_lock(lock);
2515         } else {
2516                 ldlm_resource_unlink_lock(lock);
2517                 if (res->lr_type == LDLM_EXTENT) {
2518                         /* FIXME: ugly code, I have to attach the lock to an
2519                          * interval node again since it may be granted
2520                          * soon */
2521                         INIT_LIST_HEAD(&node->li_group);
2522                         ldlm_interval_attach(node, lock);
2523                         node = NULL;
2524                 }
2525         }
2526
2527         /*
2528          * Remove old lock from the pool before adding the lock with new
2529          * mode below in ->policy()
2530          */
2531         ldlm_pool_del(&ns->ns_pool, lock);
2532
2533         /* If this is a local resource, put it on the appropriate list. */
2534         if (ns_is_client(ldlm_res_to_ns(res))) {
2535                 if (*flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_GRANTED)) {
2536                         ldlm_resource_add_lock(res, &res->lr_converting, lock);
2537                 } else {
2538                         /* This should never happen, because of the way the
2539                          * server handles conversions. */
2540                         LDLM_ERROR(lock, "Erroneous flags %x on local lock\n",
2541                                    *flags);
2542                         LBUG();
2543
2544                         ldlm_grant_lock(lock, &rpc_list);
2545                         granted = 1;
2546                         /* FIXME: completion handling must not run with lr_lock held! */
2547                         if (lock->l_completion_ast)
2548                                 lock->l_completion_ast(lock, 0, NULL);
2549                 }
2550 #ifdef HAVE_SERVER_SUPPORT
2551         } else {
2552                 int rc;
2553                 enum ldlm_error err;
2554                 __u64 pflags = 0;
2555                 ldlm_processing_policy policy;
2556
2557                 policy = ldlm_processing_policy_table[res->lr_type];
2558                 rc = policy(lock, &pflags, 0, &err, &rpc_list);
2559                 if (rc == LDLM_ITER_STOP) {
2560                         lock->l_req_mode = old_mode;
2561                         if (res->lr_type == LDLM_EXTENT)
2562                                 ldlm_extent_add_lock(res, lock);
2563                         else
2564                                 ldlm_granted_list_add_lock(lock, &prev);
2565
2566                         res = NULL;
2567                 } else {
2568                         *flags |= LDLM_FL_BLOCK_GRANTED;
2569                         granted = 1;
2570                 }
2571         }
2572 #else
2573         } else {
2574                 CERROR("This is a client-side-only module, cannot handle "
2575                        "LDLM_NAMESPACE_SERVER resource type lock.\n");
2576                 LBUG();
2577         }
2578 #endif
2579         unlock_res_and_lock(lock);
2580
2581         if (granted)
2582                 ldlm_run_ast_work(ns, &rpc_list, LDLM_WORK_CP_AST);
2583         if (node)
2584                 OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
2585         RETURN(res);
2586 }
2587
2588 /**
2589  * Print a description of the lock with handle \a lockh into the debug log.
2590  *
2591  * Used when printing all locks on a resource for debug purposes.
2592  */
2593 void ldlm_lock_dump_handle(int level, const struct lustre_handle *lockh)
2594 {
2595         struct ldlm_lock *lock;
2596
2597         if (!((libcfs_debug | D_ERROR) & level))
2598                 return;
2599
2600         lock = ldlm_handle2lock(lockh);
2601         if (lock == NULL)
2602                 return;
2603
2604         LDLM_DEBUG_LIMIT(level, lock, "###");
2605
2606         LDLM_LOCK_PUT(lock);
2607 }
2608 EXPORT_SYMBOL(ldlm_lock_dump_handle);
2609
2610 /**
2611  * Print lock information with a custom message into the debug log.
2612  * Helper function.
2613  */
2614 void _ldlm_lock_debug(struct ldlm_lock *lock,
2615                       struct libcfs_debug_msg_data *msgdata,
2616                       const char *fmt, ...)
2617 {
2618         va_list args;
2619         struct obd_export *exp = lock->l_export;
2620         struct ldlm_resource *resource = lock->l_resource;
2621         char *nid = "local";
2622
2623         va_start(args, fmt);
2624
2625         if (exp && exp->exp_connection) {
2626                 nid = libcfs_nid2str(exp->exp_connection->c_peer.nid);
2627         } else if (exp && exp->exp_obd != NULL) {
2628                 struct obd_import *imp = exp->exp_obd->u.cli.cl_import;
2629                 nid = libcfs_nid2str(imp->imp_connection->c_peer.nid);
2630         }
2631
2632         if (resource == NULL) {
2633                 libcfs_debug_vmsg2(msgdata, fmt, args,
2634                        " ns: \?\? lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s "
2635                        "res: \?\? rrc=\?\? type: \?\?\? flags: %#llx nid: %s "
2636                        "remote: %#llx expref: %d pid: %u timeout: %lu "
2637                        "lvb_type: %d\n",
2638                        lock,
2639                        lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
2640                        lock->l_readers, lock->l_writers,
2641                        ldlm_lockname[lock->l_granted_mode],
2642                        ldlm_lockname[lock->l_req_mode],
2643                        lock->l_flags, nid, lock->l_remote_handle.cookie,
2644                        exp ? atomic_read(&exp->exp_refcount) : -99,
2645                        lock->l_pid, lock->l_callback_timeout, lock->l_lvb_type);
2646                 va_end(args);
2647                 return;
2648         }
2649
2650         switch (resource->lr_type) {
2651         case LDLM_EXTENT:
2652                 libcfs_debug_vmsg2(msgdata, fmt, args,
2653                         " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s "
2654                         "res: "DLDLMRES" rrc: %d type: %s [%llu->%llu] "
2655                         "(req %llu->%llu) flags: %#llx nid: %s remote: "
2656                         "%#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
2657                         ldlm_lock_to_ns_name(lock), lock,
2658                         lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
2659                         lock->l_readers, lock->l_writers,
2660                         ldlm_lockname[lock->l_granted_mode],
2661                         ldlm_lockname[lock->l_req_mode],
2662                         PLDLMRES(resource),
2663                         atomic_read(&resource->lr_refcount),
2664                         ldlm_typename[resource->lr_type],
2665                         lock->l_policy_data.l_extent.start,
2666                         lock->l_policy_data.l_extent.end,
2667                         lock->l_req_extent.start, lock->l_req_extent.end,
2668                         lock->l_flags, nid, lock->l_remote_handle.cookie,
2669                         exp ? atomic_read(&exp->exp_refcount) : -99,
2670                         lock->l_pid, lock->l_callback_timeout,
2671                         lock->l_lvb_type);
2672                 break;
2673
2674         case LDLM_FLOCK:
2675                 libcfs_debug_vmsg2(msgdata, fmt, args,
2676                         " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s "
2677                         "res: "DLDLMRES" rrc: %d type: %s pid: %d "
2678                         "[%llu->%llu] flags: %#llx nid: %s "
2679                         "remote: %#llx expref: %d pid: %u timeout: %lu\n",
2680                         ldlm_lock_to_ns_name(lock), lock,
2681                         lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
2682                         lock->l_readers, lock->l_writers,
2683                         ldlm_lockname[lock->l_granted_mode],
2684                         ldlm_lockname[lock->l_req_mode],
2685                         PLDLMRES(resource),
2686                         atomic_read(&resource->lr_refcount),
2687                         ldlm_typename[resource->lr_type],
2688                         lock->l_policy_data.l_flock.pid,
2689                         lock->l_policy_data.l_flock.start,
2690                         lock->l_policy_data.l_flock.end,
2691                         lock->l_flags, nid, lock->l_remote_handle.cookie,
2692                         exp ? atomic_read(&exp->exp_refcount) : -99,
2693                         lock->l_pid, lock->l_callback_timeout);
2694                 break;
2695
2696         case LDLM_IBITS:
2697                 libcfs_debug_vmsg2(msgdata, fmt, args,
2698                         " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s "
2699                         "res: "DLDLMRES" bits %#llx rrc: %d type: %s "
2700                         "flags: %#llx nid: %s remote: %#llx expref: %d "
2701                         "pid: %u timeout: %lu lvb_type: %d\n",
2702                         ldlm_lock_to_ns_name(lock),
2703                         lock, lock->l_handle.h_cookie,
2704                         atomic_read(&lock->l_refc),
2705                         lock->l_readers, lock->l_writers,
2706                         ldlm_lockname[lock->l_granted_mode],
2707                         ldlm_lockname[lock->l_req_mode],
2708                         PLDLMRES(resource),
2709                         lock->l_policy_data.l_inodebits.bits,
2710                         atomic_read(&resource->lr_refcount),
2711                         ldlm_typename[resource->lr_type],
2712                         lock->l_flags, nid, lock->l_remote_handle.cookie,
2713                         exp ? atomic_read(&exp->exp_refcount) : -99,
2714                         lock->l_pid, lock->l_callback_timeout,
2715                         lock->l_lvb_type);
2716                 break;
2717
2718         default:
2719                 libcfs_debug_vmsg2(msgdata, fmt, args,
2720                         " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s "
2721                         "res: "DLDLMRES" rrc: %d type: %s flags: %#llx "
2722                         "nid: %s remote: %#llx expref: %d pid: %u "
2723                         "timeout: %lu lvb_type: %d\n",
2724                         ldlm_lock_to_ns_name(lock),
2725                         lock, lock->l_handle.h_cookie,
2726                         atomic_read(&lock->l_refc),
2727                         lock->l_readers, lock->l_writers,
2728                         ldlm_lockname[lock->l_granted_mode],
2729                         ldlm_lockname[lock->l_req_mode],
2730                         PLDLMRES(resource),
2731                         atomic_read(&resource->lr_refcount),
2732                         ldlm_typename[resource->lr_type],
2733                         lock->l_flags, nid, lock->l_remote_handle.cookie,
2734                         exp ? atomic_read(&exp->exp_refcount) : -99,
2735                         lock->l_pid, lock->l_callback_timeout,
2736                         lock->l_lvb_type);
2737                 break;
2738         }
2739         va_end(args);
2740 }
2741 EXPORT_SYMBOL(_ldlm_lock_debug);