/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_lock.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
# include <libcfs/libcfs.h>
# include <linux/lustre_intent.h>
#else
# include <liblustre.h>
#endif

#include <obd_class.h>
#include "ldlm_internal.h"

/* lock types */
char *ldlm_lockname[] = {
        [0]         = "--",
        [LCK_EX]    = "EX",
        [LCK_PW]    = "PW",
        [LCK_PR]    = "PR",
        [LCK_CW]    = "CW",
        [LCK_CR]    = "CR",
        [LCK_NL]    = "NL",
        [LCK_GROUP] = "GROUP",
        [LCK_COS]   = "COS"
};

char *ldlm_typename[] = {
        [LDLM_PLAIN]  = "PLN",
        [LDLM_EXTENT] = "EXT",
        [LDLM_FLOCK]  = "FLK",
        [LDLM_IBITS]  = "IBT",
};

static ldlm_policy_wire_to_local_t ldlm_policy_wire_to_local[] = {
        [LDLM_PLAIN - LDLM_MIN_TYPE]  = ldlm_plain_policy_wire_to_local,
        [LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_wire_to_local,
        [LDLM_FLOCK - LDLM_MIN_TYPE]  = ldlm_flock_policy_wire_to_local,
        [LDLM_IBITS - LDLM_MIN_TYPE]  = ldlm_ibits_policy_wire_to_local,
};

static ldlm_policy_local_to_wire_t ldlm_policy_local_to_wire[] = {
        [LDLM_PLAIN - LDLM_MIN_TYPE]  = ldlm_plain_policy_local_to_wire,
        [LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_local_to_wire,
        [LDLM_FLOCK - LDLM_MIN_TYPE]  = ldlm_flock_policy_local_to_wire,
        [LDLM_IBITS - LDLM_MIN_TYPE]  = ldlm_ibits_policy_local_to_wire,
};

/**
 * Converts a lock policy from the local format to the on-the-wire lock_desc
 * format.
 */
void ldlm_convert_policy_to_wire(ldlm_type_t type,
                                 const ldlm_policy_data_t *lpolicy,
                                 ldlm_wire_policy_data_t *wpolicy)
{
        ldlm_policy_local_to_wire_t convert;

        convert = ldlm_policy_local_to_wire[type - LDLM_MIN_TYPE];

        convert(lpolicy, wpolicy);
}

/**
 * Converts a lock policy from the on-the-wire lock_desc format to the local
 * format.
 */
void ldlm_convert_policy_to_local(ldlm_type_t type,
                                  const ldlm_wire_policy_data_t *wpolicy,
                                  ldlm_policy_data_t *lpolicy)
{
        ldlm_policy_wire_to_local_t convert;

        convert = ldlm_policy_wire_to_local[type - LDLM_MIN_TYPE];

        convert(wpolicy, lpolicy);
}
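
/*
 * Illustrative usage sketch (not part of the original source; the values
 * are made up): policy conversion is a table-driven dispatch on the lock
 * type, so converting an extent policy to its wire form and back is simply:
 *
 *      ldlm_policy_data_t local = { .l_extent = { .start = 0,
 *                                                 .end   = 4095,
 *                                                 .gid   = 0 } };
 *      ldlm_wire_policy_data_t wire;
 *
 *      ldlm_convert_policy_to_wire(LDLM_EXTENT, &local, &wire);
 *      ldlm_convert_policy_to_local(LDLM_EXTENT, &wire, &local);
 */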

char *ldlm_it2str(int it)
{
        switch (it) {
        case IT_OPEN:
                return "open";
        case IT_CREAT:
                return "creat";
        case (IT_OPEN | IT_CREAT):
                return "open|creat";
        case IT_READDIR:
                return "readdir";
        case IT_GETATTR:
                return "getattr";
        case IT_LOOKUP:
                return "lookup";
        case IT_UNLINK:
                return "unlink";
        case IT_GETXATTR:
                return "getxattr";
        default:
                CERROR("Unknown intent %d\n", it);
                return "UNKNOWN";
        }
}

extern cfs_mem_cache_t *ldlm_lock_slab;

static ldlm_processing_policy ldlm_processing_policy_table[] = {
        [LDLM_PLAIN]  = ldlm_process_plain_lock,
        [LDLM_EXTENT] = ldlm_process_extent_lock,
#ifdef __KERNEL__
        [LDLM_FLOCK]  = ldlm_process_flock_lock,
#endif
        [LDLM_IBITS]  = ldlm_process_inodebits_lock,
};

ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res)
{
        return ldlm_processing_policy_table[res->lr_type];
}

void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg)
{
        ns->ns_policy = arg;
}

/*
 * REFCOUNTED LOCK OBJECTS
 */


/*
 * Lock refcounts, during creation:
 *   - one special one for allocation, dec'd only once in destroy
 *   - one for being a lock that's in-use
 *   - one for the addref associated with a new lock
 */
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
{
        cfs_atomic_inc(&lock->l_refc);
        return lock;
}

static void ldlm_lock_free(struct ldlm_lock *lock, size_t size)
{
        LASSERT(size == sizeof(*lock));
        OBD_SLAB_FREE(lock, ldlm_lock_slab, sizeof(*lock));
}

void ldlm_lock_put(struct ldlm_lock *lock)
{
        ENTRY;

        LASSERT(lock->l_resource != LP_POISON);
        LASSERT(cfs_atomic_read(&lock->l_refc) > 0);
        if (cfs_atomic_dec_and_test(&lock->l_refc)) {
                struct ldlm_resource *res;

                LDLM_DEBUG(lock,
                           "final lock_put on destroyed lock, freeing it.");

                res = lock->l_resource;
                LASSERT(lock->l_destroyed);
                LASSERT(cfs_list_empty(&lock->l_res_link));
                LASSERT(cfs_list_empty(&lock->l_pending_chain));

                lprocfs_counter_decr(ldlm_res_to_ns(res)->ns_stats,
                                     LDLM_NSS_LOCKS);
                lu_ref_del(&res->lr_reference, "lock", lock);
                ldlm_resource_putref(res);
                lock->l_resource = NULL;
                if (lock->l_export) {
                        class_export_lock_put(lock->l_export, lock);
                        lock->l_export = NULL;
                }

                if (lock->l_lvb_data != NULL)
                        OBD_FREE(lock->l_lvb_data, lock->l_lvb_len);

                ldlm_interval_free(ldlm_interval_detach(lock));
                lu_ref_fini(&lock->l_reference);
                OBD_FREE_RCU_CB(lock, sizeof(*lock), &lock->l_handle,
                                ldlm_lock_free);
        }

        EXIT;
}
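
/*
 * Illustrative refcounting sketch (an explanatory example, not original
 * code): any caller holding a struct ldlm_lock pointer balances a get with
 * a put; the final put on a destroyed lock frees it via the slab above.
 *
 *      struct ldlm_lock *pinned = LDLM_LOCK_GET(lock);  (pin for later use)
 *      ...
 *      LDLM_LOCK_PUT(pinned);                           (drop the pin)
 */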

int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
{
        int rc = 0;
        if (!cfs_list_empty(&lock->l_lru)) {
                struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

                LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
                cfs_list_del_init(&lock->l_lru);
                if (lock->l_flags & LDLM_FL_SKIPPED)
                        lock->l_flags &= ~LDLM_FL_SKIPPED;
                LASSERT(ns->ns_nr_unused > 0);
                ns->ns_nr_unused--;
                rc = 1;
        }
        return rc;
}

int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
        int rc;

        ENTRY;
        if (lock->l_ns_srv) {
                LASSERT(cfs_list_empty(&lock->l_lru));
                RETURN(0);
        }

        cfs_spin_lock(&ns->ns_lock);
        rc = ldlm_lock_remove_from_lru_nolock(lock);
        cfs_spin_unlock(&ns->ns_lock);
        EXIT;
        return rc;
}

void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

        lock->l_last_used = cfs_time_current();
        LASSERT(cfs_list_empty(&lock->l_lru));
        LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
        cfs_list_add_tail(&lock->l_lru, &ns->ns_unused_list);
        LASSERT(ns->ns_nr_unused >= 0);
        ns->ns_nr_unused++;
}

void ldlm_lock_add_to_lru(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

        ENTRY;
        cfs_spin_lock(&ns->ns_lock);
        ldlm_lock_add_to_lru_nolock(lock);
        cfs_spin_unlock(&ns->ns_lock);
        EXIT;
}

void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

        ENTRY;
        if (lock->l_ns_srv) {
                LASSERT(cfs_list_empty(&lock->l_lru));
                EXIT;
                return;
        }

        cfs_spin_lock(&ns->ns_lock);
        if (!cfs_list_empty(&lock->l_lru)) {
                ldlm_lock_remove_from_lru_nolock(lock);
                ldlm_lock_add_to_lru_nolock(lock);
        }
        cfs_spin_unlock(&ns->ns_lock);
        EXIT;
}
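
/*
 * Locking-discipline note (added for clarity; grounded in the code above):
 * the *_nolock variants assume the caller already holds ns->ns_lock, while
 * the plain variants take it themselves.  Moving a lock to the LRU tail is
 * therefore written as:
 *
 *      cfs_spin_lock(&ns->ns_lock);
 *      ldlm_lock_remove_from_lru_nolock(lock);
 *      ldlm_lock_add_to_lru_nolock(lock);
 *      cfs_spin_unlock(&ns->ns_lock);
 *
 * which is exactly what ldlm_lock_touch_in_lru() does.
 */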

/* This used to have a 'strict' flag, which recovery would use to mark an
 * in-use lock as needing-to-die.  Lest I am ever tempted to put it back, I
 * shall explain why it's gone: with the new hash table scheme, once you call
 * ldlm_lock_destroy, you can never drop your final references on this lock,
 * because it's not in the hash table anymore.  -phil */
int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
{
        ENTRY;

        if (lock->l_readers || lock->l_writers) {
                LDLM_ERROR(lock, "lock still has references");
                ldlm_lock_dump(D_ERROR, lock, 0);
                LBUG();
        }

        if (!cfs_list_empty(&lock->l_res_link)) {
                LDLM_ERROR(lock, "lock still on resource");
                ldlm_lock_dump(D_ERROR, lock, 0);
                LBUG();
        }

        if (lock->l_destroyed) {
                LASSERT(cfs_list_empty(&lock->l_lru));
                EXIT;
                return 0;
        }
        lock->l_destroyed = 1;

        if (lock->l_export && lock->l_export->exp_lock_hash &&
            !cfs_hlist_unhashed(&lock->l_exp_hash))
                cfs_hash_del(lock->l_export->exp_lock_hash,
                             &lock->l_remote_handle, &lock->l_exp_hash);

        ldlm_lock_remove_from_lru(lock);
        class_handle_unhash(&lock->l_handle);

#if 0
        /* Wake anyone waiting for this lock */
        /* FIXME: I should probably add yet another flag, instead of using
         * l_export to only call this on clients */
        if (lock->l_export)
                class_export_put(lock->l_export);
        lock->l_export = NULL;
        if (lock->l_export && lock->l_completion_ast)
                lock->l_completion_ast(lock, 0);
#endif
        EXIT;
        return 1;
}

void ldlm_lock_destroy(struct ldlm_lock *lock)
{
        int first;
        ENTRY;
        lock_res_and_lock(lock);
        first = ldlm_lock_destroy_internal(lock);
        unlock_res_and_lock(lock);

        /* drop the reference from the hash table only for the first destroy */
        if (first) {
                lu_ref_del(&lock->l_reference, "hash", lock);
                LDLM_LOCK_RELEASE(lock);
        }
        EXIT;
}

void ldlm_lock_destroy_nolock(struct ldlm_lock *lock)
{
        int first;
        ENTRY;
        first = ldlm_lock_destroy_internal(lock);
        /* drop the reference from the hash table only for the first destroy */
        if (first) {
                lu_ref_del(&lock->l_reference, "hash", lock);
                LDLM_LOCK_RELEASE(lock);
        }
        EXIT;
}

/* this is called by portals_handle2object with the handle lock taken */
static void lock_handle_addref(void *lock)
{
        LDLM_LOCK_GET((struct ldlm_lock *)lock);
}

/*
 * usage: pass in a resource on which you have done ldlm_resource_get();
 *        the new lock will take over the refcount.
 * returns: lock with refcount 2 - one for the current caller and one for
 *          remote
 */
static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
{
        struct ldlm_lock *lock;
        ENTRY;

        if (resource == NULL)
                LBUG();

        OBD_SLAB_ALLOC_PTR_GFP(lock, ldlm_lock_slab, CFS_ALLOC_IO);
        if (lock == NULL)
                RETURN(NULL);

        cfs_spin_lock_init(&lock->l_lock);
        lock->l_resource = resource;
        lu_ref_add(&resource->lr_reference, "lock", lock);

        cfs_atomic_set(&lock->l_refc, 2);
        CFS_INIT_LIST_HEAD(&lock->l_res_link);
        CFS_INIT_LIST_HEAD(&lock->l_lru);
        CFS_INIT_LIST_HEAD(&lock->l_pending_chain);
        CFS_INIT_LIST_HEAD(&lock->l_bl_ast);
        CFS_INIT_LIST_HEAD(&lock->l_cp_ast);
        CFS_INIT_LIST_HEAD(&lock->l_rk_ast);
        cfs_waitq_init(&lock->l_waitq);
        lock->l_blocking_lock = NULL;
        CFS_INIT_LIST_HEAD(&lock->l_sl_mode);
        CFS_INIT_LIST_HEAD(&lock->l_sl_policy);
        CFS_INIT_HLIST_NODE(&lock->l_exp_hash);

        lprocfs_counter_incr(ldlm_res_to_ns(resource)->ns_stats,
                             LDLM_NSS_LOCKS);
        CFS_INIT_LIST_HEAD(&lock->l_handle.h_link);
        class_handle_hash(&lock->l_handle, lock_handle_addref);

        lu_ref_init(&lock->l_reference);
        lu_ref_add(&lock->l_reference, "hash", lock);
        lock->l_callback_timeout = 0;

#if LUSTRE_TRACKS_LOCK_EXP_REFS
        CFS_INIT_LIST_HEAD(&lock->l_exp_refs_link);
        lock->l_exp_refs_nr = 0;
        lock->l_exp_refs_target = NULL;
#endif

        RETURN(lock);
}

int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
                              const struct ldlm_res_id *new_resid)
{
        struct ldlm_resource *oldres = lock->l_resource;
        struct ldlm_resource *newres;
        int type;
        ENTRY;

        LASSERT(ns_is_client(ns));

        lock_res_and_lock(lock);
        if (memcmp(new_resid, &lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) == 0) {
                /* Nothing to do */
                unlock_res_and_lock(lock);
                RETURN(0);
        }

        LASSERT(new_resid->name[0] != 0);

        /* This function assumes that the lock isn't on any lists */
        LASSERT(cfs_list_empty(&lock->l_res_link));

        type = oldres->lr_type;
        unlock_res_and_lock(lock);

        newres = ldlm_resource_get(ns, NULL, new_resid, type, 1);
        if (newres == NULL)
                RETURN(-ENOMEM);

        lu_ref_add(&newres->lr_reference, "lock", lock);
        /*
         * To flip the lock from the old to the new resource, lock, oldres and
         * newres have to be locked. Resource spin-locks are nested within
         * lock->l_lock, and are taken in the memory address order to avoid
         * dead-locks.
         */
        cfs_spin_lock(&lock->l_lock);
        oldres = lock->l_resource;
        if (oldres < newres) {
                lock_res(oldres);
                lock_res_nested(newres, LRT_NEW);
        } else {
                lock_res(newres);
                lock_res_nested(oldres, LRT_NEW);
        }
        LASSERT(memcmp(new_resid, &oldres->lr_name,
                       sizeof oldres->lr_name) != 0);
        lock->l_resource = newres;
        unlock_res(oldres);
        unlock_res_and_lock(lock);

        /* ...and the flowers are still standing! */
        lu_ref_del(&oldres->lr_reference, "lock", lock);
        ldlm_resource_putref(oldres);

        RETURN(0);
}

/*
 *  HANDLES
 */

void ldlm_lock2handle(const struct ldlm_lock *lock, struct lustre_handle *lockh)
{
        lockh->cookie = lock->l_handle.h_cookie;
}

/* If 'flags' is non-zero: atomically get the lock and set those flags.
 * Return NULL if any of the flags is already set.
 */
struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
                                     int flags)
{
        struct ldlm_lock *lock;
        ENTRY;

        LASSERT(handle);

        lock = class_handle2object(handle->cookie);
        if (lock == NULL)
                RETURN(NULL);

        /* It's unlikely but possible that someone marked the lock as
         * destroyed after we did handle2object on it */
        if (flags == 0 && !lock->l_destroyed) {
                lu_ref_add(&lock->l_reference, "handle", cfs_current());
                RETURN(lock);
        }

        lock_res_and_lock(lock);

        LASSERT(lock->l_resource != NULL);

        lu_ref_add_atomic(&lock->l_reference, "handle", cfs_current());
        if (unlikely(lock->l_destroyed)) {
                unlock_res_and_lock(lock);
                CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
                LDLM_LOCK_PUT(lock);
                RETURN(NULL);
        }

        if (flags && (lock->l_flags & flags)) {
                unlock_res_and_lock(lock);
                LDLM_LOCK_PUT(lock);
                RETURN(NULL);
        }

        if (flags)
                lock->l_flags |= flags;

        unlock_res_and_lock(lock);
        RETURN(lock);
}
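
/*
 * Illustrative handle round trip (an explanatory sketch, not original code):
 * locks are published to callers as opaque handles and resolved back with
 * ldlm_handle2lock(), which returns a referenced lock or NULL.
 *
 *      struct lustre_handle lockh;
 *      struct ldlm_lock *lock;
 *
 *      ldlm_lock2handle(lock, &lockh);          (publish a referenced lock)
 *      ...
 *      lock = ldlm_handle2lock(&lockh);         (takes a new reference)
 *      if (lock != NULL) {
 *              ... use the lock ...
 *              LDLM_LOCK_PUT(lock);
 *      }
 */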

void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
{
        struct obd_export *exp = lock->l_export ?: lock->l_conn_export;
        /* INODEBITS_INTEROP: If the other side does not support
         * inodebits, reply with a plain lock descriptor.
         */
        if ((lock->l_resource->lr_type == LDLM_IBITS) &&
            (exp && !(exp->exp_connect_flags & OBD_CONNECT_IBITS))) {
                /* Make sure all the right bits are set in the lock we
                   are going to pass to the client */
                LASSERTF(lock->l_policy_data.l_inodebits.bits ==
                         (MDS_INODELOCK_LOOKUP|MDS_INODELOCK_UPDATE),
                         "Inappropriate inode lock bits during "
                         "conversion " LPU64 "\n",
                         lock->l_policy_data.l_inodebits.bits);

                ldlm_res2desc(lock->l_resource, &desc->l_resource);
                desc->l_resource.lr_type = LDLM_PLAIN;

                /* Convert "new" lock modes to something an old client can
                   understand */
                if ((lock->l_req_mode == LCK_CR) ||
                    (lock->l_req_mode == LCK_CW))
                        desc->l_req_mode = LCK_PR;
                else
                        desc->l_req_mode = lock->l_req_mode;
                if ((lock->l_granted_mode == LCK_CR) ||
                    (lock->l_granted_mode == LCK_CW)) {
                        desc->l_granted_mode = LCK_PR;
                } else {
                        /* We never grant PW/EX locks to clients */
                        LASSERT((lock->l_granted_mode != LCK_PW) &&
                                (lock->l_granted_mode != LCK_EX));
                        desc->l_granted_mode = lock->l_granted_mode;
                }

                /* We do not copy policy here, because there is no
                   policy for plain locks */
        } else {
                ldlm_res2desc(lock->l_resource, &desc->l_resource);
                desc->l_req_mode = lock->l_req_mode;
                desc->l_granted_mode = lock->l_granted_mode;
                ldlm_convert_policy_to_wire(lock->l_resource->lr_type,
                                            &lock->l_policy_data,
                                            &desc->l_policy_data);
        }
}

void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                           cfs_list_t *work_list)
{
        if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) {
                LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
                lock->l_flags |= LDLM_FL_AST_SENT;
                /* If the enqueuing client said so, tell the AST recipient to
                 * discard dirty data, rather than writing back. */
                if (new->l_flags & LDLM_AST_DISCARD_DATA)
                        lock->l_flags |= LDLM_FL_DISCARD_DATA;
                LASSERT(cfs_list_empty(&lock->l_bl_ast));
                cfs_list_add(&lock->l_bl_ast, work_list);
                LDLM_LOCK_GET(lock);
                LASSERT(lock->l_blocking_lock == NULL);
                lock->l_blocking_lock = LDLM_LOCK_GET(new);
        }
}

void ldlm_add_cp_work_item(struct ldlm_lock *lock, cfs_list_t *work_list)
{
        if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) {
                lock->l_flags |= LDLM_FL_CP_REQD;
                LDLM_DEBUG(lock, "lock granted; sending completion AST.");
                LASSERT(cfs_list_empty(&lock->l_cp_ast));
                cfs_list_add(&lock->l_cp_ast, work_list);
                LDLM_LOCK_GET(lock);
        }
}

/* must be called with lr_lock held */
void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                            cfs_list_t *work_list)
{
        ENTRY;
        check_res_locked(lock->l_resource);
        if (new)
                ldlm_add_bl_work_item(lock, new, work_list);
        else
                ldlm_add_cp_work_item(lock, work_list);
        EXIT;
}

void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(lockh);
        LASSERT(lock != NULL);
        ldlm_lock_addref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}

void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
{
        ldlm_lock_remove_from_lru(lock);
        if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
                lock->l_readers++;
                lu_ref_add_atomic(&lock->l_reference, "reader", lock);
        }
        if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) {
                lock->l_writers++;
                lu_ref_add_atomic(&lock->l_reference, "writer", lock);
        }
        LDLM_LOCK_GET(lock);
        lu_ref_add_atomic(&lock->l_reference, "user", lock);
        LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
}

/**
 * Attempts to add a reference to a lock, and fails if the lock is already
 * LDLM_FL_CBPENDING or destroyed.
 *
 * \retval 0 success, lock was addref-ed
 *
 * \retval -EAGAIN lock is being canceled.
 */
int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock;
        int               result;

        result = -EAGAIN;
        lock = ldlm_handle2lock(lockh);
        if (lock != NULL) {
                lock_res_and_lock(lock);
                if (lock->l_readers != 0 || lock->l_writers != 0 ||
                    !(lock->l_flags & LDLM_FL_CBPENDING)) {
                        ldlm_lock_addref_internal_nolock(lock, mode);
                        result = 0;
                }
                unlock_res_and_lock(lock);
                LDLM_LOCK_PUT(lock);
        }
        return result;
}
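
/*
 * Illustrative use of ldlm_lock_addref_try() (a sketch, not original code):
 * callers racing with cancellation can try to pin a lock and fall back on
 * -EAGAIN.
 *
 *      if (ldlm_lock_addref_try(&lockh, LCK_PR) == 0) {
 *              ... the lock is pinned and cannot be cancelled under us ...
 *              ldlm_lock_decref(&lockh, LCK_PR);
 *      } else {
 *              ... the lock is being cancelled; take the slow path ...
 *      }
 */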

/* only called for local locks */
void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
{
        lock_res_and_lock(lock);
        ldlm_lock_addref_internal_nolock(lock, mode);
        unlock_res_and_lock(lock);
}

/* only called in ldlm_flock_destroy and for local locks.
 * For LDLM_FLOCK type locks l_blocking_ast is NULL and
 * ldlm_lock_remove_from_lru() does nothing, so it is safe for
 * ldlm_flock_destroy to use this helper, which skips that code. */
void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
{
        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
                LASSERT(lock->l_readers > 0);
                lu_ref_del(&lock->l_reference, "reader", lock);
                lock->l_readers--;
        }
        if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) {
                LASSERT(lock->l_writers > 0);
                lu_ref_del(&lock->l_reference, "writer", lock);
                lock->l_writers--;
        }

        lu_ref_del(&lock->l_reference, "user", lock);
        LDLM_LOCK_RELEASE(lock);    /* matches the LDLM_LOCK_GET() in addref */
}

void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
{
        struct ldlm_namespace *ns;
        ENTRY;

        lock_res_and_lock(lock);

        ns = ldlm_lock_to_ns(lock);

        ldlm_lock_decref_internal_nolock(lock, mode);

        if (lock->l_flags & LDLM_FL_LOCAL &&
            !lock->l_readers && !lock->l_writers) {
                /* If this is a local lock on a server namespace and this was
                 * the last reference, cancel the lock. */
                CDEBUG(D_INFO, "forcing cancel of local lock\n");
                lock->l_flags |= LDLM_FL_CBPENDING;
        }

        if (!lock->l_readers && !lock->l_writers &&
            (lock->l_flags & LDLM_FL_CBPENDING)) {
                /* If we received a blocking AST and this was the last
                 * reference, run the callback. */
                if (lock->l_ns_srv && lock->l_export)
                        CERROR("FL_CBPENDING set on non-local lock--just a "
                               "warning\n");

                LDLM_DEBUG(lock, "final decref done on cbpending lock");

                LDLM_LOCK_GET(lock); /* dropped by bl thread */
                ldlm_lock_remove_from_lru(lock);
                unlock_res_and_lock(lock);

                if (lock->l_flags & LDLM_FL_FAIL_LOC)
                        OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);

                if ((lock->l_flags & LDLM_FL_ATOMIC_CB) ||
                    ldlm_bl_to_thread_lock(ns, NULL, lock) != 0)
                        ldlm_handle_bl_callback(ns, NULL, lock);
        } else if (ns_is_client(ns) &&
                   !lock->l_readers && !lock->l_writers &&
                   !(lock->l_flags & LDLM_FL_BL_AST)) {
                /* If this is a client-side namespace and this was the last
                 * reference, put it on the LRU. */
                ldlm_lock_add_to_lru(lock);
                unlock_res_and_lock(lock);

                if (lock->l_flags & LDLM_FL_FAIL_LOC)
                        OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);

                /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE
                 * are not supported by the server; otherwise, it is done on
                 * enqueue. */
                if (!exp_connect_cancelset(lock->l_conn_export) &&
                    !ns_connect_lru_resize(ns))
                        ldlm_cancel_lru(ns, 0, LDLM_ASYNC, 0);
        } else {
                unlock_res_and_lock(lock);
        }

        EXIT;
}

void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
        LASSERTF(lock != NULL, "Non-existing lock: "LPX64"\n", lockh->cookie);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}

/* This will drop a lock reference and mark it for destruction, but will not
 * necessarily cancel the lock before returning. */
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
        ENTRY;

        LASSERT(lock != NULL);

        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        lock_res_and_lock(lock);
        lock->l_flags |= LDLM_FL_CBPENDING;
        unlock_res_and_lock(lock);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}
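
/*
 * Illustrative addref/decref pairing through a handle (a sketch, not
 * original code):
 *
 *      ldlm_lock_addref(&lockh, LCK_PW);    (take a writer reference)
 *      ... do the work the lock protects ...
 *      ldlm_lock_decref(&lockh, LCK_PW);    (drop it; an unused client lock
 *                                            moves to the namespace LRU)
 *
 * Use ldlm_lock_decref_and_cancel() instead when the last reference should
 * also mark the lock LDLM_FL_CBPENDING so it gets cancelled.
 */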

struct sl_insert_point {
        cfs_list_t *res_link;
        cfs_list_t *mode_link;
        cfs_list_t *policy_link;
};

/*
 * search_granted_lock
 *
 * Description:
 *      Finds a position at which to insert the new lock.
 * Parameters:
 *      queue [input]:  the granted list to search;
 *      req [input]:    the lock whose position is to be located;
 *      prev [output]:  the positions within the 3 lists at which to insert
 *                      @req
 * Return Value:
 *      filled @prev
 * NOTE: called by
 *  - ldlm_grant_lock_with_skiplist
 */
static void search_granted_lock(cfs_list_t *queue,
                                struct ldlm_lock *req,
                                struct sl_insert_point *prev)
{
        cfs_list_t *tmp;
        struct ldlm_lock *lock, *mode_end, *policy_end;
        ENTRY;

        cfs_list_for_each(tmp, queue) {
                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);

                mode_end = cfs_list_entry(lock->l_sl_mode.prev,
                                          struct ldlm_lock, l_sl_mode);

                if (lock->l_req_mode != req->l_req_mode) {
                        /* jump to the last lock of the mode group */
                        tmp = &mode_end->l_res_link;
                        continue;
                }

                /* a suitable mode group has been found */
                if (lock->l_resource->lr_type == LDLM_PLAIN) {
                        /* insert point is the last lock of the mode group */
                        prev->res_link = &mode_end->l_res_link;
                        prev->mode_link = &mode_end->l_sl_mode;
                        prev->policy_link = &req->l_sl_policy;
                        EXIT;
                        return;
                } else if (lock->l_resource->lr_type == LDLM_IBITS) {
                        for (;;) {
                                policy_end =
                                        cfs_list_entry(lock->l_sl_policy.prev,
                                                       struct ldlm_lock,
                                                       l_sl_policy);

                                if (lock->l_policy_data.l_inodebits.bits ==
                                    req->l_policy_data.l_inodebits.bits) {
                                        /* insert point is the last lock of
                                         * the policy group */
                                        prev->res_link =
                                                &policy_end->l_res_link;
                                        prev->mode_link =
                                                &policy_end->l_sl_mode;
                                        prev->policy_link =
                                                &policy_end->l_sl_policy;
                                        EXIT;
                                        return;
                                }

                                if (policy_end == mode_end)
                                        /* done with the mode group */
                                        break;

                                /* go to the next policy group within the
                                 * mode group */
                                tmp = policy_end->l_res_link.next;
                                lock = cfs_list_entry(tmp, struct ldlm_lock,
                                                      l_res_link);
                        }  /* loop over policy groups within the mode group */

                        /* insert point is the last lock of the mode group;
                         * a new policy group is started */
                        prev->res_link = &mode_end->l_res_link;
                        prev->mode_link = &mode_end->l_sl_mode;
                        prev->policy_link = &req->l_sl_policy;
                        EXIT;
                        return;
                } else {
                        LDLM_ERROR(lock, "is not LDLM_PLAIN or LDLM_IBITS lock");
                        LBUG();
                }
        }

        /* insert point is the last lock on the queue;
         * a new mode group and a new policy group are started */
        prev->res_link = queue->prev;
        prev->mode_link = &req->l_sl_mode;
        prev->policy_link = &req->l_sl_policy;
        EXIT;
        return;
}
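
/*
 * Illustrative layout (an explanatory sketch added for clarity, not original
 * code): the granted list is kept as contiguous groups so a whole group can
 * be skipped in one step while searching, e.g. for an IBITS resource:
 *
 *      lr_granted:  [PR,LOOKUP][PR,LOOKUP] [PR,UPDATE]  [EX,LOOKUP]
 *                   \_ policy group 1 ___/ \_ group 2_/ \_ mode group 2
 *                   \_________ mode group 1 _________/
 *
 * Locks of one mode group are chained on l_sl_mode, and locks of one policy
 * group on l_sl_policy, so a group's last lock is reachable from its head in
 * O(1) (see mode_end/policy_end above).
 */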

static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
                                       struct sl_insert_point *prev)
{
        struct ldlm_resource *res = lock->l_resource;
        ENTRY;

        check_res_locked(res);

        ldlm_resource_dump(D_INFO, res);
        CDEBUG(D_OTHER, "About to add this lock:\n");
        ldlm_lock_dump(D_OTHER, lock, 0);

        if (lock->l_destroyed) {
                CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
                return;
        }

        LASSERT(cfs_list_empty(&lock->l_res_link));
        LASSERT(cfs_list_empty(&lock->l_sl_mode));
        LASSERT(cfs_list_empty(&lock->l_sl_policy));

        cfs_list_add(&lock->l_res_link, prev->res_link);
        cfs_list_add(&lock->l_sl_mode, prev->mode_link);
        cfs_list_add(&lock->l_sl_policy, prev->policy_link);

        EXIT;
}

static void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock)
{
        struct sl_insert_point prev;
        ENTRY;

        LASSERT(lock->l_req_mode == lock->l_granted_mode);

        search_granted_lock(&lock->l_resource->lr_granted, lock, &prev);
        ldlm_granted_list_add_lock(lock, &prev);
        EXIT;
}

/* NOTE: called by
 *  - ldlm_lock_enqueue
 *  - ldlm_reprocess_queue
 *  - ldlm_lock_convert
 *
 * must be called with lr_lock held
 */
void ldlm_grant_lock(struct ldlm_lock *lock, cfs_list_t *work_list)
{
        struct ldlm_resource *res = lock->l_resource;
        ENTRY;

        check_res_locked(res);

        lock->l_granted_mode = lock->l_req_mode;
        if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS)
                ldlm_grant_lock_with_skiplist(lock);
        else if (res->lr_type == LDLM_EXTENT)
                ldlm_extent_add_lock(res, lock);
        else
                ldlm_resource_add_lock(res, &res->lr_granted, lock);

        if (lock->l_granted_mode < res->lr_most_restr)
                res->lr_most_restr = lock->l_granted_mode;

        if (work_list && lock->l_completion_ast != NULL)
                ldlm_add_ast_work_item(lock, NULL, work_list);

        ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock);
        EXIT;
}

/* returns a referenced lock or NULL.  See the flag descriptions in the
 * comment above ldlm_lock_match, below */
static struct ldlm_lock *search_queue(cfs_list_t *queue,
                                      ldlm_mode_t *mode,
                                      ldlm_policy_data_t *policy,
                                      struct ldlm_lock *old_lock,
                                      int flags, int unref)
{
        struct ldlm_lock *lock;
        cfs_list_t       *tmp;

        cfs_list_for_each(tmp, queue) {
                ldlm_mode_t match;

                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);

                if (lock == old_lock)
                        break;

                /* llite sometimes wants to match locks that will be
                 * canceled when their users drop, but we allow it to match
                 * if it passes in CBPENDING and the lock still has users.
                 * this is generally only going to be used by children
                 * whose parents already hold a lock so forward progress
                 * can still happen. */
                if (lock->l_flags & LDLM_FL_CBPENDING &&
                    !(flags & LDLM_FL_CBPENDING))
                        continue;
                if (!unref && lock->l_flags & LDLM_FL_CBPENDING &&
                    lock->l_readers == 0 && lock->l_writers == 0)
                        continue;

                if (!(lock->l_req_mode & *mode))
                        continue;
                match = lock->l_req_mode;

                if (lock->l_resource->lr_type == LDLM_EXTENT &&
                    (lock->l_policy_data.l_extent.start >
                     policy->l_extent.start ||
                     lock->l_policy_data.l_extent.end < policy->l_extent.end))
                        continue;

                if (unlikely(match == LCK_GROUP) &&
                    lock->l_resource->lr_type == LDLM_EXTENT &&
                    lock->l_policy_data.l_extent.gid != policy->l_extent.gid)
                        continue;

                /* We match if we have an existing lock with the same or a
                   wider set of bits. */
                if (lock->l_resource->lr_type == LDLM_IBITS &&
                     ((lock->l_policy_data.l_inodebits.bits &
                      policy->l_inodebits.bits) !=
                      policy->l_inodebits.bits))
                        continue;

                if (!unref &&
                    (lock->l_destroyed || (lock->l_flags & LDLM_FL_FAILED)))
                        continue;

                if ((flags & LDLM_FL_LOCAL_ONLY) &&
                    !(lock->l_flags & LDLM_FL_LOCAL))
                        continue;

                if (flags & LDLM_FL_TEST_LOCK) {
                        LDLM_LOCK_GET(lock);
                        ldlm_lock_touch_in_lru(lock);
                } else {
                        ldlm_lock_addref_internal_nolock(lock, match);
                }
                *mode = match;
                return lock;
        }

        return NULL;
}

void ldlm_lock_allow_match_locked(struct ldlm_lock *lock)
{
        lock->l_flags |= LDLM_FL_LVB_READY;
        cfs_waitq_signal(&lock->l_waitq);
}

void ldlm_lock_allow_match(struct ldlm_lock *lock)
{
        lock_res_and_lock(lock);
        ldlm_lock_allow_match_locked(lock);
        unlock_res_and_lock(lock);
}

/* Can be called in two ways:
 *
 * If 'ns' is NULL, then lockh describes an existing lock that we want to look
 * for a duplicate of.
 *
 * Otherwise, all of the fields must be filled in, to match against.
 *
 * If 'flags' contains LDLM_FL_LOCAL_ONLY, then only match local locks on the
 *     server (i.e., connh is NULL)
 * If 'flags' contains LDLM_FL_BLOCK_GRANTED, then only locks on the granted
 *     list will be considered
 * If 'flags' contains LDLM_FL_CBPENDING, then locks that have been marked
 *     to be canceled can still be matched as long as they still have reader
 *     or writer references
 * If 'flags' contains LDLM_FL_TEST_LOCK, then don't actually reference a lock,
 *     just tell us if we would have matched.
 *
 * Returns 1 if it finds an already-existing lock that is compatible; in this
 * case, lockh is filled in with an addref()ed lock.
 *
 * We also check the security context; if that check fails we simply return 0
 * (to keep the caller code unchanged) and the context failure will be
 * discovered by the caller some time later.
 */
ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, int flags,
                            const struct ldlm_res_id *res_id, ldlm_type_t type,
                            ldlm_policy_data_t *policy, ldlm_mode_t mode,
                            struct lustre_handle *lockh, int unref)
{
        struct ldlm_resource *res;
        struct ldlm_lock *lock, *old_lock = NULL;
        int rc = 0;
        ENTRY;

        if (ns == NULL) {
                old_lock = ldlm_handle2lock(lockh);
                LASSERT(old_lock);

                ns = ldlm_lock_to_ns(old_lock);
                res_id = &old_lock->l_resource->lr_name;
                type = old_lock->l_resource->lr_type;
                mode = old_lock->l_req_mode;
        }

        res = ldlm_resource_get(ns, NULL, res_id, type, 0);
        if (res == NULL) {
                LASSERT(old_lock == NULL);
                RETURN(0);
        }

        LDLM_RESOURCE_ADDREF(res);
        lock_res(res);

        lock = search_queue(&res->lr_granted, &mode, policy, old_lock,
                            flags, unref);
        if (lock != NULL)
                GOTO(out, rc = 1);
        if (flags & LDLM_FL_BLOCK_GRANTED)
                GOTO(out, rc = 0);
        lock = search_queue(&res->lr_converting, &mode, policy, old_lock,
                            flags, unref);
        if (lock != NULL)
                GOTO(out, rc = 1);
        lock = search_queue(&res->lr_waiting, &mode, policy, old_lock,
                            flags, unref);
        if (lock != NULL)
                GOTO(out, rc = 1);

        EXIT;
 out:
        unlock_res(res);
        LDLM_RESOURCE_DELREF(res);
        ldlm_resource_putref(res);

        if (lock) {
                ldlm_lock2handle(lock, lockh);
                if ((flags & LDLM_FL_LVB_READY) &&
                    (!(lock->l_flags & LDLM_FL_LVB_READY))) {
                        struct l_wait_info lwi;
                        if (lock->l_completion_ast) {
                                int err = lock->l_completion_ast(lock,
                                                          LDLM_FL_WAIT_NOREPROC,
                                                                 NULL);
                                if (err) {
                                        if (flags & LDLM_FL_TEST_LOCK)
                                                LDLM_LOCK_RELEASE(lock);
                                        else
                                                ldlm_lock_decref_internal(lock,
                                                                          mode);
                                        rc = 0;
                                        goto out2;
                                }
                        }

                        lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout),
                                               NULL, LWI_ON_SIGNAL_NOOP, NULL);

                        /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */
                        l_wait_event(lock->l_waitq,
                                     (lock->l_flags & LDLM_FL_LVB_READY), &lwi);
                }
        }
 out2:
        if (rc) {
                LDLM_DEBUG(lock, "matched ("LPU64" "LPU64")",
                           (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                res_id->name[2] : policy->l_extent.start,
                           (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                res_id->name[3] : policy->l_extent.end);

                /* check the user's security context */
                if (lock->l_conn_export &&
                    sptlrpc_import_check_ctx(
                                class_exp2cliimp(lock->l_conn_export))) {
                        if (!(flags & LDLM_FL_TEST_LOCK))
                                ldlm_lock_decref_internal(lock, mode);
                        rc = 0;
                }

                if (flags & LDLM_FL_TEST_LOCK)
                        LDLM_LOCK_RELEASE(lock);

        } else if (!(flags & LDLM_FL_TEST_LOCK)) { /* less verbose for test-only */
                LDLM_DEBUG_NOLOCK("not matched ns %p type %u mode %u res "
                                  LPU64"/"LPU64" ("LPU64" "LPU64")", ns,
                                  type, mode, res_id->name[0], res_id->name[1],
                                  (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                        res_id->name[2] : policy->l_extent.start,
                                  (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                        res_id->name[3] : policy->l_extent.end);
        }
        if (old_lock)
                LDLM_LOCK_PUT(old_lock);

        return rc ? mode : 0;
}
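
/*
 * Illustrative ldlm_lock_match() call (a hedged sketch, not original code;
 * 'ns' and 'res_id' stand for a caller's namespace and resource, and the
 * values are made up):
 *
 *      struct lustre_handle lockh;
 *      ldlm_policy_data_t policy = { .l_inodebits =
 *                                      { .bits = MDS_INODELOCK_LOOKUP } };
 *      ldlm_mode_t mode;
 *
 *      mode = ldlm_lock_match(ns, LDLM_FL_LVB_READY, res_id, LDLM_IBITS,
 *                             &policy, LCK_PR | LCK_PW, &lockh, 0);
 *      if (mode != 0) {
 *              ... lockh references a compatible lock held in 'mode' ...
 *              ldlm_lock_decref(&lockh, mode);
 *      }
 */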

/* Returns a referenced lock */
struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
                                   const struct ldlm_res_id *res_id,
                                   ldlm_type_t type,
                                   ldlm_mode_t mode,
                                   const struct ldlm_callback_suite *cbs,
                                   void *data, __u32 lvb_len)
{
        struct ldlm_lock *lock;
        struct ldlm_resource *res;
        ENTRY;

        res = ldlm_resource_get(ns, NULL, res_id, type, 1);
        if (res == NULL)
                RETURN(NULL);

        lock = ldlm_lock_new(res);

        if (lock == NULL)
                RETURN(NULL);

        lock->l_req_mode = mode;
        lock->l_ast_data = data;
        lock->l_pid = cfs_curproc_pid();
        lock->l_ns_srv = ns_is_server(ns);
        if (cbs) {
                lock->l_blocking_ast = cbs->lcs_blocking;
                lock->l_completion_ast = cbs->lcs_completion;
                lock->l_glimpse_ast = cbs->lcs_glimpse;
                lock->l_weigh_ast = cbs->lcs_weigh;
        }

        lock->l_tree_node = NULL;
        /* if this is an extent lock, allocate the interval tree node */
        if (type == LDLM_EXTENT) {
                if (ldlm_interval_alloc(lock) == NULL)
                        GOTO(out, 0);
        }

        if (lvb_len) {
                lock->l_lvb_len = lvb_len;
                OBD_ALLOC(lock->l_lvb_data, lvb_len);
                if (lock->l_lvb_data == NULL)
                        GOTO(out, 0);
        }

        if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_NEW_LOCK))
                GOTO(out, 0);

        RETURN(lock);

out:
        ldlm_lock_destroy(lock);
        LDLM_LOCK_RELEASE(lock);
        return NULL;
}
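
/*
 * Illustrative create-then-enqueue sequence (a hedged sketch, not original
 * code; 'my_blocking_ast' and 'my_completion_ast' are hypothetical callbacks
 * and error handling is omitted):
 *
 *      const struct ldlm_callback_suite cbs = {
 *              .lcs_blocking   = my_blocking_ast,
 *              .lcs_completion = my_completion_ast,
 *      };
 *      struct ldlm_lock *lock;
 *      int flags = 0;
 *      ldlm_error_t err;
 *
 *      lock = ldlm_lock_create(ns, res_id, LDLM_PLAIN, LCK_PR, &cbs,
 *                              NULL, 0);
 *      if (lock != NULL)
 *              err = ldlm_lock_enqueue(ns, &lock, NULL, &flags);
 */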
1272
1273 ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
1274                                struct ldlm_lock **lockp,
1275                                void *cookie, int *flags)
1276 {
1277         struct ldlm_lock *lock = *lockp;
1278         struct ldlm_resource *res = lock->l_resource;
1279         int local = ns_is_client(ldlm_res_to_ns(res));
1280         ldlm_processing_policy policy;
1281         ldlm_error_t rc = ELDLM_OK;
1282         struct ldlm_interval *node = NULL;
1283         ENTRY;
1284
1285         lock->l_last_activity = cfs_time_current_sec();
1286         /* policies are not executed on the client or during replay */
1287         if ((*flags & (LDLM_FL_HAS_INTENT|LDLM_FL_REPLAY)) == LDLM_FL_HAS_INTENT
1288             && !local && ns->ns_policy) {
1289                 rc = ns->ns_policy(ns, lockp, cookie, lock->l_req_mode, *flags,
1290                                    NULL);
1291                 if (rc == ELDLM_LOCK_REPLACED) {
1292                         /* The lock that was returned has already been granted,
1293                          * and placed into lockp.  If it's not the same as the
1294                          * one we passed in, then destroy the old one and our
1295                          * work here is done. */
1296                         if (lock != *lockp) {
1297                                 ldlm_lock_destroy(lock);
1298                                 LDLM_LOCK_RELEASE(lock);
1299                         }
1300                         *flags |= LDLM_FL_LOCK_CHANGED;
1301                         RETURN(0);
1302                 } else if (rc != ELDLM_OK ||
1303                            (rc == ELDLM_OK && (*flags & LDLM_FL_INTENT_ONLY))) {
1304                         ldlm_lock_destroy(lock);
1305                         RETURN(rc);
1306                 }
1307         }
1308
1309         /* For a replaying lock, it might be already in granted list. So
1310          * unlinking the lock will cause the interval node to be freed, we
1311          * have to allocate the interval node early otherwise we can't regrant
1312          * this lock in the future. - jay */
1313         if (!local && (*flags & LDLM_FL_REPLAY) && res->lr_type == LDLM_EXTENT)
1314                 OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, CFS_ALLOC_IO);
1315
1316         lock_res_and_lock(lock);
1317         if (local && lock->l_req_mode == lock->l_granted_mode) {
1318                 /* The server returned a blocked lock, but it was granted
1319                  * before we got a chance to actually enqueue it.  We don't
1320                  * need to do anything else. */
1321                 *flags &= ~(LDLM_FL_BLOCK_GRANTED |
1322                             LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
1323                 GOTO(out, ELDLM_OK);
1324         }
1325
1326         ldlm_resource_unlink_lock(lock);
1327         if (res->lr_type == LDLM_EXTENT && lock->l_tree_node == NULL) {
1328                 if (node == NULL) {
1329                         ldlm_lock_destroy_nolock(lock);
1330                         GOTO(out, rc = -ENOMEM);
1331                 }
1332
1333                 CFS_INIT_LIST_HEAD(&node->li_group);
1334                 ldlm_interval_attach(node, lock);
1335                 node = NULL;
1336         }
1337
1338         /* Some flags from the enqueue want to make it into the AST, via the
1339          * lock's l_flags. */
1340         lock->l_flags |= *flags & LDLM_AST_DISCARD_DATA;
1341
1342         /* This distinction between local lock trees is very important; a client
1343          * namespace only has information about locks taken by that client, and
1344          * thus doesn't have enough information to decide for itself if it can
1345          * be granted (below).  In this case, we do exactly what the server
1346          * tells us to do, as dictated by the 'flags'.
1347          *
1348          * We do exactly the same thing during recovery, when the server is
1349          * more or less trusting the clients not to lie.
1350          *
1351          * FIXME (bug 268): Detect obvious lies by checking compatibility in
1352          * granted/converting queues. */
1353         if (local) {
1354                 if (*flags & LDLM_FL_BLOCK_CONV)
1355                         ldlm_resource_add_lock(res, &res->lr_converting, lock);
1356                 else if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED))
1357                         ldlm_resource_add_lock(res, &res->lr_waiting, lock);
1358                 else
1359                         ldlm_grant_lock(lock, NULL);
1360                 GOTO(out, ELDLM_OK);
1361         } else if (*flags & LDLM_FL_REPLAY) {
1362                 if (*flags & LDLM_FL_BLOCK_CONV) {
1363                         ldlm_resource_add_lock(res, &res->lr_converting, lock);
1364                         GOTO(out, ELDLM_OK);
1365                 } else if (*flags & LDLM_FL_BLOCK_WAIT) {
1366                         ldlm_resource_add_lock(res, &res->lr_waiting, lock);
1367                         GOTO(out, ELDLM_OK);
1368                 } else if (*flags & LDLM_FL_BLOCK_GRANTED) {
1369                         ldlm_grant_lock(lock, NULL);
1370                         GOTO(out, ELDLM_OK);
1371                 }
1372                 /* If no flags, fall through to normal enqueue path. */
1373         }
1374
1375         policy = ldlm_processing_policy_table[res->lr_type];
1376         policy(lock, flags, 1, &rc, NULL);
1377         GOTO(out, rc);
1378 out:
1379         unlock_res_and_lock(lock);
1380         if (node)
1381                 OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
1382         return rc;
1383 }
1384
1385 /* Must be called with namespace taken: queue is waiting or converting. */
1386 int ldlm_reprocess_queue(struct ldlm_resource *res, cfs_list_t *queue,
1387                          cfs_list_t *work_list)
1388 {
1389         cfs_list_t *tmp, *pos;
1390         ldlm_processing_policy policy;
1391         int flags;
1392         int rc = LDLM_ITER_CONTINUE;
1393         ldlm_error_t err;
1394         ENTRY;
1395
1396         check_res_locked(res);
1397
1398         policy = ldlm_processing_policy_table[res->lr_type];
1399         LASSERT(policy);
1400
1401         cfs_list_for_each_safe(tmp, pos, queue) {
1402                 struct ldlm_lock *pending;
1403                 pending = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
1404
1405                 CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);
1406
1407                 flags = 0;
1408                 rc = policy(pending, &flags, 0, &err, work_list);
1409                 if (rc != LDLM_ITER_CONTINUE)
1410                         break;
1411         }
1412
1413         RETURN(rc);
1414 }
1415
1416 /* Helper function for ldlm_run_ast_work().
1417  *
1418  * Send an existing rpc set specified by @arg->set and then
1419  * destroy it. Create new one if @do_create flag is set. */
1420 static void
1421 ldlm_send_and_maybe_create_set(struct ldlm_cb_set_arg *arg, int do_create)
1422 {
1423         ENTRY;
1424
1425         ptlrpc_set_wait(arg->set);
1426         if (arg->type == LDLM_BL_CALLBACK)
1427                 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_GLIMPSE, 2);
1428         ptlrpc_set_destroy(arg->set);
1429
1430         if (do_create)
1431                 arg->set = ptlrpc_prep_set();
1432
1433         EXIT;
1434 }
1435
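/* Process one entry from the blocking-AST work list: unlink it from
 * l_bl_ast under the res lock, then send the blocking AST carrying a
 * descriptor of the lock that caused the conflict. */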
1436 static int
1437 ldlm_work_bl_ast_lock(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg)
1438 {
1439         struct ldlm_lock_desc d;
1440         struct ldlm_lock *lock = cfs_list_entry(tmp, struct ldlm_lock,
1441                                                 l_bl_ast);
1442         ENTRY;
1443
1444         /* nobody should touch l_bl_ast */
1445         lock_res_and_lock(lock);
1446         cfs_list_del_init(&lock->l_bl_ast);
1447
1448         LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
1449         LASSERT(lock->l_bl_ast_run == 0);
1450         LASSERT(lock->l_blocking_lock);
1451         lock->l_bl_ast_run++;
1452         unlock_res_and_lock(lock);
1453
1454         ldlm_lock2desc(lock->l_blocking_lock, &d);
1455
1456         lock->l_blocking_ast(lock, &d, (void *)arg,
1457                              LDLM_CB_BLOCKING);
1458         LDLM_LOCK_RELEASE(lock->l_blocking_lock);
1459         lock->l_blocking_lock = NULL;
1460         LDLM_LOCK_RELEASE(lock);
1461
1462         RETURN(1);
1463 }
1464
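/* Process one entry from the completion-AST work list: clear
 * LDLM_FL_CP_REQD and invoke the saved completion callback, if any. */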
1465 static int
1466 ldlm_work_cp_ast_lock(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg)
1467 {
1468         struct ldlm_lock *lock = cfs_list_entry(tmp, struct ldlm_lock, l_cp_ast);
1469         ldlm_completion_callback completion_callback;
1470         int rc = 0;
1471         ENTRY;
1472
1473         /* It's possible to receive a completion AST before we've set
1474          * the l_completion_ast pointer: either because the AST arrived
1475          * before the reply, or simply because there's a small race
1476          * window between receiving the reply and finishing the local
1477          * enqueue. (bug 842)
1478          *
1479          * This can't happen with the blocking_ast, however, because we
1480          * will never call the local blocking_ast until we drop our
1481          * reader/writer reference, which we won't do until we get the
1482          * reply and finish enqueueing. */
1483
1484         /* nobody should touch l_cp_ast */
1485         lock_res_and_lock(lock);
1486         cfs_list_del_init(&lock->l_cp_ast);
1487         LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
1488         /* save l_completion_ast since it can be changed by
1489          * mds_intent_policy(), see bug 14225 */
1490         completion_callback = lock->l_completion_ast;
1491         lock->l_flags &= ~LDLM_FL_CP_REQD;
1492         unlock_res_and_lock(lock);
1493
1494         if (completion_callback != NULL) {
1495                 completion_callback(lock, 0, (void *)arg);
1496                 rc = 1;
1497         }
1498         LDLM_LOCK_RELEASE(lock);
1499
1500         RETURN(rc);
1501 }
1502
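/* Process one entry from the revoke work list: send a blocking AST that
 * advertises an EX request, so the lock conflicts with everything and
 * its holder must give it up entirely. */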
1503 static int
1504 ldlm_work_revoke_ast_lock(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg)
1505 {
1506         struct ldlm_lock_desc desc;
1507         struct ldlm_lock *lock = cfs_list_entry(tmp, struct ldlm_lock,
1508                                                 l_rk_ast);
1509         ENTRY;
1510
1511         cfs_list_del_init(&lock->l_rk_ast);
1512
1513         /* The descriptor just pretends the lock is exclusive. */
1514         ldlm_lock2desc(lock, &desc);
1515         desc.l_req_mode = LCK_EX;
1516         desc.l_granted_mode = 0;
1517
1518         lock->l_blocking_ast(lock, &desc, (void*)arg, LDLM_CB_BLOCKING);
1519         LDLM_LOCK_RELEASE(lock);
1520
1521         RETURN(1);
1522 }
1523
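/* Send the ASTs queued on @rpc_list, batching the RPCs into sets of at
 * most PARALLEL_AST_LIMIT requests; returns -ERESTART if any callback
 * asked for the operation to be restarted. */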
1524 int ldlm_run_ast_work(cfs_list_t *rpc_list, ldlm_desc_ast_t ast_type)
1525 {
1526         struct ldlm_cb_set_arg arg;
1527         cfs_list_t *tmp, *pos;
1528         int (*work_ast_lock)(cfs_list_t *tmp, struct ldlm_cb_set_arg *arg);
1529         int ast_count;
1530         ENTRY;
1531
1532         if (cfs_list_empty(rpc_list))
1533                 RETURN(0);
1534
1535         arg.set = ptlrpc_prep_set();
1536         if (arg.set == NULL)
1537                 RETURN(-ERESTART);
1538         cfs_atomic_set(&arg.restart, 0);
1539         switch (ast_type) {
1540         case LDLM_WORK_BL_AST:
1541                 arg.type = LDLM_BL_CALLBACK;
1542                 work_ast_lock = ldlm_work_bl_ast_lock;
1543                 break;
1544         case LDLM_WORK_CP_AST:
1545                 arg.type = LDLM_CP_CALLBACK;
1546                 work_ast_lock = ldlm_work_cp_ast_lock;
1547                 break;
1548         case LDLM_WORK_REVOKE_AST:
1549                 arg.type = LDLM_BL_CALLBACK;
1550                 work_ast_lock = ldlm_work_revoke_ast_lock;
1551                 break;
1552         default:
1553                 LBUG();
1554         }
1555
1556         ast_count = 0;
1557         cfs_list_for_each_safe(tmp, pos, rpc_list) {
1558                 ast_count += work_ast_lock(tmp, &arg);
1559
1560                 /* Send the request set once it reaches
1561                  * PARALLEL_AST_LIMIT, and create a new set for the
1562                  * requests remaining in @rpc_list */
1563                 if (unlikely(ast_count == PARALLEL_AST_LIMIT)) {
1564                         ldlm_send_and_maybe_create_set(&arg, 1);
1565                         ast_count = 0;
1566                 }
1567         }
1568
1569         if (ast_count > 0)
1570                 ldlm_send_and_maybe_create_set(&arg, 0);
1571         else
1572                 /* If the number of ASTs is a multiple of
1573                  * PARALLEL_AST_LIMIT, or @rpc_list was initially empty,
1574                  * @arg.set must be destroyed here, otherwise we leak
1575                  * the set. */
1576                 ptlrpc_set_destroy(arg.set);
1577
1578         RETURN(cfs_atomic_read(&arg.restart) ? -ERESTART : 0);
1579 }
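
/* Illustrative sketch (not part of the original source): a typical
 * caller gathers blocked locks on a private list and fires their ASTs
 * in one batch, as ldlm_reprocess_all() below does for completion ASTs:
 *
 *      CFS_LIST_HEAD(rpc_list);
 *      ...queue locks via their l_bl_ast/l_cp_ast links...
 *      if (ldlm_run_ast_work(&rpc_list, LDLM_WORK_BL_AST) == -ERESTART)
 *              ...rebuild the list and retry...
 */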
1580
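/* cfs_hash iteration glue: reprocess a single resource and keep walking
 * unless the iterator asks to stop. */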
1581 static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
1582 {
1583         ldlm_reprocess_all(res);
1584         return LDLM_ITER_CONTINUE;
1585 }
1586
1587 static int ldlm_reprocess_res(cfs_hash_t *hs, cfs_hash_bd_t *bd,
1588                               cfs_hlist_node_t *hnode, void *arg)
1589 {
1590         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
1591         int    rc;
1592
1593         rc = reprocess_one_queue(res, arg);
1594
1595         return rc == LDLM_ITER_STOP;
1596 }
1597
1598 void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
1599 {
1600         ENTRY;
1601
1602         if (ns != NULL) {
1603                 cfs_hash_for_each_nolock(ns->ns_rs_hash,
1604                                          ldlm_reprocess_res, NULL);
1605         }
1606         EXIT;
1607 }
1608
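/* Re-run the processing policy over a resource's converting and waiting
 * queues, then send completion ASTs for any locks granted as a result;
 * starts over if the AST batch returns -ERESTART. */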
1609 void ldlm_reprocess_all(struct ldlm_resource *res)
1610 {
1611         CFS_LIST_HEAD(rpc_list);
1612         int rc;
1613         ENTRY;
1614
1615         /* Local lock trees don't get reprocessed. */
1616         if (ns_is_client(ldlm_res_to_ns(res))) {
1617                 EXIT;
1618                 return;
1619         }
1620
1621  restart:
1622         lock_res(res);
1623         rc = ldlm_reprocess_queue(res, &res->lr_converting, &rpc_list);
1624         if (rc == LDLM_ITER_CONTINUE)
1625                 ldlm_reprocess_queue(res, &res->lr_waiting, &rpc_list);
1626         unlock_res(res);
1627
1628         rc = ldlm_run_ast_work(&rpc_list, LDLM_WORK_CP_AST);
1629         if (rc == -ERESTART) {
1630                 LASSERT(cfs_list_empty(&rpc_list));
1631                 goto restart;
1632         }
1633         EXIT;
1634 }
1635
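/* Invoke the lock's blocking AST with LDLM_CB_CANCELING, at most once
 * per lock; called with the res lock held, which is dropped around the
 * callback itself. */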
1636 void ldlm_cancel_callback(struct ldlm_lock *lock)
1637 {
1638         check_res_locked(lock->l_resource);
1639         if (!(lock->l_flags & LDLM_FL_CANCEL)) {
1640                 lock->l_flags |= LDLM_FL_CANCEL;
1641                 if (lock->l_blocking_ast) {
1642                         // l_check_no_ns_lock(ns);
1643                         unlock_res_and_lock(lock);
1644                         lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
1645                                              LDLM_CB_CANCELING);
1646                         lock_res_and_lock(lock);
1647                 } else {
1648                         LDLM_DEBUG(lock, "no blocking ast");
1649                 }
1650         }
1651         lock->l_flags |= LDLM_FL_BL_DONE;
1652 }
1653
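/* Remove a PLAIN or IBITS lock from the mode and policy skiplists that
 * group compatible granted locks on a resource; a no-op for other lock
 * types. */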
1654 void ldlm_unlink_lock_skiplist(struct ldlm_lock *req)
1655 {
1656         if (req->l_resource->lr_type != LDLM_PLAIN &&
1657             req->l_resource->lr_type != LDLM_IBITS)
1658                 return;
1659
1660         cfs_list_del_init(&req->l_sl_policy);
1661         cfs_list_del_init(&req->l_sl_mode);
1662 }
1663
1664 void ldlm_lock_cancel(struct ldlm_lock *lock)
1665 {
1666         struct ldlm_resource *res;
1667         struct ldlm_namespace *ns;
1668         ENTRY;
1669
1670         lock_res_and_lock(lock);
1671
1672         res = lock->l_resource;
1673         ns  = ldlm_res_to_ns(res);
1674
1675         /* Please do not, no matter how tempting, remove this LBUG without
1676          * talking to me first. -phik */
1677         if (lock->l_readers || lock->l_writers) {
1678                 LDLM_ERROR(lock, "lock still has references");
1679                 LBUG();
1680         }
1681
1682         ldlm_del_waiting_lock(lock);
1683
1684         /* Run the cancel callback; it may drop the res lock around the AST. */
1685         ldlm_cancel_callback(lock);
1686
1687         /* Yes, a second time: the lock may have been added back while we
1688            were running without the res lock in ldlm_cancel_callback */
1689         ldlm_del_waiting_lock(lock);
1690         ldlm_resource_unlink_lock(lock);
1691         ldlm_lock_destroy_nolock(lock);
1692
1693         if (lock->l_granted_mode == lock->l_req_mode)
1694                 ldlm_pool_del(&ns->ns_pool, lock);
1695
1696         /* Make sure we will not be called again for the same lock, which
1697          * is possible unless lock->l_granted_mode is zeroed out below */
1698         lock->l_granted_mode = LCK_MINMODE;
1699         unlock_res_and_lock(lock);
1700
1701         EXIT;
1702 }
1703
1704 int ldlm_lock_set_data(struct lustre_handle *lockh, void *data)
1705 {
1706         struct ldlm_lock *lock = ldlm_handle2lock(lockh);
1707         ENTRY;
1708
1709         if (lock == NULL)
1710                 RETURN(-EINVAL);
1711
1712         lock->l_ast_data = data;
1713         LDLM_LOCK_PUT(lock);
1714         RETURN(0);
1715 }
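
/* Illustrative sketch (not part of the original source): callers use
 * this to attach private state to a lock they hold by handle, e.g.
 *
 *      rc = ldlm_lock_set_data(&lockh, my_inode);
 *
 * where "lockh" and "my_inode" are hypothetical; the pointer comes back
 * as lock->l_ast_data in the lock's callbacks (see
 * ldlm_cancel_callback() above). */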
1716
1717 int ldlm_cancel_locks_for_export_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
1718                                     cfs_hlist_node_t *hnode, void *data)
1720 {
1721         struct obd_export    *exp  = data;
1722         struct ldlm_lock     *lock = cfs_hash_object(hs, hnode);
1723         struct ldlm_resource *res;
1724
1725         res = ldlm_resource_getref(lock->l_resource);
1726         LDLM_LOCK_GET(lock);
1727
1728         LDLM_DEBUG(lock, "export %p", exp);
1729         ldlm_res_lvbo_update(res, NULL, 1);
1730         ldlm_lock_cancel(lock);
1731         ldlm_reprocess_all(res);
1732         ldlm_resource_putref(res);
1733         LDLM_LOCK_RELEASE(lock);
1734         return 0;
1735 }
1736
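/* Cancel all locks held by an export, e.g. on client eviction; iterates
 * until exp_lock_hash is empty, since cancellations may race with new
 * enqueues. */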
1737 void ldlm_cancel_locks_for_export(struct obd_export *exp)
1738 {
1739         cfs_hash_for_each_empty(exp->exp_lock_hash,
1740                                 ldlm_cancel_locks_for_export_cb, exp);
1741 }
1742
1743 /**
1744  * Downgrade an exclusive lock.
1745  *
1746  * A fast variant of ldlm_lock_convert for the conversion of exclusive
1747  * locks.  The conversion always succeeds.
1748  *
1749  * \param lock A lock to convert
1750  * \param new_mode new lock mode
1751  */
1752 void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode)
1753 {
1754         ENTRY;
1755
1756         LASSERT(lock->l_granted_mode & (LCK_PW | LCK_EX));
1757         LASSERT(new_mode == LCK_COS);
1758
1759         lock_res_and_lock(lock);
1760         ldlm_resource_unlink_lock(lock);
1761         /*
1762          * Remove the lock from pool as it will be added again in
1763          * ldlm_grant_lock() called below.
1764          */
1765         ldlm_pool_del(&ldlm_lock_to_ns(lock)->ns_pool, lock);
1766
1767         lock->l_req_mode = new_mode;
1768         ldlm_grant_lock(lock, NULL);
1769         unlock_res_and_lock(lock);
1770         ldlm_reprocess_all(lock->l_resource);
1771
1772         EXIT;
1773 }
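
/* Note: LCK_COS is the only target mode accepted above; presumably this
 * serves the server-side commit-on-share path downgrading PW/EX locks. */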
1774
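/* Convert a granted lock to @new_mode.  Only the PR -> PW upgrade is
 * expected here (see the LASSERTF below); returns the lock's resource,
 * or NULL if the interval node cannot be allocated or the conversion
 * is blocked by the processing policy. */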
1775 struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
1776                                         __u32 *flags)
1777 {
1778         CFS_LIST_HEAD(rpc_list);
1779         struct ldlm_resource *res;
1780         struct ldlm_namespace *ns;
1781         int granted = 0;
1782         int old_mode, rc;
1783         struct sl_insert_point prev;
1784         ldlm_error_t err;
1785         struct ldlm_interval *node;
1786         ENTRY;
1787
1788         if (new_mode == lock->l_granted_mode) { // No changes? Just return.
1789                 *flags |= LDLM_FL_BLOCK_GRANTED;
1790                 RETURN(lock->l_resource);
1791         }
1792
1793         /* I can't check the type of the lock here because its bit-lock
1794          * is not held, so do the allocation blindly. -jay */
1795         OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, CFS_ALLOC_IO);
1796         if (node == NULL)  /* Actually, this causes EDEADLOCK to be returned */
1797                 RETURN(NULL);
1798
1799         LASSERTF((new_mode == LCK_PW && lock->l_granted_mode == LCK_PR),
1800                  "new_mode %u, granted %u\n", new_mode, lock->l_granted_mode);
1801
1802         lock_res_and_lock(lock);
1803
1804         res = lock->l_resource;
1805         ns  = ldlm_res_to_ns(res);
1806
1807         old_mode = lock->l_req_mode;
1808         lock->l_req_mode = new_mode;
1809         if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS) {
1810                 /* remember the lock position where the lock might be
1811                  * added back to the granted list later and also
1812                  * remember the join mode for skiplist fixing. */
1813                 prev.res_link = lock->l_res_link.prev;
1814                 prev.mode_link = lock->l_sl_mode.prev;
1815                 prev.policy_link = lock->l_sl_policy.prev;
1816                 ldlm_resource_unlink_lock(lock);
1817         } else {
1818                 ldlm_resource_unlink_lock(lock);
1819                 if (res->lr_type == LDLM_EXTENT) {
1820                         /* FIXME: ugly code; the lock must be attached to
1821                          * an interval node again since it may be granted
1822                          * soon */
1823                         CFS_INIT_LIST_HEAD(&node->li_group);
1824                         ldlm_interval_attach(node, lock);
1825                         node = NULL;
1826                 }
1827         }
1828
1829         /*
1830          * Remove old lock from the pool before adding the lock with new
1831          * mode below in ->policy()
1832          */
1833         ldlm_pool_del(&ns->ns_pool, lock);
1834
1835         /* If this is a local resource, put it on the appropriate list. */
1836         if (ns_is_client(ldlm_res_to_ns(res))) {
1837                 if (*flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_GRANTED)) {
1838                         ldlm_resource_add_lock(res, &res->lr_converting, lock);
1839                 } else {
1840                         /* This should never happen, because of the way the
1841                          * server handles conversions. */
1842                         LDLM_ERROR(lock, "Erroneous flags %d on local lock\n",
1843                                    *flags);
1844                         LBUG();
1845
1846                         ldlm_grant_lock(lock, &rpc_list);
1847                         granted = 1;
1848                         /* FIXME: completion should not be called with lr_lock held! */
1849                         if (lock->l_completion_ast)
1850                                 lock->l_completion_ast(lock, 0, NULL);
1851                 }
1852         } else {
1853                 int pflags = 0;
1854                 ldlm_processing_policy policy;
1855                 policy = ldlm_processing_policy_table[res->lr_type];
1856                 rc = policy(lock, &pflags, 0, &err, &rpc_list);
1857                 if (rc == LDLM_ITER_STOP) {
1858                         lock->l_req_mode = old_mode;
1859                         if (res->lr_type == LDLM_EXTENT)
1860                                 ldlm_extent_add_lock(res, lock);
1861                         else
1862                                 ldlm_granted_list_add_lock(lock, &prev);
1863
1864                         res = NULL;
1865                 } else {
1866                         *flags |= LDLM_FL_BLOCK_GRANTED;
1867                         granted = 1;
1868                 }
1869         }
1870         unlock_res_and_lock(lock);
1871
1872         if (granted)
1873                 ldlm_run_ast_work(&rpc_list, LDLM_WORK_CP_AST);
1874         if (node)
1875                 OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
1876         RETURN(res);
1877 }
1878
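/* Dump the state of a single lock into the debug log at @level; see
 * ldlm_lock_dump_handle() below for the handle-based variant. */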
1879 void ldlm_lock_dump(int level, struct ldlm_lock *lock, int pos)
1880 {
1881         struct obd_device *obd = NULL;
1882
1883         if (!((libcfs_debug | D_ERROR) & level))
1884                 return;
1885
1886         if (!lock) {
1887                 CDEBUG(level, "  NULL LDLM lock\n");
1888                 return;
1889         }
1890
1891         CDEBUG(level, " -- Lock dump: %p/"LPX64" (rc: %d) (pos: %d) (pid: %d)\n",
1892                lock, lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
1893                pos, lock->l_pid);
1894         if (lock->l_conn_export != NULL)
1895                 obd = lock->l_conn_export->exp_obd;
1896         if (lock->l_export && lock->l_export->exp_connection) {
1897                 CDEBUG(level, "  Node: NID %s (rhandle: "LPX64")\n",
1898                      libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid),
1899                      lock->l_remote_handle.cookie);
1900         } else if (obd == NULL) {
1901                 CDEBUG(level, "  Node: local\n");
1902         } else {
1903                 struct obd_import *imp = obd->u.cli.cl_import;
1904                 CDEBUG(level, "  Node: NID %s (rhandle: "LPX64")\n",
1905                        libcfs_nid2str(imp->imp_connection->c_peer.nid),
1906                        lock->l_remote_handle.cookie);
1907         }
1908         CDEBUG(level, "  Resource: %p ("LPU64"/"LPU64"/"LPU64")\n",
1909                   lock->l_resource,
1910                   lock->l_resource->lr_name.name[0],
1911                   lock->l_resource->lr_name.name[1],
1912                   lock->l_resource->lr_name.name[2]);
1913         CDEBUG(level, "  Req mode: %s, grant mode: %s, rc: %u, read: %d, "
1914                "write: %d flags: "LPX64"\n", ldlm_lockname[lock->l_req_mode],
1915                ldlm_lockname[lock->l_granted_mode],
1916                cfs_atomic_read(&lock->l_refc), lock->l_readers, lock->l_writers,
1917                lock->l_flags);
1918         if (lock->l_resource->lr_type == LDLM_EXTENT)
1919                 CDEBUG(level, "  Extent: "LPU64" -> "LPU64
1920                        " (req "LPU64"-"LPU64")\n",
1921                        lock->l_policy_data.l_extent.start,
1922                        lock->l_policy_data.l_extent.end,
1923                        lock->l_req_extent.start, lock->l_req_extent.end);
1924         else if (lock->l_resource->lr_type == LDLM_FLOCK)
1925                 CDEBUG(level, "  Pid: %d Extent: "LPU64" -> "LPU64"\n",
1926                        lock->l_policy_data.l_flock.pid,
1927                        lock->l_policy_data.l_flock.start,
1928                        lock->l_policy_data.l_flock.end);
1929         else if (lock->l_resource->lr_type == LDLM_IBITS)
1930                 CDEBUG(level, "  Bits: "LPX64"\n",
1931                        lock->l_policy_data.l_inodebits.bits);
1932 }
1933
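/* Resolve @lockh and, if the debug level is enabled, dump the lock at
 * the D_OTHER level. */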
1934 void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh)
1935 {
1936         struct ldlm_lock *lock;
1937
1938         if (!((libcfs_debug | D_ERROR) & level))
1939                 return;
1940
1941         lock = ldlm_handle2lock(lockh);
1942         if (lock == NULL)
1943                 return;
1944
1945         ldlm_lock_dump(D_OTHER, lock, 0);
1946
1947         LDLM_LOCK_PUT(lock);
1948 }
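/* Backend of the LDLM_DEBUG()/LDLM_ERROR() macros: emit a one-line,
 * per-lock-type description of @lock into the libcfs debug log. */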
1949
1950 void _ldlm_lock_debug(struct ldlm_lock *lock, __u32 level,
1951                       struct libcfs_debug_msg_data *data, const char *fmt,
1952                       ...)
1953 {
1954         va_list args;
1955         cfs_debug_limit_state_t *cdls = data->msg_cdls;
1956
1957         va_start(args, fmt);
1958
1959         if (lock->l_resource == NULL) {
1960                 libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
1961                                    data->msg_fn, data->msg_line, fmt, args,
1962                        " ns: \?\? lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
1963                        "res: \?\? rrc=\?\? type: \?\?\? flags: "LPX64" remote: "
1964                        LPX64" expref: %d pid: %u timeout: %lu\n", lock,
1965                        lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
1966                        lock->l_readers, lock->l_writers,
1967                        ldlm_lockname[lock->l_granted_mode],
1968                        ldlm_lockname[lock->l_req_mode],
1969                        lock->l_flags, lock->l_remote_handle.cookie,
1970                        lock->l_export ?
1971                        cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
1972                        lock->l_pid, lock->l_callback_timeout);
1973                 va_end(args);
1974                 return;
1975         }
1976
1977         switch (lock->l_resource->lr_type) {
1978         case LDLM_EXTENT:
1979                 libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
1980                                    data->msg_fn, data->msg_line, fmt, args,
1981                        " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
1982                        "res: "LPU64"/"LPU64" rrc: %d type: %s ["LPU64"->"LPU64
1983                        "] (req "LPU64"->"LPU64") flags: "LPX64" remote: "LPX64
1984                        " expref: %d pid: %u timeout %lu\n",
1985                        ldlm_lock_to_ns_name(lock), lock,
1986                        lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
1987                        lock->l_readers, lock->l_writers,
1988                        ldlm_lockname[lock->l_granted_mode],
1989                        ldlm_lockname[lock->l_req_mode],
1990                        lock->l_resource->lr_name.name[0],
1991                        lock->l_resource->lr_name.name[1],
1992                        cfs_atomic_read(&lock->l_resource->lr_refcount),
1993                        ldlm_typename[lock->l_resource->lr_type],
1994                        lock->l_policy_data.l_extent.start,
1995                        lock->l_policy_data.l_extent.end,
1996                        lock->l_req_extent.start, lock->l_req_extent.end,
1997                        lock->l_flags, lock->l_remote_handle.cookie,
1998                        lock->l_export ?
1999                        cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
2000                        lock->l_pid, lock->l_callback_timeout);
2001                 break;
2002
2003         case LDLM_FLOCK:
2004                 libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
2005                                    data->msg_fn, data->msg_line, fmt, args,
2006                        " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
2007                        "res: "LPU64"/"LPU64" rrc: %d type: %s pid: %d "
2008                        "["LPU64"->"LPU64"] flags: "LPX64" remote: "LPX64
2009                        " expref: %d pid: %u timeout: %lu\n",
2010                        ldlm_lock_to_ns_name(lock), lock,
2011                        lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
2012                        lock->l_readers, lock->l_writers,
2013                        ldlm_lockname[lock->l_granted_mode],
2014                        ldlm_lockname[lock->l_req_mode],
2015                        lock->l_resource->lr_name.name[0],
2016                        lock->l_resource->lr_name.name[1],
2017                        cfs_atomic_read(&lock->l_resource->lr_refcount),
2018                        ldlm_typename[lock->l_resource->lr_type],
2019                        lock->l_policy_data.l_flock.pid,
2020                        lock->l_policy_data.l_flock.start,
2021                        lock->l_policy_data.l_flock.end,
2022                        lock->l_flags, lock->l_remote_handle.cookie,
2023                        lock->l_export ?
2024                        cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
2025                        lock->l_pid, lock->l_callback_timeout);
2026                 break;
2027
2028         case LDLM_IBITS:
2029                 libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
2030                                    data->msg_fn, data->msg_line, fmt, args,
2031                        " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
2032                        "res: "LPU64"/"LPU64" bits "LPX64" rrc: %d type: %s "
2033                        "flags: "LPX64" remote: "LPX64" expref: %d "
2034                        "pid: %u timeout: %lu\n",
2035                        ldlm_lock_to_ns_name(lock),
2036                        lock, lock->l_handle.h_cookie,
2037                        cfs_atomic_read(&lock->l_refc),
2038                        lock->l_readers, lock->l_writers,
2039                        ldlm_lockname[lock->l_granted_mode],
2040                        ldlm_lockname[lock->l_req_mode],
2041                        lock->l_resource->lr_name.name[0],
2042                        lock->l_resource->lr_name.name[1],
2043                        lock->l_policy_data.l_inodebits.bits,
2044                        cfs_atomic_read(&lock->l_resource->lr_refcount),
2045                        ldlm_typename[lock->l_resource->lr_type],
2046                        lock->l_flags, lock->l_remote_handle.cookie,
2047                        lock->l_export ?
2048                        cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
2049                        lock->l_pid, lock->l_callback_timeout);
2050                 break;
2051
2052         default:
2053                 libcfs_debug_vmsg2(cdls, data->msg_subsys, level, data->msg_file,
2054                                    data->msg_fn, data->msg_line, fmt, args,
2055                        " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
2056                        "res: "LPU64"/"LPU64" rrc: %d type: %s flags: "LPX64" "
2057                        "remote: "LPX64" expref: %d pid: %u timeout %lu\n",
2058                        ldlm_lock_to_ns_name(lock),
2059                        lock, lock->l_handle.h_cookie,
2060                        cfs_atomic_read(&lock->l_refc),
2061                        lock->l_readers, lock->l_writers,
2062                        ldlm_lockname[lock->l_granted_mode],
2063                        ldlm_lockname[lock->l_req_mode],
2064                        lock->l_resource->lr_name.name[0],
2065                        lock->l_resource->lr_name.name[1],
2066                        cfs_atomic_read(&lock->l_resource->lr_refcount),
2067                        ldlm_typename[lock->l_resource->lr_type],
2068                        lock->l_flags, lock->l_remote_handle.cookie,
2069                        lock->l_export ?
2070                        cfs_atomic_read(&lock->l_export->exp_refcount) : -99,
2071                        lock->l_pid, lock->l_callback_timeout);
2072                 break;
2073         }
2074         va_end(args);
2075 }
2076 EXPORT_SYMBOL(_ldlm_lock_debug);