Whamcloud - gitweb
b=605627
[fs/lustre-release.git] / lustre / ldlm / ldlm_lock.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  Copyright (c) 2002 Cluster File Systems, Inc.
5  *   Author: Peter Braam <braam@clusterfs.com>
6  *   Author: Phil Schwan <phil@clusterfs.com>
7  *
8  *   This file is part of Lustre, http://www.lustre.org.
9  *
10  *   Lustre is free software; you can redistribute it and/or
11  *   modify it under the terms of version 2 of the GNU General Public
12  *   License as published by the Free Software Foundation.
13  *
14  *   Lustre is distributed in the hope that it will be useful,
15  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
16  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  *   GNU General Public License for more details.
18  *
19  *   You should have received a copy of the GNU General Public License
20  *   along with Lustre; if not, write to the Free Software
21  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22  */
23
24 #define DEBUG_SUBSYSTEM S_LDLM
25
26 #include <linux/slab.h>
27 #include <linux/module.h>
28 #include <linux/random.h>
29 #include <linux/lustre_dlm.h>
30 #include <linux/lustre_mds.h>
31 #include <linux/obd_class.h>
32
/* Human-readable names for each lock mode, indexed by ldlm_mode_t value.
 * (Uses the obsolete GNU "[index] value" designated-initializer syntax.) */
char *ldlm_lockname[] = {
        [0] "--",
        [LCK_EX] "EX",
        [LCK_PW] "PW",
        [LCK_PR] "PR",
        [LCK_CW] "CW",
        [LCK_CR] "CR",
        [LCK_NL] "NL"
};
/* Human-readable names for each resource/lock type, indexed by type. */
char *ldlm_typename[] = {
        [LDLM_PLAIN] "PLN",
        [LDLM_EXTENT] "EXT",
        [LDLM_MDSINTENT] "INT"
};
48
49 char *ldlm_it2str(int it)
50 {
51         switch (it) {
52         case IT_OPEN:
53                 return "open";
54         case IT_CREAT:
55                 return "creat";
56         case (IT_OPEN | IT_CREAT):
57                 return "open|creat";
58         case IT_MKDIR:
59                 return "mkdir";
60         case IT_LINK:
61                 return "link";
62         case IT_LINK2:
63                 return "link2";
64         case IT_SYMLINK:
65                 return "symlink";
66         case IT_UNLINK:
67                 return "unlink";
68         case IT_RMDIR:
69                 return "rmdir";
70         case IT_RENAME:
71                 return "rename";
72         case IT_RENAME2:
73                 return "rename2";
74         case IT_READDIR:
75                 return "readdir";
76         case IT_GETATTR:
77                 return "getattr";
78         case IT_SETATTR:
79                 return "setattr";
80         case IT_READLINK:
81                 return "readlink";
82         case IT_MKNOD:
83                 return "mknod";
84         case IT_LOOKUP:
85                 return "lookup";
86         default:
87                 CERROR("Unknown intent %d\n", it);
88                 return "UNKNOWN";
89         }
90 }
91
92 extern kmem_cache_t *ldlm_lock_slab;
93
94 static int ldlm_plain_compat(struct ldlm_lock *a, struct ldlm_lock *b);
95
/* Per-type compatibility functions, indexed by resource type; consulted
 * in ldlm_lock_compat_list before falling back to plain mode compat. */
ldlm_res_compat ldlm_res_compat_table[] = {
        [LDLM_PLAIN] ldlm_plain_compat,
        [LDLM_EXTENT] ldlm_extent_compat,
        [LDLM_MDSINTENT] ldlm_plain_compat
};
101
/* Per-type server-side policy functions (run in ldlm_lock_enqueue); the
 * MDSINTENT slot is filled in at runtime by ldlm_register_intent(). */
ldlm_res_policy ldlm_res_policy_table[] = {
        [LDLM_PLAIN] NULL,
        [LDLM_EXTENT] ldlm_extent_policy,
        [LDLM_MDSINTENT] NULL
};
107
/* Install 'arg' as the policy function run for LDLM_MDSINTENT resources
 * (see the policy dispatch in ldlm_lock_enqueue). */
void ldlm_register_intent(int (*arg) (struct ldlm_lock * lock, void *req_cookie,
                                      ldlm_mode_t mode, void *data))
{
        ldlm_res_policy_table[LDLM_MDSINTENT] = arg;
}
113
/* Remove the LDLM_MDSINTENT policy installed by ldlm_register_intent(). */
void ldlm_unregister_intent(void)
{
        ldlm_res_policy_table[LDLM_MDSINTENT] = NULL;
}
118
119 /*
120  * REFCOUNTED LOCK OBJECTS
121  */
122
123
124 /*
125  * Lock refcounts, during creation:
126  *   - one special one for allocation, dec'd only once in destroy
127  *   - one for being a lock that's in-use
128  *   - one for the addref associated with a new lock
129  */
/* Take a reference on 'lock' (and on its resource) and return it. */
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
{
        /* The namespace lock keeps the refcount bump and the resource
         * reference atomic with respect to ldlm_lock_put. */
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        lock->l_refc++;
        /* Every lock reference also pins the underlying resource. */
        ldlm_resource_getref(lock->l_resource);
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        return lock;
}
138
/* Drop one reference on 'lock'.  When the last reference goes away on a
 * lock already marked LDLM_FL_DESTROYED, fire the dying callback, drop
 * the namespace lock count, and free the lock back to the slab. */
void ldlm_lock_put(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
        ENTRY;

        l_lock(&ns->ns_lock);
        lock->l_refc--;
        //LDLM_DEBUG(lock, "after refc--");
        if (lock->l_refc < 0)
                LBUG();

        /* Every lock reference pins the resource too (see ldlm_lock_get). */
        ldlm_resource_put(lock->l_resource);
        if (lock->l_parent)
                LDLM_LOCK_PUT(lock->l_parent);

        if (lock->l_refc == 0 && (lock->l_flags & LDLM_FL_DESTROYED)) {
                /* NOTE(review): l_blocking_ast is called unconditionally
                 * here, but is NULL-checked in ldlm_lock_compat_list --
                 * confirm it can never be NULL on this path. */
                lock->l_blocking_ast(lock, NULL, lock->l_data,
                                     lock->l_data_len, LDLM_CB_DYING);

                spin_lock(&ns->ns_counter_lock);
                ns->ns_locks--;
                spin_unlock(&ns->ns_counter_lock);

                lock->l_resource = NULL;
                LDLM_DEBUG(lock, "final lock_put on destroyed lock, freeing");
                if (lock->l_export && lock->l_export->exp_connection)
                        ptlrpc_put_connection(lock->l_export->exp_connection);
                kmem_cache_free(ldlm_lock_slab, lock);
                CDEBUG(D_MALLOC, "kfreed 'lock': %d at %p (tot 0).\n",
                       sizeof(*lock), lock);
        }
        l_unlock(&ns->ns_lock);
        EXIT;
}
173
/* Mark 'lock' destroyed and drop the allocation-time reference.  The
 * lock must have no children, no reader/writer references, and must be
 * off its resource queue; calling this twice is a safe no-op thanks to
 * the LDLM_FL_DESTROYED check below. */
void ldlm_lock_destroy(struct ldlm_lock *lock)
{
        ENTRY;
        l_lock(&lock->l_resource->lr_namespace->ns_lock);

        if (!list_empty(&lock->l_children)) {
                LDLM_DEBUG(lock, "still has children (%p)!",
                           lock->l_children.next);
                ldlm_lock_dump(lock);
                LBUG();
        }
        if (lock->l_readers || lock->l_writers) {
                LDLM_DEBUG(lock, "lock still has references");
                ldlm_lock_dump(lock);
                LBUG();
        }

        if (!list_empty(&lock->l_res_link)) {
                ldlm_lock_dump(lock);
                LBUG();
        }

        /* Already destroyed: nothing more to do. */
        if (lock->l_flags & LDLM_FL_DESTROYED) {
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                EXIT;
                return;
        }

        list_del(&lock->l_export_chain);
        lock->l_export = NULL;
        lock->l_flags |= LDLM_FL_DESTROYED;

        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        /* Drop the reference taken at allocation (see ldlm_lock_new). */
        LDLM_LOCK_PUT(lock);
        EXIT;
}
210
211 /*
212    usage: pass in a resource on which you have done get
213           pass in a parent lock on which you have done a get
214           do not put the resource or the parent
215    returns: lock with refcount 1
216 */
static struct ldlm_lock *ldlm_lock_new(struct ldlm_lock *parent,
                                       struct ldlm_resource *resource)
{
        struct ldlm_lock *lock;
        ENTRY;

        if (resource == NULL)
                LBUG();

        lock = kmem_cache_alloc(ldlm_lock_slab, SLAB_KERNEL);
        if (lock == NULL)
                RETURN(NULL);
        CDEBUG(D_MALLOC, "kmalloced 'lock': %d at "
               "%p (tot %d).\n", sizeof(*lock), lock, 1);

        memset(lock, 0, sizeof(*lock));
        /* The cookie that makes handles to this lock verifiable
         * (checked against handle->cookie in ldlm_handle2lock). */
        get_random_bytes(&lock->l_random, sizeof(__u64));

        lock->l_resource = resource;
        /* this refcount matches the one of the resource passed
           in which is not being put away */
        lock->l_refc = 1;
        INIT_LIST_HEAD(&lock->l_children);
        INIT_LIST_HEAD(&lock->l_res_link);
        INIT_LIST_HEAD(&lock->l_export_chain);
        INIT_LIST_HEAD(&lock->l_pending_chain);
        init_waitqueue_head(&lock->l_waitq);

        /* Per-namespace count of live locks (decremented in lock_put). */
        spin_lock(&resource->lr_namespace->ns_counter_lock);
        resource->lr_namespace->ns_locks++;
        spin_unlock(&resource->lr_namespace->ns_counter_lock);

        if (parent != NULL) {
                l_lock(&parent->l_resource->lr_namespace->ns_lock);
                lock->l_parent = parent;
                list_add(&lock->l_childof, &parent->l_children);
                l_unlock(&parent->l_resource->lr_namespace->ns_lock);
        }
        /* this is the extra refcount, to prevent the lock
           evaporating */
        LDLM_LOCK_GET(lock);
        RETURN(lock);
}
260
/* Move 'lock' onto the resource named by 'new_resid' (same namespace,
 * same type).  Each of the lock's l_refc references holds a reference on
 * its resource, so all of them are migrated from the old resource to the
 * new one.  Returns 0. */
int ldlm_lock_change_resource(struct ldlm_lock *lock, __u64 new_resid[3])
{
        struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
        struct ldlm_resource *oldres = lock->l_resource;
        int type, i;
        ENTRY;

        l_lock(&ns->ns_lock);
        if (memcmp(new_resid, lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) == 0) {
                /* Nothing to do */
                l_unlock(&ns->ns_lock);
                RETURN(0);
        }

        type = lock->l_resource->lr_type;
        if (new_resid[0] == 0)
                LBUG();
        /* Final argument 1: presumably "create if missing", matching the
         * 0 used for lookup-only in ldlm_lock_match -- TODO confirm. */
        lock->l_resource = ldlm_resource_get(ns, NULL, new_resid, type, 1);
        if (lock->l_resource == NULL) {
                LBUG();
                RETURN(-ENOMEM);
        }

        /* move references over */
        for (i = 0; i < lock->l_refc; i++) {
                int rc;
                ldlm_resource_getref(lock->l_resource);
                rc = ldlm_resource_put(oldres);
                /* The old resource may only disappear on the last put. */
                if (rc == 1 && i != lock->l_refc - 1)
                        LBUG();
        }
        /* compensate for the initial get above.. */
        ldlm_resource_put(lock->l_resource);

        l_unlock(&ns->ns_lock);
        RETURN(0);
}
299
300 /*
301  *  HANDLES
302  */
303
/* Fill 'lockh' so the lock can be looked up again later: the lock's
 * address plus the random cookie that validates it (both checked in
 * ldlm_handle2lock). */
void ldlm_lock2handle(struct ldlm_lock *lock, struct lustre_handle *lockh)
{
        lockh->addr = (__u64) (unsigned long)lock;
        lockh->cookie = lock->l_random;
}
309
/* Resolve a lustre_handle back to a referenced ldlm_lock, or NULL if the
 * handle is stale: the pointer must still live in the lock slab, the
 * random cookie must match, and the lock must not be destroyed. */
struct ldlm_lock *ldlm_handle2lock(struct lustre_handle *handle)
{
        struct ldlm_lock *lock = NULL, *retval = NULL;
        ENTRY;

        if (!handle || !handle->addr)
                RETURN(NULL);

        lock = (struct ldlm_lock *)(unsigned long)(handle->addr);
        /* Guard against handles pointing outside the lock slab. */
        if (!kmem_cache_validate(ldlm_lock_slab, (void *)lock))
                RETURN(NULL);

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        /* Cookie mismatch: the slab slot was reused for another lock. */
        if (lock->l_random != handle->cookie)
                GOTO(out, NULL);

        if (lock->l_flags & LDLM_FL_DESTROYED)
                GOTO(out, NULL);

        retval = LDLM_LOCK_GET(lock);
        EXIT;
      out:
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        return retval;
}
335
/* Compatibility for plain (and MDS intent) locks is purely a matter of
 * the two requested modes. */
static int ldlm_plain_compat(struct ldlm_lock *a, struct ldlm_lock *b)
{
        return lockmode_compat(a->l_req_mode, b->l_req_mode);
}
340
/* Flatten 'lock' into the callback/wire descriptor 'desc': resource,
 * requested and granted modes, extent, and version. */
void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
{
        ldlm_res2desc(lock->l_resource, &desc->l_resource);
        desc->l_req_mode = lock->l_req_mode;
        desc->l_granted_mode = lock->l_granted_mode;
        memcpy(&desc->l_extent, &lock->l_extent, sizeof(desc->l_extent));
        memcpy(desc->l_version, lock->l_version, sizeof(desc->l_version));
}
349
/* Queue an AST work item on the resource's temporary work list (lr_tmp),
 * to be sent later by ldlm_run_ast_work.  'new' non-NULL means a blocking
 * AST describing 'new' as the conflicting lock; NULL means a completion
 * AST for 'lock' itself. */
static void ldlm_add_ast_work_item(struct ldlm_lock *lock,
                                   struct ldlm_lock *new)
{
        struct ldlm_ast_work *w;
        ENTRY;

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        /* At most one blocking AST per lock: skip if one is pending. */
        if (new && (lock->l_flags & LDLM_FL_AST_SENT))
                GOTO(out, 0);

        OBD_ALLOC(w, sizeof(*w));
        if (!w) {
                LBUG();
                GOTO(out, 0);
        }

        if (new) {
                lock->l_flags |= LDLM_FL_AST_SENT;
                w->w_blocking = 1;
                ldlm_lock2desc(new, &w->w_desc);
        }

        /* The work item holds a reference; dropped in ldlm_run_ast_work. */
        w->w_lock = LDLM_LOCK_GET(lock);
        list_add(&w->w_list, lock->l_resource->lr_tmp);
      out:
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        return;
}
378
379 void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
380 {
381         struct ldlm_lock *lock;
382
383         lock = ldlm_handle2lock(lockh);
384         ldlm_lock_addref_internal(lock, mode);
385         LDLM_LOCK_PUT(lock);
386 }
387
/* only called for local locks */
/* Bump the lock's reader (NL/CR/PR) or writer (all other modes) count
 * and take a matching lock reference (dropped in ldlm_lock_decref). */
void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
{
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (mode == LCK_NL || mode == LCK_CR || mode == LCK_PR)
                lock->l_readers++;
        else
                lock->l_writers++;
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        LDLM_LOCK_GET(lock);
        LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
}
400
/* Args: unlocked lock */
/* Drop a reader/writer reference added by ldlm_lock_addref().  If this
 * was the last reference and a blocking AST arrived in the meantime
 * (LDLM_FL_CBPENDING, client-side only), run the blocking callback. */
void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock = ldlm_handle2lock(lockh);
        ENTRY;

        if (lock == NULL)
                LBUG();

        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (mode == LCK_NL || mode == LCK_CR || mode == LCK_PR)
                lock->l_readers--;
        else
                lock->l_writers--;

        /* If we received a blocked AST and this was the last reference,
         * run the callback. */
        if (!lock->l_readers && !lock->l_writers &&
            (lock->l_flags & LDLM_FL_CBPENDING)) {
                if (!lock->l_resource->lr_namespace->ns_client) {
                        CERROR("LDLM_FL_CBPENDING set on non-local lock!\n");
                        LBUG();
                }

                LDLM_DEBUG(lock, "final decref done on cbpending lock");
                /* Drop ns_lock before calling out to the blocking AST. */
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);

                /* FIXME: need a real 'desc' here */
                lock->l_blocking_ast(lock, NULL, lock->l_data,
                                     lock->l_data_len, LDLM_CB_BLOCKING);
        } else
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        LDLM_LOCK_PUT(lock);    /* matches the ldlm_lock_get in addref */
        LDLM_LOCK_PUT(lock);    /* matches the handle2lock above */

        EXIT;
}
440
441 static int ldlm_lock_compat_list(struct ldlm_lock *lock, int send_cbs,
442                                  struct list_head *queue)
443 {
444         struct list_head *tmp, *pos;
445         int rc = 1;
446
447         list_for_each_safe(tmp, pos, queue) {
448                 struct ldlm_lock *child;
449                 ldlm_res_compat compat;
450
451                 child = list_entry(tmp, struct ldlm_lock, l_res_link);
452                 if (lock == child)
453                         continue;
454
455                 compat = ldlm_res_compat_table[child->l_resource->lr_type];
456                 if (compat && compat(child, lock)) {
457                         CDEBUG(D_OTHER, "compat function succeded, next.\n");
458                         continue;
459                 }
460                 if (lockmode_compat(child->l_granted_mode, lock->l_req_mode)) {
461                         CDEBUG(D_OTHER, "lock modes are compatible, next.\n");
462                         continue;
463                 }
464
465                 rc = 0;
466
467                 if (send_cbs && child->l_blocking_ast != NULL) {
468                         CDEBUG(D_OTHER, "lock %p incompatible; sending "
469                                "blocking AST.\n", child);
470                         ldlm_add_ast_work_item(child, lock);
471                 }
472         }
473
474         return rc;
475 }
476
/* Check 'lock' against the granted queue of its resource and, if still
 * compatible, the converting queue too.  Returns 1 if compatible with
 * everything, else 0; blocking ASTs are queued when 'send_cbs' is set. */
static int ldlm_lock_compat(struct ldlm_lock *lock, int send_cbs)
{
        int rc;
        ENTRY;

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        rc = ldlm_lock_compat_list(lock, send_cbs,
                                   &lock->l_resource->lr_granted);
        /* FIXME: should we be sending ASTs to converting? */
        if (rc)
                rc = ldlm_lock_compat_list
                        (lock, send_cbs, &lock->l_resource->lr_converting);

        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        RETURN(rc);
}
493
/* NOTE: called by
   - ldlm_handle_enqueue - resource
*/
/* Grant 'lock': move it onto the resource's granted queue, promote its
 * granted mode to the requested mode, and queue a completion AST work
 * item (sent later by ldlm_run_ast_work) if a handler is set. */
void ldlm_grant_lock(struct ldlm_lock *lock)
{
        struct ldlm_resource *res = lock->l_resource;
        ENTRY;

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        ldlm_resource_add_lock(res, &res->lr_granted, lock);
        lock->l_granted_mode = lock->l_req_mode;

        /* Track the most restrictive granted mode on the resource. */
        if (lock->l_granted_mode < res->lr_most_restr)
                res->lr_most_restr = lock->l_granted_mode;

        if (lock->l_completion_ast) {
                ldlm_add_ast_work_item(lock, NULL);
        }
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
        EXIT;
}
515
516 /* returns a referenced lock or NULL */
static struct ldlm_lock *search_queue(struct list_head *queue, ldlm_mode_t mode,
                                      struct ldlm_extent *extent)
{
        struct ldlm_lock *lock;
        struct list_head *tmp;

        list_for_each(tmp, queue) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                /* Skip locks that are going away or already have a
                 * blocking AST pending. */
                if (lock->l_flags & (LDLM_FL_CBPENDING | LDLM_FL_DESTROYED))
                        continue;

                /* lock_convert() takes the resource lock, so we're sure that
                 * req_mode, lr_type, and l_cookie won't change beneath us */
                if (lock->l_req_mode != mode)
                        continue;

                /* For extent locks, the existing lock must fully cover
                 * the requested range. */
                if (lock->l_resource->lr_type == LDLM_EXTENT &&
                    (lock->l_extent.start > extent->start ||
                     lock->l_extent.end < extent->end))
                        continue;

                ldlm_lock_addref_internal(lock, mode);
                return lock;
        }

        return NULL;
}
545
546 /* Must be called with no resource or lock locks held.
547  *
548  * Returns 1 if it finds an already-existing lock that is compatible; in this
549  * case, lockh is filled in with a addref()ed lock
550 */
int ldlm_lock_match(struct ldlm_namespace *ns, __u64 * res_id, __u32 type,
                    void *cookie, int cookielen, ldlm_mode_t mode,
                    struct lustre_handle *lockh)
{
        struct ldlm_resource *res;
        struct ldlm_lock *lock;
        int rc = 0;
        ENTRY;

        /* Final arg 0: lookup only, don't create the resource. */
        res = ldlm_resource_get(ns, NULL, res_id, type, 0);
        if (res == NULL)
                RETURN(0);

        ns = res->lr_namespace;
        l_lock(&ns->ns_lock);

        /* A match on any queue counts; search_queue() addrefs the lock. */
        if ((lock = search_queue(&res->lr_granted, mode, cookie)))
                GOTO(out, rc = 1);
        if ((lock = search_queue(&res->lr_converting, mode, cookie)))
                GOTO(out, rc = 1);
        if ((lock = search_queue(&res->lr_waiting, mode, cookie)))
                GOTO(out, rc = 1);

        EXIT;
      out:
        ldlm_resource_put(res);
        l_unlock(&ns->ns_lock);

        if (lock) {
                ldlm_lock2handle(lock, lockh);
                /* Tell the caller's completion handler without reprocessing. */
                if (lock->l_completion_ast)
                        lock->l_completion_ast(lock, LDLM_FL_WAIT_NOREPROC);
        }
        if (rc)
                LDLM_DEBUG(lock, "matched");
        else
                LDLM_DEBUG_NOLOCK("not matched");
        return rc;
}
590
591 /* Returns a referenced lock */
592 struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
593                                    struct lustre_handle *parent_lock_handle,
594                                    __u64 * res_id, __u32 type,
595                                    ldlm_mode_t mode, void *data, __u32 data_len)
596 {
597         struct ldlm_resource *res, *parent_res = NULL;
598         struct ldlm_lock *lock, *parent_lock;
599
600         parent_lock = ldlm_handle2lock(parent_lock_handle);
601         if (parent_lock)
602                 parent_res = parent_lock->l_resource;
603
604         res = ldlm_resource_get(ns, parent_res, res_id, type, 1);
605         if (res == NULL)
606                 RETURN(NULL);
607
608         lock = ldlm_lock_new(parent_lock, res);
609         if (lock == NULL) {
610                 ldlm_resource_put(res);
611                 RETURN(NULL);
612         }
613
614         lock->l_req_mode = mode;
615         lock->l_data = data;
616         lock->l_data_len = data_len;
617
618         return lock;
619 }
620
/* Must be called with lock->l_lock and lock->l_resource->lr_lock not held */
/* Enqueue 'lock' on its resource: run the server-side policy (if any),
 * then either grant the lock or park it on the converting/waiting queue,
 * reporting blocked status through *flags.  'completion' and 'blocking'
 * are the caller's AST handlers.  Returns ELDLM_OK, or the policy's
 * status when it aborts the lock (the lock is destroyed in that case). */
ldlm_error_t ldlm_lock_enqueue(struct ldlm_lock * lock,
                               void *cookie, int cookie_len,
                               int *flags,
                               ldlm_completion_callback completion,
                               ldlm_blocking_callback blocking)
{
        struct ldlm_resource *res;
        int local;
        ldlm_res_policy policy;
        ENTRY;

        res = lock->l_resource;
        lock->l_blocking_ast = blocking;

        /* For extent locks the cookie carries the requested range. */
        if (res->lr_type == LDLM_EXTENT)
                memcpy(&lock->l_extent, cookie, sizeof(lock->l_extent));

        /* policies are not executed on the client */
        local = res->lr_namespace->ns_client;
        if (!local && (policy = ldlm_res_policy_table[res->lr_type])) {
                int rc;
                rc = policy(lock, cookie, lock->l_req_mode, NULL);

                if (rc == ELDLM_LOCK_CHANGED) {
                        /* The policy moved the lock to another resource. */
                        res = lock->l_resource;
                        *flags |= LDLM_FL_LOCK_CHANGED;
                } else if (rc == ELDLM_LOCK_ABORTED) {
                        ldlm_lock_destroy(lock);
                        RETURN(rc);
                }
        }

        lock->l_cookie = cookie;
        lock->l_cookie_len = cookie_len;

        l_lock(&res->lr_namespace->ns_lock);
        if (local && lock->l_req_mode == lock->l_granted_mode) {
                /* The server returned a blocked lock, but it was granted before
                 * we got a chance to actually enqueue it.  We don't need to do
                 * anything else. */
                *flags &= ~(LDLM_FL_BLOCK_GRANTED |
                          LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
                GOTO(out, ELDLM_OK);
        }

        /* This distinction between local lock trees is very important; a client
         * namespace only has information about locks taken by that client, and
         * thus doesn't have enough information to decide for itself if it can
         * be granted (below).  In this case, we do exactly what the server
         * tells us to do, as dictated by the 'flags' */
        ldlm_resource_unlink_lock(lock);
        if (local) {
                if (*flags & LDLM_FL_BLOCK_CONV)
                        ldlm_resource_add_lock(res, res->lr_converting.prev,
                                               lock);
                else if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED))
                        ldlm_resource_add_lock(res, res->lr_waiting.prev, lock);
                else
                        ldlm_grant_lock(lock);
                GOTO(out, ELDLM_OK);
        }

        /* FIXME: We may want to optimize by checking lr_most_restr */
        if (!list_empty(&res->lr_converting)) {
                ldlm_resource_add_lock(res, res->lr_waiting.prev, lock);
                *flags |= LDLM_FL_BLOCK_CONV;
                GOTO(out, ELDLM_OK);
        }
        if (!list_empty(&res->lr_waiting)) {
                ldlm_resource_add_lock(res, res->lr_waiting.prev, lock);
                *flags |= LDLM_FL_BLOCK_WAIT;
                GOTO(out, ELDLM_OK);
        }
        /* Arg 0: compatibility check only, no blocking ASTs sent. */
        if (!ldlm_lock_compat(lock, 0)) {
                ldlm_resource_add_lock(res, res->lr_waiting.prev, lock);
                *flags |= LDLM_FL_BLOCK_GRANTED;
                GOTO(out, ELDLM_OK);
        }

        ldlm_grant_lock(lock);
        EXIT;
      out:
        l_unlock(&res->lr_namespace->ns_lock);
        /* Don't set 'completion_ast' until here so that if the lock is granted
         * immediately we don't do an unnecessary completion call. */
        lock->l_completion_ast = completion;
        return ELDLM_OK;
}
710
/* Must be called with namespace taken: queue is waiting or converting. */
/* Grant queued locks in order until one is still blocked, so grants stay
 * FIFO.  Returns 1 if a lock remains blocked, 0 if the queue drained. */
static int ldlm_reprocess_queue(struct ldlm_resource *res,
                                struct list_head *queue)
{
        struct list_head *tmp, *pos;
        ENTRY;

        list_for_each_safe(tmp, pos, queue) {
                struct ldlm_lock *pending;
                pending = list_entry(tmp, struct ldlm_lock, l_res_link);

                CDEBUG(D_INFO, "Reprocessing lock %p\n", pending);

                /* Still conflicting: queue blocking ASTs (arg 1) and stop. */
                if (!ldlm_lock_compat(pending, 1))
                        RETURN(1);

                list_del_init(&pending->l_res_link);
                ldlm_grant_lock(pending);
        }

        RETURN(0);
}
733
/* Send every AST queued on 'rpc_list': blocking ASTs carry the saved
 * lock descriptor, completion ASTs just the flags.  Each work item's
 * lock reference (taken in ldlm_add_ast_work_item) is dropped and the
 * item freed. */
void ldlm_run_ast_work(struct list_head *rpc_list)
{
        struct list_head *tmp, *pos;
        int rc;
        ENTRY;

        list_for_each_safe(tmp, pos, rpc_list) {
                struct ldlm_ast_work *w =
                        list_entry(tmp, struct ldlm_ast_work, w_list);

                if (w->w_blocking)
                        rc = w->w_lock->l_blocking_ast
                                (w->w_lock, &w->w_desc, w->w_data,
                                 w->w_datalen, LDLM_CB_BLOCKING);
                else
                        rc = w->w_lock->l_completion_ast(w->w_lock, w->w_flags);
                if (rc)
                        CERROR("Failed AST - should clean & disconnect "
                               "client\n");
                LDLM_LOCK_PUT(w->w_lock);
                list_del(&w->w_list);
                OBD_FREE(w, sizeof(*w));
        }
        EXIT;
}
759
/* Must be called with resource->lr_lock not taken. */
/* Re-evaluate the converting and waiting queues of 'res' after a state
 * change, granting whatever is now compatible.  Server-side only. */
void ldlm_reprocess_all(struct ldlm_resource *res)
{
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
        ENTRY;

        /* Local lock trees don't get reprocessed. */
        if (res->lr_namespace->ns_client) {
                EXIT;
                return;
        }

        /* Collect AST work items under the namespace lock via lr_tmp... */
        l_lock(&res->lr_namespace->ns_lock);
        res->lr_tmp = &rpc_list;

        /* Converting locks have priority; only touch the waiting queue
         * once the converting queue is empty. */
        ldlm_reprocess_queue(res, &res->lr_converting);
        if (list_empty(&res->lr_converting))
                ldlm_reprocess_queue(res, &res->lr_waiting);

        res->lr_tmp = NULL;
        l_unlock(&res->lr_namespace->ns_lock);

        /* ...then send the ASTs with the namespace lock dropped. */
        ldlm_run_ast_work(&rpc_list);
        EXIT;
}
785
/* Unlink 'lock' from its resource queue and destroy it.  Outstanding
 * reader/writer references are logged but do not stop the cancel. */
void ldlm_lock_cancel(struct ldlm_lock *lock)
{
        struct ldlm_resource *res;
        struct ldlm_namespace *ns;
        ENTRY;

        res = lock->l_resource;
        ns = res->lr_namespace;

        l_lock(&ns->ns_lock);
        if (lock->l_readers || lock->l_writers)
                CDEBUG(D_INFO, "lock still has references (%d readers, %d "
                       "writers)\n", lock->l_readers, lock->l_writers);

        ldlm_resource_unlink_lock(lock);
        ldlm_lock_destroy(lock);
        l_unlock(&ns->ns_lock);
        EXIT;
}
805
/* Convert 'lock' to 'new_mode'.  On a client namespace the lock is
 * either granted immediately (running its completion AST) or parked on
 * the converting queue per *flags; on a server namespace it is always
 * queued for conversion and LDLM_FL_BLOCK_CONV returned in *flags.
 * Returns the lock's resource. */
struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                                        int *flags)
{
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
        struct ldlm_resource *res;
        struct ldlm_namespace *ns;
        int granted = 0;
        ENTRY;

        res = lock->l_resource;
        ns = res->lr_namespace;

        l_lock(&ns->ns_lock);

        lock->l_req_mode = new_mode;
        ldlm_resource_unlink_lock(lock);

        /* If this is a local resource, put it on the appropriate list. */
        if (res->lr_namespace->ns_client) {
                if (*flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_GRANTED))
                        ldlm_resource_add_lock(res, res->lr_converting.prev,
                                               lock);
                else {
                        /* Grant, capturing any AST work in rpc_list. */
                        res->lr_tmp = &rpc_list;
                        ldlm_grant_lock(lock);
                        res->lr_tmp = NULL;
                        granted = 1;
                        /* FIXME: completion handling not with ns_lock held ! */
                        if (lock->l_completion_ast)
                                lock->l_completion_ast(lock, 0);
                }
        } else {
                /* FIXME: We should try the conversion right away and possibly
                 * return success without the need for an extra AST */
                ldlm_resource_add_lock(res, res->lr_converting.prev, lock);
                *flags |= LDLM_FL_BLOCK_CONV;
        }

        l_unlock(&ns->ns_lock);

        if (granted)
                ldlm_run_ast_work(&rpc_list);
        RETURN(res);
}
850
851 void ldlm_lock_dump(struct ldlm_lock *lock)
852 {
853         char ver[128];
854
855         if (!(portal_debug & D_OTHER))
856                 return;
857
858         if (RES_VERSION_SIZE != 4)
859                 LBUG();
860
861         if (!lock) {
862                 CDEBUG(D_OTHER, "  NULL LDLM lock\n");
863                 return;
864         }
865
866         snprintf(ver, sizeof(ver), "%x %x %x %x",
867                  lock->l_version[0], lock->l_version[1],
868                  lock->l_version[2], lock->l_version[3]);
869
870         CDEBUG(D_OTHER, "  -- Lock dump: %p (%s)\n", lock, ver);
871         if (lock->l_export && lock->l_export->exp_connection)
872                 CDEBUG(D_OTHER, "  Node: NID %x (rhandle: "LPX64")\n",
873                        lock->l_export->exp_connection->c_peer.peer_nid,
874                        lock->l_remote_handle.addr);
875         else
876                 CDEBUG(D_OTHER, "  Node: local\n");
877         CDEBUG(D_OTHER, "  Parent: %p\n", lock->l_parent);
878         CDEBUG(D_OTHER, "  Resource: %p ("LPD64")\n", lock->l_resource,
879                lock->l_resource->lr_name[0]);
880         CDEBUG(D_OTHER, "  Requested mode: %d, granted mode: %d\n",
881                (int)lock->l_req_mode, (int)lock->l_granted_mode);
882         CDEBUG(D_OTHER, "  Readers: %u ; Writers; %u\n",
883                lock->l_readers, lock->l_writers);
884         if (lock->l_resource->lr_type == LDLM_EXTENT)
885                 CDEBUG(D_OTHER, "  Extent: %Lu -> %Lu\n",
886                        (unsigned long long)lock->l_extent.start,
887                        (unsigned long long)lock->l_extent.end);
888 }