/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 *
 * Copyright (c) 2003 Hewlett-Packard Development Company LP.
 * Developed under the sponsorship of the US Government under
 * Subcontract No. B514193
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
#include <lustre_dlm.h>
#include <obd_support.h>
#include <obd_class.h>
#include <lustre_lib.h>
#include <libcfs/list.h>
#else
#include <liblustre.h>
#include <obd_class.h>
#endif

#include "ldlm_internal.h"

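/*
 * Flock locks are kept off the namespace LRU list, so the ldlm_lock::l_lru
 * list head can be reused here to link waiting flock locks onto the
 * deadlock-detection wait queue.
 */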
#define l_flock_waitq   l_lru

/**
 * Wait queue for Posix lock deadlock detection; waiting locks are linked
 * through ldlm_lock::l_flock_waitq.
 */
static CFS_LIST_HEAD(ldlm_flock_waitq);
/**
 * Lock protecting access to ldlm_flock_waitq.
 */
spinlock_t ldlm_flock_waitq_lock = SPIN_LOCK_UNLOCKED;

int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                            void *data, int flag);

/**
 * list_for_remaining_safe - iterate over the remaining entries in a list
 *              and safeguard against removal of a list entry.
 * @pos:        the &struct list_head to use as a loop counter. pos MUST
 *              have been initialized prior to using it in this macro.
 * @n:          another &struct list_head to use as temporary storage
 * @head:       the head for your list.
 */
#define list_for_remaining_safe(pos, n, head) \
        for (n = pos->next; pos != (head); pos = n, n = pos->next)

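/*
 * Two flock locks have the same owner if they were taken by the same process
 * (same pid) on the same client (same export).
 */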
static inline int
ldlm_same_flock_owner(struct ldlm_lock *lock, struct ldlm_lock *new)
{
        return((new->l_policy_data.l_flock.pid ==
                lock->l_policy_data.l_flock.pid) &&
               (new->l_export == lock->l_export));
}

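/*
 * Flock ranges are inclusive [start, end]; two locks overlap unless one
 * range ends entirely before the other begins.
 */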
static inline int
ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new)
{
        return((new->l_policy_data.l_flock.start <=
                lock->l_policy_data.l_flock.end) &&
               (new->l_policy_data.l_flock.end >=
                lock->l_policy_data.l_flock.start));
}

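/*
 * Remove the lock from its resource list and destroy it. When called with
 * LDLM_FL_WAIT_NOREPROC on the client, mark the lock local-only so that no
 * CANCEL is sent to the server, and drop the reference held for this mode.
 */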
static inline void
ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, int flags)
{
        ENTRY;

        LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%x)",
                   mode, flags);

        /* Safe to not lock here, since it should be empty anyway */
        LASSERT(list_empty(&lock->l_flock_waitq));

        list_del_init(&lock->l_res_link);
        if (flags == LDLM_FL_WAIT_NOREPROC) {
                /* client side - set a flag to prevent sending a CANCEL */
                lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;

                /* We are under lock_res_and_lock() here, so we must use the
                 * nolock variant of ldlm_lock_decref_internal(). */
                ldlm_lock_decref_internal_nolock(lock, mode);
        }

        ldlm_lock_destroy_nolock(lock);
        EXIT;
}

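/*
 * Check whether granting req would create a deadlock.
 *
 * Starting from the owner of blocking_lock, follow the chain of owners that
 * are themselves blocked, via the global wait queue. If the chain leads back
 * to the owner of req a cycle exists: return 1, otherwise return 0.
 */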
static int
ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *blocking_lock)
{
        struct obd_export *req_export = req->l_export;
        struct obd_export *blocking_export = blocking_lock->l_export;
        pid_t req_pid = req->l_policy_data.l_flock.pid;
        pid_t blocking_pid = blocking_lock->l_policy_data.l_flock.pid;
        struct ldlm_lock *lock;

        spin_lock(&ldlm_flock_waitq_lock);
restart:
        list_for_each_entry(lock, &ldlm_flock_waitq, l_flock_waitq) {
                if ((lock->l_policy_data.l_flock.pid != blocking_pid) ||
                    (lock->l_export != blocking_export))
                        continue;

                blocking_pid = lock->l_policy_data.l_flock.blocking_pid;
                blocking_export = (struct obd_export *)(long)
                        lock->l_policy_data.l_flock.blocking_export;
                if (blocking_pid == req_pid && blocking_export == req_export) {
                        spin_unlock(&ldlm_flock_waitq_lock);
                        return 1;
                }

                goto restart;
        }
        spin_unlock(&ldlm_flock_waitq_lock);

        return 0;
}

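/*
 * Process a flock request against the granted locks of its resource.
 *
 * A conflicting lock from another owner makes the request fail (for
 * LDLM_FL_BLOCK_NOWAIT, LDLM_FL_TEST_LOCK or a detected deadlock) or wait on
 * the resource's lr_waiting list. Locks already held by the same owner are
 * merged, trimmed or split to give Posix semantics before the request is
 * granted. An unlock arrives as a request with mode LCK_NL.
 *
 * first_enq is nonzero for the initial enqueue and zero when called back
 * from ldlm_reprocess_queue(); *flags is LDLM_FL_WAIT_NOREPROC when the
 * client merges an already granted lock and the wait queue must not be
 * reprocessed.
 */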
int
ldlm_process_flock_lock(struct ldlm_lock *req, int *flags, int first_enq,
                        ldlm_error_t *err, struct list_head *work_list)
{
        struct ldlm_resource *res = req->l_resource;
        struct ldlm_namespace *ns = res->lr_namespace;
        struct list_head *tmp;
        struct list_head *ownlocks = NULL;
        struct ldlm_lock *lock = NULL;
        struct ldlm_lock *new = req;
        struct ldlm_lock *new2 = NULL;
        ldlm_mode_t mode = req->l_req_mode;
        int local = ns_is_client(ns);
        int added = (mode == LCK_NL);
        int overlaps = 0;
        int splitted = 0;
        const struct ldlm_callback_suite null_cbs = { NULL };
        ENTRY;

        CDEBUG(D_DLMTRACE, "flags %#x pid %u mode %u start "LPU64" end "LPU64
               "\n", *flags, new->l_policy_data.l_flock.pid, mode,
               req->l_policy_data.l_flock.start,
               req->l_policy_data.l_flock.end);

        *err = ELDLM_OK;

        if (local) {
                /* No blocking ASTs are sent to the clients for
                 * Posix file & record locks */
                req->l_blocking_ast = NULL;
        } else {
                /* Called on the server for lock cancels. */
                req->l_blocking_ast = ldlm_flock_blocking_ast;
        }

reprocess:
        if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
                /* This loop determines where this process's locks start
                 * in the resource's lr_granted list. */
                list_for_each(tmp, &res->lr_granted) {
                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                        if (ldlm_same_flock_owner(lock, req)) {
                                ownlocks = tmp;
                                break;
                        }
                }
        } else {
                lockmode_verify(mode);

                /* This loop determines if there are existing locks
                 * that conflict with the new lock request. */
                list_for_each(tmp, &res->lr_granted) {
                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                        if (ldlm_same_flock_owner(lock, req)) {
                                if (!ownlocks)
                                        ownlocks = tmp;
                                continue;
                        }

                        /* locks are compatible, overlap doesn't matter */
                        if (lockmode_compat(lock->l_granted_mode, mode))
                                continue;

                        if (!ldlm_flocks_overlap(lock, req))
                                continue;

                        if (!first_enq)
                                RETURN(LDLM_ITER_CONTINUE);

                        if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                ldlm_flock_destroy(req, mode, *flags);
                                *err = -EAGAIN;
                                RETURN(LDLM_ITER_STOP);
                        }

                        if (*flags & LDLM_FL_TEST_LOCK) {
                                ldlm_flock_destroy(req, mode, *flags);
                                req->l_req_mode = lock->l_granted_mode;
                                req->l_policy_data.l_flock.pid =
                                        lock->l_policy_data.l_flock.pid;
                                req->l_policy_data.l_flock.start =
                                        lock->l_policy_data.l_flock.start;
                                req->l_policy_data.l_flock.end =
                                        lock->l_policy_data.l_flock.end;
                                *flags |= LDLM_FL_LOCK_CHANGED;
                                RETURN(LDLM_ITER_STOP);
                        }

                        if (ldlm_flock_deadlock(req, lock)) {
                                ldlm_flock_destroy(req, mode, *flags);
                                *err = -EDEADLK;
                                RETURN(LDLM_ITER_STOP);
                        }

                        req->l_policy_data.l_flock.blocking_pid =
                                lock->l_policy_data.l_flock.pid;
                        req->l_policy_data.l_flock.blocking_export =
                                (long)(void *)lock->l_export;

                        LASSERT(list_empty(&req->l_flock_waitq));
                        spin_lock(&ldlm_flock_waitq_lock);
                        list_add_tail(&req->l_flock_waitq, &ldlm_flock_waitq);
                        spin_unlock(&ldlm_flock_waitq_lock);

                        ldlm_resource_add_lock(res, &res->lr_waiting, req);
                        *flags |= LDLM_FL_BLOCK_GRANTED;
                        RETURN(LDLM_ITER_STOP);
                }
        }

        if (*flags & LDLM_FL_TEST_LOCK) {
                ldlm_flock_destroy(req, mode, *flags);
                req->l_req_mode = LCK_NL;
                *flags |= LDLM_FL_LOCK_CHANGED;
                RETURN(LDLM_ITER_STOP);
        }

        /* In case we had slept on this lock request take it off of the
         * deadlock detection waitq. */
        spin_lock(&ldlm_flock_waitq_lock);
        list_del_init(&req->l_flock_waitq);
        spin_unlock(&ldlm_flock_waitq_lock);

        /* Scan the locks owned by this process that overlap this request.
         * We may have to merge or split existing locks. */

        if (!ownlocks)
                ownlocks = &res->lr_granted;

        list_for_remaining_safe(ownlocks, tmp, &res->lr_granted) {
                lock = list_entry(ownlocks, struct ldlm_lock, l_res_link);

                if (!ldlm_same_flock_owner(lock, new))
                        break;

                if (lock->l_granted_mode == mode) {
                        /* If the modes are the same then we need to process
                         * locks that overlap OR adjoin the new lock. The extra
                         * logic condition is necessary to deal with arithmetic
                         * overflow and underflow. */
                        if ((new->l_policy_data.l_flock.start >
                             (lock->l_policy_data.l_flock.end + 1))
                            && (lock->l_policy_data.l_flock.end !=
                                OBD_OBJECT_EOF))
                                continue;

                        if ((new->l_policy_data.l_flock.end <
                             (lock->l_policy_data.l_flock.start - 1))
                            && (lock->l_policy_data.l_flock.start != 0))
                                break;

                        if (new->l_policy_data.l_flock.start <
                            lock->l_policy_data.l_flock.start) {
                                lock->l_policy_data.l_flock.start =
                                        new->l_policy_data.l_flock.start;
                        } else {
                                new->l_policy_data.l_flock.start =
                                        lock->l_policy_data.l_flock.start;
                        }

                        if (new->l_policy_data.l_flock.end >
                            lock->l_policy_data.l_flock.end) {
                                lock->l_policy_data.l_flock.end =
                                        new->l_policy_data.l_flock.end;
                        } else {
                                new->l_policy_data.l_flock.end =
                                        lock->l_policy_data.l_flock.end;
                        }

                        if (added) {
                                ldlm_flock_destroy(lock, mode, *flags);
                        } else {
                                new = lock;
                                added = 1;
                        }
                        continue;
                }

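                /* The granted lock has a different mode: skip it if the
                 * ranges do not overlap, otherwise trim, remove or split
                 * it around the new lock below. */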
                if (new->l_policy_data.l_flock.start >
                    lock->l_policy_data.l_flock.end)
                        continue;

                if (new->l_policy_data.l_flock.end <
                    lock->l_policy_data.l_flock.start)
                        break;

                ++overlaps;

                if (new->l_policy_data.l_flock.start <=
                    lock->l_policy_data.l_flock.start) {
                        if (new->l_policy_data.l_flock.end <
                            lock->l_policy_data.l_flock.end) {
                                lock->l_policy_data.l_flock.start =
                                        new->l_policy_data.l_flock.end + 1;
                                break;
                        }
                        ldlm_flock_destroy(lock, lock->l_req_mode, *flags);
                        continue;
                }
                if (new->l_policy_data.l_flock.end >=
                    lock->l_policy_data.l_flock.end) {
                        lock->l_policy_data.l_flock.end =
                                new->l_policy_data.l_flock.start - 1;
                        continue;
                }

                /* split the existing lock into two locks */

                /* if this is an F_UNLCK operation then we could avoid
                 * allocating a new lock and use the req lock passed in
                 * with the request but this would complicate the reply
                 * processing since updates to req get reflected in the
                 * reply. The client side replays the lock request so
                 * it must see the original lock data in the reply. */

                /* XXX - if ldlm_lock_new() can sleep we should
                 * release the ns_lock, allocate the new lock,
                 * and restart processing this lock. */
                if (!new2) {
                        unlock_res_and_lock(req);
                        new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK,
                                        lock->l_granted_mode, &null_cbs,
                                        NULL, 0);
                        lock_res_and_lock(req);
                        if (!new2) {
                                ldlm_flock_destroy(req, lock->l_granted_mode, *flags);
                                *err = -ENOLCK;
                                RETURN(LDLM_ITER_STOP);
                        }
                        goto reprocess;
                }

                splitted = 1;

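                /* The existing lock strictly contains the new range: new2
                 * keeps [lock start, new start - 1] and the original lock
                 * is trimmed to [new end + 1, lock end]. */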
                new2->l_granted_mode = lock->l_granted_mode;
                new2->l_policy_data.l_flock.pid =
                        new->l_policy_data.l_flock.pid;
                new2->l_policy_data.l_flock.start =
                        lock->l_policy_data.l_flock.start;
                new2->l_policy_data.l_flock.end =
                        new->l_policy_data.l_flock.start - 1;
                lock->l_policy_data.l_flock.start =
                        new->l_policy_data.l_flock.end + 1;
                new2->l_conn_export = lock->l_conn_export;
                if (lock->l_export != NULL) {
                        new2->l_export = class_export_get(lock->l_export);
                        if (new2->l_export->exp_lock_hash &&
                            hlist_unhashed(&new2->l_exp_hash))
                                lustre_hash_add(new2->l_export->exp_lock_hash,
                                                &new2->l_remote_handle,
                                                &new2->l_exp_hash);
                }
                if (*flags == LDLM_FL_WAIT_NOREPROC) {
                        ldlm_lock_addref_internal_nolock(new2, lock->l_granted_mode);
                }

                /* insert new2 at lock */
                ldlm_resource_add_lock(res, ownlocks, new2);
                LDLM_LOCK_PUT(new2);
                break;
        }

        /* if new2 is created but never used, destroy it */
        if (splitted == 0 && new2 != NULL)
                ldlm_lock_destroy_nolock(new2);

        /* At this point we're granting the lock request. */
        req->l_granted_mode = req->l_req_mode;

        /* Add req to the granted queue before calling ldlm_reprocess_all(). */
        if (!added) {
                list_del_init(&req->l_res_link);
                /* insert new lock before ownlocks in list. */
                ldlm_resource_add_lock(res, ownlocks, req);
        }

        if (*flags != LDLM_FL_WAIT_NOREPROC) {
                if (first_enq) {
                        /* If this is an unlock, reprocess the waitq and
                         * send completion ASTs for locks that can now be
                         * granted. The only problem with doing this
                         * reprocessing here is that the completion ASTs for
                         * newly granted locks will be sent before the unlock
                         * completion is sent. It shouldn't be an issue. Also
                         * note that ldlm_process_flock_lock() will recurse,
                         * but only once because first_enq will be false from
                         * ldlm_reprocess_queue. */
                        if ((mode == LCK_NL) && overlaps) {
                                CFS_LIST_HEAD(rpc_list);
                                int rc;
restart:
                                ldlm_reprocess_queue(res, &res->lr_waiting,
                                                     &rpc_list);

                                unlock_res_and_lock(req);
                                rc = ldlm_run_ast_work(&rpc_list,
                                                       LDLM_WORK_CP_AST);
                                lock_res_and_lock(req);
                                if (rc == -ERESTART)
                                        GOTO(restart, -ERESTART);
                        }
                } else {
                        LASSERT(req->l_completion_ast);
                        ldlm_add_ast_work_item(req, NULL, work_list);
                }
        }

        /* In case we're reprocessing the requested lock we can't destroy
         * it until after calling ldlm_add_ast_work_item() above so that it
         * can bump the reference count on req. Otherwise req could be freed
         * before the completion AST can be sent. */
        if (added)
                ldlm_flock_destroy(req, mode, *flags);

        ldlm_resource_dump(D_INFO, res);
        RETURN(LDLM_ITER_CONTINUE);
}

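/*
 * Context for a blocked flock enqueue, handed to
 * ldlm_flock_interrupted_wait() if the sleeping process is interrupted.
 */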
struct ldlm_flock_wait_data {
        struct ldlm_lock *fwd_lock;
        int               fwd_generation;
};

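/*
 * Called when a process sleeping in ldlm_flock_completion_ast() is
 * interrupted: take the lock off the deadlock-detection wait queue, drop
 * the enqueue reference and cancel the lock.
 */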
static void
ldlm_flock_interrupted_wait(void *data)
{
        struct ldlm_lock *lock;
        struct lustre_handle lockh;
        int rc;
        ENTRY;

        lock = ((struct ldlm_flock_wait_data *)data)->fwd_lock;

        /* take lock off the deadlock detection waitq. */
        spin_lock(&ldlm_flock_waitq_lock);
        list_del_init(&lock->l_flock_waitq);
        spin_unlock(&ldlm_flock_waitq_lock);

        /* client side - set flag to prevent lock from being put on lru list */
        lock->l_flags |= LDLM_FL_CBPENDING;

        ldlm_lock_decref_internal(lock, lock->l_req_mode);
        ldlm_lock2handle(lock, &lockh);
        rc = ldlm_cli_cancel(&lockh);
        if (rc != ELDLM_OK)
                CERROR("ldlm_cli_cancel: %d\n", rc);

        EXIT;
}

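/*
 * Completion callback for flock enqueues on the client.
 *
 * If the server reported the lock as blocked, sleep until it is granted
 * (or destroyed). Once granted, either fill in the user's flock structure
 * for an F_GETLK test request, or reprocess the lock so it is merged or
 * split against the other locks already owned by this process.
 */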
int
ldlm_flock_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        struct ldlm_namespace *ns;
        cfs_flock_t *getlk = lock->l_ast_data;
        struct ldlm_flock_wait_data fwd;
        struct obd_device *obd;
        struct obd_import *imp = NULL;
        ldlm_error_t err;
        int rc = 0;
        struct l_wait_info lwi;
        ENTRY;

        CDEBUG(D_DLMTRACE, "flags: 0x%x data: %p getlk: %p\n",
               flags, data, getlk);

        /* Import invalidation. We need to actually release the lock
         * references being held, so that it can go away. No point in
         * holding the lock even if app still believes it has it, since
         * server already dropped it anyway. Only for granted locks too. */
        lock_res_and_lock(lock);
        if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
            (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
                unlock_res_and_lock(lock);
                if (lock->l_req_mode == lock->l_granted_mode &&
                    lock->l_granted_mode != LCK_NL)
                        ldlm_lock_decref_internal(lock, lock->l_req_mode);
                RETURN(0);
        }
        unlock_res_and_lock(lock);

        LASSERT(flags != LDLM_FL_WAIT_NOREPROC);

        if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
                       LDLM_FL_BLOCK_CONV)))
                goto granted;

        LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
                   "sleeping");

        fwd.fwd_lock = lock;
        obd = class_exp2obd(lock->l_conn_export);

        /* if this is a local lock, then there is no import */
        if (obd != NULL)
                imp = obd->u.cli.cl_import;

        if (imp != NULL) {
                spin_lock(&imp->imp_lock);
                fwd.fwd_generation = imp->imp_generation;
                spin_unlock(&imp->imp_lock);
        }

        lwi = LWI_TIMEOUT_INTR(0, NULL, ldlm_flock_interrupted_wait, &fwd);

        /* Go to sleep until the lock is granted. */
        rc = l_wait_event(lock->l_waitq,
                          ((lock->l_req_mode == lock->l_granted_mode) ||
                           lock->l_destroyed), &lwi);

        LDLM_DEBUG(lock, "client-side enqueue waking up: rc = %d", rc);
        RETURN(rc);

granted:
        OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);
        LDLM_DEBUG(lock, "client-side enqueue granted");
        ns = lock->l_resource->lr_namespace;
        lock_res_and_lock(lock);

        /* The flock can be freed by another thread before this completion
         * AST runs, so check l_destroyed under the resource lock. */
        if (lock->l_destroyed) {
                LDLM_DEBUG(lock, "already destroyed by another thread");
                unlock_res(lock->l_resource);
                RETURN(0);
        }

        /* take lock off the deadlock detection waitq. */
        spin_lock(&ldlm_flock_waitq_lock);
        list_del_init(&lock->l_flock_waitq);
        spin_unlock(&ldlm_flock_waitq_lock);

        /* ldlm_lock_enqueue() has already placed lock on the granted list. */
        list_del_init(&lock->l_res_link);

        if (flags & LDLM_FL_TEST_LOCK) {
                /* fcntl(F_GETLK) request */
                /* The old mode was saved in getlk->fl_type so that if the mode
                 * in the lock changes we can decref the appropriate refcount. */
                ldlm_flock_destroy(lock, cfs_flock_type(getlk), LDLM_FL_WAIT_NOREPROC);
                switch (lock->l_granted_mode) {
                case LCK_PR:
                        cfs_flock_set_type(getlk, F_RDLCK);
                        break;
                case LCK_PW:
                        cfs_flock_set_type(getlk, F_WRLCK);
                        break;
                default:
                        cfs_flock_set_type(getlk, F_UNLCK);
                }
                cfs_flock_set_pid(getlk, (pid_t)lock->l_policy_data.l_flock.pid);
                cfs_flock_set_start(getlk, (loff_t)lock->l_policy_data.l_flock.start);
                cfs_flock_set_end(getlk, (loff_t)lock->l_policy_data.l_flock.end);
        } else {
                int noreproc = LDLM_FL_WAIT_NOREPROC;

                /* We need to reprocess the lock to do merges or splits
                 * with existing locks owned by this process. */
                ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
                if (flags == 0)
                        cfs_waitq_signal(&lock->l_waitq);
        }
        unlock_res_and_lock(lock);
        RETURN(0);
}
EXPORT_SYMBOL(ldlm_flock_completion_ast);

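/*
 * Blocking callback for flock locks. It is only called with
 * LDLM_CB_CANCELING, and all it has to do is take the lock off the
 * deadlock-detection wait queue.
 */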
int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                            void *data, int flag)
{
        struct ldlm_namespace *ns;
        ENTRY;

        LASSERT(lock);
        LASSERT(flag == LDLM_CB_CANCELING);

        ns = lock->l_resource->lr_namespace;

        /* take lock off the deadlock detection waitq. */
        spin_lock(&ldlm_flock_waitq_lock);
        list_del_init(&lock->l_flock_waitq);
        spin_unlock(&ldlm_flock_waitq_lock);
        RETURN(0);
}