LU-104 Properly address ownership of posix and flock locks
lustre/ldlm/ldlm_flock.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2003 Hewlett-Packard Development Company LP.
 * Developed under the sponsorship of the US Government under
 * Subcontract No. B514193
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
#include <lustre_dlm.h>
#include <obd_support.h>
#include <obd_class.h>
#include <lustre_lib.h>
#include <libcfs/list.h>
#else
#include <liblustre.h>
#include <obd_class.h>
#endif

#include "ldlm_internal.h"

#define l_flock_waitq   l_lru

/**
 * Wait queue for Posix lock deadlock detection, added with
 * ldlm_lock::l_flock_waitq.
 */
static CFS_LIST_HEAD(ldlm_flock_waitq);
/**
 * Lock protecting access to ldlm_flock_waitq.
 */
cfs_spinlock_t ldlm_flock_waitq_lock = CFS_SPIN_LOCK_UNLOCKED;

int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                            void *data, int flag);

/**
 * list_for_remaining_safe - iterate over the remaining entries in a list
 *              and safeguard against removal of a list entry.
 * \param pos   the &struct list_head to use as a loop counter. pos MUST
 *              have been initialized prior to using it in this macro.
 * \param n     another &struct list_head to use as temporary storage
 * \param head  the head for your list.
 */
#define list_for_remaining_safe(pos, n, head) \
        for (n = pos->next; pos != (head); pos = n, n = pos->next)

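/*
 * Illustrative usage of list_for_remaining_safe() (a sketch added for
 * clarity, not part of the original file).  Unlike cfs_list_for_each_safe(),
 * "pos" must already point at a valid entry, so iteration can resume from an
 * arbitrary position, e.g. the first granted lock owned by this process:
 *
 *      cfs_list_t *pos = ownlocks;     (already points into res->lr_granted)
 *      cfs_list_t *n;
 *      list_for_remaining_safe(pos, n, &res->lr_granted) {
 *              ... the entry at pos may safely be unlinked here ...
 *      }
 */

/* Two flock locks belong to the same owner when both the flock owner
 * identifier and the export (the client connection) they arrived on match. */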
static inline int
ldlm_same_flock_owner(struct ldlm_lock *lock, struct ldlm_lock *new)
{
        return((new->l_policy_data.l_flock.owner ==
                lock->l_policy_data.l_flock.owner) &&
               (new->l_export == lock->l_export));
}

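/* Flock byte ranges are inclusive at both ends, so two locks overlap when
 * neither range ends before the other one begins. */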
static inline int
ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new)
{
        return((new->l_policy_data.l_flock.start <=
                lock->l_policy_data.l_flock.end) &&
               (new->l_policy_data.l_flock.end >=
                lock->l_policy_data.l_flock.start));
}

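/* Unlink a flock from its resource and destroy it.  In the client-side
 * LDLM_FL_WAIT_NOREPROC case the reference taken on the lock mode is also
 * dropped here, and LDLM_FL_LOCAL_ONLY is set so that destroying the lock
 * does not send a CANCEL RPC to the server. */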
static inline void
ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, int flags)
{
        ENTRY;

        LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%x)",
                   mode, flags);

        /* Safe to not lock here, since it should be empty anyway */
        LASSERT(cfs_list_empty(&lock->l_flock_waitq));

        cfs_list_del_init(&lock->l_res_link);
        if (flags == LDLM_FL_WAIT_NOREPROC &&
            !(lock->l_flags & LDLM_FL_FAILED)) {
                /* client side - set a flag to prevent sending a CANCEL */
                lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;

                /* when we reach here, lock_res_and_lock() is already held,
                 * so we must call the nolock version of
                 * ldlm_lock_decref_internal() */
                ldlm_lock_decref_internal_nolock(lock, mode);
        }

        ldlm_lock_destroy_nolock(lock);
        EXIT;
}

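/* Decide whether granting "req" would complete a cycle of waiters.
 * Starting from the lock that currently blocks "req", follow the
 * blocking_owner/blocking_export links recorded for each lock sitting on
 * ldlm_flock_waitq; if the chain leads back to the owner of "req", report a
 * deadlock.
 *
 * Illustrative example (not from the original file): owner A is blocked by
 * a lock of owner B while B is already waiting on a lock of A.  The walk
 * from B's waiting lock reaches owner A, i.e. the requester, so 1 is
 * returned and the enqueue fails with -EDEADLK. */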
static int
ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *blocking_lock)
{
        struct obd_export *req_export = req->l_export;
        struct obd_export *blocking_export = blocking_lock->l_export;
        __u64 req_owner = req->l_policy_data.l_flock.owner;
        __u64 blocking_owner = blocking_lock->l_policy_data.l_flock.owner;
        struct ldlm_lock *lock;

        cfs_spin_lock(&ldlm_flock_waitq_lock);
restart:
        cfs_list_for_each_entry(lock, &ldlm_flock_waitq, l_flock_waitq) {
                if ((lock->l_policy_data.l_flock.owner != blocking_owner) ||
                    (lock->l_export != blocking_export))
                        continue;

                blocking_owner = lock->l_policy_data.l_flock.blocking_owner;
                blocking_export = (struct obd_export *)
                        lock->l_policy_data.l_flock.blocking_export;
                if (blocking_owner == req_owner &&
                    blocking_export == req_export) {
                        cfs_spin_unlock(&ldlm_flock_waitq_lock);
                        return 1;
                }

                goto restart;
        }
        cfs_spin_unlock(&ldlm_flock_waitq_lock);

        return 0;
}

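/* Enqueue or reprocess a POSIX (LDLM_FLOCK) lock request against its
 * resource.  Locks of other owners are scanned first for conflicts: a
 * conflicting request is failed (-EAGAIN, -EDEADLK), answered (F_GETLK), or
 * put on the waiting queue.  A compatible request is then merged with, or
 * split against, the overlapping and adjacent locks already held by the
 * same owner so that the granted list stays minimal.  Called with the
 * resource locked, both at first enqueue and when reprocessing the waiting
 * queue (first_enq == 0). */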
int
ldlm_process_flock_lock(struct ldlm_lock *req, int *flags, int first_enq,
                        ldlm_error_t *err, cfs_list_t *work_list)
{
        struct ldlm_resource *res = req->l_resource;
        struct ldlm_namespace *ns = ldlm_res_to_ns(res);
        cfs_list_t *tmp;
        cfs_list_t *ownlocks = NULL;
        struct ldlm_lock *lock = NULL;
        struct ldlm_lock *new = req;
        struct ldlm_lock *new2 = NULL;
        ldlm_mode_t mode = req->l_req_mode;
        int local = ns_is_client(ns);
        int added = (mode == LCK_NL);
        int overlaps = 0;
        int splitted = 0;
        const struct ldlm_callback_suite null_cbs = { NULL };
        ENTRY;

        CDEBUG(D_DLMTRACE, "flags %#x owner "LPU64" pid %u mode %u start "LPU64
               " end "LPU64"\n", *flags, new->l_policy_data.l_flock.owner,
               new->l_policy_data.l_flock.pid, mode,
               req->l_policy_data.l_flock.start,
               req->l_policy_data.l_flock.end);

        *err = ELDLM_OK;

        if (local) {
                /* No blocking ASTs are sent to the clients for
                 * Posix file & record locks */
                req->l_blocking_ast = NULL;
        } else {
                /* Called on the server for lock cancels. */
                req->l_blocking_ast = ldlm_flock_blocking_ast;
        }

reprocess:
        if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
                /* This loop determines where this process's locks start
                 * in the resource lr_granted list. */
                cfs_list_for_each(tmp, &res->lr_granted) {
                        lock = cfs_list_entry(tmp, struct ldlm_lock,
                                              l_res_link);
                        if (ldlm_same_flock_owner(lock, req)) {
                                ownlocks = tmp;
                                break;
                        }
                }
        } else {
                lockmode_verify(mode);

                /* This loop determines if there are existing locks
                 * that conflict with the new lock request. */
                cfs_list_for_each(tmp, &res->lr_granted) {
                        lock = cfs_list_entry(tmp, struct ldlm_lock,
                                              l_res_link);

                        if (ldlm_same_flock_owner(lock, req)) {
                                if (!ownlocks)
                                        ownlocks = tmp;
                                continue;
                        }

                        /* locks are compatible, overlap doesn't matter */
                        if (lockmode_compat(lock->l_granted_mode, mode))
                                continue;

                        if (!ldlm_flocks_overlap(lock, req))
                                continue;

                        if (!first_enq)
                                RETURN(LDLM_ITER_CONTINUE);

                        if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                ldlm_flock_destroy(req, mode, *flags);
                                *err = -EAGAIN;
                                RETURN(LDLM_ITER_STOP);
                        }

                        if (*flags & LDLM_FL_TEST_LOCK) {
                                ldlm_flock_destroy(req, mode, *flags);
                                req->l_req_mode = lock->l_granted_mode;
                                req->l_policy_data.l_flock.pid =
                                        lock->l_policy_data.l_flock.pid;
                                req->l_policy_data.l_flock.start =
                                        lock->l_policy_data.l_flock.start;
                                req->l_policy_data.l_flock.end =
                                        lock->l_policy_data.l_flock.end;
                                *flags |= LDLM_FL_LOCK_CHANGED;
                                RETURN(LDLM_ITER_STOP);
                        }

                        if (ldlm_flock_deadlock(req, lock)) {
                                ldlm_flock_destroy(req, mode, *flags);
                                *err = -EDEADLK;
                                RETURN(LDLM_ITER_STOP);
                        }

                        req->l_policy_data.l_flock.blocking_owner =
                                lock->l_policy_data.l_flock.owner;
                        req->l_policy_data.l_flock.blocking_export =
                                lock->l_export;

                        LASSERT(cfs_list_empty(&req->l_flock_waitq));
                        cfs_spin_lock(&ldlm_flock_waitq_lock);
                        cfs_list_add_tail(&req->l_flock_waitq,
                                          &ldlm_flock_waitq);
                        cfs_spin_unlock(&ldlm_flock_waitq_lock);

                        ldlm_resource_add_lock(res, &res->lr_waiting, req);
                        *flags |= LDLM_FL_BLOCK_GRANTED;
                        RETURN(LDLM_ITER_STOP);
                }
        }

        if (*flags & LDLM_FL_TEST_LOCK) {
                ldlm_flock_destroy(req, mode, *flags);
                req->l_req_mode = LCK_NL;
                *flags |= LDLM_FL_LOCK_CHANGED;
                RETURN(LDLM_ITER_STOP);
        }

        /* In case we had slept on this lock request take it off the
         * deadlock detection waitq. */
        cfs_spin_lock(&ldlm_flock_waitq_lock);
        cfs_list_del_init(&req->l_flock_waitq);
        cfs_spin_unlock(&ldlm_flock_waitq_lock);

        /* Scan the locks owned by this process that overlap this request.
         * We may have to merge or split existing locks. */

        if (!ownlocks)
                ownlocks = &res->lr_granted;

        list_for_remaining_safe(ownlocks, tmp, &res->lr_granted) {
                lock = cfs_list_entry(ownlocks, struct ldlm_lock, l_res_link);

                if (!ldlm_same_flock_owner(lock, new))
                        break;

                if (lock->l_granted_mode == mode) {
                        /* If the modes are the same then we need to process
                         * locks that overlap OR adjoin the new lock. The extra
                         * logic condition is necessary to deal with arithmetic
                         * overflow and underflow. */
                        if ((new->l_policy_data.l_flock.start >
                             (lock->l_policy_data.l_flock.end + 1))
                            && (lock->l_policy_data.l_flock.end !=
                                OBD_OBJECT_EOF))
                                continue;

                        if ((new->l_policy_data.l_flock.end <
                             (lock->l_policy_data.l_flock.start - 1))
                            && (lock->l_policy_data.l_flock.start != 0))
                                break;

                        if (new->l_policy_data.l_flock.start <
                            lock->l_policy_data.l_flock.start) {
                                lock->l_policy_data.l_flock.start =
                                        new->l_policy_data.l_flock.start;
                        } else {
                                new->l_policy_data.l_flock.start =
                                        lock->l_policy_data.l_flock.start;
                        }

                        if (new->l_policy_data.l_flock.end >
                            lock->l_policy_data.l_flock.end) {
                                lock->l_policy_data.l_flock.end =
                                        new->l_policy_data.l_flock.end;
                        } else {
                                new->l_policy_data.l_flock.end =
                                        lock->l_policy_data.l_flock.end;
                        }

                        if (added) {
                                ldlm_flock_destroy(lock, mode, *flags);
                        } else {
                                new = lock;
                                added = 1;
                        }
                        continue;
                }

                if (new->l_policy_data.l_flock.start >
                    lock->l_policy_data.l_flock.end)
                        continue;

                if (new->l_policy_data.l_flock.end <
                    lock->l_policy_data.l_flock.start)
                        break;

                ++overlaps;

                if (new->l_policy_data.l_flock.start <=
                    lock->l_policy_data.l_flock.start) {
                        if (new->l_policy_data.l_flock.end <
                            lock->l_policy_data.l_flock.end) {
                                lock->l_policy_data.l_flock.start =
                                        new->l_policy_data.l_flock.end + 1;
                                break;
                        }
                        ldlm_flock_destroy(lock, lock->l_req_mode, *flags);
                        continue;
                }
                if (new->l_policy_data.l_flock.end >=
                    lock->l_policy_data.l_flock.end) {
                        lock->l_policy_data.l_flock.end =
                                new->l_policy_data.l_flock.start - 1;
                        continue;
                }

                /* split the existing lock into two locks */

                /* if this is an F_UNLCK operation then we could avoid
                 * allocating a new lock and use the req lock passed in
                 * with the request but this would complicate the reply
                 * processing since updates to req get reflected in the
                 * reply. The client side replays the lock request so
                 * it must see the original lock data in the reply. */

                /* XXX - if ldlm_lock_new() can sleep we should
                 * release the lr_lock, allocate the new lock,
                 * and restart processing this lock. */
                if (!new2) {
                        unlock_res_and_lock(req);
                        new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK,
                                        lock->l_granted_mode, &null_cbs,
                                        NULL, 0);
                        lock_res_and_lock(req);
                        if (!new2) {
                                ldlm_flock_destroy(req, lock->l_granted_mode,
                                                   *flags);
                                *err = -ENOLCK;
                                RETURN(LDLM_ITER_STOP);
                        }
                        goto reprocess;
                }

                splitted = 1;

                new2->l_granted_mode = lock->l_granted_mode;
                new2->l_policy_data.l_flock.pid =
                        new->l_policy_data.l_flock.pid;
                new2->l_policy_data.l_flock.owner =
                        new->l_policy_data.l_flock.owner;
                new2->l_policy_data.l_flock.start =
                        lock->l_policy_data.l_flock.start;
                new2->l_policy_data.l_flock.end =
                        new->l_policy_data.l_flock.start - 1;
                lock->l_policy_data.l_flock.start =
                        new->l_policy_data.l_flock.end + 1;
                new2->l_conn_export = lock->l_conn_export;
                if (lock->l_export != NULL) {
                        new2->l_export = class_export_lock_get(lock->l_export, new2);
                        if (new2->l_export->exp_lock_hash &&
                            cfs_hlist_unhashed(&new2->l_exp_hash))
                                cfs_hash_add(new2->l_export->exp_lock_hash,
                                             &new2->l_remote_handle,
                                             &new2->l_exp_hash);
                }
                if (*flags == LDLM_FL_WAIT_NOREPROC)
                        ldlm_lock_addref_internal_nolock(new2,
                                                         lock->l_granted_mode);

                /* insert new2 at lock */
                ldlm_resource_add_lock(res, ownlocks, new2);
                LDLM_LOCK_RELEASE(new2);
                break;
        }

        /* if new2 is created but never used, destroy it */
        if (splitted == 0 && new2 != NULL)
                ldlm_lock_destroy_nolock(new2);

        /* At this point we're granting the lock request. */
        req->l_granted_mode = req->l_req_mode;

        /* Add req to the granted queue before calling ldlm_reprocess_all(). */
        if (!added) {
                cfs_list_del_init(&req->l_res_link);
                /* insert new lock before ownlocks in list. */
                ldlm_resource_add_lock(res, ownlocks, req);
        }

        if (*flags != LDLM_FL_WAIT_NOREPROC) {
                if (first_enq) {
                        /* If this is an unlock, reprocess the waitq and
                         * send completion ASTs for locks that can now be
                         * granted. The only problem with doing this
                         * reprocessing here is that the completion ASTs for
                         * newly granted locks will be sent before the unlock
                         * completion is sent. It shouldn't be an issue. Also
                         * note that ldlm_process_flock_lock() will recurse,
                         * but only once because first_enq will be false from
                         * ldlm_reprocess_queue. */
                        if ((mode == LCK_NL) && overlaps) {
                                CFS_LIST_HEAD(rpc_list);
                                int rc;
restart:
                                ldlm_reprocess_queue(res, &res->lr_waiting,
                                                     &rpc_list);

                                unlock_res_and_lock(req);
                                rc = ldlm_run_ast_work(&rpc_list,
                                                       LDLM_WORK_CP_AST);
                                lock_res_and_lock(req);
                                if (rc == -ERESTART)
                                        GOTO(restart, -ERESTART);
                        }
                } else {
                        LASSERT(req->l_completion_ast);
                        ldlm_add_ast_work_item(req, NULL, work_list);
                }
        }

        /* In case we're reprocessing the requested lock we can't destroy
         * it until after calling ldlm_add_ast_work_item() above so that it
         * can bump the reference count on req. Otherwise req could be freed
         * before the completion AST can be sent.  */
        if (added)
                ldlm_flock_destroy(req, mode, *flags);

        ldlm_resource_dump(D_INFO, res);
        RETURN(LDLM_ITER_CONTINUE);
}

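/* State shared between ldlm_flock_completion_ast() and the interrupted-wait
 * callback below: the lock being slept on and the import generation at the
 * time the sleep started. */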
struct ldlm_flock_wait_data {
        struct ldlm_lock *fwd_lock;
        int               fwd_generation;
};

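/* Called if the wait in ldlm_flock_completion_ast() is interrupted: remove
 * the lock from the deadlock detection waitq and mark it CBPENDING so it is
 * not cached on the LRU once its references are dropped. */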
static void
ldlm_flock_interrupted_wait(void *data)
{
        struct ldlm_lock *lock;
        ENTRY;

        lock = ((struct ldlm_flock_wait_data *)data)->fwd_lock;

        /* take lock off the deadlock detection waitq. */
        cfs_spin_lock(&ldlm_flock_waitq_lock);
        cfs_list_del_init(&lock->l_flock_waitq);
        cfs_spin_unlock(&ldlm_flock_waitq_lock);

        /* client side - set flag to prevent lock from being put on lru list */
        lock->l_flags |= LDLM_FL_CBPENDING;

        EXIT;
}

/**
 * Flock completion callback function.
 *
 * \param lock [in,out]: A lock to be handled
 * \param flags    [in]: flags
 * \param *data    [in]: ldlm_work_cp_ast_lock() will use ldlm_cb_set_arg
 *
 * \retval 0    : success
 * \retval <0   : failure
 */
int
ldlm_flock_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        cfs_flock_t                    *getlk = lock->l_ast_data;
        struct obd_device              *obd;
        struct obd_import              *imp = NULL;
        struct ldlm_flock_wait_data     fwd;
        struct l_wait_info              lwi;
        ldlm_error_t                    err;
        int                             rc = 0;
        ENTRY;

        CDEBUG(D_DLMTRACE, "flags: 0x%x data: %p getlk: %p\n",
               flags, data, getlk);

        /* Import invalidation. We need to actually release the lock
         * references being held, so that it can go away. No point in
         * holding the lock even if app still believes it has it, since
         * server already dropped it anyway. Only for granted locks too. */
        if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
            (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
                if (lock->l_req_mode == lock->l_granted_mode &&
                    lock->l_granted_mode != LCK_NL &&
                    NULL == data)
                        ldlm_lock_decref_internal(lock, lock->l_req_mode);

                /* Need to wake up the waiter if we were evicted */
                cfs_waitq_signal(&lock->l_waitq);
                RETURN(0);
        }

        LASSERT(flags != LDLM_FL_WAIT_NOREPROC);

        if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
                       LDLM_FL_BLOCK_CONV))) {
                if (NULL == data)
                        /* mds granted the lock in the reply */
                        goto granted;
                /* CP AST RPC: lock got granted, wake it up */
                cfs_waitq_signal(&lock->l_waitq);
                RETURN(0);
        }

        LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
                   "sleeping");
        fwd.fwd_lock = lock;
        obd = class_exp2obd(lock->l_conn_export);

        /* if this is a local lock, there is no import */
        if (NULL != obd)
                imp = obd->u.cli.cl_import;

        if (NULL != imp) {
                cfs_spin_lock(&imp->imp_lock);
                fwd.fwd_generation = imp->imp_generation;
                cfs_spin_unlock(&imp->imp_lock);
        }

        lwi = LWI_TIMEOUT_INTR(0, NULL, ldlm_flock_interrupted_wait, &fwd);

        /* Go to sleep until the lock is granted. */
        rc = l_wait_event(lock->l_waitq, is_granted_or_cancelled(lock), &lwi);

        if (rc) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
                           rc);
                RETURN(rc);
        }

granted:
        OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);

        if (lock->l_destroyed) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
                RETURN(0);
        }

        if (lock->l_flags & LDLM_FL_FAILED) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed");
                RETURN(-EIO);
        }

        if (rc) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
                           rc);
                RETURN(rc);
        }

        LDLM_DEBUG(lock, "client-side enqueue granted");

        /* take lock off the deadlock detection waitq. */
        cfs_spin_lock(&ldlm_flock_waitq_lock);
        cfs_list_del_init(&lock->l_flock_waitq);
        cfs_spin_unlock(&ldlm_flock_waitq_lock);

        lock_res_and_lock(lock);
        /* ldlm_lock_enqueue() has already placed lock on the granted list. */
        cfs_list_del_init(&lock->l_res_link);

        if (flags & LDLM_FL_TEST_LOCK) {
                /* fcntl(F_GETLK) request */
                /* The old mode was saved in getlk->fl_type so that if the mode
                 * in the lock changes we can decref the appropriate refcount. */
                ldlm_flock_destroy(lock, cfs_flock_type(getlk),
                                   LDLM_FL_WAIT_NOREPROC);
                switch (lock->l_granted_mode) {
                case LCK_PR:
                        cfs_flock_set_type(getlk, F_RDLCK);
                        break;
                case LCK_PW:
                        cfs_flock_set_type(getlk, F_WRLCK);
                        break;
                default:
                        cfs_flock_set_type(getlk, F_UNLCK);
                }
                cfs_flock_set_pid(getlk,
                                  (pid_t)lock->l_policy_data.l_flock.pid);
                cfs_flock_set_start(getlk,
                                    (loff_t)lock->l_policy_data.l_flock.start);
                cfs_flock_set_end(getlk,
                                  (loff_t)lock->l_policy_data.l_flock.end);
        } else {
                int noreproc = LDLM_FL_WAIT_NOREPROC;

                /* We need to reprocess the lock to do merges or splits
                 * with existing locks owned by this process. */
                ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
        }
        unlock_res_and_lock(lock);
        RETURN(0);
}
EXPORT_SYMBOL(ldlm_flock_completion_ast);

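/* Blocking AST for flock locks.  It is installed as l_blocking_ast on the
 * server side (see ldlm_process_flock_lock()) and only runs when the lock
 * is cancelled (LDLM_CB_CANCELING); all it needs to do is remove the lock
 * from the deadlock detection waitq. */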
int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                            void *data, int flag)
{
        struct ldlm_namespace *ns;
        ENTRY;

        LASSERT(lock);
        LASSERT(flag == LDLM_CB_CANCELING);

        ns = ldlm_lock_to_ns(lock);

        /* take lock off the deadlock detection waitq. */
        cfs_spin_lock(&ldlm_flock_waitq_lock);
        cfs_list_del_init(&lock->l_flock_waitq);
        cfs_spin_unlock(&ldlm_flock_waitq_lock);
        RETURN(0);
}

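/* Convert flock policy data received over the wire into the local
 * representation used by the lock code. */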
void ldlm_flock_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
                                     ldlm_policy_data_t *lpolicy)
{
        memset(lpolicy, 0, sizeof(*lpolicy));
        lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
        lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
        lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
        lpolicy->l_flock.owner = wpolicy->l_flock.lfw_owner;
        /* Compat code, old clients had no idea about owner field and
         * relied solely on pid for ownership. Introduced in 2.1, April 2011 */
        if (!lpolicy->l_flock.owner)
                lpolicy->l_flock.owner = wpolicy->l_flock.lfw_pid;
}

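/* Convert local flock policy data into its wire format for sending to the
 * peer. */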
void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
                                     ldlm_wire_policy_data_t *wpolicy)
{
        memset(wpolicy, 0, sizeof(*wpolicy));
        wpolicy->l_flock.lfw_start = lpolicy->l_flock.start;
        wpolicy->l_flock.lfw_end = lpolicy->l_flock.end;
        wpolicy->l_flock.lfw_pid = lpolicy->l_flock.pid;
        wpolicy->l_flock.lfw_owner = lpolicy->l_flock.owner;
}