LU-1137 ldlm: fix for the flock handling for 1.8 clients
lustre/ldlm/ldlm_flock.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003 Hewlett-Packard Development Company LP.
 * Developed under the sponsorship of the US Government under
 * Subcontract No. B514193
 *
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2011, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
#include <lustre_dlm.h>
#include <obd_support.h>
#include <obd_class.h>
#include <lustre_lib.h>
#include <libcfs/list.h>
#else
#include <liblustre.h>
#include <obd_class.h>
#endif

#include "ldlm_internal.h"

#define l_flock_waitq   l_lru

/**
 * Wait queue for Posix lock deadlock detection, added with
 * ldlm_lock::l_flock_waitq.
 */
static CFS_LIST_HEAD(ldlm_flock_waitq);
/**
 * Lock protecting access to ldlm_flock_waitq.
 */
cfs_spinlock_t ldlm_flock_waitq_lock = CFS_SPIN_LOCK_UNLOCKED;

int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                            void *data, int flag);

/**
 * list_for_remaining_safe - iterate over the remaining entries in a list
 *              and safeguard against removal of a list entry.
 * \param pos   the &struct list_head to use as a loop counter. pos MUST
 *              have been initialized prior to using it in this macro.
 * \param n     another &struct list_head to use as temporary storage
 * \param head  the head for your list.
 */
#define list_for_remaining_safe(pos, n, head) \
        for (n = pos->next; pos != (head); pos = n, n = pos->next)

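/*
 * Usage sketch for list_for_remaining_safe() (illustrative only, not part
 * of the original code): resume a scan of lr_granted from a position saved
 * by an earlier walk while staying safe against removal of the entry that
 * is currently being visited.
 *
 *      cfs_list_t *pos = ownlocks;     (position found by an earlier scan)
 *      cfs_list_t *n;
 *
 *      list_for_remaining_safe(pos, n, &res->lr_granted) {
 *              struct ldlm_lock *lck =
 *                      cfs_list_entry(pos, struct ldlm_lock, l_res_link);
 *              ... pos may be unlinked here; iteration continues via n ...
 *      }
 */
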
static inline int
ldlm_same_flock_owner(struct ldlm_lock *lock, struct ldlm_lock *new)
{
        return((new->l_policy_data.l_flock.owner ==
                lock->l_policy_data.l_flock.owner) &&
               (new->l_export == lock->l_export));
}

static inline int
ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new)
{
        return((new->l_policy_data.l_flock.start <=
                lock->l_policy_data.l_flock.end) &&
               (new->l_policy_data.l_flock.end >=
                lock->l_policy_data.l_flock.start));
}

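/**
 * Remove \a lock from its resource's lock list and destroy it.
 *
 * In the LDLM_FL_WAIT_NOREPROC (client-side reprocessing) case the lock is
 * flagged LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING so no CANCEL is sent to
 * the server, and the lock's mode reference is dropped with the nolock
 * variant of ldlm_lock_decref_internal() because the caller already holds
 * lock_res_and_lock().
 */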
static inline void
ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, int flags)
{
        ENTRY;

        LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%x)",
                   mode, flags);

        /* Safe to not lock here, since it should be empty anyway */
        LASSERT(cfs_list_empty(&lock->l_flock_waitq));

        cfs_list_del_init(&lock->l_res_link);
        if (flags == LDLM_FL_WAIT_NOREPROC &&
            !(lock->l_flags & LDLM_FL_FAILED)) {
                /* client side - set a flag to prevent sending a CANCEL */
                lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;

                /* when reaching here, it is under lock_res_and_lock(). Thus,
                 * we need to call the nolock version of
                 * ldlm_lock_decref_internal(). */
                ldlm_lock_decref_internal_nolock(lock, mode);
        }

        ldlm_lock_destroy_nolock(lock);
        EXIT;
}

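/**
 * Check whether granting \a req would create a deadlock with
 * \a blocking_lock.
 *
 * Walks the chain of blocking owners recorded on ldlm_flock_waitq starting
 * from the owner of \a blocking_lock; if the chain leads back to the owner
 * of \a req, granting the request would close a cycle, so 1 is returned,
 * otherwise 0.
 */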
static int
ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *blocking_lock)
{
        struct obd_export *req_export = req->l_export;
        struct obd_export *blocking_export = blocking_lock->l_export;
        __u64 req_owner = req->l_policy_data.l_flock.owner;
        __u64 blocking_owner = blocking_lock->l_policy_data.l_flock.owner;
        struct ldlm_lock *lock;

        cfs_spin_lock(&ldlm_flock_waitq_lock);
restart:
        cfs_list_for_each_entry(lock, &ldlm_flock_waitq, l_flock_waitq) {
                if ((lock->l_policy_data.l_flock.owner != blocking_owner) ||
                    (lock->l_export != blocking_export))
                        continue;

                blocking_owner = lock->l_policy_data.l_flock.blocking_owner;
                blocking_export = (struct obd_export *)
                        lock->l_policy_data.l_flock.blocking_export;
                if (blocking_owner == req_owner &&
                    blocking_export == req_export) {
                        cfs_spin_unlock(&ldlm_flock_waitq_lock);
                        return 1;
                }

                goto restart;
        }
        cfs_spin_unlock(&ldlm_flock_waitq_lock);

        return 0;
}

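/**
 * Process a granting attempt for the flock request \a req.
 *
 * If a conflicting lock from another owner is found on the resource, the
 * request either returns -EAGAIN (LDLM_FL_BLOCK_NOWAIT), reports the
 * conflicting lock (LDLM_FL_TEST_LOCK), fails with -EDEADLK when granting
 * would create a cycle, or is queued on lr_waiting with
 * LDLM_FL_BLOCK_GRANTED. Otherwise the request is merged with, or split
 * against, the locks already held by the same owner and placed on the
 * granted queue.
 */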
int
ldlm_process_flock_lock(struct ldlm_lock *req, int *flags, int first_enq,
                        ldlm_error_t *err, cfs_list_t *work_list)
{
        struct ldlm_resource *res = req->l_resource;
        struct ldlm_namespace *ns = ldlm_res_to_ns(res);
        cfs_list_t *tmp;
        cfs_list_t *ownlocks = NULL;
        struct ldlm_lock *lock = NULL;
        struct ldlm_lock *new = req;
        struct ldlm_lock *new2 = NULL;
        ldlm_mode_t mode = req->l_req_mode;
        int local = ns_is_client(ns);
        int added = (mode == LCK_NL);
        int overlaps = 0;
        int splitted = 0;
        const struct ldlm_callback_suite null_cbs = { NULL };
        ENTRY;

        CDEBUG(D_DLMTRACE, "flags %#x owner "LPU64" pid %u mode %u start "LPU64
               " end "LPU64"\n", *flags, new->l_policy_data.l_flock.owner,
               new->l_policy_data.l_flock.pid, mode,
               req->l_policy_data.l_flock.start,
               req->l_policy_data.l_flock.end);

        *err = ELDLM_OK;

        if (local) {
                /* No blocking ASTs are sent to the clients for
                 * Posix file & record locks */
                req->l_blocking_ast = NULL;
        } else {
                /* Called on the server for lock cancels. */
                req->l_blocking_ast = ldlm_flock_blocking_ast;
        }

reprocess:
        if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
                /* This loop determines where this process's locks start
                 * in the resource lr_granted list. */
                cfs_list_for_each(tmp, &res->lr_granted) {
                        lock = cfs_list_entry(tmp, struct ldlm_lock,
                                              l_res_link);
                        if (ldlm_same_flock_owner(lock, req)) {
                                ownlocks = tmp;
                                break;
                        }
                }
        } else {
                lockmode_verify(mode);

                /* This loop determines if there are existing locks
                 * that conflict with the new lock request. */
                cfs_list_for_each(tmp, &res->lr_granted) {
                        lock = cfs_list_entry(tmp, struct ldlm_lock,
                                              l_res_link);

                        if (ldlm_same_flock_owner(lock, req)) {
                                if (!ownlocks)
                                        ownlocks = tmp;
                                continue;
                        }

                        /* locks are compatible, overlap doesn't matter */
                        if (lockmode_compat(lock->l_granted_mode, mode))
                                continue;

                        if (!ldlm_flocks_overlap(lock, req))
                                continue;

                        if (!first_enq)
                                RETURN(LDLM_ITER_CONTINUE);

                        if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                ldlm_flock_destroy(req, mode, *flags);
                                *err = -EAGAIN;
                                RETURN(LDLM_ITER_STOP);
                        }

                        if (*flags & LDLM_FL_TEST_LOCK) {
                                ldlm_flock_destroy(req, mode, *flags);
                                req->l_req_mode = lock->l_granted_mode;
                                req->l_policy_data.l_flock.pid =
                                        lock->l_policy_data.l_flock.pid;
                                req->l_policy_data.l_flock.start =
                                        lock->l_policy_data.l_flock.start;
                                req->l_policy_data.l_flock.end =
                                        lock->l_policy_data.l_flock.end;
                                *flags |= LDLM_FL_LOCK_CHANGED;
                                RETURN(LDLM_ITER_STOP);
                        }

                        if (ldlm_flock_deadlock(req, lock)) {
                                ldlm_flock_destroy(req, mode, *flags);
                                *err = -EDEADLK;
                                RETURN(LDLM_ITER_STOP);
                        }

                        req->l_policy_data.l_flock.blocking_owner =
                                lock->l_policy_data.l_flock.owner;
                        req->l_policy_data.l_flock.blocking_export =
                                lock->l_export;

                        LASSERT(cfs_list_empty(&req->l_flock_waitq));
                        cfs_spin_lock(&ldlm_flock_waitq_lock);
                        cfs_list_add_tail(&req->l_flock_waitq,
                                          &ldlm_flock_waitq);
                        cfs_spin_unlock(&ldlm_flock_waitq_lock);

                        ldlm_resource_add_lock(res, &res->lr_waiting, req);
                        *flags |= LDLM_FL_BLOCK_GRANTED;
                        RETURN(LDLM_ITER_STOP);
                }
        }

        if (*flags & LDLM_FL_TEST_LOCK) {
                ldlm_flock_destroy(req, mode, *flags);
                req->l_req_mode = LCK_NL;
                *flags |= LDLM_FL_LOCK_CHANGED;
                RETURN(LDLM_ITER_STOP);
        }

        /* In case we had slept on this lock request take it off of the
         * deadlock detection waitq. */
        cfs_spin_lock(&ldlm_flock_waitq_lock);
        cfs_list_del_init(&req->l_flock_waitq);
        cfs_spin_unlock(&ldlm_flock_waitq_lock);

        /* Scan the locks owned by this process that overlap this request.
         * We may have to merge or split existing locks. */

        if (!ownlocks)
                ownlocks = &res->lr_granted;

        list_for_remaining_safe(ownlocks, tmp, &res->lr_granted) {
                lock = cfs_list_entry(ownlocks, struct ldlm_lock, l_res_link);

                if (!ldlm_same_flock_owner(lock, new))
                        break;

                if (lock->l_granted_mode == mode) {
                        /* If the modes are the same then we need to process
                         * locks that overlap OR adjoin the new lock. The extra
                         * logic condition is necessary to deal with arithmetic
                         * overflow and underflow. */
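                        /* Illustrative example (not in the original code):
                         * an existing granted lock covering [0, 99] and a
                         * same-mode request for [100, 199] from the same
                         * owner adjoin, since 100 == 99 + 1, so the checks
                         * below fall through and the two ranges are merged
                         * into a single [0, 199] lock. */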
                        if ((new->l_policy_data.l_flock.start >
                             (lock->l_policy_data.l_flock.end + 1))
                            && (lock->l_policy_data.l_flock.end !=
                                OBD_OBJECT_EOF))
                                continue;

                        if ((new->l_policy_data.l_flock.end <
                             (lock->l_policy_data.l_flock.start - 1))
                            && (lock->l_policy_data.l_flock.start != 0))
                                break;

                        if (new->l_policy_data.l_flock.start <
                            lock->l_policy_data.l_flock.start) {
                                lock->l_policy_data.l_flock.start =
                                        new->l_policy_data.l_flock.start;
                        } else {
                                new->l_policy_data.l_flock.start =
                                        lock->l_policy_data.l_flock.start;
                        }

                        if (new->l_policy_data.l_flock.end >
                            lock->l_policy_data.l_flock.end) {
                                lock->l_policy_data.l_flock.end =
                                        new->l_policy_data.l_flock.end;
                        } else {
                                new->l_policy_data.l_flock.end =
                                        lock->l_policy_data.l_flock.end;
                        }

                        if (added) {
                                ldlm_flock_destroy(lock, mode, *flags);
                        } else {
                                new = lock;
                                added = 1;
                        }
                        continue;
                }

                if (new->l_policy_data.l_flock.start >
                    lock->l_policy_data.l_flock.end)
                        continue;

                if (new->l_policy_data.l_flock.end <
                    lock->l_policy_data.l_flock.start)
                        break;

                ++overlaps;

                if (new->l_policy_data.l_flock.start <=
                    lock->l_policy_data.l_flock.start) {
                        if (new->l_policy_data.l_flock.end <
                            lock->l_policy_data.l_flock.end) {
                                lock->l_policy_data.l_flock.start =
                                        new->l_policy_data.l_flock.end + 1;
                                break;
                        }
                        ldlm_flock_destroy(lock, lock->l_req_mode, *flags);
                        continue;
                }
                if (new->l_policy_data.l_flock.end >=
                    lock->l_policy_data.l_flock.end) {
                        lock->l_policy_data.l_flock.end =
                                new->l_policy_data.l_flock.start - 1;
                        continue;
                }

                /* split the existing lock into two locks */

                /* if this is an F_UNLCK operation then we could avoid
                 * allocating a new lock and use the req lock passed in
                 * with the request but this would complicate the reply
                 * processing since updates to req get reflected in the
                 * reply. The client side replays the lock request so
                 * it must see the original lock data in the reply. */

                /* XXX - if ldlm_lock_new() can sleep we should
                 * release the lr_lock, allocate the new lock,
                 * and restart processing this lock. */
                if (!new2) {
                        unlock_res_and_lock(req);
                        new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK,
                                        lock->l_granted_mode, &null_cbs,
                                        NULL, 0);
                        lock_res_and_lock(req);
                        if (!new2) {
                                ldlm_flock_destroy(req, lock->l_granted_mode,
                                                   *flags);
                                *err = -ENOLCK;
                                RETURN(LDLM_ITER_STOP);
                        }
                        goto reprocess;
                }

                splitted = 1;

                new2->l_granted_mode = lock->l_granted_mode;
                new2->l_policy_data.l_flock.pid =
                        new->l_policy_data.l_flock.pid;
                new2->l_policy_data.l_flock.owner =
                        new->l_policy_data.l_flock.owner;
                new2->l_policy_data.l_flock.start =
                        lock->l_policy_data.l_flock.start;
                new2->l_policy_data.l_flock.end =
                        new->l_policy_data.l_flock.start - 1;
                lock->l_policy_data.l_flock.start =
                        new->l_policy_data.l_flock.end + 1;
                new2->l_conn_export = lock->l_conn_export;
                if (lock->l_export != NULL) {
                        new2->l_export = class_export_lock_get(lock->l_export, new2);
                        if (new2->l_export->exp_lock_hash &&
                            cfs_hlist_unhashed(&new2->l_exp_hash))
                                cfs_hash_add(new2->l_export->exp_lock_hash,
                                             &new2->l_remote_handle,
                                             &new2->l_exp_hash);
                }
                if (*flags == LDLM_FL_WAIT_NOREPROC)
                        ldlm_lock_addref_internal_nolock(new2,
                                                         lock->l_granted_mode);

                /* insert new2 at lock */
                ldlm_resource_add_lock(res, ownlocks, new2);
                LDLM_LOCK_RELEASE(new2);
                break;
        }

        /* if new2 is created but never used, destroy it */
        if (splitted == 0 && new2 != NULL)
                ldlm_lock_destroy_nolock(new2);

        /* At this point we're granting the lock request. */
        req->l_granted_mode = req->l_req_mode;

        /* Add req to the granted queue before calling ldlm_reprocess_all(). */
        if (!added) {
                cfs_list_del_init(&req->l_res_link);
                /* insert new lock before ownlocks in list. */
                ldlm_resource_add_lock(res, ownlocks, req);
        }

        if (*flags != LDLM_FL_WAIT_NOREPROC) {
                if (first_enq) {
                        /* If this is an unlock, reprocess the waitq and
                         * send completion ASTs for locks that can now be
                         * granted. The only problem with doing this
                         * reprocessing here is that the completion ASTs for
                         * newly granted locks will be sent before the unlock
                         * completion is sent. It shouldn't be an issue. Also
                         * note that ldlm_process_flock_lock() will recurse,
                         * but only once because first_enq will be false from
                         * ldlm_reprocess_queue. */
                        if ((mode == LCK_NL) && overlaps) {
                                CFS_LIST_HEAD(rpc_list);
                                int rc;
restart:
                                ldlm_reprocess_queue(res, &res->lr_waiting,
                                                     &rpc_list);

                                unlock_res_and_lock(req);
                                rc = ldlm_run_ast_work(ns, &rpc_list,
                                                       LDLM_WORK_CP_AST);
                                lock_res_and_lock(req);
                                if (rc == -ERESTART)
                                        GOTO(restart, -ERESTART);
                        }
                } else {
                        LASSERT(req->l_completion_ast);
                        ldlm_add_ast_work_item(req, NULL, work_list);
                }
        }

        /* In case we're reprocessing the requested lock we can't destroy
         * it until after calling ldlm_add_ast_work_item() above so that it
         * can bump the reference count on req. Otherwise req could be freed
         * before the completion AST can be sent. */
        if (added)
                ldlm_flock_destroy(req, mode, *flags);

        ldlm_resource_dump(D_INFO, res);
        RETURN(LDLM_ITER_CONTINUE);
}

struct ldlm_flock_wait_data {
        struct ldlm_lock *fwd_lock;
        int               fwd_generation;
};

static void
ldlm_flock_interrupted_wait(void *data)
{
        struct ldlm_lock *lock;
        ENTRY;

        lock = ((struct ldlm_flock_wait_data *)data)->fwd_lock;

        /* take lock off the deadlock detection waitq. */
        cfs_spin_lock(&ldlm_flock_waitq_lock);
        cfs_list_del_init(&lock->l_flock_waitq);
        cfs_spin_unlock(&ldlm_flock_waitq_lock);

        /* client side - set flag to prevent lock from being put on lru list */
        lock->l_flags |= LDLM_FL_CBPENDING;

        EXIT;
}

/**
 * Flock completion callback function.
 *
 * \param lock [in,out]: A lock to be handled
 * \param flags    [in]: flags
 * \param *data    [in]: ldlm_work_cp_ast_lock() will use ldlm_cb_set_arg
 *
 * \retval 0    : success
 * \retval <0   : failure
 */
int
ldlm_flock_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        cfs_flock_t                    *getlk = lock->l_ast_data;
        struct obd_device              *obd;
        struct obd_import              *imp = NULL;
        struct ldlm_flock_wait_data     fwd;
        struct l_wait_info              lwi;
        ldlm_error_t                    err;
        int                             rc = 0;
        ENTRY;

        CDEBUG(D_DLMTRACE, "flags: 0x%x data: %p getlk: %p\n",
               flags, data, getlk);

        /* Import invalidation. We need to actually release the lock
         * references being held, so that it can go away. No point in
         * holding the lock even if app still believes it has it, since
         * server already dropped it anyway. Only for granted locks too. */
        if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
            (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
                if (lock->l_req_mode == lock->l_granted_mode &&
                    lock->l_granted_mode != LCK_NL &&
                    NULL == data)
                        ldlm_lock_decref_internal(lock, lock->l_req_mode);

                /* Need to wake up the waiter if we were evicted */
                cfs_waitq_signal(&lock->l_waitq);
                RETURN(0);
        }

        LASSERT(flags != LDLM_FL_WAIT_NOREPROC);

        if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
                       LDLM_FL_BLOCK_CONV))) {
                if (NULL == data)
                        /* mds granted the lock in the reply */
                        goto granted;
                /* CP AST RPC: lock got granted, wake it up */
                cfs_waitq_signal(&lock->l_waitq);
                RETURN(0);
        }

        LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
                   "sleeping");
        fwd.fwd_lock = lock;
        obd = class_exp2obd(lock->l_conn_export);

        /* if this is a local lock, there is no import */
        if (NULL != obd)
                imp = obd->u.cli.cl_import;

        if (NULL != imp) {
                cfs_spin_lock(&imp->imp_lock);
                fwd.fwd_generation = imp->imp_generation;
                cfs_spin_unlock(&imp->imp_lock);
        }

        lwi = LWI_TIMEOUT_INTR(0, NULL, ldlm_flock_interrupted_wait, &fwd);

        /* Go to sleep until the lock is granted. */
        rc = l_wait_event(lock->l_waitq, is_granted_or_cancelled(lock), &lwi);

        if (rc) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
                           rc);
                RETURN(rc);
        }

granted:
        OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);

        if (lock->l_destroyed) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
                RETURN(0);
        }

        if (lock->l_flags & LDLM_FL_FAILED) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed");
                RETURN(-EIO);
        }

        if (rc) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
                           rc);
                RETURN(rc);
        }

        LDLM_DEBUG(lock, "client-side enqueue granted");

        /* take lock off the deadlock detection waitq. */
        cfs_spin_lock(&ldlm_flock_waitq_lock);
        cfs_list_del_init(&lock->l_flock_waitq);
        cfs_spin_unlock(&ldlm_flock_waitq_lock);

        lock_res_and_lock(lock);
        /* ldlm_lock_enqueue() has already placed lock on the granted list. */
        cfs_list_del_init(&lock->l_res_link);

        if (flags & LDLM_FL_TEST_LOCK) {
                /* fcntl(F_GETLK) request */
                /* The old mode was saved in getlk->fl_type so that if the mode
                 * in the lock changes we can decref the appropriate refcount. */
                ldlm_flock_destroy(lock, cfs_flock_type(getlk),
                                   LDLM_FL_WAIT_NOREPROC);
                switch (lock->l_granted_mode) {
                case LCK_PR:
                        cfs_flock_set_type(getlk, F_RDLCK);
                        break;
                case LCK_PW:
                        cfs_flock_set_type(getlk, F_WRLCK);
                        break;
                default:
                        cfs_flock_set_type(getlk, F_UNLCK);
                }
                cfs_flock_set_pid(getlk,
                                  (pid_t)lock->l_policy_data.l_flock.pid);
                cfs_flock_set_start(getlk,
                                    (loff_t)lock->l_policy_data.l_flock.start);
                cfs_flock_set_end(getlk,
                                  (loff_t)lock->l_policy_data.l_flock.end);
        } else {
                int noreproc = LDLM_FL_WAIT_NOREPROC;

                /* We need to reprocess the lock to do merges or splits
                 * with existing locks owned by this process. */
                ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
        }
        unlock_res_and_lock(lock);
        RETURN(0);
}
EXPORT_SYMBOL(ldlm_flock_completion_ast);

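/**
 * Flock blocking callback, invoked only with LDLM_CB_CANCELING when a
 * blocked flock request is cancelled: take the lock off the deadlock
 * detection waitq so it no longer takes part in cycle checks.
 */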
int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                            void *data, int flag)
{
        ENTRY;

        LASSERT(lock);
        LASSERT(flag == LDLM_CB_CANCELING);

        /* take lock off the deadlock detection waitq. */
        cfs_spin_lock(&ldlm_flock_waitq_lock);
        cfs_list_del_init(&lock->l_flock_waitq);
        cfs_spin_unlock(&ldlm_flock_waitq_lock);
        RETURN(0);
}

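/*
 * Converters between the wire and local representations of flock policy
 * data. The "wire18" variant is for pre-2.1 (1.8) clients, which carry no
 * owner field on the wire and rely solely on the pid for ownership, so the
 * pid is reused as the owner (see LU-104).
 */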
void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy,
                                       ldlm_policy_data_t *lpolicy)
{
        memset(lpolicy, 0, sizeof(*lpolicy));
        lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
        lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
        lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
        /* Compat code, old clients had no idea about owner field and
         * relied solely on pid for ownership. Introduced in LU-104, 2.1,
         * April 2011 */
        lpolicy->l_flock.owner = wpolicy->l_flock.lfw_pid;
}

void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy,
                                       ldlm_policy_data_t *lpolicy)
{
        memset(lpolicy, 0, sizeof(*lpolicy));
        lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
        lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
        lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
        lpolicy->l_flock.owner = wpolicy->l_flock.lfw_owner;
}

void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
                                     ldlm_wire_policy_data_t *wpolicy)
{
        memset(wpolicy, 0, sizeof(*wpolicy));
        wpolicy->l_flock.lfw_start = lpolicy->l_flock.start;
        wpolicy->l_flock.lfw_end = lpolicy->l_flock.end;
        wpolicy->l_flock.lfw_pid = lpolicy->l_flock.pid;
        wpolicy->l_flock.lfw_owner = lpolicy->l_flock.owner;
}