lustre/ldlm/ldlm_flock.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2003 Hewlett-Packard Development Company LP.
 * Developed under the sponsorship of the US Government under
 * Subcontract No. B514193
 *
 * Copyright (c) 2011 Whamcloud, Inc.
 *
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
#include <lustre_dlm.h>
#include <obd_support.h>
#include <obd_class.h>
#include <lustre_lib.h>
#include <libcfs/list.h>
#else
#include <liblustre.h>
#include <obd_class.h>
#endif

#include "ldlm_internal.h"

#define l_flock_waitq   l_lru

/**
 * Wait queue for Posix lock deadlock detection, added with
 * ldlm_lock::l_flock_waitq.
 */
static CFS_LIST_HEAD(ldlm_flock_waitq);
/**
 * Lock protecting access to ldlm_flock_waitq.
 */
cfs_spinlock_t ldlm_flock_waitq_lock = CFS_SPIN_LOCK_UNLOCKED;

int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                            void *data, int flag);

/**
 * list_for_remaining_safe - iterate over the remaining entries in a list
 *              and safeguard against removal of a list entry.
 * \param pos   the &struct list_head to use as a loop counter. pos MUST
 *              have been initialized prior to using it in this macro.
 * \param n     another &struct list_head to use as temporary storage
 * \param head  the head for your list.
 */
#define list_for_remaining_safe(pos, n, head) \
        for (n = pos->next; pos != (head); pos = n, n = pos->next)
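/*
 * Usage sketch (illustrative only, mirroring the scan over an owner's
 * granted locks further below; pos must already point into the list):
 *
 *      cfs_list_t *pos = ownlocks, *n;
 *
 *      list_for_remaining_safe(pos, n, &res->lr_granted) {
 *              lock = cfs_list_entry(pos, struct ldlm_lock, l_res_link);
 *              ... the entry at pos may safely be removed here ...
 *      }
 */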

static inline int
ldlm_same_flock_owner(struct ldlm_lock *lock, struct ldlm_lock *new)
{
        return((new->l_policy_data.l_flock.owner ==
                lock->l_policy_data.l_flock.owner) &&
               (new->l_export == lock->l_export));
}

static inline int
ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new)
{
        return((new->l_policy_data.l_flock.start <=
                lock->l_policy_data.l_flock.end) &&
               (new->l_policy_data.l_flock.end >=
                lock->l_policy_data.l_flock.start));
}

static inline void
ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, int flags)
{
        ENTRY;

        LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%x)",
                   mode, flags);

        /* Safe to not lock here, since it should be empty anyway */
        LASSERT(cfs_list_empty(&lock->l_flock_waitq));

        cfs_list_del_init(&lock->l_res_link);
        if (flags == LDLM_FL_WAIT_NOREPROC &&
            !(lock->l_flags & LDLM_FL_FAILED)) {
                /* client side - set a flag to prevent sending a CANCEL */
                lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;

                /* When we reach here we are under lock_res_and_lock(), so we
                 * need to call the nolock version of
                 * ldlm_lock_decref_internal(). */
                ldlm_lock_decref_internal_nolock(lock, mode);
        }

        ldlm_lock_destroy_nolock(lock);
        EXIT;
}

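/*
 * Added note: detect whether granting \a req would create a wait cycle.
 * Hypothetical example: owner A is blocked by a lock of owner B, and B's
 * queued request is in turn blocked by a lock held by A; following the
 * blocking_owner/blocking_export links from req's blocker leads back to
 * req's own owner, so this returns 1 and the caller fails the enqueue
 * with -EDEADLK.
 */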
static int
ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *blocking_lock)
{
        struct obd_export *req_export = req->l_export;
        struct obd_export *blocking_export = blocking_lock->l_export;
        __u64 req_owner = req->l_policy_data.l_flock.owner;
        __u64 blocking_owner = blocking_lock->l_policy_data.l_flock.owner;
        struct ldlm_lock *lock;

        cfs_spin_lock(&ldlm_flock_waitq_lock);
restart:
        cfs_list_for_each_entry(lock, &ldlm_flock_waitq, l_flock_waitq) {
                if ((lock->l_policy_data.l_flock.owner != blocking_owner) ||
                    (lock->l_export != blocking_export))
                        continue;

                blocking_owner = lock->l_policy_data.l_flock.blocking_owner;
                blocking_export = (struct obd_export *)
                        lock->l_policy_data.l_flock.blocking_export;
                if (blocking_owner == req_owner &&
                    blocking_export == req_export) {
                        cfs_spin_unlock(&ldlm_flock_waitq_lock);
                        return 1;
                }

                goto restart;
        }
        cfs_spin_unlock(&ldlm_flock_waitq_lock);

        return 0;
}

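/*
 * Added summary: process a POSIX (flock) lock request against the locks
 * already granted on its resource.  Conflicting requests are queued on
 * lr_waiting (or return immediately for LDLM_FL_BLOCK_NOWAIT and
 * LDLM_FL_TEST_LOCK), deadlocks are rejected via ldlm_flock_deadlock(),
 * and locks from the same owner are merged or split before the request
 * is granted.
 */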
int
ldlm_process_flock_lock(struct ldlm_lock *req, int *flags, int first_enq,
                        ldlm_error_t *err, cfs_list_t *work_list)
{
        struct ldlm_resource *res = req->l_resource;
        struct ldlm_namespace *ns = ldlm_res_to_ns(res);
        cfs_list_t *tmp;
        cfs_list_t *ownlocks = NULL;
        struct ldlm_lock *lock = NULL;
        struct ldlm_lock *new = req;
        struct ldlm_lock *new2 = NULL;
        ldlm_mode_t mode = req->l_req_mode;
        int local = ns_is_client(ns);
        int added = (mode == LCK_NL);
        int overlaps = 0;
        int splitted = 0;
        const struct ldlm_callback_suite null_cbs = { NULL };
        ENTRY;

        CDEBUG(D_DLMTRACE, "flags %#x owner "LPU64" pid %u mode %u start "LPU64
               " end "LPU64"\n", *flags, new->l_policy_data.l_flock.owner,
               new->l_policy_data.l_flock.pid, mode,
               req->l_policy_data.l_flock.start,
               req->l_policy_data.l_flock.end);

        *err = ELDLM_OK;

        if (local) {
                /* No blocking ASTs are sent to the clients for
                 * Posix file & record locks */
                req->l_blocking_ast = NULL;
        } else {
                /* Called on the server for lock cancels. */
                req->l_blocking_ast = ldlm_flock_blocking_ast;
        }

reprocess:
        if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
                /* This loop determines where this process's locks start
                 * in the resource lr_granted list. */
                cfs_list_for_each(tmp, &res->lr_granted) {
                        lock = cfs_list_entry(tmp, struct ldlm_lock,
                                              l_res_link);
                        if (ldlm_same_flock_owner(lock, req)) {
                                ownlocks = tmp;
                                break;
                        }
                }
        } else {
                lockmode_verify(mode);

                /* This loop determines if there are existing locks
                 * that conflict with the new lock request. */
                cfs_list_for_each(tmp, &res->lr_granted) {
                        lock = cfs_list_entry(tmp, struct ldlm_lock,
                                              l_res_link);

                        if (ldlm_same_flock_owner(lock, req)) {
                                if (!ownlocks)
                                        ownlocks = tmp;
                                continue;
                        }

                        /* locks are compatible, overlap doesn't matter */
                        if (lockmode_compat(lock->l_granted_mode, mode))
                                continue;

                        if (!ldlm_flocks_overlap(lock, req))
                                continue;

                        if (!first_enq)
                                RETURN(LDLM_ITER_CONTINUE);

                        if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                ldlm_flock_destroy(req, mode, *flags);
                                *err = -EAGAIN;
                                RETURN(LDLM_ITER_STOP);
                        }

                        if (*flags & LDLM_FL_TEST_LOCK) {
                                ldlm_flock_destroy(req, mode, *flags);
                                req->l_req_mode = lock->l_granted_mode;
                                req->l_policy_data.l_flock.pid =
                                        lock->l_policy_data.l_flock.pid;
                                req->l_policy_data.l_flock.start =
                                        lock->l_policy_data.l_flock.start;
                                req->l_policy_data.l_flock.end =
                                        lock->l_policy_data.l_flock.end;
                                *flags |= LDLM_FL_LOCK_CHANGED;
                                RETURN(LDLM_ITER_STOP);
                        }

                        if (ldlm_flock_deadlock(req, lock)) {
                                ldlm_flock_destroy(req, mode, *flags);
                                *err = -EDEADLK;
                                RETURN(LDLM_ITER_STOP);
                        }

                        req->l_policy_data.l_flock.blocking_owner =
                                lock->l_policy_data.l_flock.owner;
                        req->l_policy_data.l_flock.blocking_export =
                                lock->l_export;

                        LASSERT(cfs_list_empty(&req->l_flock_waitq));
                        cfs_spin_lock(&ldlm_flock_waitq_lock);
                        cfs_list_add_tail(&req->l_flock_waitq,
                                          &ldlm_flock_waitq);
                        cfs_spin_unlock(&ldlm_flock_waitq_lock);

                        ldlm_resource_add_lock(res, &res->lr_waiting, req);
                        *flags |= LDLM_FL_BLOCK_GRANTED;
                        RETURN(LDLM_ITER_STOP);
                }
        }

        if (*flags & LDLM_FL_TEST_LOCK) {
                ldlm_flock_destroy(req, mode, *flags);
                req->l_req_mode = LCK_NL;
                *flags |= LDLM_FL_LOCK_CHANGED;
                RETURN(LDLM_ITER_STOP);
        }

        /* In case we had slept on this lock request take it off of the
         * deadlock detection waitq. */
        cfs_spin_lock(&ldlm_flock_waitq_lock);
        cfs_list_del_init(&req->l_flock_waitq);
        cfs_spin_unlock(&ldlm_flock_waitq_lock);

        /* Scan the locks owned by this process that overlap this request.
         * We may have to merge or split existing locks. */

        if (!ownlocks)
                ownlocks = &res->lr_granted;

        list_for_remaining_safe(ownlocks, tmp, &res->lr_granted) {
                lock = cfs_list_entry(ownlocks, struct ldlm_lock, l_res_link);

                if (!ldlm_same_flock_owner(lock, new))
                        break;

                if (lock->l_granted_mode == mode) {
                        /* If the modes are the same then we need to process
                         * locks that overlap OR adjoin the new lock. The extra
                         * logic condition is necessary to deal with arithmetic
                         * overflow and underflow. */
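                        /* Added example: a granted lock on [0, 99] adjoins a
                         * new request for [100, 200], so the two are merged
                         * below.  The extra checks guard against end + 1
                         * wrapping around when end == OBD_OBJECT_EOF and
                         * against start - 1 underflowing when start == 0. */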
                        if ((new->l_policy_data.l_flock.start >
                             (lock->l_policy_data.l_flock.end + 1))
                            && (lock->l_policy_data.l_flock.end !=
                                OBD_OBJECT_EOF))
                                continue;

                        if ((new->l_policy_data.l_flock.end <
                             (lock->l_policy_data.l_flock.start - 1))
                            && (lock->l_policy_data.l_flock.start != 0))
                                break;

                        if (new->l_policy_data.l_flock.start <
                            lock->l_policy_data.l_flock.start) {
                                lock->l_policy_data.l_flock.start =
                                        new->l_policy_data.l_flock.start;
                        } else {
                                new->l_policy_data.l_flock.start =
                                        lock->l_policy_data.l_flock.start;
                        }

                        if (new->l_policy_data.l_flock.end >
                            lock->l_policy_data.l_flock.end) {
                                lock->l_policy_data.l_flock.end =
                                        new->l_policy_data.l_flock.end;
                        } else {
                                new->l_policy_data.l_flock.end =
                                        lock->l_policy_data.l_flock.end;
                        }

                        if (added) {
                                ldlm_flock_destroy(lock, mode, *flags);
                        } else {
                                new = lock;
                                added = 1;
                        }
                        continue;
                }

                if (new->l_policy_data.l_flock.start >
                    lock->l_policy_data.l_flock.end)
                        continue;

                if (new->l_policy_data.l_flock.end <
                    lock->l_policy_data.l_flock.start)
                        break;

                ++overlaps;

                if (new->l_policy_data.l_flock.start <=
                    lock->l_policy_data.l_flock.start) {
                        if (new->l_policy_data.l_flock.end <
                            lock->l_policy_data.l_flock.end) {
                                lock->l_policy_data.l_flock.start =
                                        new->l_policy_data.l_flock.end + 1;
                                break;
                        }
                        ldlm_flock_destroy(lock, lock->l_req_mode, *flags);
                        continue;
                }
                if (new->l_policy_data.l_flock.end >=
                    lock->l_policy_data.l_flock.end) {
                        lock->l_policy_data.l_flock.end =
                                new->l_policy_data.l_flock.start - 1;
                        continue;
                }

                /* split the existing lock into two locks */
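                /* Added example: a granted lock on [0, 100] overlapped in the
                 * middle by a request for [40, 60] of a different mode is
                 * split below: new2 keeps [0, 39], the existing lock is
                 * trimmed to [61, 100], and [40, 60] goes to the request. */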

                /* if this is an F_UNLCK operation then we could avoid
                 * allocating a new lock and use the req lock passed in
                 * with the request but this would complicate the reply
                 * processing since updates to req get reflected in the
                 * reply. The client side replays the lock request so
                 * it must see the original lock data in the reply. */

                /* XXX - if ldlm_lock_new() can sleep we should
                 * release the lr_lock, allocate the new lock,
                 * and restart processing this lock. */
                if (!new2) {
                        unlock_res_and_lock(req);
                        new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK,
                                        lock->l_granted_mode, &null_cbs,
                                        NULL, 0);
                        lock_res_and_lock(req);
                        if (!new2) {
                                ldlm_flock_destroy(req, lock->l_granted_mode,
                                                   *flags);
                                *err = -ENOLCK;
                                RETURN(LDLM_ITER_STOP);
                        }
                        goto reprocess;
                }

                splitted = 1;

                new2->l_granted_mode = lock->l_granted_mode;
                new2->l_policy_data.l_flock.pid =
                        new->l_policy_data.l_flock.pid;
                new2->l_policy_data.l_flock.owner =
                        new->l_policy_data.l_flock.owner;
                new2->l_policy_data.l_flock.start =
                        lock->l_policy_data.l_flock.start;
                new2->l_policy_data.l_flock.end =
                        new->l_policy_data.l_flock.start - 1;
                lock->l_policy_data.l_flock.start =
                        new->l_policy_data.l_flock.end + 1;
                new2->l_conn_export = lock->l_conn_export;
                if (lock->l_export != NULL) {
                        new2->l_export = class_export_lock_get(lock->l_export, new2);
                        if (new2->l_export->exp_lock_hash &&
                            cfs_hlist_unhashed(&new2->l_exp_hash))
                                cfs_hash_add(new2->l_export->exp_lock_hash,
                                             &new2->l_remote_handle,
                                             &new2->l_exp_hash);
                }
                if (*flags == LDLM_FL_WAIT_NOREPROC)
                        ldlm_lock_addref_internal_nolock(new2,
                                                         lock->l_granted_mode);

                /* insert new2 at lock */
                ldlm_resource_add_lock(res, ownlocks, new2);
                LDLM_LOCK_RELEASE(new2);
                break;
        }

        /* if new2 is created but never used, destroy it */
        if (splitted == 0 && new2 != NULL)
                ldlm_lock_destroy_nolock(new2);

        /* At this point we're granting the lock request. */
        req->l_granted_mode = req->l_req_mode;

        /* Add req to the granted queue before calling ldlm_reprocess_all(). */
        if (!added) {
                cfs_list_del_init(&req->l_res_link);
                /* insert new lock before ownlocks in list. */
                ldlm_resource_add_lock(res, ownlocks, req);
        }

        if (*flags != LDLM_FL_WAIT_NOREPROC) {
                if (first_enq) {
                        /* If this is an unlock, reprocess the waitq and
                         * send completion ASTs for locks that can now be
                         * granted. The only problem with doing this
                         * reprocessing here is that the completion ASTs for
                         * newly granted locks will be sent before the unlock
                         * completion is sent. It shouldn't be an issue. Also
                         * note that ldlm_process_flock_lock() will recurse,
                         * but only once because first_enq will be false from
                         * ldlm_reprocess_queue. */
                        if ((mode == LCK_NL) && overlaps) {
                                CFS_LIST_HEAD(rpc_list);
                                int rc;
restart:
                                ldlm_reprocess_queue(res, &res->lr_waiting,
                                                     &rpc_list);

                                unlock_res_and_lock(req);
                                rc = ldlm_run_ast_work(&rpc_list,
                                                       LDLM_WORK_CP_AST);
                                lock_res_and_lock(req);
                                if (rc == -ERESTART)
                                        GOTO(restart, -ERESTART);
                        }
                } else {
                        LASSERT(req->l_completion_ast);
                        ldlm_add_ast_work_item(req, NULL, work_list);
                }
        }

        /* In case we're reprocessing the requested lock we can't destroy
         * it until after calling ldlm_add_ast_work_item() above so that it
         * can bump the reference count on req. Otherwise req could be freed
         * before the completion AST can be sent.  */
        if (added)
                ldlm_flock_destroy(req, mode, *flags);

        ldlm_resource_dump(D_INFO, res);
        RETURN(LDLM_ITER_CONTINUE);
}

struct ldlm_flock_wait_data {
        struct ldlm_lock *fwd_lock;
        int               fwd_generation;
};

static void
ldlm_flock_interrupted_wait(void *data)
{
        struct ldlm_lock *lock;
        ENTRY;

        lock = ((struct ldlm_flock_wait_data *)data)->fwd_lock;

        /* take lock off the deadlock detection waitq. */
        cfs_spin_lock(&ldlm_flock_waitq_lock);
        cfs_list_del_init(&lock->l_flock_waitq);
        cfs_spin_unlock(&ldlm_flock_waitq_lock);

        /* client side - set flag to prevent lock from being put on lru list */
        lock->l_flags |= LDLM_FL_CBPENDING;

        EXIT;
}

/**
 * Flock completion callback function.
 *
 * \param lock [in,out]: A lock to be handled
 * \param flags    [in]: flags
 * \param *data    [in]: ldlm_work_cp_ast_lock() will use ldlm_cb_set_arg
 *
 * \retval 0    : success
 * \retval <0   : failure
 */
int
ldlm_flock_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        cfs_flock_t                    *getlk = lock->l_ast_data;
        struct obd_device              *obd;
        struct obd_import              *imp = NULL;
        struct ldlm_flock_wait_data     fwd;
        struct l_wait_info              lwi;
        ldlm_error_t                    err;
        int                             rc = 0;
        ENTRY;

        CDEBUG(D_DLMTRACE, "flags: 0x%x data: %p getlk: %p\n",
               flags, data, getlk);

        /* Import invalidation. We need to actually release the lock
         * references being held, so that it can go away. No point in
         * holding the lock even if app still believes it has it, since
         * server already dropped it anyway. Only for granted locks too. */
        if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
            (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
                if (lock->l_req_mode == lock->l_granted_mode &&
                    lock->l_granted_mode != LCK_NL &&
                    NULL == data)
                        ldlm_lock_decref_internal(lock, lock->l_req_mode);

                /* Need to wake up the waiter if we were evicted */
                cfs_waitq_signal(&lock->l_waitq);
                RETURN(0);
        }

        LASSERT(flags != LDLM_FL_WAIT_NOREPROC);

        if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
                       LDLM_FL_BLOCK_CONV))) {
                if (NULL == data)
                        /* mds granted the lock in the reply */
                        goto granted;
                /* CP AST RPC: lock get granted, wake it up */
                cfs_waitq_signal(&lock->l_waitq);
                RETURN(0);
        }

        LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
                   "sleeping");
        fwd.fwd_lock = lock;
        obd = class_exp2obd(lock->l_conn_export);

        /* if this is a local lock, there is no import */
        if (NULL != obd)
                imp = obd->u.cli.cl_import;

        if (NULL != imp) {
                cfs_spin_lock(&imp->imp_lock);
                fwd.fwd_generation = imp->imp_generation;
                cfs_spin_unlock(&imp->imp_lock);
        }

        lwi = LWI_TIMEOUT_INTR(0, NULL, ldlm_flock_interrupted_wait, &fwd);

        /* Go to sleep until the lock is granted. */
        rc = l_wait_event(lock->l_waitq, is_granted_or_cancelled(lock), &lwi);

        if (rc) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
                           rc);
                RETURN(rc);
        }

granted:
        OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);

        if (lock->l_destroyed) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
                RETURN(0);
        }

        if (lock->l_flags & LDLM_FL_FAILED) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed");
                RETURN(-EIO);
        }

        if (rc) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
                           rc);
                RETURN(rc);
        }

        LDLM_DEBUG(lock, "client-side enqueue granted");

        /* take lock off the deadlock detection waitq. */
        cfs_spin_lock(&ldlm_flock_waitq_lock);
        cfs_list_del_init(&lock->l_flock_waitq);
        cfs_spin_unlock(&ldlm_flock_waitq_lock);

        lock_res_and_lock(lock);
        /* ldlm_lock_enqueue() has already placed lock on the granted list. */
        cfs_list_del_init(&lock->l_res_link);

        if (flags & LDLM_FL_TEST_LOCK) {
                /* fcntl(F_GETLK) request */
                /* The old mode was saved in getlk->fl_type so that if the mode
                 * in the lock changes we can decref the appropriate refcount. */
                ldlm_flock_destroy(lock, cfs_flock_type(getlk),
                                   LDLM_FL_WAIT_NOREPROC);
                switch (lock->l_granted_mode) {
                case LCK_PR:
                        cfs_flock_set_type(getlk, F_RDLCK);
                        break;
                case LCK_PW:
                        cfs_flock_set_type(getlk, F_WRLCK);
                        break;
                default:
                        cfs_flock_set_type(getlk, F_UNLCK);
                }
                cfs_flock_set_pid(getlk,
                                  (pid_t)lock->l_policy_data.l_flock.pid);
                cfs_flock_set_start(getlk,
                                    (loff_t)lock->l_policy_data.l_flock.start);
                cfs_flock_set_end(getlk,
                                  (loff_t)lock->l_policy_data.l_flock.end);
        } else {
                int noreproc = LDLM_FL_WAIT_NOREPROC;

                /* We need to reprocess the lock to do merges or splits
                 * with existing locks owned by this process. */
                ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
        }
        unlock_res_and_lock(lock);
        RETURN(0);
}
EXPORT_SYMBOL(ldlm_flock_completion_ast);

int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                            void *data, int flag)
{
        struct ldlm_namespace *ns;
        ENTRY;

        LASSERT(lock);
        LASSERT(flag == LDLM_CB_CANCELING);

        ns = ldlm_lock_to_ns(lock);

        /* take lock off the deadlock detection waitq. */
        cfs_spin_lock(&ldlm_flock_waitq_lock);
        cfs_list_del_init(&lock->l_flock_waitq);
        cfs_spin_unlock(&ldlm_flock_waitq_lock);
        RETURN(0);
}

void ldlm_flock_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
                                     ldlm_policy_data_t *lpolicy)
{
        memset(lpolicy, 0, sizeof(*lpolicy));
        lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
        lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
        lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
        lpolicy->l_flock.owner = wpolicy->l_flock.lfw_owner;
        /* Compat code, old clients had no idea about owner field and
         * relied solely on pid for ownership. Introduced in 2.1, April 2011 */
        if (!lpolicy->l_flock.owner)
                lpolicy->l_flock.owner = wpolicy->l_flock.lfw_pid;
}

void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
                                     ldlm_wire_policy_data_t *wpolicy)
{
        memset(wpolicy, 0, sizeof(*wpolicy));
        wpolicy->l_flock.lfw_start = lpolicy->l_flock.start;
        wpolicy->l_flock.lfw_end = lpolicy->l_flock.end;
        wpolicy->l_flock.lfw_pid = lpolicy->l_flock.pid;
        wpolicy->l_flock.lfw_owner = lpolicy->l_flock.owner;
}