LU-1306 ldlm: LBUG at ldlm_lock.c:213
[fs/lustre-release.git] / lustre/ldlm/ldlm_flock.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003 Hewlett-Packard Development Company LP.
 * Developed under the sponsorship of the US Government under
 * Subcontract No. B514193
 *
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2011, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
#include <lustre_dlm.h>
#include <obd_support.h>
#include <obd_class.h>
#include <lustre_lib.h>
#include <libcfs/list.h>
#else
#include <liblustre.h>
#include <obd_class.h>
#endif

#include "ldlm_internal.h"

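/*
 * struct ldlm_lock has no dedicated linkage for the flock deadlock
 * detection waitq, so l_lru is aliased for that purpose; this relies on
 * the assumption that a flock lock is never put on the namespace LRU
 * while it sits on an export's exp_flock_wait_list.
 */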
#define l_flock_waitq   l_lru

int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                            void *data, int flag);
/**
 * list_for_remaining_safe - iterate over the remaining entries in a list
 *              and safeguard against removal of a list entry.
 * \param pos   the &struct list_head to use as a loop counter. pos MUST
 *              have been initialized prior to using it in this macro.
 * \param n     another &struct list_head to use as temporary storage
 * \param head  the head for your list.
 */
#define list_for_remaining_safe(pos, n, head) \
        for (n = pos->next; pos != (head); pos = n, n = pos->next)

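/*
 * Example, mirroring the scan in ldlm_process_flock_lock() below: once
 * "ownlocks" has been positioned on the first lock of interest, the rest
 * of the granted list can be walked while the current entry may be
 * unlinked:
 *
 *      ownlocks = &res->lr_granted;
 *      list_for_remaining_safe(ownlocks, tmp, &res->lr_granted) {
 *              lock = cfs_list_entry(ownlocks, struct ldlm_lock,
 *                                    l_res_link);
 *              ...
 *      }
 */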
static inline int
ldlm_same_flock_owner(struct ldlm_lock *lock, struct ldlm_lock *new)
{
        return((new->l_policy_data.l_flock.owner ==
                lock->l_policy_data.l_flock.owner) &&
               (new->l_export == lock->l_export));
}

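/* Two flock locks overlap if their [start, end] byte ranges intersect;
 * both endpoints are inclusive. */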
static inline int
ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new)
{
        return((new->l_policy_data.l_flock.start <=
                lock->l_policy_data.l_flock.end) &&
               (new->l_policy_data.l_flock.end >=
                lock->l_policy_data.l_flock.start));
}

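/* Server side only: link \a req onto its export's deadlock-detection
 * waitq, recording the owner and export of the blocking lock \a lock. */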
static inline void ldlm_flock_blocking_link(struct ldlm_lock *req,
                                            struct ldlm_lock *lock)
{
        /* For server only */
        if (req->l_export == NULL)
                return;

        LASSERT(cfs_list_empty(&req->l_flock_waitq));
        cfs_write_lock(&req->l_export->exp_flock_wait_lock);

        req->l_policy_data.l_flock.blocking_owner =
                lock->l_policy_data.l_flock.owner;
        req->l_policy_data.l_flock.blocking_export =
                class_export_get(lock->l_export);

        cfs_list_add_tail(&req->l_flock_waitq,
                          &req->l_export->exp_flock_wait_list);
        cfs_write_unlock(&req->l_export->exp_flock_wait_lock);
}

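/* Server side only: take \a req off the deadlock-detection waitq and
 * drop the reference it held on the blocking export, if any. */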
static inline void ldlm_flock_blocking_unlink(struct ldlm_lock *req)
{
        /* For server only */
        if (req->l_export == NULL)
                return;

        cfs_write_lock(&req->l_export->exp_flock_wait_lock);
        if (!cfs_list_empty(&req->l_flock_waitq)) {
                cfs_list_del_init(&req->l_flock_waitq);

                class_export_put(req->l_policy_data.l_flock.blocking_export);
                req->l_policy_data.l_flock.blocking_owner = 0;
                req->l_policy_data.l_flock.blocking_export = NULL;
        }
        cfs_write_unlock(&req->l_export->exp_flock_wait_lock);
}

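/*
 * Remove \a lock from the resource queues and destroy it.  In the
 * client-side LDLM_FL_WAIT_NOREPROC case this also drops the reference
 * held on the lock and marks it LOCAL_ONLY so that no CANCEL is sent to
 * the server.  Called with the resource lock held.
 */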
static inline void
ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, int flags)
{
        ENTRY;

        LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%x)",
                   mode, flags);

        /* Safe to not lock here, since it should be empty anyway */
        LASSERT(cfs_list_empty(&lock->l_flock_waitq));

        cfs_list_del_init(&lock->l_res_link);
        if (flags == LDLM_FL_WAIT_NOREPROC &&
            !(lock->l_flags & LDLM_FL_FAILED)) {
                /* client side - set a flag to prevent sending a CANCEL */
                lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;

                /* when reaching here, it is under lock_res_and_lock(), so we
                 * must call the nolock version of ldlm_lock_decref_internal() */
                ldlm_lock_decref_internal_nolock(lock, mode);
        }

        ldlm_lock_destroy_nolock(lock);
        EXIT;
}

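/*
 * Deadlock detection: starting from the owner of \a bl_lock, follow the
 * chain of "blocked by" records across exports.  If the chain leads back
 * to the owner and export of \a req, granting \a req would close a wait
 * cycle, so report a deadlock.  Server side only.
 */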
static int
ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *bl_lock)
{
        struct obd_export *req_exp = req->l_export;
        struct obd_export *bl_exp = bl_lock->l_export;
        struct obd_export *bl_exp_new;
        __u64 req_owner = req->l_policy_data.l_flock.owner;
        __u64 bl_owner = bl_lock->l_policy_data.l_flock.owner;
        struct ldlm_lock *lock;

        /* For server only */
        if (req_exp == NULL)
                return 0;

        class_export_get(bl_exp);
restart:
        cfs_read_lock(&bl_exp->exp_flock_wait_lock);
        cfs_list_for_each_entry(lock, &bl_exp->exp_flock_wait_list,
                                l_flock_waitq) {
                struct ldlm_flock *flock = &lock->l_policy_data.l_flock;

                /* we want to find a waiter from the same client and the
                 * same process */
                if (flock->owner != bl_owner)
                        continue;

                bl_owner = flock->blocking_owner;
                bl_exp_new = class_export_get(flock->blocking_export);
                cfs_read_unlock(&bl_exp->exp_flock_wait_lock);
                class_export_put(bl_exp);
                bl_exp = bl_exp_new;

                if (bl_owner == req_owner && bl_exp == req_exp) {
                        class_export_put(bl_exp);
                        return 1;
                }

                goto restart;
        }
        cfs_read_unlock(&bl_exp->exp_flock_wait_lock);
        class_export_put(bl_exp);

        return 0;
}

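/**
 * Process the flock request \a req against the locks already granted on
 * the resource: on first enqueue, conflicting locks cause the request to
 * block (or fail, for non-blocking and test requests); otherwise the
 * request is merged with, or splits, the existing locks of the same
 * owner so that the granted queue keeps non-overlapping ranges.  Called
 * with the resource lock held.
 */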
int
ldlm_process_flock_lock(struct ldlm_lock *req, int *flags, int first_enq,
                        ldlm_error_t *err, cfs_list_t *work_list)
{
        struct ldlm_resource *res = req->l_resource;
        struct ldlm_namespace *ns = ldlm_res_to_ns(res);
        cfs_list_t *tmp;
        cfs_list_t *ownlocks = NULL;
        struct ldlm_lock *lock = NULL;
        struct ldlm_lock *new = req;
        struct ldlm_lock *new2 = NULL;
        ldlm_mode_t mode = req->l_req_mode;
        int local = ns_is_client(ns);
        int added = (mode == LCK_NL);
        int overlaps = 0;
        int splitted = 0;
        const struct ldlm_callback_suite null_cbs = { NULL };
        ENTRY;

        CDEBUG(D_DLMTRACE, "flags %#x owner "LPU64" pid %u mode %u start "LPU64
               " end "LPU64"\n", *flags, new->l_policy_data.l_flock.owner,
               new->l_policy_data.l_flock.pid, mode,
               req->l_policy_data.l_flock.start,
               req->l_policy_data.l_flock.end);

        *err = ELDLM_OK;

        if (local) {
                /* No blocking ASTs are sent to the clients for
                 * Posix file & record locks */
                req->l_blocking_ast = NULL;
        } else {
                /* Called on the server for lock cancels. */
                req->l_blocking_ast = ldlm_flock_blocking_ast;
        }

reprocess:
        if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
                /* This loop determines where this process's locks start
                 * in the resource lr_granted list. */
                cfs_list_for_each(tmp, &res->lr_granted) {
                        lock = cfs_list_entry(tmp, struct ldlm_lock,
                                              l_res_link);
                        if (ldlm_same_flock_owner(lock, req)) {
                                ownlocks = tmp;
                                break;
                        }
                }
        } else {
                lockmode_verify(mode);

                /* This loop determines if there are existing locks
                 * that conflict with the new lock request. */
                cfs_list_for_each(tmp, &res->lr_granted) {
                        lock = cfs_list_entry(tmp, struct ldlm_lock,
                                              l_res_link);

                        if (ldlm_same_flock_owner(lock, req)) {
                                if (!ownlocks)
                                        ownlocks = tmp;
                                continue;
                        }

                        /* locks are compatible, overlap doesn't matter */
                        if (lockmode_compat(lock->l_granted_mode, mode))
                                continue;

                        if (!ldlm_flocks_overlap(lock, req))
                                continue;

                        if (!first_enq)
                                RETURN(LDLM_ITER_CONTINUE);

                        if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                ldlm_flock_destroy(req, mode, *flags);
                                *err = -EAGAIN;
                                RETURN(LDLM_ITER_STOP);
                        }

                        if (*flags & LDLM_FL_TEST_LOCK) {
                                ldlm_flock_destroy(req, mode, *flags);
                                req->l_req_mode = lock->l_granted_mode;
                                req->l_policy_data.l_flock.pid =
                                        lock->l_policy_data.l_flock.pid;
                                req->l_policy_data.l_flock.start =
                                        lock->l_policy_data.l_flock.start;
                                req->l_policy_data.l_flock.end =
                                        lock->l_policy_data.l_flock.end;
                                *flags |= LDLM_FL_LOCK_CHANGED;
                                RETURN(LDLM_ITER_STOP);
                        }

                        if (ldlm_flock_deadlock(req, lock)) {
                                ldlm_flock_destroy(req, mode, *flags);
                                *err = -EDEADLK;
                                RETURN(LDLM_ITER_STOP);
                        }

                        ldlm_flock_blocking_link(req, lock);
                        ldlm_resource_add_lock(res, &res->lr_waiting, req);
                        *flags |= LDLM_FL_BLOCK_GRANTED;
                        RETURN(LDLM_ITER_STOP);
                }
        }

        if (*flags & LDLM_FL_TEST_LOCK) {
                ldlm_flock_destroy(req, mode, *flags);
                req->l_req_mode = LCK_NL;
                *flags |= LDLM_FL_LOCK_CHANGED;
                RETURN(LDLM_ITER_STOP);
        }

        /* In case we had slept on this lock request, take it off the
         * deadlock detection waitq. */
        ldlm_flock_blocking_unlink(req);

        /* Scan the locks owned by this process that overlap this request.
         * We may have to merge or split existing locks. */

        if (!ownlocks)
                ownlocks = &res->lr_granted;

        list_for_remaining_safe(ownlocks, tmp, &res->lr_granted) {
                lock = cfs_list_entry(ownlocks, struct ldlm_lock, l_res_link);

                if (!ldlm_same_flock_owner(lock, new))
                        break;

                if (lock->l_granted_mode == mode) {
                        /* If the modes are the same then we need to process
                         * locks that overlap OR adjoin the new lock. The extra
                         * logic condition is necessary to deal with arithmetic
                         * overflow and underflow. */
                        if ((new->l_policy_data.l_flock.start >
                             (lock->l_policy_data.l_flock.end + 1))
                            && (lock->l_policy_data.l_flock.end !=
                                OBD_OBJECT_EOF))
                                continue;

                        if ((new->l_policy_data.l_flock.end <
                             (lock->l_policy_data.l_flock.start - 1))
                            && (lock->l_policy_data.l_flock.start != 0))
                                break;

                        if (new->l_policy_data.l_flock.start <
                            lock->l_policy_data.l_flock.start) {
                                lock->l_policy_data.l_flock.start =
                                        new->l_policy_data.l_flock.start;
                        } else {
                                new->l_policy_data.l_flock.start =
                                        lock->l_policy_data.l_flock.start;
                        }

                        if (new->l_policy_data.l_flock.end >
                            lock->l_policy_data.l_flock.end) {
                                lock->l_policy_data.l_flock.end =
                                        new->l_policy_data.l_flock.end;
                        } else {
                                new->l_policy_data.l_flock.end =
                                        lock->l_policy_data.l_flock.end;
                        }

                        if (added) {
                                ldlm_flock_destroy(lock, mode, *flags);
                        } else {
                                new = lock;
                                added = 1;
                        }
                        continue;
                }

                if (new->l_policy_data.l_flock.start >
                    lock->l_policy_data.l_flock.end)
                        continue;

                if (new->l_policy_data.l_flock.end <
                    lock->l_policy_data.l_flock.start)
                        break;

                ++overlaps;

                if (new->l_policy_data.l_flock.start <=
                    lock->l_policy_data.l_flock.start) {
                        if (new->l_policy_data.l_flock.end <
                            lock->l_policy_data.l_flock.end) {
                                lock->l_policy_data.l_flock.start =
                                        new->l_policy_data.l_flock.end + 1;
                                break;
                        }
                        ldlm_flock_destroy(lock, lock->l_req_mode, *flags);
                        continue;
                }
                if (new->l_policy_data.l_flock.end >=
                    lock->l_policy_data.l_flock.end) {
                        lock->l_policy_data.l_flock.end =
                                new->l_policy_data.l_flock.start - 1;
                        continue;
                }

                /* split the existing lock into two locks */

                /* if this is an F_UNLCK operation then we could avoid
                 * allocating a new lock and use the req lock passed in
                 * with the request but this would complicate the reply
                 * processing since updates to req get reflected in the
                 * reply. The client side replays the lock request so
                 * it must see the original lock data in the reply. */

                /* XXX - if ldlm_lock_create() can sleep we should
                 * release the lr_lock, allocate the new lock,
                 * and restart processing this lock. */
                if (!new2) {
                        unlock_res_and_lock(req);
                        new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK,
                                        lock->l_granted_mode, &null_cbs,
                                        NULL, 0);
                        lock_res_and_lock(req);
                        if (!new2) {
                                ldlm_flock_destroy(req, lock->l_granted_mode,
                                                   *flags);
                                *err = -ENOLCK;
                                RETURN(LDLM_ITER_STOP);
                        }
                        goto reprocess;
                }

                splitted = 1;

                new2->l_granted_mode = lock->l_granted_mode;
                new2->l_policy_data.l_flock.pid =
                        new->l_policy_data.l_flock.pid;
                new2->l_policy_data.l_flock.owner =
                        new->l_policy_data.l_flock.owner;
                new2->l_policy_data.l_flock.start =
                        lock->l_policy_data.l_flock.start;
                new2->l_policy_data.l_flock.end =
                        new->l_policy_data.l_flock.start - 1;
                lock->l_policy_data.l_flock.start =
                        new->l_policy_data.l_flock.end + 1;
                new2->l_conn_export = lock->l_conn_export;
                if (lock->l_export != NULL) {
                        new2->l_export = class_export_lock_get(lock->l_export,
                                                               new2);
                        if (new2->l_export->exp_lock_hash &&
                            cfs_hlist_unhashed(&new2->l_exp_hash))
                                cfs_hash_add(new2->l_export->exp_lock_hash,
                                             &new2->l_remote_handle,
                                             &new2->l_exp_hash);
                }
                if (*flags == LDLM_FL_WAIT_NOREPROC)
                        ldlm_lock_addref_internal_nolock(new2,
                                                         lock->l_granted_mode);

                /* insert new2 at lock */
                ldlm_resource_add_lock(res, ownlocks, new2);
                LDLM_LOCK_RELEASE(new2);
                break;
        }

        /* if new2 is created but never used, destroy it */
        if (splitted == 0 && new2 != NULL)
                ldlm_lock_destroy_nolock(new2);

        /* At this point we're granting the lock request. */
        req->l_granted_mode = req->l_req_mode;

        /* Add req to the granted queue before calling ldlm_reprocess_all(). */
        if (!added) {
                cfs_list_del_init(&req->l_res_link);
                /* insert new lock before ownlocks in list. */
                ldlm_resource_add_lock(res, ownlocks, req);
        }

        if (*flags != LDLM_FL_WAIT_NOREPROC) {
#ifdef HAVE_SERVER_SUPPORT
                if (first_enq) {
                        /* If this is an unlock, reprocess the waitq and
                         * send completion ASTs for locks that can now be
                         * granted. The only problem with doing this
                         * reprocessing here is that the completion ASTs for
                         * newly granted locks will be sent before the unlock
                         * completion is sent. It shouldn't be an issue. Also
                         * note that ldlm_process_flock_lock() will recurse,
                         * but only once because first_enq will be false from
                         * ldlm_reprocess_queue. */
                        if ((mode == LCK_NL) && overlaps) {
                                CFS_LIST_HEAD(rpc_list);
                                int rc;
restart:
                                ldlm_reprocess_queue(res, &res->lr_waiting,
                                                     &rpc_list);

                                unlock_res_and_lock(req);
                                rc = ldlm_run_ast_work(ns, &rpc_list,
                                                       LDLM_WORK_CP_AST);
                                lock_res_and_lock(req);
                                if (rc == -ERESTART)
                                        GOTO(restart, -ERESTART);
                        }
                } else {
                        LASSERT(req->l_completion_ast);
                        ldlm_add_ast_work_item(req, NULL, work_list);
                }
#else /* !HAVE_SERVER_SUPPORT */
                /* The only possible client-side caller of this policy
                 * function is ldlm_flock_completion_ast(), and it always
                 * passes the LDLM_FL_WAIT_NOREPROC flag. */
                CERROR("Illegal parameter for client-side-only module.\n");
                LBUG();
#endif /* HAVE_SERVER_SUPPORT */
        }

        /* In case we're reprocessing the requested lock we can't destroy
         * it until after calling ldlm_add_ast_work_item() above, so that
         * it can bump the reference count on req. Otherwise req could be
         * freed before the completion AST can be sent. */
        if (added)
                ldlm_flock_destroy(req, mode, *flags);

        ldlm_resource_dump(D_INFO, res);
        RETURN(LDLM_ITER_CONTINUE);
}

struct ldlm_flock_wait_data {
        struct ldlm_lock *fwd_lock;
        int               fwd_generation;
};

static void
ldlm_flock_interrupted_wait(void *data)
{
        struct ldlm_lock *lock;
        ENTRY;

        lock = ((struct ldlm_flock_wait_data *)data)->fwd_lock;

        /* take lock off the deadlock detection waitq. */
        ldlm_flock_blocking_unlink(lock);

        /* client side - set flag to prevent lock from being put on lru list */
        lock_res_and_lock(lock);
        lock->l_flags |= LDLM_FL_CBPENDING;
        unlock_res_and_lock(lock);

        EXIT;
}

/**
 * Flock completion callback function.
 *
 * \param lock [in,out]: A lock to be handled
 * \param flags    [in]: flags
 * \param data     [in]: ldlm_work_cp_ast_lock() will use ldlm_cb_set_arg
 *
 * \retval 0    : success
 * \retval <0   : failure
 */
int
ldlm_flock_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        cfs_flock_t                    *getlk = lock->l_ast_data;
        struct obd_device              *obd;
        struct obd_import              *imp = NULL;
        struct ldlm_flock_wait_data     fwd;
        struct l_wait_info              lwi;
        ldlm_error_t                    err;
        int                             rc = 0;
        ENTRY;

        CDEBUG(D_DLMTRACE, "flags: 0x%x data: %p getlk: %p\n",
               flags, data, getlk);

        /* Import invalidation. We need to actually release the lock
         * references being held, so that the lock can go away. There is
         * no point in holding the lock even if the app still believes it
         * has it, since the server already dropped it anyway. This applies
         * only to granted locks. */
        if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
            (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
                if (lock->l_req_mode == lock->l_granted_mode &&
                    lock->l_granted_mode != LCK_NL &&
                    NULL == data)
                        ldlm_lock_decref_internal(lock, lock->l_req_mode);

                /* Need to wake up the waiter if we were evicted */
                cfs_waitq_signal(&lock->l_waitq);
                RETURN(0);
        }

        LASSERT(flags != LDLM_FL_WAIT_NOREPROC);

        if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
                       LDLM_FL_BLOCK_CONV))) {
                if (NULL == data)
                        /* MDS granted the lock in the reply */
                        goto granted;
                /* CP AST RPC: lock got granted, wake it up */
                cfs_waitq_signal(&lock->l_waitq);
                RETURN(0);
        }

        LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
                   "sleeping");
        fwd.fwd_lock = lock;
        obd = class_exp2obd(lock->l_conn_export);

        /* if this is a local lock, there is no import */
        if (NULL != obd)
                imp = obd->u.cli.cl_import;

        if (NULL != imp) {
                cfs_spin_lock(&imp->imp_lock);
                fwd.fwd_generation = imp->imp_generation;
                cfs_spin_unlock(&imp->imp_lock);
        }

        lwi = LWI_TIMEOUT_INTR(0, NULL, ldlm_flock_interrupted_wait, &fwd);

        /* Go to sleep until the lock is granted. */
        rc = l_wait_event(lock->l_waitq, is_granted_or_cancelled(lock), &lwi);

        if (rc) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
                           rc);
                RETURN(rc);
        }

granted:
        OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);

        if (lock->l_destroyed) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
                RETURN(0);
        }

        if (lock->l_flags & LDLM_FL_FAILED) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed");
                RETURN(-EIO);
        }

        if (rc) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
                           rc);
                RETURN(rc);
        }

        LDLM_DEBUG(lock, "client-side enqueue granted");

        /* take lock off the deadlock detection waitq. */
        ldlm_flock_blocking_unlink(lock);

        lock_res_and_lock(lock);
        /* ldlm_lock_enqueue() has already placed lock on the granted list. */
        cfs_list_del_init(&lock->l_res_link);

        if (flags & LDLM_FL_TEST_LOCK) {
                /* fcntl(F_GETLK) request */
                /* The old mode was saved in getlk->fl_type so that if the
                 * mode in the lock changes we can decref the appropriate
                 * refcount. */
                ldlm_flock_destroy(lock, cfs_flock_type(getlk),
                                   LDLM_FL_WAIT_NOREPROC);
                switch (lock->l_granted_mode) {
                case LCK_PR:
                        cfs_flock_set_type(getlk, F_RDLCK);
                        break;
                case LCK_PW:
                        cfs_flock_set_type(getlk, F_WRLCK);
                        break;
                default:
                        cfs_flock_set_type(getlk, F_UNLCK);
                }
                cfs_flock_set_pid(getlk,
                                  (pid_t)lock->l_policy_data.l_flock.pid);
                cfs_flock_set_start(getlk,
                                    (loff_t)lock->l_policy_data.l_flock.start);
                cfs_flock_set_end(getlk,
                                  (loff_t)lock->l_policy_data.l_flock.end);
        } else {
                int noreproc = LDLM_FL_WAIT_NOREPROC;

                /* We need to reprocess the lock to do merges or splits
                 * with existing locks owned by this process. */
                ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
        }
        unlock_res_and_lock(lock);
        RETURN(0);
}
EXPORT_SYMBOL(ldlm_flock_completion_ast);

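/**
 * Flock blocking AST.  Invoked on the server with LDLM_CB_CANCELING when
 * a flock lock is cancelled; its only job is to take the lock off the
 * deadlock-detection waitq.
 */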
int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                            void *data, int flag)
{
        ENTRY;

        LASSERT(lock);
        LASSERT(flag == LDLM_CB_CANCELING);

        /* take lock off the deadlock detection waitq. */
        ldlm_flock_blocking_unlink(lock);
        RETURN(0);
}

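/*
 * Convert flock policy data between the wire format and the local
 * in-memory representation; separate handlers exist for the pre-2.1
 * ("wire18") and 2.1+ ("wire21") layouts, which differ in the owner
 * field.
 */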
void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy,
                                       ldlm_policy_data_t *lpolicy)
{
        memset(lpolicy, 0, sizeof(*lpolicy));
        lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
        lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
        lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
        /* Compat code: old clients have no idea about the owner field and
         * rely solely on the pid for ownership. The owner field was
         * introduced in LU-104, 2.1, April 2011. */
        lpolicy->l_flock.owner = wpolicy->l_flock.lfw_pid;
}

void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy,
                                       ldlm_policy_data_t *lpolicy)
{
        memset(lpolicy, 0, sizeof(*lpolicy));
        lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
        lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
        lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
        lpolicy->l_flock.owner = wpolicy->l_flock.lfw_owner;
}

void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
                                     ldlm_wire_policy_data_t *wpolicy)
{
        memset(wpolicy, 0, sizeof(*wpolicy));
        wpolicy->l_flock.lfw_start = lpolicy->l_flock.start;
        wpolicy->l_flock.lfw_end = lpolicy->l_flock.end;
        wpolicy->l_flock.lfw_pid = lpolicy->l_flock.pid;
        wpolicy->l_flock.lfw_owner = lpolicy->l_flock.owner;
}