Whamcloud - gitweb
LU-14139 statahead: add stats for batch RPC requests
[fs/lustre-release.git] / lustre / ldlm / ldlm_flock.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003 Hewlett-Packard Development Company LP.
24  * Developed under the sponsorship of the US Government under
25  * Subcontract No. B514193
26  *
27  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2010, 2017, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  */
35
36 /**
37  * This file implements POSIX lock type for Lustre.
38  * Its policy properties are start and end of extent and PID.
39  *
40  * These locks are only done through MDS due to POSIX semantics requiring
41  * e.g. that locks could be only partially released and as such split into
42  * two parts, and also that two adjacent locks from the same process may be
43  * merged into a single wider lock.
44  *
45  * Lock modes are mapped like this:
46  * PR and PW for READ and WRITE locks
47  * NL to request a releasing of a portion of the lock
48  *
49  * These flock locks never timeout.
50  */
51
52 #define DEBUG_SUBSYSTEM S_LDLM
53
54 #include <linux/list.h>
55 #include <lustre_dlm.h>
56 #include <obd_support.h>
57 #include <obd_class.h>
58 #include <lustre_lib.h>
59
60 #include "ldlm_internal.h"
61
62 int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
63                             void *data, int flag);
64
65 static inline int
66 ldlm_same_flock_owner(struct ldlm_lock *lock, struct ldlm_lock *new)
67 {
68         return ((new->l_policy_data.l_flock.owner ==
69                  lock->l_policy_data.l_flock.owner) &&
70                 (new->l_export == lock->l_export));
71 }
72
73 static inline int
74 ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new)
75 {
76         return ((new->l_policy_data.l_flock.start <=
77                  lock->l_policy_data.l_flock.end) &&
78                 (new->l_policy_data.l_flock.end >=
79                  lock->l_policy_data.l_flock.start));
80 }
81
82 static inline void ldlm_flock_blocking_link(struct ldlm_lock *req,
83                                             struct ldlm_lock *lock)
84 {
85         /* For server only */
86         if (req->l_export == NULL)
87                 return;
88
89         LASSERT(hlist_unhashed(&req->l_exp_flock_hash));
90
91         req->l_policy_data.l_flock.blocking_owner =
92                 lock->l_policy_data.l_flock.owner;
93         req->l_policy_data.l_flock.blocking_export =
94                 lock->l_export;
95         atomic_set(&req->l_policy_data.l_flock.blocking_refs, 0);
96
97         cfs_hash_add(req->l_export->exp_flock_hash,
98                      &req->l_policy_data.l_flock.owner,
99                      &req->l_exp_flock_hash);
100 }
101
102 static inline void ldlm_flock_blocking_unlink(struct ldlm_lock *req)
103 {
104         /* For server only */
105         if (req->l_export == NULL)
106                 return;
107
108         check_res_locked(req->l_resource);
109         if (req->l_export->exp_flock_hash != NULL &&
110             !hlist_unhashed(&req->l_exp_flock_hash))
111                 cfs_hash_del(req->l_export->exp_flock_hash,
112                              &req->l_policy_data.l_flock.owner,
113                              &req->l_exp_flock_hash);
114 }
115
116 static inline void
117 ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags)
118 {
119         ENTRY;
120
121         LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: %#llx)",
122                    mode, flags);
123
124         /* Safe to not lock here, since it should be empty anyway */
125         LASSERT(hlist_unhashed(&lock->l_exp_flock_hash));
126
127         list_del_init(&lock->l_res_link);
128         if (flags == LDLM_FL_WAIT_NOREPROC) {
129                 /* client side - set a flag to prevent sending a CANCEL */
130                 lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
131
132                 /* when reaching here, it is under lock_res_and_lock(). Thus,
133                  * need call the nolock version of ldlm_lock_decref_internal
134                  */
135                 ldlm_lock_decref_internal_nolock(lock, mode);
136         }
137
138         ldlm_lock_destroy_nolock(lock);
139         EXIT;
140 }
141
142 #ifdef HAVE_SERVER_SUPPORT
143 /**
144  * POSIX locks deadlock detection code.
145  *
146  * Given a new lock \a req and an existing lock \a bl_lock it conflicts
147  * with, we need to iterate through all blocked POSIX locks for this
148  * export and see if there is a deadlock condition arising. (i.e. when
149  * one client holds a lock on something and want a lock on something
150  * else and at the same time another client has the opposite situation).
151  */
152
153 struct ldlm_flock_lookup_cb_data {
154         __u64 *bl_owner;
155         struct ldlm_lock *lock;
156         struct obd_export *exp;
157 };
158
159 static int ldlm_flock_lookup_cb(struct obd_export *exp, void *data)
160 {
161         struct ldlm_flock_lookup_cb_data *cb_data = data;
162         struct ldlm_lock *lock;
163
164         if (exp->exp_failed)
165                 return 0;
166
167         lock = cfs_hash_lookup(exp->exp_flock_hash, cb_data->bl_owner);
168         if (lock == NULL)
169                 return 0;
170
171         /* Stop on first found lock. Same process can't sleep twice */
172         cb_data->lock = lock;
173         cb_data->exp = class_export_get(exp);
174
175         return 1;
176 }
177
178 static int
179 ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *bl_lock)
180 {
181         struct obd_export *req_exp = req->l_export;
182         struct obd_export *bl_exp = bl_lock->l_export;
183         __u64 req_owner = req->l_policy_data.l_flock.owner;
184         __u64 bl_owner = bl_lock->l_policy_data.l_flock.owner;
185
186         /* For server only */
187         if (req_exp == NULL)
188                 return 0;
189
190         class_export_get(bl_exp);
191         while (1) {
192                 struct ldlm_flock_lookup_cb_data cb_data = {
193                         .bl_owner = &bl_owner,
194                         .lock = NULL,
195                         .exp = NULL,
196                 };
197                 struct ptlrpc_connection *bl_exp_conn;
198                 struct obd_export *bl_exp_new;
199                 struct ldlm_lock *lock = NULL;
200                 struct ldlm_flock *flock;
201
202                 bl_exp_conn = bl_exp->exp_connection;
203                 if (bl_exp->exp_flock_hash != NULL) {
204                         int found;
205
206                         found = obd_nid_export_for_each(bl_exp->exp_obd,
207                                                         &bl_exp_conn->c_peer.nid,
208                                                         ldlm_flock_lookup_cb,
209                                                         &cb_data);
210                         if (found)
211                                 lock = cb_data.lock;
212                 }
213                 if (lock == NULL)
214                         break;
215
216                 class_export_put(bl_exp);
217                 bl_exp = cb_data.exp;
218
219                 LASSERT(req != lock);
220                 flock = &lock->l_policy_data.l_flock;
221                 LASSERT(flock->owner == bl_owner);
222                 bl_owner = flock->blocking_owner;
223                 bl_exp_new = class_export_get(flock->blocking_export);
224                 class_export_put(bl_exp);
225
226                 cfs_hash_put(bl_exp->exp_flock_hash, &lock->l_exp_flock_hash);
227                 bl_exp = bl_exp_new;
228
229                 if (bl_exp->exp_failed)
230                         break;
231
232                 if (bl_owner == req_owner &&
233                     nid_same(&bl_exp_conn->c_peer.nid,
234                               &req_exp->exp_connection->c_peer.nid)) {
235                         class_export_put(bl_exp);
236                         return 1;
237                 }
238         }
239         class_export_put(bl_exp);
240
241         return 0;
242 }
243
244 static void ldlm_flock_cancel_on_deadlock(struct ldlm_lock *lock,
245                                           struct list_head *work_list)
246 {
247         CDEBUG(D_INFO, "reprocess deadlock req=%p\n", lock);
248
249         if ((exp_connect_flags(lock->l_export) &
250              OBD_CONNECT_FLOCK_DEAD) == 0) {
251                 CERROR("deadlock found, but client doesn't support flock canceliation\n");
252         } else {
253                 LASSERT(lock->l_completion_ast);
254                 LASSERT(!ldlm_is_ast_sent(lock));
255                 lock->l_flags |= (LDLM_FL_AST_SENT | LDLM_FL_CANCEL_ON_BLOCK |
256                                   LDLM_FL_FLOCK_DEADLOCK);
257                 ldlm_flock_blocking_unlink(lock);
258                 ldlm_resource_unlink_lock(lock);
259                 ldlm_add_ast_work_item(lock, NULL, work_list);
260         }
261 }
262 #endif /* HAVE_SERVER_SUPPORT */
263
264 /**
265  * Process a granting attempt for flock lock.
266  * Must be called under ns lock held.
267  *
268  * This function looks for any conflicts for \a lock in the granted or
269  * waiting queues. The lock is granted if no conflicts are found in
270  * either queue.
271  */
272 int
273 ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
274                         enum ldlm_process_intention intention,
275                         enum ldlm_error *err, struct list_head *work_list)
276 {
277         struct ldlm_resource *res = req->l_resource;
278         struct ldlm_namespace *ns = ldlm_res_to_ns(res);
279         struct ldlm_lock *tmp;
280         struct ldlm_lock *ownlocks = NULL;
281         struct ldlm_lock *lock = NULL;
282         struct ldlm_lock *new = req;
283         struct ldlm_lock *new2 = NULL;
284         enum ldlm_mode mode = req->l_req_mode;
285         int local = ns_is_client(ns);
286         int added = (mode == LCK_NL);
287         int splitted = 0;
288         const struct ldlm_callback_suite null_cbs = { NULL };
289 #ifdef HAVE_SERVER_SUPPORT
290         struct list_head *grant_work = (intention == LDLM_PROCESS_ENQUEUE ?
291                                         NULL : work_list);
292 #endif
293
294         ENTRY;
295         CDEBUG(D_DLMTRACE, "flags %#llx owner %llu pid %u mode %u start "
296                "%llu end %llu\n", *flags,
297                new->l_policy_data.l_flock.owner,
298                new->l_policy_data.l_flock.pid, mode,
299                req->l_policy_data.l_flock.start,
300                req->l_policy_data.l_flock.end);
301
302         *err = ELDLM_OK;
303
304         if (local) {
305                 /* No blocking ASTs are sent to the clients for
306                  * Posix file & record locks
307                  */
308                 req->l_blocking_ast = NULL;
309         } else {
310                 /* Called on the server for lock cancels. */
311                 req->l_blocking_ast = ldlm_flock_blocking_ast;
312         }
313
314 reprocess:
315         if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
316                 /* This loop determines where this processes locks start
317                  * in the resource lr_granted list.
318                  */
319                 list_for_each_entry(lock, &res->lr_granted, l_res_link) {
320                         if (ldlm_same_flock_owner(lock, req)) {
321                                 ownlocks = lock;
322                                 break;
323                         }
324                 }
325         }
326 #ifdef HAVE_SERVER_SUPPORT
327         else {
328                 int reprocess_failed = 0;
329                 lockmode_verify(mode);
330
331                 /* This loop determines if there are existing locks
332                  * that conflict with the new lock request.
333                  */
334                 list_for_each_entry(lock, &res->lr_granted, l_res_link) {
335                         if (ldlm_same_flock_owner(lock, req)) {
336                                 if (!ownlocks)
337                                         ownlocks = lock;
338                                 continue;
339                         }
340
341                         if (req->l_req_mode == LCK_PR &&
342                             lock->l_granted_mode == LCK_PR &&
343                             lock->l_policy_data.l_flock.start <=
344                                 req->l_policy_data.l_flock.start &&
345                             lock->l_policy_data.l_flock.end >=
346                                 req->l_policy_data.l_flock.end) {
347                                 /* there can't be granted WR lock */
348                                 break;
349                         }
350                         /* locks are compatible, overlap doesn't matter */
351                         if (lockmode_compat(lock->l_granted_mode, mode))
352                                 continue;
353
354                         if (!ldlm_flocks_overlap(lock, req))
355                                 continue;
356
357                         if (intention != LDLM_PROCESS_ENQUEUE) {
358                                 if (ldlm_flock_deadlock(req, lock)) {
359                                         ldlm_flock_cancel_on_deadlock(
360                                                 req, grant_work);
361                                         RETURN(LDLM_ITER_CONTINUE);
362                                 }
363                                 reprocess_failed = 1;
364                                 break;
365                         }
366
367                         if (*flags & LDLM_FL_BLOCK_NOWAIT) {
368                                 ldlm_flock_destroy(req, mode, *flags);
369                                 *err = -EAGAIN;
370                                 RETURN(LDLM_ITER_STOP);
371                         }
372
373                         if (*flags & LDLM_FL_TEST_LOCK) {
374                                 ldlm_flock_destroy(req, mode, *flags);
375                                 req->l_req_mode = lock->l_granted_mode;
376                                 req->l_policy_data.l_flock.pid =
377                                         lock->l_policy_data.l_flock.pid;
378                                 req->l_policy_data.l_flock.start =
379                                         lock->l_policy_data.l_flock.start;
380                                 req->l_policy_data.l_flock.end =
381                                         lock->l_policy_data.l_flock.end;
382                                 *flags |= LDLM_FL_LOCK_CHANGED;
383                                 RETURN(LDLM_ITER_STOP);
384                         }
385
386                         /* add lock to blocking list before deadlock
387                          * check to prevent race
388                          */
389                         ldlm_flock_blocking_link(req, lock);
390
391                         if (ldlm_flock_deadlock(req, lock)) {
392                                 ldlm_flock_blocking_unlink(req);
393                                 ldlm_flock_destroy(req, mode, *flags);
394                                 *err = -EDEADLK;
395                                 RETURN(LDLM_ITER_STOP);
396                         }
397
398                         ldlm_resource_add_lock(res, &res->lr_waiting, req);
399                         *flags |= LDLM_FL_BLOCK_GRANTED;
400                         RETURN(LDLM_ITER_STOP);
401                 }
402                 if (reprocess_failed)
403                         RETURN(LDLM_ITER_CONTINUE);
404         }
405
406         if (*flags & LDLM_FL_TEST_LOCK) {
407                 ldlm_flock_destroy(req, mode, *flags);
408                 req->l_req_mode = LCK_NL;
409                 *flags |= LDLM_FL_LOCK_CHANGED;
410                 RETURN(LDLM_ITER_STOP);
411         }
412
413         /* In case we had slept on this lock request take it off of the
414          * deadlock detection hash list.
415          */
416         ldlm_flock_blocking_unlink(req);
417 #endif /* HAVE_SERVER_SUPPORT */
418
419         /* Scan the locks owned by this process to find the insertion point
420          * (as locks are ordered), and to handle overlaps.
421          * We may have to merge or split existing locks.
422          */
423         if (ownlocks)
424                 lock = ownlocks;
425         else
426                 lock = list_entry(&res->lr_granted,
427                                   struct ldlm_lock, l_res_link);
428         list_for_each_entry_safe_from(lock, tmp, &res->lr_granted, l_res_link) {
429                 if (!ldlm_same_flock_owner(lock, new))
430                         break;
431
432                 if (lock->l_granted_mode == mode) {
433                         /* If the modes are the same then we need to process
434                          * locks that overlap OR adjoin the new lock. The extra
435                          * logic condition is necessary to deal with arithmetic
436                          * overflow and underflow.
437                          */
438                         if ((new->l_policy_data.l_flock.start >
439                              (lock->l_policy_data.l_flock.end + 1))
440                             && (lock->l_policy_data.l_flock.end !=
441                                 OBD_OBJECT_EOF))
442                                 continue;
443
444                         if ((new->l_policy_data.l_flock.end <
445                              (lock->l_policy_data.l_flock.start - 1))
446                             && (lock->l_policy_data.l_flock.start != 0))
447                                 break;
448
449                         if (new->l_policy_data.l_flock.start <
450                             lock->l_policy_data.l_flock.start) {
451                                 lock->l_policy_data.l_flock.start =
452                                         new->l_policy_data.l_flock.start;
453                         } else {
454                                 new->l_policy_data.l_flock.start =
455                                         lock->l_policy_data.l_flock.start;
456                         }
457
458                         if (new->l_policy_data.l_flock.end >
459                             lock->l_policy_data.l_flock.end) {
460                                 lock->l_policy_data.l_flock.end =
461                                         new->l_policy_data.l_flock.end;
462                         } else {
463                                 new->l_policy_data.l_flock.end =
464                                         lock->l_policy_data.l_flock.end;
465                         }
466
467                         if (added) {
468                                 ldlm_flock_destroy(lock, mode, *flags);
469                         } else {
470                                 new = lock;
471                                 added = 1;
472                         }
473                         continue;
474                 }
475
476                 if (new->l_policy_data.l_flock.start >
477                     lock->l_policy_data.l_flock.end)
478                         continue;
479
480                 if (new->l_policy_data.l_flock.end <
481                     lock->l_policy_data.l_flock.start)
482                         break;
483
484                 res->lr_flock_node.lfn_needs_reprocess = true;
485
486                 if (new->l_policy_data.l_flock.start <=
487                     lock->l_policy_data.l_flock.start) {
488                         if (new->l_policy_data.l_flock.end <
489                             lock->l_policy_data.l_flock.end) {
490                                 lock->l_policy_data.l_flock.start =
491                                         new->l_policy_data.l_flock.end + 1;
492                                 break;
493                         }
494                         ldlm_flock_destroy(lock, lock->l_req_mode, *flags);
495                         continue;
496                 }
497                 if (new->l_policy_data.l_flock.end >=
498                     lock->l_policy_data.l_flock.end) {
499                         lock->l_policy_data.l_flock.end =
500                                 new->l_policy_data.l_flock.start - 1;
501                         continue;
502                 }
503
504                 /* split the existing lock into two locks */
505
506                 /* if this is an F_UNLCK operation then we could avoid
507                  * allocating a new lock and use the req lock passed in
508                  * with the request but this would complicate the reply
509                  * processing since updates to req get reflected in the
510                  * reply. The client side replays the lock request so
511                  * it must see the original lock data in the reply.
512                  */
513
514                 /* XXX - if ldlm_lock_new() can sleep we should
515                  * release the lr_lock, allocate the new lock,
516                  * and restart processing this lock.
517                  */
518                 if (new2 == NULL) {
519                         unlock_res_and_lock(req);
520                         new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK,
521                                                 lock->l_granted_mode, &null_cbs,
522                                                 NULL, 0, LVB_T_NONE);
523                         lock_res_and_lock(req);
524                         if (IS_ERR(new2)) {
525                                 ldlm_flock_destroy(req, lock->l_granted_mode,
526                                                    *flags);
527                                 *err = PTR_ERR(new2);
528                                 RETURN(LDLM_ITER_STOP);
529                         }
530                         goto reprocess;
531                 }
532
533                 splitted = 1;
534
535                 new2->l_granted_mode = lock->l_granted_mode;
536                 new2->l_policy_data.l_flock.pid =
537                         new->l_policy_data.l_flock.pid;
538                 new2->l_policy_data.l_flock.owner =
539                         new->l_policy_data.l_flock.owner;
540                 new2->l_policy_data.l_flock.start =
541                         lock->l_policy_data.l_flock.start;
542                 new2->l_policy_data.l_flock.end =
543                         new->l_policy_data.l_flock.start - 1;
544                 lock->l_policy_data.l_flock.start =
545                         new->l_policy_data.l_flock.end + 1;
546                 new2->l_conn_export = lock->l_conn_export;
547                 if (lock->l_export != NULL) {
548                         new2->l_export = class_export_lock_get(lock->l_export,
549                                                                new2);
550                         if (new2->l_export->exp_lock_hash &&
551                             hlist_unhashed(&new2->l_exp_hash))
552                                 cfs_hash_add(new2->l_export->exp_lock_hash,
553                                              &new2->l_remote_handle,
554                                              &new2->l_exp_hash);
555                 }
556                 if (*flags == LDLM_FL_WAIT_NOREPROC)
557                         ldlm_lock_addref_internal_nolock(new2,
558                                                          lock->l_granted_mode);
559
560                 /* insert new2 at lock */
561                 ldlm_resource_add_lock(res, &lock->l_res_link, new2);
562                 LDLM_LOCK_RELEASE(new2);
563                 break;
564         }
565
566         /* if new2 is created but never used, destroy it*/
567         if (splitted == 0 && new2 != NULL)
568                 ldlm_lock_destroy_nolock(new2);
569
570         /* At this point we're granting the lock request. */
571         req->l_granted_mode = req->l_req_mode;
572
573         /* Add req to the granted queue before calling ldlm_reprocess_all(). */
574         if (!added) {
575                 list_del_init(&req->l_res_link);
576                 /* insert new lock before "lock", which might be the
577                  * next lock for this owner, or might be the first
578                  * lock for the next owner, or might not be a lock at
579                  * all, but instead points at the head of the list
580                  */
581                 ldlm_resource_add_lock(res, &lock->l_res_link, req);
582         }
583
584         if (*flags != LDLM_FL_WAIT_NOREPROC) {
585 #ifdef HAVE_SERVER_SUPPORT
586                 if (intention == LDLM_PROCESS_ENQUEUE) {
587                         /* If this is an unlock, reprocess the waitq and
588                          * send completions ASTs for locks that can now be
589                          * granted. The only problem with doing this
590                          * reprocessing here is that the completion ASTs for
591                          * newly granted locks will be sent before the unlock
592                          * completion is sent. It shouldn't be an issue. Also
593                          * note that ldlm_process_flock_lock() will recurse,
594                          * but only once because 'intention' won't be
595                          * LDLM_PROCESS_ENQUEUE from ldlm_reprocess_queue.
596                          */
597                         struct ldlm_flock_node *fn = &res->lr_flock_node;
598 restart:
599                         if (mode == LCK_NL && fn->lfn_needs_reprocess &&
600                             atomic_read(&fn->lfn_unlock_pending) == 0) {
601                                 LIST_HEAD(rpc_list);
602                                 int rc;
603
604                                 ldlm_reprocess_queue(res, &res->lr_waiting,
605                                                      &rpc_list,
606                                                      LDLM_PROCESS_RESCAN, 0);
607                                 fn->lfn_needs_reprocess = false;
608                                 unlock_res_and_lock(req);
609                                 rc = ldlm_run_ast_work(ns, &rpc_list,
610                                                        LDLM_WORK_CP_AST);
611                                 lock_res_and_lock(req);
612                                 if (rc == -ERESTART) {
613                                         fn->lfn_needs_reprocess = true;
614                                         GOTO(restart, rc);
615                                 }
616                         }
617                 } else {
618                         LASSERT(req->l_completion_ast);
619                         ldlm_add_ast_work_item(req, NULL, grant_work);
620                 }
621 #else /* !HAVE_SERVER_SUPPORT */
622                 /* The only one possible case for client-side calls flock
623                  * policy function is ldlm_flock_completion_ast inside which
624                  * carries LDLM_FL_WAIT_NOREPROC flag.
625                  */
626                 CERROR("Illegal parameter for client-side-only module.\n");
627                 LBUG();
628 #endif /* HAVE_SERVER_SUPPORT */
629         }
630
631         /* In case we're reprocessing the requested lock we can't destroy
632          * it until after calling ldlm_add_ast_work_item() above so that laawi()
633          * can bump the reference count on \a req. Otherwise \a req
634          * could be freed before the completion AST can be sent.
635          */
636         if (added)
637                 ldlm_flock_destroy(req, mode, *flags);
638
639         ldlm_resource_dump(D_INFO, res);
640         RETURN(LDLM_ITER_CONTINUE);
641 }
642
643 /**
644  * Flock completion callback function.
645  *
646  * \param lock [in,out]: A lock to be handled
647  * \param flags    [in]: flags
648  * \param *data    [in]: ldlm_work_cp_ast_lock() will use ldlm_cb_set_arg
649  *
650  * \retval 0    : success
651  * \retval <0   : failure
652  */
653 int
654 ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
655 {
656         struct file_lock *getlk = lock->l_ast_data;
657         struct obd_device *obd;
658         enum ldlm_error err;
659         int rc = 0;
660         ENTRY;
661
662         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT2, 4);
663         if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT3)) {
664                 lock_res_and_lock(lock);
665                 lock->l_flags |= LDLM_FL_FAIL_LOC;
666                 unlock_res_and_lock(lock);
667                 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT3, 4);
668         }
669         CDEBUG(D_DLMTRACE, "flags: %#llx data: %p getlk: %p\n",
670                flags, data, getlk);
671
672         LASSERT(flags != LDLM_FL_WAIT_NOREPROC);
673
674         if (flags & LDLM_FL_FAILED)
675                 goto granted;
676
677         if (!(flags & LDLM_FL_BLOCKED_MASK)) {
678                 if (NULL == data)
679                         /* mds granted the lock in the reply */
680                         goto granted;
681                 /* CP AST RPC: lock get granted, wake it up */
682                 wake_up(&lock->l_waitq);
683                 RETURN(0);
684         }
685
686         LDLM_DEBUG(lock,
687                    "client-side enqueue returned a blocked lock, sleeping");
688         obd = class_exp2obd(lock->l_conn_export);
689
690         /* Go to sleep until the lock is granted. */
691         rc = l_wait_event_abortable(lock->l_waitq,
692                                     is_granted_or_cancelled(lock));
693         if (rc < 0) {
694                 /* take lock off the deadlock detection hash list. */
695                 lock_res_and_lock(lock);
696                 ldlm_flock_blocking_unlink(lock);
697
698                 /* client side - set flag to prevent lock from being
699                  * put on LRU list
700                  */
701                 ldlm_set_cbpending(lock);
702                 unlock_res_and_lock(lock);
703
704                 LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
705                            rc);
706                 RETURN(rc);
707         }
708
709 granted:
710         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);
711
712         if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT4)) {
713                 lock_res_and_lock(lock);
714                 /* DEADLOCK is always set with CBPENDING */
715                 lock->l_flags |= LDLM_FL_FLOCK_DEADLOCK | LDLM_FL_CBPENDING;
716                 unlock_res_and_lock(lock);
717                 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT4, 4);
718         }
719         if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT5)) {
720                 lock_res_and_lock(lock);
721                 /* DEADLOCK is always set with CBPENDING */
722                 lock->l_flags |= (LDLM_FL_FAIL_LOC |
723                                   LDLM_FL_FLOCK_DEADLOCK | LDLM_FL_CBPENDING);
724                 unlock_res_and_lock(lock);
725                 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT5, 4);
726         }
727
728         lock_res_and_lock(lock);
729
730
731         /* Protect against race where lock could have been just destroyed
732          * due to overlap in ldlm_process_flock_lock().
733          */
734         if (ldlm_is_destroyed(lock)) {
735                 unlock_res_and_lock(lock);
736                 LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
737
738                 /* An error is still to be returned, to propagate it up to
739                  * ldlm_cli_enqueue_fini() caller. */
740                 RETURN(-EIO);
741         }
742
743         /* ldlm_lock_enqueue() has already placed lock on the granted list. */
744         ldlm_resource_unlink_lock(lock);
745
746         /* Import invalidation. We need to actually release the lock
747          * references being held, so that it can go away. No point in
748          * holding the lock even if app still believes it has it, since
749          * server already dropped it anyway. Only for granted locks too.
750          */
751         /* Do the same for DEADLOCK'ed locks. */
752         if (ldlm_is_failed(lock) || ldlm_is_flock_deadlock(lock)) {
753                 int mode;
754
755                 if (flags & LDLM_FL_TEST_LOCK)
756                         LASSERT(ldlm_is_test_lock(lock));
757
758                 if (ldlm_is_test_lock(lock) || ldlm_is_flock_deadlock(lock))
759                         mode = getlk->fl_type;
760                 else
761                         mode = lock->l_req_mode;
762
763                 if (ldlm_is_flock_deadlock(lock)) {
764                         LDLM_DEBUG(lock, "client-side enqueue deadlock "
765                                    "received");
766                         rc = -EDEADLK;
767                 }
768                 ldlm_flock_destroy(lock, mode, LDLM_FL_WAIT_NOREPROC);
769                 unlock_res_and_lock(lock);
770
771                 /* Need to wake up the waiter if we were evicted */
772                 wake_up(&lock->l_waitq);
773
774                 /* An error is still to be returned, to propagate it up to
775                  * ldlm_cli_enqueue_fini() caller.
776                  */
777                 RETURN(rc ? : -EIO);
778         }
779
780         LDLM_DEBUG(lock, "client-side enqueue granted");
781
782         if (flags & LDLM_FL_TEST_LOCK) {
783                 /*
784                  * fcntl(F_GETLK) request
785                  * The old mode was saved in getlk->fl_type so that if the mode
786                  * in the lock changes we can decref the appropriate refcount.
787                  */
788                 LASSERT(ldlm_is_test_lock(lock));
789                 ldlm_flock_destroy(lock, getlk->fl_type, LDLM_FL_WAIT_NOREPROC);
790                 switch (lock->l_granted_mode) {
791                 case LCK_PR:
792                         getlk->fl_type = F_RDLCK;
793                         break;
794                 case LCK_PW:
795                         getlk->fl_type = F_WRLCK;
796                         break;
797                 default:
798                         getlk->fl_type = F_UNLCK;
799                 }
800                 getlk->fl_pid = (pid_t)lock->l_policy_data.l_flock.pid;
801                 getlk->fl_start = (loff_t)lock->l_policy_data.l_flock.start;
802                 getlk->fl_end = (loff_t)lock->l_policy_data.l_flock.end;
803         } else {
804                 __u64 noreproc = LDLM_FL_WAIT_NOREPROC;
805
806                 /* We need to reprocess the lock to do merges or splits
807                  * with existing locks owned by this process.
808                  */
809                 ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
810         }
811         unlock_res_and_lock(lock);
812         RETURN(rc);
813 }
814 EXPORT_SYMBOL(ldlm_flock_completion_ast);
815
816 int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
817                             void *data, int flag)
818 {
819         ENTRY;
820
821         LASSERT(lock);
822         LASSERT(flag == LDLM_CB_CANCELING);
823
824         /* take lock off the deadlock detection hash list. */
825         lock_res_and_lock(lock);
826         ldlm_flock_blocking_unlink(lock);
827         unlock_res_and_lock(lock);
828         RETURN(0);
829 }
830
831 void ldlm_flock_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
832                                      union ldlm_policy_data *lpolicy)
833 {
834         lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
835         lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
836         lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
837         lpolicy->l_flock.owner = wpolicy->l_flock.lfw_owner;
838 }
839
840 void ldlm_flock_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
841                                      union ldlm_wire_policy_data *wpolicy)
842 {
843         memset(wpolicy, 0, sizeof(*wpolicy));
844         wpolicy->l_flock.lfw_start = lpolicy->l_flock.start;
845         wpolicy->l_flock.lfw_end = lpolicy->l_flock.end;
846         wpolicy->l_flock.lfw_pid = lpolicy->l_flock.pid;
847         wpolicy->l_flock.lfw_owner = lpolicy->l_flock.owner;
848 }
849
850 /*
851  * Export handle<->flock hash operations.
852  */
853 static unsigned
854 ldlm_export_flock_hash(struct cfs_hash *hs, const void *key, unsigned mask)
855 {
856         return cfs_hash_64(*(__u64 *)key, 0) & mask;
857 }
858
859 static void *
860 ldlm_export_flock_key(struct hlist_node *hnode)
861 {
862         struct ldlm_lock *lock;
863
864         lock = hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
865         return &lock->l_policy_data.l_flock.owner;
866 }
867
868 static int
869 ldlm_export_flock_keycmp(const void *key, struct hlist_node *hnode)
870 {
871         return !memcmp(ldlm_export_flock_key(hnode), key, sizeof(__u64));
872 }
873
874 static void *
875 ldlm_export_flock_object(struct hlist_node *hnode)
876 {
877         return hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
878 }
879
880 static void
881 ldlm_export_flock_get(struct cfs_hash *hs, struct hlist_node *hnode)
882 {
883         struct ldlm_lock *lock;
884         struct ldlm_flock *flock;
885
886         lock = hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
887         LDLM_LOCK_GET(lock);
888
889         flock = &lock->l_policy_data.l_flock;
890         LASSERT(flock->blocking_export != NULL);
891         class_export_get(flock->blocking_export);
892         atomic_inc(&flock->blocking_refs);
893 }
894
895 static void
896 ldlm_export_flock_put(struct cfs_hash *hs, struct hlist_node *hnode)
897 {
898         struct ldlm_lock *lock;
899         struct ldlm_flock *flock;
900
901         lock = hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
902
903         flock = &lock->l_policy_data.l_flock;
904         LASSERT(flock->blocking_export != NULL);
905         class_export_put(flock->blocking_export);
906         if (atomic_dec_and_test(&flock->blocking_refs)) {
907                 flock->blocking_owner = 0;
908                 flock->blocking_export = NULL;
909         }
910         LDLM_LOCK_RELEASE(lock);
911 }
912
913 static struct cfs_hash_ops ldlm_export_flock_ops = {
914         .hs_hash        = ldlm_export_flock_hash,
915         .hs_key         = ldlm_export_flock_key,
916         .hs_keycmp      = ldlm_export_flock_keycmp,
917         .hs_object      = ldlm_export_flock_object,
918         .hs_get         = ldlm_export_flock_get,
919         .hs_put         = ldlm_export_flock_put,
920         .hs_put_locked  = ldlm_export_flock_put,
921 };
922
923 int ldlm_init_flock_export(struct obd_export *exp)
924 {
925         if( strcmp(exp->exp_obd->obd_type->typ_name, LUSTRE_MDT_NAME) != 0)
926                 RETURN(0);
927
928         exp->exp_flock_hash =
929                 cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
930                                 HASH_EXP_LOCK_CUR_BITS,
931                                 HASH_EXP_LOCK_MAX_BITS,
932                                 HASH_EXP_LOCK_BKT_BITS, 0,
933                                 CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
934                                 &ldlm_export_flock_ops,
935                                 CFS_HASH_DEFAULT | CFS_HASH_NBLK_CHANGE);
936         if (!exp->exp_flock_hash)
937                 RETURN(-ENOMEM);
938
939         RETURN(0);
940 }
941
942 void ldlm_destroy_flock_export(struct obd_export *exp)
943 {
944         ENTRY;
945         if (exp->exp_flock_hash) {
946                 cfs_hash_putref(exp->exp_flock_hash);
947                 exp->exp_flock_hash = NULL;
948         }
949         EXIT;
950 }