LU-13645 ldlm: extra checks for DOM locks
lustre/ldlm/ldlm_inodebits.c (fs/lustre-release.git)
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_inodebits.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */

/**
 * This file contains the implementation of the IBITS lock type.
 *
 * An IBITS lock carries a bit mask describing various properties of an
 * object. The meaning of each bit is defined by the caller and is opaque
 * to the LDLM code.
 *
 * Locks with intersecting bitmasks and conflicting lock modes (e.g. LCK_PW)
 * are considered conflicting. See the lock mode compatibility matrix
 * in lustre_dlm.h.
 */
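
/*
 * Illustrative sketch of the rule above: two IBITS locks conflict only when
 * their lock modes conflict AND their bitmasks intersect. ibits_conflict()
 * below is a hypothetical helper (not a Lustre API), assuming only that
 * ibits are plain 64-bit masks and that lockmode_compat() tests mode
 * compatibility as it does elsewhere in this file; the GROUP and COS
 * special cases handled in the code below are ignored here:
 *
 *      static bool ibits_conflict(__u64 bits1, enum ldlm_mode mode1,
 *                                 __u64 bits2, enum ldlm_mode mode2)
 *      {
 *              if (lockmode_compat(mode1, mode2))
 *                      return false;
 *              return (bits1 & bits2) != 0;
 *      }
 *
 * For example, an LCK_PW lock on MDS_INODELOCK_UPDATE conflicts with an
 * LCK_PR lock on MDS_INODELOCK_UPDATE | MDS_INODELOCK_LOOKUP, but not with
 * an LCK_PR lock on MDS_INODELOCK_LOOKUP alone.
 */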

#define DEBUG_SUBSYSTEM S_LDLM

#include <lustre_dlm.h>
#include <obd_support.h>
#include <lustre_lib.h>
#include <obd_class.h>

#include "ldlm_internal.h"

#ifdef HAVE_SERVER_SUPPORT

/**
 * Iterate through all waiting locks on a given resource queue and attempt
 * to grant them. As an optimization, only the head waiting lock for each
 * inodebit type is checked.
 *
 * Must be called with resource lock held.
 */
int ldlm_reprocess_inodebits_queue(struct ldlm_resource *res,
                                   struct list_head *queue,
                                   struct list_head *work_list,
                                   enum ldlm_process_intention intention,
                                   struct ldlm_lock *hint)
{
        __u64 flags;
        int rc = LDLM_ITER_CONTINUE;
        enum ldlm_error err;
        LIST_HEAD(bl_ast_list);
        struct ldlm_ibits_queues *queues = res->lr_ibits_queues;
        int i;

        ENTRY;

        check_res_locked(res);

        LASSERT(res->lr_type == LDLM_IBITS);
        LASSERT(intention == LDLM_PROCESS_RESCAN ||
                intention == LDLM_PROCESS_RECOVERY);

        if (intention == LDLM_PROCESS_RECOVERY)
                return ldlm_reprocess_queue(res, queue, work_list, intention,
                                            NULL);

restart:
        CDEBUG(D_DLMTRACE, "--- Reprocess resource "DLDLMRES" (%p)\n",
               PLDLMRES(res), res);

        for (i = 0; i < MDS_INODELOCK_NUMBITS; i++) {
                LIST_HEAD(rpc_list);
                struct list_head *head = &queues->liq_waiting[i];
                struct ldlm_lock *pending;
                struct ldlm_ibits_node *node;

                if (list_empty(head))
                        continue;
                if (hint && !(hint->l_policy_data.l_inodebits.bits & BIT(i)))
                        continue;

                node = list_entry(head->next, struct ldlm_ibits_node,
                                  lin_link[i]);

                pending = node->lock;
                LDLM_DEBUG(pending, "Reprocessing lock from queue %d", i);

                flags = 0;
                rc = ldlm_process_inodebits_lock(pending, &flags, intention,
                                                 &err, &rpc_list);
                if (ldlm_is_granted(pending)) {
                        list_splice(&rpc_list, work_list);
                        /* Try to grant more locks from current queue */
                        i--;
                } else {
                        list_splice(&rpc_list, &bl_ast_list);
                }
        }

        if (!list_empty(&bl_ast_list)) {
                unlock_res(res);

                rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &bl_ast_list,
                                       LDLM_WORK_BL_AST);

                lock_res(res);
                if (rc == -ERESTART)
                        GOTO(restart, rc);
        }

        if (!list_empty(&bl_ast_list))
                ldlm_discard_bl_list(&bl_ast_list);

        RETURN(rc);
}
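
/*
 * Worked example for the per-bit reprocessing above (hypothetical locks):
 * a waiting lock on MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE sits at the
 * head of both liq_waiting[] queues for those bits. When reprocessing is
 * hinted by a just-released lock covering only MDS_INODELOCK_UPDATE, the
 *
 *      if (hint && !(hint->l_policy_data.l_inodebits.bits & BIT(i)))
 *              continue;
 *
 * check skips every other queue, so only the UPDATE queue head is
 * re-evaluated; if that head gets granted, the same queue index is retried
 * (i--) in case the next head became grantable as well.
 */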

/**
 * Determine if the lock is compatible with all locks on the queue.
 *
 * If \a work_list is provided, conflicting locks are linked there.
 * If \a work_list is not provided, we exit this function on the first
 * conflict.
 *
 * \retval 0 if there are conflicting locks in the \a queue
 * \retval 1 if the lock is compatible with all locks in \a queue
 *
 * IBITS locks in the granted queue are organized in bunches of
 * same-mode/same-bits locks called "skip lists". The first lock in a
 * bunch contains a pointer to the end of the bunch. This allows us to
 * skip an entire bunch while searching for conflicting locks if the
 * first lock of the bunch does not conflict with us.
 */
static int
ldlm_inodebits_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                            __u64 *ldlm_flags, struct list_head *work_list)
{
        enum ldlm_mode req_mode = req->l_req_mode;
        struct list_head *tmp;
        struct ldlm_lock *lock;
        __u64 req_bits = req->l_policy_data.l_inodebits.bits;
        __u64 *try_bits = &req->l_policy_data.l_inodebits.try_bits;
        int compat = 1;

        ENTRY;

        lockmode_verify(req_mode);

        /* A lock with no bits set makes no sense and would also be
         * compatible with any other ibits lock. That said, it can happen
         * legitimately if there were only try_bits and all of them failed,
         * so just exit gracefully and let the caller deal with it.
         */
        if ((req_bits | *try_bits) == 0)
                RETURN(0);

        /* Group lock could be only DOM */
        if (unlikely(req_mode == LCK_GROUP &&
                     (req_bits | *try_bits) != MDS_INODELOCK_DOM))
                RETURN(-EPROTO);

        list_for_each(tmp, queue) {
                struct list_head *mode_tail;

                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                /* We stop walking the queue if we hit ourselves so we don't
                 * take conflicting locks enqueued after us into account,
                 * or we'd wait forever. */
                if (req == lock)
                        RETURN(compat);

                /* last lock in mode group */
                LASSERT(lock->l_sl_mode.prev != NULL);
                mode_tail = &list_entry(lock->l_sl_mode.prev, struct ldlm_lock,
                                        l_sl_mode)->l_res_link;

                /* if the request lock is not COS_INCOMPAT and COS is
                 * disabled, they are compatible, IOW this request is from a
                 * local transaction on a DNE system. */
                if (lock->l_req_mode == LCK_COS && !ldlm_is_cos_incompat(req) &&
                    !ldlm_is_cos_enabled(req)) {
                        /* jump to last lock in mode group */
                        tmp = mode_tail;
                        continue;
                }

                if (lockmode_compat(lock->l_req_mode, req_mode)) {
                        /* non group locks are compatible, bits don't matter */
                        if (likely(req_mode != LCK_GROUP)) {
                                /* jump to last lock in mode group */
                                tmp = mode_tail;
                                continue;
                        }

                        if (req->l_policy_data.l_inodebits.li_gid ==
                            lock->l_policy_data.l_inodebits.li_gid) {
                                if (ldlm_is_granted(lock))
                                        RETURN(2);

                                if (*ldlm_flags & LDLM_FL_BLOCK_NOWAIT)
                                        RETURN(-EWOULDBLOCK);

                                /* Place the same group together */
                                ldlm_resource_insert_lock_after(lock, req);
                                RETURN(0);
                        }
                }

                /* GROUP locks are placed at the head of the waiting list,
                 * but grouped by gid. */
                if (unlikely(req_mode == LCK_GROUP && !ldlm_is_granted(lock))) {
                        compat = 0;
                        if (lock->l_req_mode != LCK_GROUP) {
                                /* Already not a GROUP lock, insert before. */
                                ldlm_resource_insert_lock_before(lock, req);
                                break;
                        }
                        /* Still GROUP but a different gid (the same gid
                         * would have been handled above). Keep searching
                         * for the same gid. */
                        LASSERT(req->l_policy_data.l_inodebits.li_gid !=
                                lock->l_policy_data.l_inodebits.li_gid);
                        continue;
                }

                for (;;) {
                        struct list_head *head;

                        /* Advance loop cursor to last lock in policy group. */
                        tmp = &list_entry(lock->l_sl_policy.prev,
                                          struct ldlm_lock,
                                          l_sl_policy)->l_res_link;

                        /* The new lock's try_bits are filtered against the
                         * ibits of all locks in both granted and waiting
                         * queues.
                         */
                        *try_bits &= ~(lock->l_policy_data.l_inodebits.bits |
                                lock->l_policy_data.l_inodebits.try_bits);

                        if ((req_bits | *try_bits) == 0)
                                RETURN(0);

                        /* The new lock's ibits take precedence over the
                         * try_bits of waiting locks, so drop conflicting
                         * try_bits in the waiting queue.
                         * Notice that try_bits of granted locks must be zero.
                         */
                        lock->l_policy_data.l_inodebits.try_bits &= ~req_bits;

                        /* Locks with overlapping bits conflict. */
                        if (lock->l_policy_data.l_inodebits.bits & req_bits) {
                                /* COS lock mode has a special compatibility
                                 * requirement: it is only compatible with
                                 * locks from the same client. */
                                if (lock->l_req_mode == LCK_COS &&
                                    !ldlm_is_cos_incompat(req) &&
                                    ldlm_is_cos_enabled(req) &&
                                    lock->l_client_cookie == req->l_client_cookie)
                                        goto skip_work_list;

                                compat = 0;

                                if (unlikely(lock->l_req_mode == LCK_GROUP)) {
                                        LASSERT(ldlm_has_dom(lock));

                                        if (*ldlm_flags & LDLM_FL_BLOCK_NOWAIT)
                                                RETURN(-EWOULDBLOCK);

                                        /* A combined DOM lock that runs into
                                         * a GROUP DOM lock would block the
                                         * thread for a long time; that is not
                                         * allowed, the try_bits are to be
                                         * used instead. A non-combined DOM
                                         * lock requested by a client has to
                                         * wait for a long time until it is
                                         * re-worked into a non-intent
                                         * request. */
                                        if ((req_bits & MDS_INODELOCK_DOM) &&
                                            (req_bits & ~MDS_INODELOCK_DOM))
                                                RETURN(-EPROTO);

                                        goto skip_work_list;
                                }

                                /* Found a conflicting policy group. */
                                if (!work_list)
                                        RETURN(0);

                                /* Add locks of the policy group to @work_list
                                 * as blocking locks for @req */
                                if (lock->l_blocking_ast)
                                        ldlm_add_ast_work_item(lock, req,
                                                               work_list);
                                head = &lock->l_sl_policy;
                                list_for_each_entry(lock, head, l_sl_policy)
                                        if (lock->l_blocking_ast)
                                                ldlm_add_ast_work_item(lock,
                                                                req, work_list);
                        }
skip_work_list:
                        if (tmp == mode_tail)
                                break;

                        tmp = tmp->next;
                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                } /* Loop over policy groups within one mode group. */
        } /* Loop over mode groups within @queue. */

        RETURN(compat);
}
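
/*
 * Illustrative skiplist layout for the queue walk above (hypothetical
 * locks): a granted queue holding
 *
 *      [PR, LOOKUP] [PR, LOOKUP] [PR, UPDATE] [PW, UPDATE]
 *
 * forms one PR mode group with two policy groups (LOOKUP and UPDATE)
 * followed by a PW mode group. The first lock of a bunch links to its last
 * lock via l_sl_mode/l_sl_policy, which is what lets the scan jump over a
 * whole compatible bunch at once:
 *
 *      mode_tail = &list_entry(lock->l_sl_mode.prev, struct ldlm_lock,
 *                              l_sl_mode)->l_res_link;
 *      tmp = mode_tail;
 *
 * and then continue with the first lock of the next mode group.
 */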

/**
 * Process a granting attempt for an IBITS lock.
 * Must be called with ns lock held.
 *
 * This function looks for any conflicts for \a lock in the granted or
 * waiting queues. The lock is granted if no conflicts are found in
 * either queue.
 */
int ldlm_process_inodebits_lock(struct ldlm_lock *lock, __u64 *ldlm_flags,
                                enum ldlm_process_intention intention,
                                enum ldlm_error *err,
                                struct list_head *work_list)
{
        struct ldlm_resource *res = lock->l_resource;
        struct list_head *grant_work = intention == LDLM_PROCESS_ENQUEUE ?
                                                        NULL : work_list;
        int rc, rc2 = 0;
        ENTRY;

        *err = ELDLM_LOCK_ABORTED;
        LASSERT(!ldlm_is_granted(lock));
        check_res_locked(res);

        if (intention == LDLM_PROCESS_RESCAN) {
                struct list_head *bl_list =
                        *ldlm_flags & LDLM_FL_BLOCK_NOWAIT ? NULL : work_list;

                LASSERT(lock->l_policy_data.l_inodebits.bits != 0);

                /* It is possible that some granted locks were not canceled
                 * but converted and are kept in the granted queue. So there
                 * is a window where a lock with 'ast_sent' might become
                 * granted again. Meanwhile a new lock may appear in that
                 * window and conflict with the converted lock, so the
                 * following scenario is possible:
                 *
                 * 1) lock1 conflicts with lock2
                 * 2) bl_ast was sent for lock2
                 * 3) lock3 comes and conflicts with lock2 too
                 * 4) no bl_ast sent because lock2->l_bl_ast_sent is 1
                 * 5) lock2 was converted for lock1 but not for lock3
                 * 6) lock1 granted, lock3 still is waiting for lock2, but
                 *    there will never be another bl_ast for that
                 *
                 * To avoid this scenario the work_list is used below to
                 * collect any blocked locks from the granted queue during
                 * every reprocess and a bl_ast will be sent if needed.
                 */
                *ldlm_flags = 0;
                rc = ldlm_inodebits_compat_queue(&res->lr_granted, lock,
                                                 ldlm_flags, bl_list);
                if (!rc)
                        RETURN(LDLM_ITER_STOP);
                rc = ldlm_inodebits_compat_queue(&res->lr_waiting, lock,
                                                 ldlm_flags, NULL);
                if (!rc)
                        RETURN(LDLM_ITER_STOP);

                /* grant also try_bits if any */
                if (lock->l_policy_data.l_inodebits.try_bits != 0) {
                        lock->l_policy_data.l_inodebits.bits |=
                                lock->l_policy_data.l_inodebits.try_bits;
                        lock->l_policy_data.l_inodebits.try_bits = 0;
                        *ldlm_flags |= LDLM_FL_LOCK_CHANGED;
                }
                ldlm_resource_unlink_lock(lock);
                ldlm_grant_lock(lock, grant_work);

                *err = ELDLM_OK;
                RETURN(LDLM_ITER_CONTINUE);
        }

        rc = ldlm_inodebits_compat_queue(&res->lr_granted, lock,
                                         ldlm_flags, work_list);
        if (rc < 0)
                GOTO(out, *err = rc);

        if (rc != 2) {
                rc2 = ldlm_inodebits_compat_queue(&res->lr_waiting, lock,
                                                  ldlm_flags, work_list);
                if (rc2 < 0)
                        GOTO(out, *err = rc = rc2);
        }

        if (rc + rc2 != 2) {
                /* if there were only bits to try and all are conflicting */
                if ((lock->l_policy_data.l_inodebits.bits |
                     lock->l_policy_data.l_inodebits.try_bits)) {
                        /* There is no point in setting LDLM_FL_NO_TIMEOUT in
                         * @ldlm_flags for a DOM lock since those are enqueued
                         * through intents, i.e. @lock here is local and does
                         * not time out. */
                        *err = ELDLM_OK;
                }
        } else {
                /* grant also all remaining try_bits */
                if (lock->l_policy_data.l_inodebits.try_bits != 0) {
                        lock->l_policy_data.l_inodebits.bits |=
                                lock->l_policy_data.l_inodebits.try_bits;
                        lock->l_policy_data.l_inodebits.try_bits = 0;
                        *ldlm_flags |= LDLM_FL_LOCK_CHANGED;
                }
                LASSERT(lock->l_policy_data.l_inodebits.bits);
                ldlm_resource_unlink_lock(lock);
                ldlm_grant_lock(lock, grant_work);
                *err = ELDLM_OK;
        }

        RETURN(LDLM_ITER_CONTINUE);
out:
        return rc;
}
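
/*
 * Worked example for the bits/try_bits handling above (hypothetical
 * request): a lock is enqueued with
 *
 *      lock->l_policy_data.l_inodebits.bits     = MDS_INODELOCK_LOOKUP;
 *      lock->l_policy_data.l_inodebits.try_bits = MDS_INODELOCK_DOM;
 *
 * If a granted lock with a conflicting mode covers only MDS_INODELOCK_DOM,
 * the compat-queue pass silently filters the DOM try_bit out instead of
 * blocking, the request remains compatible and is granted with LOOKUP
 * alone. Had DOM been requested in bits rather than try_bits, the request
 * would have conflicted and stayed ungranted. Any try_bits that survive
 * both queue checks are merged into bits before the grant, and
 * LDLM_FL_LOCK_CHANGED reports the changed bit set back to the requester.
 */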
#endif /* HAVE_SERVER_SUPPORT */

void ldlm_ibits_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
                                     union ldlm_policy_data *lpolicy)
{
        lpolicy->l_inodebits.bits = wpolicy->l_inodebits.bits;
        lpolicy->l_inodebits.li_gid = wpolicy->l_inodebits.li_gid;
        /**
         * try_bits are to be handled outside of generic wire_to_local due
         * to different behavior on a server and a client.
         */
}

void ldlm_ibits_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
                                     union ldlm_wire_policy_data *wpolicy)
{
        memset(wpolicy, 0, sizeof(*wpolicy));
        wpolicy->l_inodebits.bits = lpolicy->l_inodebits.bits;
        wpolicy->l_inodebits.try_bits = lpolicy->l_inodebits.try_bits;
        wpolicy->l_inodebits.li_gid = lpolicy->l_inodebits.li_gid;
}

/**
 * Attempt to convert an already granted IBITS lock with several bits set
 * to a lock with fewer bits (downgrade).
 *
 * Such lock conversion is used to keep a lock with non-blocking bits
 * instead of cancelling it, introduced for better support of DoM files.
 */
int ldlm_inodebits_drop(struct ldlm_lock *lock, __u64 to_drop)
{
        ENTRY;

        check_res_locked(lock->l_resource);

        /* Just return if there are no conflicting bits */
        if ((lock->l_policy_data.l_inodebits.bits & to_drop) == 0) {
                LDLM_WARN(lock, "try to drop unset bits %#llx/%#llx",
                          lock->l_policy_data.l_inodebits.bits, to_drop);
                /* nothing to do */
                RETURN(0);
        }

        /* remove the lock from its skiplist and put it in the new place
         * according to the new inodebits */
        ldlm_resource_unlink_lock(lock);
        lock->l_policy_data.l_inodebits.bits &= ~to_drop;
        ldlm_grant_lock_with_skiplist(lock);
        RETURN(0);
}
EXPORT_SYMBOL(ldlm_inodebits_drop);
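
/*
 * Usage sketch for ldlm_inodebits_drop() (hypothetical caller, values for
 * illustration only): to downgrade a granted lock that covers
 * MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE | MDS_INODELOCK_DOM by
 * revoking only the data part, a caller holding the resource lock would do
 *
 *      ldlm_inodebits_drop(lock, MDS_INODELOCK_DOM);
 *
 * after which the lock stays granted with LOOKUP | UPDATE and is re-linked
 * into the skiplist bunch matching its new bit set.
 */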

/* convert a single lock */
int ldlm_cli_inodebits_convert(struct ldlm_lock *lock,
                               enum ldlm_cancel_flags cancel_flags)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
        struct ldlm_lock_desc ld = { { 0 } };
        __u64 drop_bits, new_bits;
        __u32 flags = 0;
        int rc;

        ENTRY;

        check_res_locked(lock->l_resource);

        /* Lock is being converted already */
        if (ldlm_is_converting(lock)) {
                if (!(cancel_flags & LCF_ASYNC)) {
                        unlock_res_and_lock(lock);
                        wait_event_idle(lock->l_waitq,
                                        is_lock_converted(lock));
                        lock_res_and_lock(lock);
                }
                RETURN(0);
        }

        /* lru_cancel may happen in parallel and call ldlm_cli_cancel_list()
         * independently.
         */
        if (ldlm_is_canceling(lock))
                RETURN(-EINVAL);

        /* no need for a local-only convert */
        if (lock->l_flags & (LDLM_FL_LOCAL_ONLY | LDLM_FL_CANCEL_ON_BLOCK))
                RETURN(-EINVAL);

        drop_bits = lock->l_policy_data.l_inodebits.cancel_bits;
        /* no cancel bits - means that the caller needs a full cancel */
        if (drop_bits == 0)
                RETURN(-EINVAL);

        new_bits = lock->l_policy_data.l_inodebits.bits & ~drop_bits;
        /* check if all lock bits are dropped, proceed with cancel */
        if (!new_bits)
                RETURN(-EINVAL);

        /* check if no bits are dropped, consider this a successful convert */
        if (lock->l_policy_data.l_inodebits.bits == new_bits)
                RETURN(0);

        ldlm_set_converting(lock);
        /* Finally call the cancel callback for the remaining bits only.
         * It is important to have the converting flag set during that
         * so the blocking_ast callback can distinguish a convert from
         * a cancel.
         */
        ld.l_policy_data.l_inodebits.cancel_bits = drop_bits;
        unlock_res_and_lock(lock);
        lock->l_blocking_ast(lock, &ld, lock->l_ast_data, LDLM_CB_CANCELING);
        /* now notify the server about the convert */
        rc = ldlm_cli_convert_req(lock, &flags, new_bits);
        lock_res_and_lock(lock);
        if (rc)
                GOTO(full_cancel, rc);

        /* Finally clear these bits in the lock ibits */
        ldlm_inodebits_drop(lock, drop_bits);

        /* Now that the lock is locked again, check whether it was canceled;
         * it is important to do this and not drop cbpending below
         */
        if (ldlm_is_canceling(lock))
                GOTO(full_cancel, rc = -EINVAL);

        /* also check again if more bits to be cancelled have appeared */
        if (drop_bits != lock->l_policy_data.l_inodebits.cancel_bits)
                GOTO(clear_converting, rc = -EAGAIN);

        /* clear the cbpending flag early, it is safe to match the lock right
         * after a client convert because it is always a downgrade.
         */
        ldlm_clear_cbpending(lock);
        ldlm_clear_bl_ast(lock);
        spin_lock(&ns->ns_lock);
        if (list_empty(&lock->l_lru))
                ldlm_lock_add_to_lru_nolock(lock);
        spin_unlock(&ns->ns_lock);

        /* the job is done, zero the cancel_bits. If more conflicts appear,
         * it will result in another cycle of ldlm_cli_inodebits_convert().
         */
full_cancel:
        lock->l_policy_data.l_inodebits.cancel_bits = 0;
clear_converting:
        ldlm_clear_converting(lock);
        RETURN(rc);
}
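
/*
 * Convert flow sketch (hypothetical values): a client holds an IBITS lock
 * with bits = LOOKUP | UPDATE | DOM and a blocking request asks only for
 * the DOM part back, so cancel_bits = MDS_INODELOCK_DOM. The function then
 * computes
 *
 *      new_bits = bits & ~cancel_bits;
 *
 * leaving LOOKUP | UPDATE, runs the blocking AST with LDLM_CB_CANCELING and
 * only the DOM bit in the descriptor (the converting flag lets the callback
 * tell a downgrade from a full cancel), sends the convert request to the
 * server, and finally drops DOM locally via ldlm_inodebits_drop(). A zero
 * cancel_bits, or a convert that would drop every bit, returns -EINVAL so
 * the caller falls back to a full cancel.
 */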

int ldlm_inodebits_alloc_lock(struct ldlm_lock *lock)
{
        if (ldlm_is_ns_srv(lock)) {
                int i;

                OBD_SLAB_ALLOC_PTR(lock->l_ibits_node, ldlm_inodebits_slab);
                if (lock->l_ibits_node == NULL)
                        return -ENOMEM;
                for (i = 0; i < MDS_INODELOCK_NUMBITS; i++)
                        INIT_LIST_HEAD(&lock->l_ibits_node->lin_link[i]);
                lock->l_ibits_node->lock = lock;
        } else {
                lock->l_ibits_node = NULL;
        }
        return 0;
}

void ldlm_inodebits_add_lock(struct ldlm_resource *res, struct list_head *head,
                             struct ldlm_lock *lock, bool tail)
{
        int i;

        if (!ldlm_is_ns_srv(lock))
                return;

        if (head == &res->lr_waiting) {
                for (i = 0; i < MDS_INODELOCK_NUMBITS; i++) {
                        if (!(lock->l_policy_data.l_inodebits.bits & BIT(i)))
                                continue;
                        if (tail)
                                list_add_tail(&lock->l_ibits_node->lin_link[i],
                                         &res->lr_ibits_queues->liq_waiting[i]);
                        else
                                list_add(&lock->l_ibits_node->lin_link[i],
                                         &res->lr_ibits_queues->liq_waiting[i]);
                }
        } else if (head == &res->lr_granted && lock->l_ibits_node != NULL) {
                for (i = 0; i < MDS_INODELOCK_NUMBITS; i++)
                        LASSERT(list_empty(&lock->l_ibits_node->lin_link[i]));
                OBD_SLAB_FREE_PTR(lock->l_ibits_node, ldlm_inodebits_slab);
                lock->l_ibits_node = NULL;
        } else if (head != &res->lr_granted) {
                /* we are inserting in the middle of a list, after @head */
                struct ldlm_lock *orig = list_entry(head, struct ldlm_lock,
                                                    l_res_link);
                LASSERT(orig->l_policy_data.l_inodebits.bits ==
                        lock->l_policy_data.l_inodebits.bits);
                /* There is no use case for inserting before a lock with an
                 * exactly matching set of bits */
                LASSERT(tail == false);

                for (i = 0; i < MDS_INODELOCK_NUMBITS; i++) {
                        if (!(lock->l_policy_data.l_inodebits.bits & (1 << i)))
                                continue;
                        list_add(&lock->l_ibits_node->lin_link[i],
                                 &orig->l_ibits_node->lin_link[i]);
                }
        }
}
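
/*
 * Linkage sketch for the per-bit waiting queues above (hypothetical lock):
 * a waiting lock with bits = MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE
 * has its l_ibits_node->lin_link[] entries added to exactly two
 * lr_ibits_queues->liq_waiting[] lists, one per set bit, so that
 * ldlm_reprocess_inodebits_queue() can later reach it through either bit's
 * queue without scanning the whole waiting list.
 */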

void ldlm_inodebits_unlink_lock(struct ldlm_lock *lock)
{
        int i;

        ldlm_unlink_lock_skiplist(lock);
        if (!ldlm_is_ns_srv(lock))
                return;

        for (i = 0; i < MDS_INODELOCK_NUMBITS; i++)
                list_del_init(&lock->l_ibits_node->lin_link[i]);
}