lustre/lov/lov_lock.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2013, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Implementation of cl_lock for LOV layer.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_LOV
42
43 #include "lov_cl_internal.h"
44
45 /** \addtogroup lov
46  *  @{
47  */
48
49 static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
50                                                struct cl_lock *parent);
51
52 static int lov_lock_unuse(const struct lu_env *env,
53                           const struct cl_lock_slice *slice);
54 /*****************************************************************************
55  *
56  * Lov lock operations.
57  *
58  */
59
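/**
 * Returns the environment and IO to use for operations on the sub-lock of
 * stripe \a lls: the sub-IO environment when the top-lock belongs to the
 * object of the current IO, or the parent's environment otherwise (see the
 * FIXME below). May return an ERR_PTR() if the sub-IO cannot be obtained.
 */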
60 static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
61                                                    struct cl_lock *parent,
62                                                    struct lov_lock_sub *lls)
63 {
64         struct lov_sublock_env *subenv;
65         struct lov_io          *lio    = lov_env_io(env);
66         struct cl_io           *io     = lio->lis_cl.cis_io;
67         struct lov_io_sub      *sub;
68
69         subenv = &lov_env_session(env)->ls_subenv;
70
71         /*
72          * FIXME: We tend to use the subio's env & io to call the sublock
73          * lock operations because osc lock sometimes stores some control
74          * variables in the thread's IO information (now only lockless information).
75          * However, if the lock's host (object) is different from the object
76          * for the current IO, we have no way to get the subenv and subio because
77          * they are not initialized at all. As a temporary fix, in this case,
78          * we still borrow the parent's env to call sublock operations.
79          */
80         if (!io || !cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
81                 subenv->lse_env = env;
82                 subenv->lse_io  = io;
83                 subenv->lse_sub = NULL;
84         } else {
85                 sub = lov_sub_get(env, lio, lls->sub_stripe);
86                 if (!IS_ERR(sub)) {
87                         subenv->lse_env = sub->sub_env;
88                         subenv->lse_io  = sub->sub_io;
89                         subenv->lse_sub = sub;
90                 } else {
91                         subenv = (void*)sub;
92                 }
93         }
94         return subenv;
95 }
96
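/**
 * Releases the lov_io_sub reference obtained by lov_sublock_env_get(), if
 * one was taken.
 */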
97 static void lov_sublock_env_put(struct lov_sublock_env *subenv)
98 {
99         if (subenv && subenv->lse_sub)
100                 lov_sub_put(subenv->lse_sub);
101 }
102
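/**
 * Adopts a newly created sub-lock into the top-lock: stores it in
 * lck->lls_sub[idx], links it to the parent through \a link, takes a
 * reference on the parent, marks the sub-lock as held (LSF_HELD), adds a
 * lock user on it and updates the sub-lock description.
 */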
103 static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck,
104                               struct cl_lock *sublock, int idx,
105                               struct lov_lock_link *link)
106 {
107         struct lovsub_lock *lsl;
108         struct cl_lock     *parent = lck->lls_cl.cls_lock;
109         int                 rc;
110
111         LASSERT(cl_lock_is_mutexed(parent));
112         LASSERT(cl_lock_is_mutexed(sublock));
113         ENTRY;
114
115         lsl = cl2sub_lock(sublock);
116         /*
117          * check that sub-lock doesn't have lock link to this top-lock.
118          */
119         LASSERT(lov_lock_link_find(env, lck, lsl) == NULL);
120         LASSERT(idx < lck->lls_nr);
121
122         lck->lls_sub[idx].sub_lock = lsl;
123         lck->lls_nr_filled++;
124         LASSERT(lck->lls_nr_filled <= lck->lls_nr);
125         list_add_tail(&link->lll_list, &lsl->lss_parents);
126         link->lll_idx = idx;
127         link->lll_super = lck;
128         cl_lock_get(parent);
129         lu_ref_add(&parent->cll_reference, "lov-child", sublock);
130         lck->lls_sub[idx].sub_flags |= LSF_HELD;
131         cl_lock_user_add(env, sublock);
132
133         rc = lov_sublock_modify(env, lck, lsl, &sublock->cll_descr, idx);
134         LASSERT(rc == 0); /* there is no way this can fail, currently */
135         EXIT;
136 }
137
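/**
 * Allocates a lov_lock_link and obtains (creates or finds) the sub-lock for
 * stripe \a idx of top-lock \a lck with a hold on it, using the sub-IO
 * environment when available. On success \a *out is set to the link, later
 * consumed by lov_sublock_adopt(); on error an ERR_PTR() is returned and
 * the link is freed.
 */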
138 static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
139                                          const struct cl_io *io,
140                                          struct lov_lock *lck,
141                                          int idx, struct lov_lock_link **out)
142 {
143         struct cl_lock       *sublock;
144         struct cl_lock       *parent;
145         struct lov_lock_link *link;
146
147         LASSERT(idx < lck->lls_nr);
148         ENTRY;
149
150         OBD_SLAB_ALLOC_PTR_GFP(link, lov_lock_link_kmem, GFP_NOFS);
151         if (link != NULL) {
152                 struct lov_sublock_env *subenv;
153                 struct lov_lock_sub  *lls;
154                 struct cl_lock_descr *descr;
155
156                 parent = lck->lls_cl.cls_lock;
157                 lls    = &lck->lls_sub[idx];
158                 descr  = &lls->sub_got;
159
160                 subenv = lov_sublock_env_get(env, parent, lls);
161                 if (!IS_ERR(subenv)) {
162                         /* CAVEAT: Don't try to add a field in lov_lock_sub
163                          * to remember the subio. This is because a lock can
164                          * be cached, but an IO cannot. This further means a
165                          * sublock might be referenced in a different IO
166                          * context. -jay */
167
168                         sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
169                                                descr, "lov-parent", parent);
170                         lov_sublock_env_put(subenv);
171                 } else {
172                         /* an error occurred */
173                         sublock = (void *)subenv;
174                 }
175
176                 if (!IS_ERR(sublock))
177                         *out = link;
178                 else
179                         OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
180         } else
181                 sublock = ERR_PTR(-ENOMEM);
182         RETURN(sublock);
183 }
184
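/**
 * Counterpart of lov_sublock_lock(): releases the sub-lock environment,
 * clears lss_active and unlocks all locks in the closure.
 */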
185 static void lov_sublock_unlock(const struct lu_env *env,
186                                struct lovsub_lock *lsl,
187                                struct cl_lock_closure *closure,
188                                struct lov_sublock_env *subenv)
189 {
190         ENTRY;
191         lov_sublock_env_put(subenv);
192         lsl->lss_active = NULL;
193         cl_lock_disclosure(env, closure);
194         EXIT;
195 }
196
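/**
 * Takes the mutex of the sub-lock \a lls by adding it to the lock closure
 * \a closure. If the sub-lock is being freed or was cancelled (a race with
 * lock deletion), it is unlinked from the top-lock and CLO_REPEAT is
 * returned. Otherwise, if \a lsep is not NULL, the sub-lock environment is
 * returned through it as well.
 */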
197 static int lov_sublock_lock(const struct lu_env *env,
198                             struct lov_lock *lck,
199                             struct lov_lock_sub *lls,
200                             struct cl_lock_closure *closure,
201                             struct lov_sublock_env **lsep)
202 {
203         struct lovsub_lock *sublock;
204         struct cl_lock     *child;
205         int                 result = 0;
206         ENTRY;
207
208         LASSERT(list_empty(&closure->clc_list));
209
210         sublock = lls->sub_lock;
211         child = sublock->lss_cl.cls_lock;
212         result = cl_lock_closure_build(env, child, closure);
213         if (result == 0) {
214                 struct cl_lock *parent = closure->clc_origin;
215
216                 LASSERT(cl_lock_is_mutexed(child));
217                 sublock->lss_active = parent;
218
219                 if (unlikely((child->cll_state == CLS_FREEING) ||
220                              (child->cll_flags & CLF_CANCELLED))) {
221                         struct lov_lock_link *link;
222                         /*
223                          * we could race with lock deletion which temporarily
224                          * puts the lock into the freeing state; see bug 19080.
225                          */
226                         LASSERT(!(lls->sub_flags & LSF_HELD));
227
228                         link = lov_lock_link_find(env, lck, sublock);
229                         LASSERT(link != NULL);
230                         lov_lock_unlink(env, link, sublock);
231                         lov_sublock_unlock(env, sublock, closure, NULL);
232                         lck->lls_cancel_race = 1;
233                         result = CLO_REPEAT;
234                 } else if (lsep) {
235                         struct lov_sublock_env *subenv;
236                         subenv = lov_sublock_env_get(env, parent, lls);
237                         if (IS_ERR(subenv)) {
238                                 lov_sublock_unlock(env, sublock,
239                                                    closure, NULL);
240                                 result = PTR_ERR(subenv);
241                         } else {
242                                 *lsep = subenv;
243                         }
244                 }
245         }
246         RETURN(result);
247 }
248
249 /**
250  * Updates the result of a top-lock operation from a result of sub-lock
251  * sub-operations. Top-operations like lov_lock_{enqueue,use,unuse}() iterate
252  * over sub-locks and lov_subresult() is used to calculate return value of a
253  * top-operation. To this end, possible return values of sub-operations are
254  * ordered as
255  *
256  *     - 0                  success
257  *     - CLO_WAIT           wait for event
258  *     - CLO_REPEAT         repeat top-operation
259  *     - -ve                fundamental error (negative errno)
260  *
261  * Top-level return code can only go down through this list. CLO_REPEAT
262  * overwrites CLO_WAIT, because lock mutex was released and sleeping condition
263  * has to be rechecked by the upper layer.
264  */
265 static int lov_subresult(int result, int rc)
266 {
267         int result_rank;
268         int rc_rank;
269
270         ENTRY;
271
272         LASSERTF(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT,
273                  "result = %d\n", result);
274         LASSERTF(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT,
275                  "rc = %d\n", rc);
276         CLASSERT(CLO_WAIT < CLO_REPEAT);
277
278         /* calculate ranks in the ordering above */
279         result_rank = result < 0 ? 1 + CLO_REPEAT : result;
280         rc_rank = rc < 0 ? 1 + CLO_REPEAT : rc;
281
282         if (result_rank < rc_rank)
283                 result = rc;
284         RETURN(result);
285 }
286
287 /**
288  * Creates sub-locks for a given lov_lock for the first time.
289  *
290  * Goes through all sub-objects of top-object, and creates sub-locks on every
291  * sub-object intersecting with top-lock extent. This is complicated by the
292  * fact that top-lock (that is being created) can be accessed concurrently
293  * through already created sub-locks (possibly shared with other top-locks).
294  */
295 static int lov_lock_sub_init(const struct lu_env *env,
296                              struct lov_lock *lck, const struct cl_io *io)
297 {
298         int result = 0;
299         int i;
300         int nr;
301         obd_off start;
302         obd_off end;
303         obd_off file_start;
304         obd_off file_end;
305
306         struct lov_object       *loo    = cl2lov(lck->lls_cl.cls_obj);
307         struct lov_layout_raid0 *r0     = lov_r0(loo);
308         struct cl_lock          *parent = lck->lls_cl.cls_lock;
309
310         ENTRY;
311
312         lck->lls_orig = parent->cll_descr;
313         file_start = cl_offset(lov2cl(loo), parent->cll_descr.cld_start);
314         file_end   = cl_offset(lov2cl(loo), parent->cll_descr.cld_end + 1) - 1;
315
316         for (i = 0, nr = 0; i < r0->lo_nr; i++) {
317                 /*
318                  * XXX for wide striping a smarter algorithm is desirable,
319                  * breaking out of the loop early.
320                  */
321                 if (likely(r0->lo_sub[i] != NULL) &&
322                     lov_stripe_intersects(loo->lo_lsm, i,
323                                           file_start, file_end, &start, &end))
324                         nr++;
325         }
326         LASSERT(nr > 0);
327         OBD_ALLOC_LARGE(lck->lls_sub, nr * sizeof lck->lls_sub[0]);
328         if (lck->lls_sub == NULL)
329                 RETURN(-ENOMEM);
330
331         lck->lls_nr = nr;
332         /*
333          * First, fill in sub-lock descriptions in
334          * lck->lls_sub[].sub_descr. They are used by lov_sublock_alloc()
335          * (called below in this function, and by lov_lock_enqueue()) to
336          * create sub-locks. At this moment, no other thread can access
337          * top-lock.
338          */
339         for (i = 0, nr = 0; i < r0->lo_nr; ++i) {
340                 if (likely(r0->lo_sub[i] != NULL) &&
341                     lov_stripe_intersects(loo->lo_lsm, i,
342                                           file_start, file_end, &start, &end)) {
343                         struct cl_lock_descr *descr;
344
345                         descr = &lck->lls_sub[nr].sub_descr;
346
347                         LASSERT(descr->cld_obj == NULL);
348                         descr->cld_obj   = lovsub2cl(r0->lo_sub[i]);
349                         descr->cld_start = cl_index(descr->cld_obj, start);
350                         descr->cld_end   = cl_index(descr->cld_obj, end);
351                         descr->cld_mode  = parent->cll_descr.cld_mode;
352                         descr->cld_gid   = parent->cll_descr.cld_gid;
353                         descr->cld_enq_flags   = parent->cll_descr.cld_enq_flags;
354                         /* XXX has no effect */
355                         lck->lls_sub[nr].sub_got = *descr;
356                         lck->lls_sub[nr].sub_stripe = i;
357                         nr++;
358                 }
359         }
360         LASSERT(nr == lck->lls_nr);
361
362         /*
363          * Some sub-locks can be missing at this point. This is not a problem,
364          * because enqueue will create them anyway. The main duty of this
365          * function is to fill in sub-lock descriptions in a race-free manner.
366          */
367         RETURN(result);
368 }
369
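/**
 * Drops the LSF_HELD state of sub-lock \a i: removes the lock user (if
 * \a deluser) and the hold taken on behalf of the top-lock. If the last hold
 * is dropped while cancellation is pending for the sub-lock, the parent
 * mutex is released temporarily and \a rc is combined with CLO_REPEAT
 * through lov_subresult().
 */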
370 static int lov_sublock_release(const struct lu_env *env, struct lov_lock *lck,
371                                int i, int deluser, int rc)
372 {
373         struct cl_lock *parent = lck->lls_cl.cls_lock;
374
375         LASSERT(cl_lock_is_mutexed(parent));
376         ENTRY;
377
378         if (lck->lls_sub[i].sub_flags & LSF_HELD) {
379                 struct cl_lock    *sublock;
380                 int dying;
381
382                 LASSERT(lck->lls_sub[i].sub_lock != NULL);
383                 sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
384                 LASSERT(cl_lock_is_mutexed(sublock));
385
386                 lck->lls_sub[i].sub_flags &= ~LSF_HELD;
387                 if (deluser)
388                         cl_lock_user_del(env, sublock);
389                 /*
390                  * If the last hold is released, and cancellation is pending
391                  * for a sub-lock, release parent mutex, to avoid keeping it
392                  * while sub-lock is being paged out.
393                  */
394                 dying = (sublock->cll_descr.cld_mode == CLM_PHANTOM ||
395                          sublock->cll_descr.cld_mode == CLM_GROUP ||
396                          (sublock->cll_flags & (CLF_CANCELPEND|CLF_DOOMED))) &&
397                         sublock->cll_holds == 1;
398                 if (dying)
399                         cl_lock_mutex_put(env, parent);
400                 cl_lock_unhold(env, sublock, "lov-parent", parent);
401                 if (dying) {
402                         cl_lock_mutex_get(env, parent);
403                         rc = lov_subresult(rc, CLO_REPEAT);
404                 }
405                 /*
406                  * From now on lck->lls_sub[i].sub_lock is a "weak" pointer,
407                  * not backed by a reference on a
408                  * sub-lock. lovsub_lock_delete() will clear
409                  * lck->lls_sub[i].sub_lock under semaphores, just before
410                  * sub-lock is destroyed.
411                  */
412         }
413         RETURN(rc);
414 }
415
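/**
 * Acquires the LSF_HELD state on sub-lock \a i: takes a hold and a lock user
 * reference on the sub-lock on behalf of the top-lock.
 */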
416 static void lov_sublock_hold(const struct lu_env *env, struct lov_lock *lck,
417                              int i)
418 {
419         struct cl_lock *parent = lck->lls_cl.cls_lock;
420
421         LASSERT(cl_lock_is_mutexed(parent));
422         ENTRY;
423
424         if (!(lck->lls_sub[i].sub_flags & LSF_HELD)) {
425                 struct cl_lock *sublock;
426
427                 LASSERT(lck->lls_sub[i].sub_lock != NULL);
428                 sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
429                 LASSERT(cl_lock_is_mutexed(sublock));
430                 LASSERT(sublock->cll_state != CLS_FREEING);
431
432                 lck->lls_sub[i].sub_flags |= LSF_HELD;
433
434                 cl_lock_get_trust(sublock);
435                 cl_lock_hold_add(env, sublock, "lov-parent", parent);
436                 cl_lock_user_add(env, sublock);
437                 cl_lock_put(env, sublock);
438         }
439         EXIT;
440 }
441
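/**
 * An implementation of cl_lock_operations::clo_fini(): frees the sub-lock
 * array and the lov_lock itself. All sub-locks must have been unlinked by
 * this point (lls_nr_filled == 0).
 */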
442 static void lov_lock_fini(const struct lu_env *env,
443                           struct cl_lock_slice *slice)
444 {
445         struct lov_lock *lck;
446         int i;
447
448         ENTRY;
449         lck = cl2lov_lock(slice);
450         LASSERT(lck->lls_nr_filled == 0);
451         if (lck->lls_sub != NULL) {
452                 for (i = 0; i < lck->lls_nr; ++i)
453                         /*
454                          * No sub-locks exist at this point, as a sub-lock
455                          * holds a reference on its parent.
456                          */
457                         LASSERT(lck->lls_sub[i].sub_lock == NULL);
458                 OBD_FREE_LARGE(lck->lls_sub,
459                                lck->lls_nr * sizeof lck->lls_sub[0]);
460         }
461         OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
462         EXIT;
463 }
464
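/**
 * Drops the top-lock mutex, waits on \a sublock through
 * cl_lock_enqueue_wait(), re-acquires the mutex and returns CLO_REPEAT (or
 * the wait error) so that the caller restarts its iteration.
 */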
465 static int lov_lock_enqueue_wait(const struct lu_env *env,
466                                  struct lov_lock *lck,
467                                  struct cl_lock *sublock)
468 {
469         struct cl_lock *lock = lck->lls_cl.cls_lock;
470         int             result;
471         ENTRY;
472
473         LASSERT(cl_lock_is_mutexed(lock));
474
475         cl_lock_mutex_put(env, lock);
476         result = cl_lock_enqueue_wait(env, sublock, 0);
477         cl_lock_mutex_get(env, lock);
478         RETURN(result ?: CLO_REPEAT);
479 }
480
481 /**
482  * Tries to advance a state machine of a given sub-lock toward enqueuing of
483  * the top-lock.
484  *
485  * \retval 0 if state-transition can proceed
486  * \retval -ve otherwise.
487  */
488 static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck,
489                                 struct cl_lock *sublock,
490                                 struct cl_io *io, __u32 enqflags, int last)
491 {
492         int result;
493         ENTRY;
494
495         /* first, try to enqueue a sub-lock ... */
496         result = cl_enqueue_try(env, sublock, io, enqflags);
497         if ((sublock->cll_state == CLS_ENQUEUED) && !(enqflags & CEF_AGL)) {
498                 /* if it is enqueued, try to `wait' on it---maybe it's already
499                  * granted */
500                 result = cl_wait_try(env, sublock);
501                 if (result == CLO_REENQUEUED)
502                         result = CLO_WAIT;
503         }
504         /*
505          * If CEF_ASYNC flag is set, then all sub-locks can be enqueued in
506          * parallel, otherwise---enqueue has to wait until sub-lock is granted
507          * before proceeding to the next one.
508          */
509         if ((result == CLO_WAIT) && (sublock->cll_state <= CLS_HELD) &&
510             (enqflags & CEF_ASYNC) && (!last || (enqflags & CEF_AGL)))
511                 result = 0;
512         RETURN(result);
513 }
514
515 /**
516  * Helper function for lov_lock_enqueue() that creates a missing sub-lock.
517  */
518 static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
519                             struct cl_io *io, struct lov_lock *lck, int idx)
520 {
521         struct lov_lock_link *link = NULL;
522         struct cl_lock       *sublock;
523         int                   result;
524
525         LASSERT(parent->cll_depth == 1);
526         cl_lock_mutex_put(env, parent);
527         sublock = lov_sublock_alloc(env, io, lck, idx, &link);
528         if (!IS_ERR(sublock))
529                 cl_lock_mutex_get(env, sublock);
530         cl_lock_mutex_get(env, parent);
531
532         if (!IS_ERR(sublock)) {
533                 cl_lock_get_trust(sublock);
534                 if (parent->cll_state == CLS_QUEUING &&
535                     lck->lls_sub[idx].sub_lock == NULL) {
536                         lov_sublock_adopt(env, lck, sublock, idx, link);
537                 } else {
538                         OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
539                         /* another thread allocated the sub-lock, or the
540                          * enqueue is no longer going on */
541                         cl_lock_mutex_put(env, parent);
542                         cl_lock_unhold(env, sublock, "lov-parent", parent);
543                         cl_lock_mutex_get(env, parent);
544                 }
545                 cl_lock_mutex_put(env, sublock);
546                 cl_lock_put(env, sublock);
547                 result = CLO_REPEAT;
548         } else
549                 result = PTR_ERR(sublock);
550         return result;
551 }
552
553 /**
554  * Implementation of cl_lock_operations::clo_enqueue() for lov layer. This
555  * function is rather subtle, as it enqueues top-lock (i.e., advances top-lock
556  * state machine from CLS_QUEUING to CLS_ENQUEUED states) by juggling sub-lock
557  * state machines in the face of sub-locks sharing (by multiple top-locks),
558  * and concurrent sub-lock cancellations.
559  */
560 static int lov_lock_enqueue(const struct lu_env *env,
561                             const struct cl_lock_slice *slice,
562                             struct cl_io *io, __u32 enqflags)
563 {
564         struct cl_lock         *lock    = slice->cls_lock;
565         struct lov_lock        *lck     = cl2lov_lock(slice);
566         struct cl_lock_closure *closure = lov_closure_get(env, lock);
567         int i;
568         int result;
569         enum cl_lock_state minstate;
570
571         ENTRY;
572
573         for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
574                 int rc;
575                 struct lovsub_lock     *sub;
576                 struct lov_lock_sub    *lls;
577                 struct cl_lock         *sublock;
578                 struct lov_sublock_env *subenv;
579
580                 if (lock->cll_state != CLS_QUEUING) {
581                         /*
582                          * Lock might have left QUEUING state if a previous
583                          * iteration released its mutex. Stop enqueuing in
584                          * this case and let the upper layer decide what to do.
585                          */
586                         LASSERT(i > 0 && result != 0);
587                         break;
588                 }
589
590                 lls = &lck->lls_sub[i];
591                 sub = lls->sub_lock;
592                 /*
593                  * Sub-lock might have been canceled, while top-lock was
594                  * cached.
595                  */
596                 if (sub == NULL) {
597                         result = lov_sublock_fill(env, lock, io, lck, i);
598                         /* lov_sublock_fill() released @lock mutex,
599                          * restart. */
600                         break;
601                 }
602                 sublock = sub->lss_cl.cls_lock;
603                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
604                 if (rc == 0) {
605                         lov_sublock_hold(env, lck, i);
606                         rc = lov_lock_enqueue_one(subenv->lse_env, lck, sublock,
607                                                   subenv->lse_io, enqflags,
608                                                   i == lck->lls_nr - 1);
609                         minstate = min(minstate, sublock->cll_state);
610                         if (rc == CLO_WAIT) {
611                                 switch (sublock->cll_state) {
612                                 case CLS_QUEUING:
613                                         /* take recursive mutex, the lock is
614                                          * released in lov_lock_enqueue_wait.
615                                          */
616                                         cl_lock_mutex_get(env, sublock);
617                                         lov_sublock_unlock(env, sub, closure,
618                                                            subenv);
619                                         rc = lov_lock_enqueue_wait(env, lck,
620                                                                    sublock);
621                                         break;
622                                 case CLS_CACHED:
623                                         cl_lock_get(sublock);
624                                         /* take recursive mutex of sublock */
625                                         cl_lock_mutex_get(env, sublock);
626                                         /* need to release all locks in the closure,
627                                          * otherwise it may deadlock. LU-2683. */
628                                         lov_sublock_unlock(env, sub, closure,
629                                                            subenv);
630                                         /* sublock and parent are held. */
631                                         rc = lov_sublock_release(env, lck, i,
632                                                                  1, rc);
633                                         cl_lock_mutex_put(env, sublock);
634                                         cl_lock_put(env, sublock);
635                                         break;
636                                 default:
637                                         lov_sublock_unlock(env, sub, closure,
638                                                            subenv);
639                                         break;
640                                 }
641                         } else {
642                                 LASSERT(sublock->cll_conflict == NULL);
643                                 lov_sublock_unlock(env, sub, closure, subenv);
644                         }
645                 }
646                 result = lov_subresult(result, rc);
647                 if (result != 0)
648                         break;
649         }
650         cl_lock_closure_fini(closure);
651         RETURN(result ?: minstate >= CLS_ENQUEUED ? 0 : CLO_WAIT);
652 }
653
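/**
 * An implementation of cl_lock_operations::clo_unuse(): returns every held
 * sub-lock to the cached state (or cancels it) and releases the holds taken
 * on behalf of the top-lock.
 */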
654 static int lov_lock_unuse(const struct lu_env *env,
655                           const struct cl_lock_slice *slice)
656 {
657         struct lov_lock        *lck     = cl2lov_lock(slice);
658         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
659         int i;
660         int result;
661
662         ENTRY;
663
664         for (result = 0, i = 0; i < lck->lls_nr; ++i) {
665                 int rc;
666                 struct lovsub_lock     *sub;
667                 struct cl_lock         *sublock;
668                 struct lov_lock_sub    *lls;
669                 struct lov_sublock_env *subenv;
670
671                 /* top-lock state cannot change concurrently, because a single
672                  * thread (the one that released the last hold) carries
673                  * unlocking through to completion. */
674                 LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
675                 lls = &lck->lls_sub[i];
676                 sub = lls->sub_lock;
677                 if (sub == NULL)
678                         continue;
679
680                 sublock = sub->lss_cl.cls_lock;
681                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
682                 if (rc == 0) {
683                         if (!(lls->sub_flags & LSF_HELD)) {
684                                 lov_sublock_unlock(env, sub, closure, subenv);
685                                 continue;
686                         }
687
688                         switch(sublock->cll_state) {
689                         case CLS_HELD:
690                                 rc = cl_unuse_try(subenv->lse_env, sublock);
691                                 lov_sublock_release(env, lck, i, 0, 0);
692                                 break;
693                         default:
694                                 cl_lock_cancel(subenv->lse_env, sublock);
695                                 lov_sublock_release(env, lck, i, 1, 0);
696                                 break;
697                         }
698                         lov_sublock_unlock(env, sub, closure, subenv);
699                 }
700                 result = lov_subresult(result, rc);
701         }
702
703         if (result == 0 && lck->lls_cancel_race) {
704                 lck->lls_cancel_race = 0;
705                 result = -ESTALE;
706         }
707         cl_lock_closure_fini(closure);
708         RETURN(result);
709 }
710
711
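/**
 * An implementation of cl_lock_operations::clo_cancel(): unuses or cancels
 * every held sub-lock and releases the corresponding holds, repeating a
 * stripe when the sub-lock mutex could not be taken (CLO_REPEAT).
 */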
712 static void lov_lock_cancel(const struct lu_env *env,
713                            const struct cl_lock_slice *slice)
714 {
715         struct lov_lock        *lck     = cl2lov_lock(slice);
716         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
717         int i;
718         int result;
719
720         ENTRY;
721
722         for (result = 0, i = 0; i < lck->lls_nr; ++i) {
723                 int rc;
724                 struct lovsub_lock     *sub;
725                 struct cl_lock         *sublock;
726                 struct lov_lock_sub    *lls;
727                 struct lov_sublock_env *subenv;
728
729                 /* top-lock state cannot change concurrently, because a single
730                  * thread (the one that released the last hold) carries
731                  * unlocking through to completion. */
732                 lls = &lck->lls_sub[i];
733                 sub = lls->sub_lock;
734                 if (sub == NULL)
735                         continue;
736
737                 sublock = sub->lss_cl.cls_lock;
738                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
739                 if (rc == 0) {
740                         if (!(lls->sub_flags & LSF_HELD)) {
741                                 lov_sublock_unlock(env, sub, closure, subenv);
742                                 continue;
743                         }
744
745                         switch(sublock->cll_state) {
746                         case CLS_HELD:
747                                 rc = cl_unuse_try(subenv->lse_env, sublock);
748                                 lov_sublock_release(env, lck, i, 0, 0);
749                                 break;
750                         default:
751                                 cl_lock_cancel(subenv->lse_env, sublock);
752                                 lov_sublock_release(env, lck, i, 1, 0);
753                                 break;
754                         }
755                         lov_sublock_unlock(env, sub, closure, subenv);
756                 }
757
758                 if (rc == CLO_REPEAT) {
759                         --i;
760                         continue;
761                 }
762
763                 result = lov_subresult(result, rc);
764         }
765
766         if (result)
767                 CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
768                               "lov_lock_cancel fails with %d.\n", result);
769
770         cl_lock_closure_fini(closure);
771 }
772
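/**
 * An implementation of cl_lock_operations::clo_wait(): waits until every
 * sub-lock is granted (reaches at least CLS_HELD), restarting the scan if
 * any sub-lock was re-enqueued.
 */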
773 static int lov_lock_wait(const struct lu_env *env,
774                          const struct cl_lock_slice *slice)
775 {
776         struct lov_lock        *lck     = cl2lov_lock(slice);
777         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
778         enum cl_lock_state      minstate;
779         int                     reenqueued;
780         int                     result;
781         int                     i;
782
783         ENTRY;
784
785 again:
786         for (result = 0, minstate = CLS_FREEING, i = 0, reenqueued = 0;
787              i < lck->lls_nr; ++i) {
788                 int rc;
789                 struct lovsub_lock     *sub;
790                 struct cl_lock         *sublock;
791                 struct lov_lock_sub    *lls;
792                 struct lov_sublock_env *subenv;
793
794                 lls = &lck->lls_sub[i];
795                 sub = lls->sub_lock;
796                 LASSERT(sub != NULL);
797                 sublock = sub->lss_cl.cls_lock;
798                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
799                 if (rc == 0) {
800                         LASSERT(sublock->cll_state >= CLS_ENQUEUED);
801                         if (sublock->cll_state < CLS_HELD)
802                                 rc = cl_wait_try(env, sublock);
803
804                         minstate = min(minstate, sublock->cll_state);
805                         lov_sublock_unlock(env, sub, closure, subenv);
806                 }
807                 if (rc == CLO_REENQUEUED) {
808                         reenqueued++;
809                         rc = 0;
810                 }
811                 result = lov_subresult(result, rc);
812                 if (result != 0)
813                         break;
814         }
815         /* Each sub-lock can only be re-enqueued once, so this will not
816          * loop forever. */
817         if (result == 0 && reenqueued != 0)
818                 goto again;
819         cl_lock_closure_fini(closure);
820         RETURN(result ?: minstate >= CLS_HELD ? 0 : CLO_WAIT);
821 }
822
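/**
 * An implementation of cl_lock_operations::clo_use(): brings cached
 * sub-locks back into use. Returns -ESTALE if a sub-lock disappeared while
 * the top-lock was cached, so that the upper layer resets the lock state to
 * NEW.
 */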
823 static int lov_lock_use(const struct lu_env *env,
824                         const struct cl_lock_slice *slice)
825 {
826         struct lov_lock        *lck     = cl2lov_lock(slice);
827         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
828         int                     result;
829         int                     i;
830
831         LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
832         ENTRY;
833
834         for (result = 0, i = 0; i < lck->lls_nr; ++i) {
835                 int rc;
836                 struct lovsub_lock     *sub;
837                 struct cl_lock         *sublock;
838                 struct lov_lock_sub    *lls;
839                 struct lov_sublock_env *subenv;
840
841                 LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
842
843                 lls = &lck->lls_sub[i];
844                 sub = lls->sub_lock;
845                 if (sub == NULL) {
846                         /*
847                          * Sub-lock might have been canceled, while top-lock was
848                          * cached.
849                          */
850                         result = -ESTALE;
851                         break;
852                 }
853
854                 sublock = sub->lss_cl.cls_lock;
855                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
856                 if (rc == 0) {
857                         LASSERT(sublock->cll_state != CLS_FREEING);
858                         lov_sublock_hold(env, lck, i);
859                         if (sublock->cll_state == CLS_CACHED) {
860                                 rc = cl_use_try(subenv->lse_env, sublock, 0);
861                                 if (rc != 0)
862                                         rc = lov_sublock_release(env, lck,
863                                                                  i, 1, rc);
864                         } else if (sublock->cll_state == CLS_NEW) {
865                                 /* Sub-lock might have been canceled, while
866                                  * top-lock was cached. */
867                                 result = -ESTALE;
868                                 lov_sublock_release(env, lck, i, 1, result);
869                         }
870                         lov_sublock_unlock(env, sub, closure, subenv);
871                 }
872                 result = lov_subresult(result, rc);
873                 if (result != 0)
874                         break;
875         }
876
877         if (lck->lls_cancel_race) {
878                 /*
879                  * If unlocking happened at the same time, then the sub-lock
880                  * state should be FREEING, and lov_sublock_lock() should
881                  * return CLO_REPEAT. In this case return -ESTALE, and the
882                  * upper layer should reset the lock state to NEW.
883                  */
884                 lck->lls_cancel_race = 0;
885                 LASSERT(result != 0);
886                 result = -ESTALE;
887         }
888         cl_lock_closure_fini(closure);
889         RETURN(result);
890 }
891
892 #if 0
893 static int lock_lock_multi_match()
894 {
895         struct cl_lock          *lock    = slice->cls_lock;
896         struct cl_lock_descr    *subneed = &lov_env_info(env)->lti_ldescr;
897         struct lov_object       *loo     = cl2lov(lov->lls_cl.cls_obj);
898         struct lov_layout_raid0 *r0      = lov_r0(loo);
899         struct lov_lock_sub     *sub;
900         struct cl_object        *subobj;
901         obd_off  fstart;
902         obd_off  fend;
903         obd_off  start;
904         obd_off  end;
905         int i;
906
907         fstart = cl_offset(need->cld_obj, need->cld_start);
908         fend   = cl_offset(need->cld_obj, need->cld_end + 1) - 1;
909         subneed->cld_mode = need->cld_mode;
910         cl_lock_mutex_get(env, lock);
911         for (i = 0; i < lov->lls_nr; ++i) {
912                 sub = &lov->lls_sub[i];
913                 if (sub->sub_lock == NULL)
914                         continue;
915                 subobj = sub->sub_descr.cld_obj;
916                 if (!lov_stripe_intersects(loo->lo_lsm, sub->sub_stripe,
917                                            fstart, fend, &start, &end))
918                         continue;
919                 subneed->cld_start = cl_index(subobj, start);
920                 subneed->cld_end   = cl_index(subobj, end);
921                 subneed->cld_obj   = subobj;
922                 if (!cl_lock_ext_match(&sub->sub_got, subneed)) {
923                         result = 0;
924                         break;
925                 }
926         }
927         cl_lock_mutex_put(env, lock);
928 }
929 #endif
930
931 /**
932  * Check whether the extent region \a descr, restricted to the given
933  * \a stripe, is covered by the sub-lock extent \a child.
934  */
935 static int lov_lock_stripe_is_matching(const struct lu_env *env,
936                                        struct lov_object *lov, int stripe,
937                                        const struct cl_lock_descr *child,
938                                        const struct cl_lock_descr *descr)
939 {
940         struct lov_stripe_md *lsm = lov->lo_lsm;
941         obd_off start;
942         obd_off end;
943         int result;
944
945         if (lov_r0(lov)->lo_nr == 1)
946                 return cl_lock_ext_match(child, descr);
947
948         /*
949          * For a multi-stripe object:
950          * - make sure the descr only covers the child's stripe, and
951          * - check whether the extent matches.
952          */
953         start = cl_offset(&lov->lo_cl, descr->cld_start);
954         end   = cl_offset(&lov->lo_cl, descr->cld_end + 1) - 1;
955
956         result = 0;
957         /* glimpse should work on an object with a LOV EA hole. */
958         if ((end - start <= lsm->lsm_stripe_size) ||
959             (descr->cld_end == CL_PAGE_EOF &&
960              unlikely(lov->lo_lsm->lsm_pattern & LOV_PATTERN_F_HOLE))) {
961                 int idx;
962
963                 idx = lov_stripe_number(lsm, start);
964                 if (idx == stripe ||
965                     unlikely(lov_r0(lov)->lo_sub[idx] == NULL)) {
966                         idx = lov_stripe_number(lsm, end);
967                         if (idx == stripe ||
968                             unlikely(lov_r0(lov)->lo_sub[idx] == NULL))
969                                 result = 1;
970                 }
971         }
972
973         if (result != 0) {
974                 struct cl_lock_descr *subd = &lov_env_info(env)->lti_ldescr;
975                 obd_off sub_start;
976                 obd_off sub_end;
977
978                 subd->cld_obj  = NULL;   /* don't need sub object at all */
979                 subd->cld_mode = descr->cld_mode;
980                 subd->cld_gid  = descr->cld_gid;
981                 result = lov_stripe_intersects(lsm, stripe, start, end,
982                                                &sub_start, &sub_end);
983                 LASSERT(result);
984                 subd->cld_start = cl_index(child->cld_obj, sub_start);
985                 subd->cld_end   = cl_index(child->cld_obj, sub_end);
986                 result = cl_lock_ext_match(child, subd);
987         }
988         return result;
989 }
990
991 /**
992  * An implementation of cl_lock_operations::clo_fits_into() method.
993  *
994  * Checks whether a lock (given by \a slice) is suitable for \a
995  * io. Multi-stripe locks can be used only for "quick" io, like truncate, or
996  * O_APPEND write.
997  *
998  * \see ccc_lock_fits_into().
999  */
1000 static int lov_lock_fits_into(const struct lu_env *env,
1001                               const struct cl_lock_slice *slice,
1002                               const struct cl_lock_descr *need,
1003                               const struct cl_io *io)
1004 {
1005         struct lov_lock   *lov = cl2lov_lock(slice);
1006         struct lov_object *obj = cl2lov(slice->cls_obj);
1007         int result;
1008
1009         LASSERT(cl_object_same(need->cld_obj, slice->cls_obj));
1010         LASSERT(lov->lls_nr > 0);
1011
1012         ENTRY;
1013
1014         /* for the top-lock it is necessary to match enqueue flags, otherwise
1015          * it runs into problems if a sub-lock is missing and must be re-enqueued. */
1016         if (need->cld_enq_flags != lov->lls_orig.cld_enq_flags)
1017                 return 0;
1018
1019         if (lov->lls_ever_canceled)
1020                 return 0;
1021
1022         if (need->cld_mode == CLM_GROUP)
1023                 /*
1024                  * always allow matching against a group lock.
1025                  */
1026                 result = cl_lock_ext_match(&lov->lls_orig, need);
1027         else if (lov->lls_nr == 1) {
1028                 struct cl_lock_descr *got = &lov->lls_sub[0].sub_got;
1029                 result = lov_lock_stripe_is_matching(env,
1030                                                      cl2lov(slice->cls_obj),
1031                                                      lov->lls_sub[0].sub_stripe,
1032                                                      got, need);
1033         } else if (io->ci_type != CIT_SETATTR && io->ci_type != CIT_MISC &&
1034                    !cl_io_is_append(io) && need->cld_mode != CLM_PHANTOM)
1035                 /*
1036                  * Multi-stripe locks are only suitable for `quick' IO and for
1037                  * glimpse.
1038                  */
1039                 result = 0;
1040         else
1041                 /*
1042                  * Most general case: multi-stripe existing lock, and
1043                  * (potentially) multi-stripe @need lock. Check that @need is
1044                  * covered by @lov's sub-locks.
1045                  *
1046                  * For now, ignore lock expansions made by the server, and
1047                  * match against original lock extent.
1048                  */
1049                 result = cl_lock_ext_match(&lov->lls_orig, need);
1050         CDEBUG(D_DLMTRACE, DDESCR"/"DDESCR" %d %d/%d: %d\n",
1051                PDESCR(&lov->lls_orig), PDESCR(&lov->lls_sub[0].sub_got),
1052                lov->lls_sub[0].sub_stripe, lov->lls_nr, lov_r0(obj)->lo_nr,
1053                result);
1054         RETURN(result);
1055 }
1056
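/**
 * Unlinks sub-lock \a sub from the top-lock that \a link belongs to: removes
 * the link from the sub-lock's parent list, clears the (weak) pointer in the
 * top-lock's sub-lock array, drops the parent reference and frees the link.
 */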
1057 void lov_lock_unlink(const struct lu_env *env,
1058                      struct lov_lock_link *link, struct lovsub_lock *sub)
1059 {
1060         struct lov_lock *lck    = link->lll_super;
1061         struct cl_lock  *parent = lck->lls_cl.cls_lock;
1062
1063         LASSERT(cl_lock_is_mutexed(parent));
1064         LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
1065         ENTRY;
1066
1067         list_del_init(&link->lll_list);
1068         LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
1069         /* yank this sub-lock from parent's array */
1070         lck->lls_sub[link->lll_idx].sub_lock = NULL;
1071         LASSERT(lck->lls_nr_filled > 0);
1072         lck->lls_nr_filled--;
1073         lu_ref_del(&parent->cll_reference, "lov-child", sub->lss_cl.cls_lock);
1074         cl_lock_put(env, parent);
1075         OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
1076         EXIT;
1077 }
1078
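/**
 * Finds the lov_lock_link connecting sub-lock \a sub to top-lock \a lck, or
 * returns NULL if \a sub is not a child of \a lck. The sub-lock mutex must
 * be held.
 */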
1079 struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
1080                                          struct lov_lock *lck,
1081                                          struct lovsub_lock *sub)
1082 {
1083         struct lov_lock_link *scan;
1084
1085         LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
1086         ENTRY;
1087
1088         list_for_each_entry(scan, &sub->lss_parents, lll_list) {
1089                 if (scan->lll_super == lck)
1090                         RETURN(scan);
1091         }
1092         RETURN(NULL);
1093 }
1094
1095 /**
1096  * An implementation of cl_lock_operations::clo_delete() method. This is
1097  * invoked for "top-to-bottom" delete, when lock destruction starts from the
1098  * top-lock, e.g., as a result of inode destruction.
1099  *
1100  * Unlinks the top-lock from all its sub-locks. Sub-locks are not deleted
1101  * here; that is done separately elsewhere:
1102  *
1103  *     - for inode destruction, lov_object_delete() calls cl_object_kill() for
1104  *       each sub-object, purging its locks;
1105  *
1106  *     - in other cases (e.g., a fatal error with a top-lock) sub-locks are
1107  *       left in the cache.
1108  */
1109 static void lov_lock_delete(const struct lu_env *env,
1110                             const struct cl_lock_slice *slice)
1111 {
1112         struct lov_lock        *lck     = cl2lov_lock(slice);
1113         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
1114         struct lov_lock_link   *link;
1115         int                     rc;
1116         int                     i;
1117
1118         LASSERT(slice->cls_lock->cll_state == CLS_FREEING);
1119         ENTRY;
1120
1121         for (i = 0; i < lck->lls_nr; ++i) {
1122                 struct lov_lock_sub *lls = &lck->lls_sub[i];
1123                 struct lovsub_lock  *lsl = lls->sub_lock;
1124
1125                 if (lsl == NULL) /* already removed */
1126                         continue;
1127
1128                 rc = lov_sublock_lock(env, lck, lls, closure, NULL);
1129                 if (rc == CLO_REPEAT) {
1130                         --i;
1131                         continue;
1132                 }
1133
1134                 LASSERT(rc == 0);
1135                 LASSERT(lsl->lss_cl.cls_lock->cll_state < CLS_FREEING);
1136
1137                 if (lls->sub_flags & LSF_HELD)
1138                         lov_sublock_release(env, lck, i, 1, 0);
1139
1140                 link = lov_lock_link_find(env, lck, lsl);
1141                 LASSERT(link != NULL);
1142                 lov_lock_unlink(env, link, lsl);
1143                 LASSERT(lck->lls_sub[i].sub_lock == NULL);
1144
1145                 lov_sublock_unlock(env, lsl, closure, NULL);
1146         }
1147
1148         cl_lock_closure_fini(closure);
1149         EXIT;
1150 }
1151
1152 static int lov_lock_print(const struct lu_env *env, void *cookie,
1153                           lu_printer_t p, const struct cl_lock_slice *slice)
1154 {
1155         struct lov_lock *lck = cl2lov_lock(slice);
1156         int              i;
1157
1158         (*p)(env, cookie, "%d\n", lck->lls_nr);
1159         for (i = 0; i < lck->lls_nr; ++i) {
1160                 struct lov_lock_sub *sub;
1161
1162                 sub = &lck->lls_sub[i];
1163                 (*p)(env, cookie, "    %d %x: ", i, sub->sub_flags);
1164                 if (sub->sub_lock != NULL)
1165                         cl_lock_print(env, cookie, p,
1166                                       sub->sub_lock->lss_cl.cls_lock);
1167                 else
1168                         (*p)(env, cookie, "---\n");
1169         }
1170         return 0;
1171 }
1172
1173 static const struct cl_lock_operations lov_lock_ops = {
1174         .clo_fini      = lov_lock_fini,
1175         .clo_enqueue   = lov_lock_enqueue,
1176         .clo_wait      = lov_lock_wait,
1177         .clo_use       = lov_lock_use,
1178         .clo_unuse     = lov_lock_unuse,
1179         .clo_cancel    = lov_lock_cancel,
1180         .clo_fits_into = lov_lock_fits_into,
1181         .clo_delete    = lov_lock_delete,
1182         .clo_print     = lov_lock_print
1183 };
1184
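/**
 * Allocates and initializes the lov_lock slice for a striped (RAID0)
 * object: adds the slice to \a lock and fills in the sub-lock descriptions
 * through lov_lock_sub_init().
 */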
1185 int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
1186                         struct cl_lock *lock, const struct cl_io *io)
1187 {
1188         struct lov_lock *lck;
1189         int result;
1190
1191         ENTRY;
1192         OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, GFP_NOFS);
1193         if (lck != NULL) {
1194                 cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
1195                 result = lov_lock_sub_init(env, lck, io);
1196         } else
1197                 result = -ENOMEM;
1198         RETURN(result);
1199 }
1200
1201 static void lov_empty_lock_fini(const struct lu_env *env,
1202                                 struct cl_lock_slice *slice)
1203 {
1204         struct lov_lock *lck = cl2lov_lock(slice);
1205         OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
1206 }
1207
1208 static int lov_empty_lock_print(const struct lu_env *env, void *cookie,
1209                         lu_printer_t p, const struct cl_lock_slice *slice)
1210 {
1211         (*p)(env, cookie, "empty\n");
1212         return 0;
1213 }
1214
1215 /* XXX: more methods will be added later. */
1216 static const struct cl_lock_operations lov_empty_lock_ops = {
1217         .clo_fini  = lov_empty_lock_fini,
1218         .clo_print = lov_empty_lock_print
1219 };
1220
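/**
 * Allocates the lov_lock slice for an object without stripes: no sub-locks
 * are created and only the fini and print methods are provided.
 */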
1221 int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
1222                         struct cl_lock *lock, const struct cl_io *io)
1223 {
1224         struct lov_lock *lck;
1225         int result = -ENOMEM;
1226
1227         ENTRY;
1228         OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, GFP_NOFS);
1229         if (lck != NULL) {
1230                 cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);
1231                 lck->lls_orig = lock->cll_descr;
1232                 result = 0;
1233         }
1234         RETURN(result);
1235 }
1236
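/**
 * Initializes and returns the per-thread lock closure used to lock a
 * top-lock together with its sub-locks, with \a parent as the closure
 * origin.
 */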
1237 static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
1238                                                struct cl_lock *parent)
1239 {
1240         struct cl_lock_closure *closure;
1241
1242         closure = &lov_env_info(env)->lti_closure;
1243         LASSERT(list_empty(&closure->clc_list));
1244         cl_lock_closure_init(env, closure, parent, 1);
1245         return closure;
1246 }
1247
1248
1249 /** @} lov */