1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
30  * Use is subject to license terms.
31  *
32  * Copyright (c) 2011, 2012, Whamcloud, Inc.
33  */
34 /*
35  * This file is part of Lustre, http://www.lustre.org/
36  * Lustre is a trademark of Sun Microsystems, Inc.
37  *
38  * Implementation of cl_lock for LOV layer.
39  *
40  *   Author: Nikita Danilov <nikita.danilov@sun.com>
41  */
42
43 #define DEBUG_SUBSYSTEM S_LOV
44
45 #include "lov_cl_internal.h"
46
47 /** \addtogroup lov
48  *  @{
49  */
50
51 static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
52                                                struct cl_lock *parent);
53
54 static int lov_lock_unuse(const struct lu_env *env,
55                           const struct cl_lock_slice *slice);
56 /*****************************************************************************
57  *
58  * Lov lock operations.
59  *
60  */
61
62 static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
63                                                    struct cl_lock *parent,
64                                                    struct lov_lock_sub *lls)
65 {
66         struct lov_sublock_env *subenv;
67         struct lov_io          *lio    = lov_env_io(env);
68         struct cl_io           *io     = lio->lis_cl.cis_io;
69         struct lov_io_sub      *sub;
70
71         subenv = &lov_env_session(env)->ls_subenv;
72
73         /*
74          * FIXME: We tend to use the subio's env & io to call the sublock
75          * lock operations because osc lock sometimes stores some control
76          * variables in the thread's IO information (now only lockless information).
77          * However, if the lock's host (object) is different from the object
78          * for the current IO, we have no way to get the subenv and subio because
79          * they are not initialized at all. As a temporary fix, in this case,
80          * we still borrow the parent's env to call sublock operations.
81          */
82         if (!io || !cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
83                 subenv->lse_env = env;
84                 subenv->lse_io  = io;
85                 subenv->lse_sub = NULL;
86         } else {
87                 sub = lov_sub_get(env, lio, lls->sub_stripe);
88                 if (!IS_ERR(sub)) {
89                         subenv->lse_env = sub->sub_env;
90                         subenv->lse_io  = sub->sub_io;
91                         subenv->lse_sub = sub;
92                 } else {
93                         subenv = (void*)sub;
94                 }
95         }
96         return subenv;
97 }
98
99 static void lov_sublock_env_put(struct lov_sublock_env *subenv)
100 {
101         if (subenv && subenv->lse_sub)
102                 lov_sub_put(subenv->lse_sub);
103 }
104
105 static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck,
106                               struct cl_lock *sublock, int idx,
107                               struct lov_lock_link *link)
108 {
109         struct lovsub_lock *lsl;
110         struct cl_lock     *parent = lck->lls_cl.cls_lock;
111         int                 rc;
112
113         LASSERT(cl_lock_is_mutexed(parent));
114         LASSERT(cl_lock_is_mutexed(sublock));
115         ENTRY;
116
117         lsl = cl2sub_lock(sublock);
118         /*
119          * check that the sub-lock doesn't have a lock link to this top-lock.
120          */
121         LASSERT(lov_lock_link_find(env, lck, lsl) == NULL);
122         LASSERT(idx < lck->lls_nr);
123
124         lck->lls_sub[idx].sub_lock = lsl;
125         lck->lls_nr_filled++;
126         LASSERT(lck->lls_nr_filled <= lck->lls_nr);
127         cfs_list_add_tail(&link->lll_list, &lsl->lss_parents);
128         link->lll_idx = idx;
129         link->lll_super = lck;
130         cl_lock_get(parent);
131         lu_ref_add(&parent->cll_reference, "lov-child", sublock);
132         lck->lls_sub[idx].sub_flags |= LSF_HELD;
133         cl_lock_user_add(env, sublock);
134
135         rc = lov_sublock_modify(env, lck, lsl, &sublock->cll_descr, idx);
136         LASSERT(rc == 0); /* there is no way this can fail, currently */
137         EXIT;
138 }
139
140 static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
141                                          const struct cl_io *io,
142                                          struct lov_lock *lck,
143                                          int idx, struct lov_lock_link **out)
144 {
145         struct cl_lock       *sublock;
146         struct cl_lock       *parent;
147         struct lov_lock_link *link;
148
149         LASSERT(idx < lck->lls_nr);
150         ENTRY;
151
152         OBD_SLAB_ALLOC_PTR_GFP(link, lov_lock_link_kmem, CFS_ALLOC_IO);
153         if (link != NULL) {
154                 struct lov_sublock_env *subenv;
155                 struct lov_lock_sub  *lls;
156                 struct cl_lock_descr *descr;
157
158                 parent = lck->lls_cl.cls_lock;
159                 lls    = &lck->lls_sub[idx];
160                 descr  = &lls->sub_descr;
161
162                 subenv = lov_sublock_env_get(env, parent, lls);
163                 if (!IS_ERR(subenv)) {
164                         /* CAVEAT: Don't try to add a field in lov_lock_sub
165                          * to remember the subio. This is because a lock can be
166                          * cached, but this is not true for an IO. This further
167                          * means a sublock might be referenced in different IO
168                          * contexts. -jay */
169
170                         sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
171                                                descr, "lov-parent", parent);
172                         lov_sublock_env_put(subenv);
173                 } else {
174                         /* an error occurred */
175                         sublock = (void*)subenv;
176                 }
177
178                 if (!IS_ERR(sublock))
179                         *out = link;
180                 else
181                         OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
182         } else
183                 sublock = ERR_PTR(-ENOMEM);
184         RETURN(sublock);
185 }
186
187 static void lov_sublock_unlock(const struct lu_env *env,
188                                struct lovsub_lock *lsl,
189                                struct cl_lock_closure *closure,
190                                struct lov_sublock_env *subenv)
191 {
192         ENTRY;
193         lov_sublock_env_put(subenv);
194         lsl->lss_active = NULL;
195         cl_lock_disclosure(env, closure);
196         EXIT;
197 }
198
199 static int lov_sublock_lock(const struct lu_env *env,
200                             struct lov_lock *lck,
201                             struct lov_lock_sub *lls,
202                             struct cl_lock_closure *closure,
203                             struct lov_sublock_env **lsep)
204 {
205         struct lovsub_lock *sublock;
206         struct cl_lock     *child;
207         int                 result = 0;
208         ENTRY;
209
210         LASSERT(cfs_list_empty(&closure->clc_list));
211
212         sublock = lls->sub_lock;
213         child = sublock->lss_cl.cls_lock;
214         result = cl_lock_closure_build(env, child, closure);
215         if (result == 0) {
216                 struct cl_lock *parent = closure->clc_origin;
217
218                 LASSERT(cl_lock_is_mutexed(child));
219                 sublock->lss_active = parent;
220
221                 if (unlikely((child->cll_state == CLS_FREEING) ||
222                              (child->cll_flags & CLF_CANCELLED))) {
223                         struct lov_lock_link *link;
224                         /*
225                          * We could race with lock deletion, which temporarily
226                          * puts the lock into the freeing state; see bug 19080.
227                          */
228                         LASSERT(!(lls->sub_flags & LSF_HELD));
229
230                         link = lov_lock_link_find(env, lck, sublock);
231                         LASSERT(link != NULL);
232                         lov_lock_unlink(env, link, sublock);
233                         lov_sublock_unlock(env, sublock, closure, NULL);
234                         lck->lls_cancel_race = 1;
235                         result = CLO_REPEAT;
236                 } else if (lsep) {
237                         struct lov_sublock_env *subenv;
238                         subenv = lov_sublock_env_get(env, parent, lls);
239                         if (IS_ERR(subenv)) {
240                                 lov_sublock_unlock(env, sublock,
241                                                    closure, NULL);
242                                 result = PTR_ERR(subenv);
243                         } else {
244                                 *lsep = subenv;
245                         }
246                 }
247         }
248         RETURN(result);
249 }
250
251 /**
252  * Updates the result of a top-lock operation from a result of sub-lock
253  * sub-operations. Top-operations like lov_lock_{enqueue,use,unuse}() iterate
254  * over sub-locks and lov_subresult() is used to calculate the return value of a
255  * top-operation. To this end, possible return values of sub-operations are
256  * ordered as
257  *
258  *     - 0                  success
259  *     - CLO_WAIT           wait for event
260  *     - CLO_REPEAT         repeat top-operation
261  *     - -ve                fundamental error
262  *
263  * The top-level return code can only go down through this list. CLO_REPEAT
264  * overrides CLO_WAIT, because the lock mutex was released and the sleeping
265  * condition has to be rechecked by the upper layer.
266  */
267 static int lov_subresult(int result, int rc)
268 {
269         int result_rank;
270         int rc_rank;
271
272         ENTRY;
273
274         LASSERT(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT);
275         LASSERT(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT);
276         CLASSERT(CLO_WAIT < CLO_REPEAT);
277
278         /* calculate ranks in the ordering above */
279         result_rank = result < 0 ? 1 + CLO_REPEAT : result;
280         rc_rank = rc < 0 ? 1 + CLO_REPEAT : rc;
281
282         if (result_rank < rc_rank)
283                 result = rc;
284         RETURN(result);
285 }
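
/*
 * A minimal sketch of how the ranking above combines per-stripe results
 * while a top-operation iterates over its sub-locks. The call sequence
 * below is hypothetical (lov_subresult_example() is not part of the
 * driver) and only illustrates the ordering 0 < CLO_WAIT < CLO_REPEAT
 * < fundamental error.
 */
#if 0
static void lov_subresult_example(void)
{
        int result = 0;

        result = lov_subresult(result, CLO_WAIT);   /* now CLO_WAIT        */
        result = lov_subresult(result, CLO_REPEAT); /* REPEAT beats WAIT   */
        result = lov_subresult(result, -ESTALE);    /* an error beats both */
        result = lov_subresult(result, CLO_WAIT);   /* stays -ESTALE       */
}
#endif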
286
287 /**
288  * Creates sub-locks for a given lov_lock for the first time.
289  *
290  * Goes through all sub-objects of top-object, and creates sub-locks on every
291  * sub-object intersecting with top-lock extent. This is complicated by the
292  * fact that top-lock (that is being created) can be accessed concurrently
293  * through already created sub-locks (possibly shared with other top-locks).
294  */
295 static int lov_lock_sub_init(const struct lu_env *env,
296                              struct lov_lock *lck, const struct cl_io *io)
297 {
298         int result = 0;
299         int i;
300         int nr;
301         obd_off start;
302         obd_off end;
303         obd_off file_start;
304         obd_off file_end;
305
306         struct lov_object       *loo    = cl2lov(lck->lls_cl.cls_obj);
307         struct lov_layout_raid0 *r0     = lov_r0(loo);
308         struct cl_lock          *parent = lck->lls_cl.cls_lock;
309
310         ENTRY;
311
312         lck->lls_orig = parent->cll_descr;
313         file_start = cl_offset(lov2cl(loo), parent->cll_descr.cld_start);
314         file_end   = cl_offset(lov2cl(loo), parent->cll_descr.cld_end + 1) - 1;
315
316         for (i = 0, nr = 0; i < r0->lo_nr; i++) {
317                 /*
318                  * XXX for wide striping a smarter algorithm is desirable,
319                  * breaking out of the loop early.
320                  */
321                 if (lov_stripe_intersects(r0->lo_lsm, i,
322                                           file_start, file_end, &start, &end))
323                         nr++;
324         }
325         LASSERT(nr > 0);
326         OBD_ALLOC_LARGE(lck->lls_sub, nr * sizeof lck->lls_sub[0]);
327         if (lck->lls_sub == NULL)
328                 RETURN(-ENOMEM);
329
330         lck->lls_nr = nr;
331         /*
332          * First, fill in sub-lock descriptions in
333          * lck->lls_sub[].sub_descr. They are used by lov_sublock_alloc()
334          * (called below in this function, and by lov_lock_enqueue()) to
335          * create sub-locks. At this moment, no other thread can access
336          * top-lock.
337          */
338         for (i = 0, nr = 0; i < r0->lo_nr; ++i) {
339                 if (lov_stripe_intersects(r0->lo_lsm, i,
340                                           file_start, file_end, &start, &end)) {
341                         struct cl_lock_descr *descr;
342
343                         descr = &lck->lls_sub[nr].sub_descr;
344
345                         LASSERT(descr->cld_obj == NULL);
346                         descr->cld_obj   = lovsub2cl(r0->lo_sub[i]);
347                         descr->cld_start = cl_index(descr->cld_obj, start);
348                         descr->cld_end   = cl_index(descr->cld_obj, end);
349                         descr->cld_mode  = parent->cll_descr.cld_mode;
350                         descr->cld_gid   = parent->cll_descr.cld_gid;
351                         descr->cld_enq_flags   = parent->cll_descr.cld_enq_flags;
352                         /* XXX has no effect */
353                         lck->lls_sub[nr].sub_got = *descr;
354                         lck->lls_sub[nr].sub_stripe = i;
355                         nr++;
356                 }
357         }
358         LASSERT(nr == lck->lls_nr);
359         /*
360          * Then, create sub-locks. Once at least one sub-lock was created,
361          * top-lock can be reached by other threads.
362          */
363         for (i = 0; i < lck->lls_nr; ++i) {
364                 struct cl_lock       *sublock;
365                 struct lov_lock_link *link;
366
367                 if (lck->lls_sub[i].sub_lock == NULL) {
368                         sublock = lov_sublock_alloc(env, io, lck, i, &link);
369                         if (IS_ERR(sublock)) {
370                                 result = PTR_ERR(sublock);
371                                 break;
372                         }
373                         cl_lock_get_trust(sublock);
374                         cl_lock_mutex_get(env, sublock);
375                         cl_lock_mutex_get(env, parent);
376                         /*
377                          * recheck under mutex that sub-lock wasn't created
378                          * concurrently, and that top-lock is still alive.
379                          */
380                         if (lck->lls_sub[i].sub_lock == NULL &&
381                             parent->cll_state < CLS_FREEING) {
382                                 lov_sublock_adopt(env, lck, sublock, i, link);
383                                 cl_lock_mutex_put(env, parent);
384                         } else {
385                                 OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
386                                 cl_lock_mutex_put(env, parent);
387                                 cl_lock_unhold(env, sublock,
388                                                "lov-parent", parent);
389                         }
390                         cl_lock_mutex_put(env, sublock);
391                         cl_lock_put(env, sublock);
392                 }
393         }
394         /*
395          * Some sub-locks can be missing at this point. This is not a problem,
396          * because enqueue will create them anyway. The main duty of this function
397          * is to fill in sub-lock descriptions in a race-free manner.
398          */
399         RETURN(result);
400 }
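
/*
 * A worked example of the extent mapping above, assuming a plain RAID0
 * layout with two stripes and a 1MB stripe size (hypothetical numbers):
 * a top-lock covering file bytes [512K, 1536K - 1] intersects stripe 0
 * over the file range [512K, 1024K - 1] and stripe 1 over
 * [1024K, 1536K - 1]. lov_stripe_intersects() reports these
 * intersections in stripe-local offsets, [512K, 1024K - 1] and
 * [0, 512K - 1] respectively, which cl_index() then converts into the
 * page indices stored in each sub-lock descriptor.
 */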
401
402 static int lov_sublock_release(const struct lu_env *env, struct lov_lock *lck,
403                                int i, int deluser, int rc)
404 {
405         struct cl_lock *parent = lck->lls_cl.cls_lock;
406
407         LASSERT(cl_lock_is_mutexed(parent));
408         ENTRY;
409
410         if (lck->lls_sub[i].sub_flags & LSF_HELD) {
411                 struct cl_lock    *sublock;
412                 int dying;
413
414                 LASSERT(lck->lls_sub[i].sub_lock != NULL);
415                 sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
416                 LASSERT(cl_lock_is_mutexed(sublock));
417
418                 lck->lls_sub[i].sub_flags &= ~LSF_HELD;
419                 if (deluser)
420                         cl_lock_user_del(env, sublock);
421                 /*
422                  * If the last hold is released, and cancellation is pending
423                  * for a sub-lock, release parent mutex, to avoid keeping it
424                  * while sub-lock is being paged out.
425                  */
426                 dying = (sublock->cll_descr.cld_mode == CLM_PHANTOM ||
427                          sublock->cll_descr.cld_mode == CLM_GROUP ||
428                          (sublock->cll_flags & (CLF_CANCELPEND|CLF_DOOMED))) &&
429                         sublock->cll_holds == 1;
430                 if (dying)
431                         cl_lock_mutex_put(env, parent);
432                 cl_lock_unhold(env, sublock, "lov-parent", parent);
433                 if (dying) {
434                         cl_lock_mutex_get(env, parent);
435                         rc = lov_subresult(rc, CLO_REPEAT);
436                 }
437                 /*
438                  * From now on lck->lls_sub[i].sub_lock is a "weak" pointer,
439                  * not backed by a reference on a
440                  * sub-lock. lovsub_lock_delete() will clear
441                  * lck->lls_sub[i].sub_lock under semaphores, just before
442                  * sub-lock is destroyed.
443                  */
444         }
445         RETURN(rc);
446 }
447
448 static void lov_sublock_hold(const struct lu_env *env, struct lov_lock *lck,
449                              int i)
450 {
451         struct cl_lock *parent = lck->lls_cl.cls_lock;
452
453         LASSERT(cl_lock_is_mutexed(parent));
454         ENTRY;
455
456         if (!(lck->lls_sub[i].sub_flags & LSF_HELD)) {
457                 struct cl_lock *sublock;
458
459                 LASSERT(lck->lls_sub[i].sub_lock != NULL);
460                 sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
461                 LASSERT(cl_lock_is_mutexed(sublock));
462                 LASSERT(sublock->cll_state != CLS_FREEING);
463
464                 lck->lls_sub[i].sub_flags |= LSF_HELD;
465
466                 cl_lock_get_trust(sublock);
467                 cl_lock_hold_add(env, sublock, "lov-parent", parent);
468                 cl_lock_user_add(env, sublock);
469                 cl_lock_put(env, sublock);
470         }
471         EXIT;
472 }
473
474 static void lov_lock_fini(const struct lu_env *env,
475                           struct cl_lock_slice *slice)
476 {
477         struct lov_lock *lck;
478         int i;
479
480         ENTRY;
481         lck = cl2lov_lock(slice);
482         LASSERT(lck->lls_nr_filled == 0);
483         if (lck->lls_sub != NULL) {
484                 for (i = 0; i < lck->lls_nr; ++i)
485                         /*
486                          * No sub-locks exist at this point, as each sub-lock
487                          * holds a reference on its parent.
488                          */
489                         LASSERT(lck->lls_sub[i].sub_lock == NULL);
490                 OBD_FREE_LARGE(lck->lls_sub,
491                                lck->lls_nr * sizeof lck->lls_sub[0]);
492         }
493         OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
494         EXIT;
495 }
496
497 static int lov_lock_enqueue_wait(const struct lu_env *env,
498                                  struct lov_lock *lck,
499                                  struct cl_lock *sublock)
500 {
501         struct cl_lock *lock = lck->lls_cl.cls_lock;
502         int             result;
503         ENTRY;
504
505         LASSERT(cl_lock_is_mutexed(lock));
506
507         cl_lock_mutex_put(env, lock);
508         result = cl_lock_enqueue_wait(env, sublock, 0);
509         cl_lock_mutex_get(env, lock);
510         RETURN(result ?: CLO_REPEAT);
511 }
512
513 /**
514  * Tries to advance a state machine of a given sub-lock toward enqueuing of
515  * the top-lock.
516  *
517  * \retval 0 if state-transition can proceed
518  * \retval -ve otherwise.
519  */
520 static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck,
521                                 struct cl_lock *sublock,
522                                 struct cl_io *io, __u32 enqflags, int last)
523 {
524         int result;
525         ENTRY;
526
527         /* first, try to enqueue a sub-lock ... */
528         result = cl_enqueue_try(env, sublock, io, enqflags);
529         if ((sublock->cll_state == CLS_ENQUEUED) && !(enqflags & CEF_AGL))
530                 /* if it is enqueued, try to `wait' on it---maybe it's already
531                  * granted */
532                 result = cl_wait_try(env, sublock);
533         /*
534          * If CEF_ASYNC flag is set, then all sub-locks can be enqueued in
535          * parallel, otherwise---enqueue has to wait until sub-lock is granted
536          * before proceeding to the next one.
537          */
538         if ((result == CLO_WAIT) && (sublock->cll_state <= CLS_HELD) &&
539             (enqflags & CEF_ASYNC) && (!last || (enqflags & CEF_AGL)))
540                 result = 0;
541         RETURN(result);
542 }
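
/*
 * For illustration (hypothetical scenario): with CEF_ASYNC set and four
 * sub-locks, a CLO_WAIT from sub-locks 0..2 (still at or below CLS_HELD)
 * is converted to 0 above, so that all stripes get their enqueue
 * requests in flight concurrently; only the last sub-lock keeps CLO_WAIT
 * and makes the caller wait, and with CEF_AGL even the last one does not.
 */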
543
544 /**
545  * Helper function for lov_lock_enqueue() that creates missing sub-lock.
546  */
547 static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
548                             struct cl_io *io, struct lov_lock *lck, int idx)
549 {
550         struct lov_lock_link *link;
551         struct cl_lock       *sublock;
552         int                   result;
553
554         LASSERT(parent->cll_depth == 1);
555         cl_lock_mutex_put(env, parent);
556         sublock = lov_sublock_alloc(env, io, lck, idx, &link);
557         if (!IS_ERR(sublock))
558                 cl_lock_mutex_get(env, sublock);
559         cl_lock_mutex_get(env, parent);
560
561         if (!IS_ERR(sublock)) {
562                 cl_lock_get_trust(sublock);
563                 if (parent->cll_state == CLS_QUEUING &&
564                     lck->lls_sub[idx].sub_lock == NULL) {
565                         lov_sublock_adopt(env, lck, sublock, idx, link);
566                 } else {
567                         OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
568                         /* another thread allocated the sub-lock, or enqueue is no
569                          * longer going on */
570                         cl_lock_mutex_put(env, parent);
571                         cl_lock_unhold(env, sublock, "lov-parent", parent);
572                         cl_lock_mutex_get(env, parent);
573                 }
574                 cl_lock_mutex_put(env, sublock);
575                 cl_lock_put(env, sublock);
576                 result = CLO_REPEAT;
577         } else
578                 result = PTR_ERR(sublock);
579         return result;
580 }
581
582 /**
583  * Implementation of cl_lock_operations::clo_enqueue() for lov layer. This
584  * function is rather subtle, as it enqueues top-lock (i.e., advances top-lock
585  * state machine from CLS_QUEUING to CLS_ENQUEUED states) by juggling sub-lock
586  * state machines in the face of sub-locks sharing (by multiple top-locks),
587  * and concurrent sub-lock cancellations.
588  */
589 static int lov_lock_enqueue(const struct lu_env *env,
590                             const struct cl_lock_slice *slice,
591                             struct cl_io *io, __u32 enqflags)
592 {
593         struct cl_lock         *lock    = slice->cls_lock;
594         struct lov_lock        *lck     = cl2lov_lock(slice);
595         struct cl_lock_closure *closure = lov_closure_get(env, lock);
596         int i;
597         int result;
598         enum cl_lock_state minstate;
599
600         ENTRY;
601
602         for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
603                 int rc;
604                 struct lovsub_lock     *sub;
605                 struct lov_lock_sub    *lls;
606                 struct cl_lock         *sublock;
607                 struct lov_sublock_env *subenv;
608
609                 if (lock->cll_state != CLS_QUEUING) {
610                         /*
611                          * Lock might have left QUEUING state if previous
612                          * iteration released its mutex. Stop enqueuing in this
613                          * case and let the upper layer decide what to do.
614                          */
615                         LASSERT(i > 0 && result != 0);
616                         break;
617                 }
618
619                 lls = &lck->lls_sub[i];
620                 sub = lls->sub_lock;
621                 /*
622                  * Sub-lock might have been canceled, while top-lock was
623                  * cached.
624                  */
625                 if (sub == NULL) {
626                         result = lov_sublock_fill(env, lock, io, lck, i);
627                         /* lov_sublock_fill() released @lock mutex,
628                          * restart. */
629                         break;
630                 }
631                 sublock = sub->lss_cl.cls_lock;
632                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
633                 if (rc == 0) {
634                         lov_sublock_hold(env, lck, i);
635                         rc = lov_lock_enqueue_one(subenv->lse_env, lck, sublock,
636                                                   subenv->lse_io, enqflags,
637                                                   i == lck->lls_nr - 1);
638                         minstate = min(minstate, sublock->cll_state);
639                         if (rc == CLO_WAIT) {
640                                 switch (sublock->cll_state) {
641                                 case CLS_QUEUING:
642                                         /* take recursive mutex, the lock is
643                                          * released in lov_lock_enqueue_wait.
644                                          */
645                                         cl_lock_mutex_get(env, sublock);
646                                         lov_sublock_unlock(env, sub, closure,
647                                                            subenv);
648                                         rc = lov_lock_enqueue_wait(env, lck,
649                                                                    sublock);
650                                         break;
651                                 case CLS_CACHED:
652                                         rc = lov_sublock_release(env, lck, i,
653                                                                  1, rc);
654                                 default:
655                                         lov_sublock_unlock(env, sub, closure,
656                                                            subenv);
657                                         break;
658                                 }
659                         } else {
660                                 LASSERT(sublock->cll_conflict == NULL);
661                                 lov_sublock_unlock(env, sub, closure, subenv);
662                         }
663                 }
664                 result = lov_subresult(result, rc);
665                 if (result != 0)
666                         break;
667         }
668         cl_lock_closure_fini(closure);
669         RETURN(result ?: minstate >= CLS_ENQUEUED ? 0 : CLO_WAIT);
670 }
671
672 static int lov_lock_unuse(const struct lu_env *env,
673                           const struct cl_lock_slice *slice)
674 {
675         struct lov_lock        *lck     = cl2lov_lock(slice);
676         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
677         int i;
678         int result;
679
680         ENTRY;
681
682         for (result = 0, i = 0; i < lck->lls_nr; ++i) {
683                 int rc;
684                 struct lovsub_lock     *sub;
685                 struct cl_lock         *sublock;
686                 struct lov_lock_sub    *lls;
687                 struct lov_sublock_env *subenv;
688
689                 /* top-lock state cannot change concurrently, because a single
690                  * thread (the one that released the last hold) carries unlocking
691                  * to completion. */
692                 LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
693                 lls = &lck->lls_sub[i];
694                 sub = lls->sub_lock;
695                 if (sub == NULL)
696                         continue;
697
698                 sublock = sub->lss_cl.cls_lock;
699                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
700                 if (rc == 0) {
701                         if (lls->sub_flags & LSF_HELD) {
702                                 LASSERT(sublock->cll_state == CLS_HELD ||
703                                         sublock->cll_state == CLS_ENQUEUED);
704                                 /* In the AGL case, the sublock state may not
705                                  * match the lower layer state, so sync them
706                                  * before unuse. */
707                                 if (sublock->cll_users == 1 &&
708                                     sublock->cll_state == CLS_ENQUEUED) {
709                                         __u32 save;
710
711                                         save = sublock->cll_descr.cld_enq_flags;
712                                         sublock->cll_descr.cld_enq_flags |=
713                                                         CEF_NO_REENQUEUE;
714                                         cl_wait_try(env, sublock);
715                                         sublock->cll_descr.cld_enq_flags = save;
716                                 }
717                                 rc = cl_unuse_try(subenv->lse_env, sublock);
718                                 rc = lov_sublock_release(env, lck, i, 0, rc);
719                         }
720                         lov_sublock_unlock(env, sub, closure, subenv);
721                 }
722                 result = lov_subresult(result, rc);
723         }
724
725         if (result == 0 && lck->lls_cancel_race) {
726                 lck->lls_cancel_race = 0;
727                 result = -ESTALE;
728         }
729         cl_lock_closure_fini(closure);
730         RETURN(result);
731 }
732
733
734 static void lov_lock_cancel(const struct lu_env *env,
735                            const struct cl_lock_slice *slice)
736 {
737         struct lov_lock        *lck     = cl2lov_lock(slice);
738         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
739         int i;
740         int result;
741
742         ENTRY;
743
744         for (result = 0, i = 0; i < lck->lls_nr; ++i) {
745                 int rc;
746                 struct lovsub_lock     *sub;
747                 struct cl_lock         *sublock;
748                 struct lov_lock_sub    *lls;
749                 struct lov_sublock_env *subenv;
750
751                 /* top-lock state cannot change concurrently, because a single
752                  * thread (the one that released the last hold) carries unlocking
753                  * to completion. */
754                 lls = &lck->lls_sub[i];
755                 sub = lls->sub_lock;
756                 if (sub == NULL)
757                         continue;
758
759                 sublock = sub->lss_cl.cls_lock;
760                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
761                 if (rc == 0) {
762                         if (!(lls->sub_flags & LSF_HELD)) {
763                                 lov_sublock_unlock(env, sub, closure, subenv);
764                                 continue;
765                         }
766
767                         switch(sublock->cll_state) {
768                         case CLS_HELD:
769                                 rc = cl_unuse_try(subenv->lse_env,
770                                                   sublock);
771                                 lov_sublock_release(env, lck, i, 0, 0);
772                                 break;
773                         case CLS_ENQUEUED:
774                                 /* TODO: it's not a good idea to cancel this
775                                  * lock because it's innocent. But it's
776                                  * acceptable. The better way would be to
777                                  * define a new lock method to unhold the
778                                  * dlm lock. */
779                                 cl_lock_cancel(env, sublock);
780                         default:
781                                 lov_sublock_release(env, lck, i, 1, 0);
782                                 break;
783                         }
784                         lov_sublock_unlock(env, sub, closure, subenv);
785                 }
786
787                 if (rc == CLO_REPEAT) {
788                         --i;
789                         continue;
790                 }
791
792                 result = lov_subresult(result, rc);
793         }
794
795         if (result)
796                 CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
797                               "lov_lock_cancel fails with %d.\n", result);
798
799         cl_lock_closure_fini(closure);
800 }
801
802 static int lov_lock_wait(const struct lu_env *env,
803                          const struct cl_lock_slice *slice)
804 {
805         struct lov_lock        *lck     = cl2lov_lock(slice);
806         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
807         enum cl_lock_state      minstate;
808         int                     reenqueued;
809         int                     result;
810         int                     i;
811
812         ENTRY;
813
814 again:
815         for (result = 0, minstate = CLS_FREEING, i = 0, reenqueued = 0;
816              i < lck->lls_nr; ++i) {
817                 int rc;
818                 struct lovsub_lock     *sub;
819                 struct cl_lock         *sublock;
820                 struct lov_lock_sub    *lls;
821                 struct lov_sublock_env *subenv;
822
823                 lls = &lck->lls_sub[i];
824                 sub = lls->sub_lock;
825                 LASSERT(sub != NULL);
826                 sublock = sub->lss_cl.cls_lock;
827                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
828                 if (rc == 0) {
829                         LASSERT(sublock->cll_state >= CLS_ENQUEUED);
830                         if (sublock->cll_state < CLS_HELD)
831                                 rc = cl_wait_try(env, sublock);
832
833                         minstate = min(minstate, sublock->cll_state);
834                         lov_sublock_unlock(env, sub, closure, subenv);
835                 }
836                 if (rc == CLO_REENQUEUED) {
837                         reenqueued++;
838                         rc = 0;
839                 }
840                 result = lov_subresult(result, rc);
841                 if (result != 0)
842                         break;
843         }
844         /* Each sublock can only be reenqueued once, so this will not loop
845          * forever. */
846         if (result == 0 && reenqueued != 0)
847                 goto again;
848         cl_lock_closure_fini(closure);
849         RETURN(result ?: minstate >= CLS_HELD ? 0 : CLO_WAIT);
850 }
851
852 static int lov_lock_use(const struct lu_env *env,
853                         const struct cl_lock_slice *slice)
854 {
855         struct lov_lock        *lck     = cl2lov_lock(slice);
856         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
857         int                     result;
858         int                     i;
859
860         LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
861         ENTRY;
862
863         for (result = 0, i = 0; i < lck->lls_nr; ++i) {
864                 int rc;
865                 struct lovsub_lock     *sub;
866                 struct cl_lock         *sublock;
867                 struct lov_lock_sub    *lls;
868                 struct lov_sublock_env *subenv;
869
870                 LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
871
872                 lls = &lck->lls_sub[i];
873                 sub = lls->sub_lock;
874                 if (sub == NULL) {
875                         /*
876                          * Sub-lock might have been canceled, while top-lock was
877                          * cached.
878                          */
879                         result = -ESTALE;
880                         break;
881                 }
882
883                 sublock = sub->lss_cl.cls_lock;
884                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
885                 if (rc == 0) {
886                         LASSERT(sublock->cll_state != CLS_FREEING);
887                         lov_sublock_hold(env, lck, i);
888                         if (sublock->cll_state == CLS_CACHED) {
889                                 rc = cl_use_try(subenv->lse_env, sublock, 0);
890                                 if (rc != 0)
891                                         rc = lov_sublock_release(env, lck,
892                                                                  i, 1, rc);
893                         } else if (sublock->cll_state == CLS_NEW) {
894                                 /* Sub-lock might have been canceled, while
895                                  * top-lock was cached. */
896                                 result = -ESTALE;
897                                 lov_sublock_release(env, lck, i, 1, result);
898                         }
899                         lov_sublock_unlock(env, sub, closure, subenv);
900                 }
901                 result = lov_subresult(result, rc);
902                 if (result != 0)
903                         break;
904         }
905
906         if (lck->lls_cancel_race) {
907                 /*
908                  * If unlocking happened at the same time, then the sub-lock
909                  * state should be FREEING, and lov_sublock_lock should
910                  * return CLO_REPEAT. In this case, return -ESTALE, and the
911                  * upper layer should reset the lock state to NEW.
912                  */
913                 lck->lls_cancel_race = 0;
914                 LASSERT(result != 0);
915                 result = -ESTALE;
916         }
917         cl_lock_closure_fini(closure);
918         RETURN(result);
919 }
920
921 #if 0
922 static int lock_lock_multi_match()
923 {
924         struct cl_lock          *lock    = slice->cls_lock;
925         struct cl_lock_descr    *subneed = &lov_env_info(env)->lti_ldescr;
926         struct lov_object       *loo     = cl2lov(lov->lls_cl.cls_obj);
927         struct lov_layout_raid0 *r0      = lov_r0(loo);
928         struct lov_lock_sub     *sub;
929         struct cl_object        *subobj;
930         obd_off  fstart;
931         obd_off  fend;
932         obd_off  start;
933         obd_off  end;
934         int i;
935
936         fstart = cl_offset(need->cld_obj, need->cld_start);
937         fend   = cl_offset(need->cld_obj, need->cld_end + 1) - 1;
938         subneed->cld_mode = need->cld_mode;
939         cl_lock_mutex_get(env, lock);
940         for (i = 0; i < lov->lls_nr; ++i) {
941                 sub = &lov->lls_sub[i];
942                 if (sub->sub_lock == NULL)
943                         continue;
944                 subobj = sub->sub_descr.cld_obj;
945                 if (!lov_stripe_intersects(r0->lo_lsm, sub->sub_stripe,
946                                            fstart, fend, &start, &end))
947                         continue;
948                 subneed->cld_start = cl_index(subobj, start);
949                 subneed->cld_end   = cl_index(subobj, end);
950                 subneed->cld_obj   = subobj;
951                 if (!cl_lock_ext_match(&sub->sub_got, subneed)) {
952                         result = 0;
953                         break;
954                 }
955         }
956         cl_lock_mutex_put(env, lock);
957 }
958 #endif
959
960 /**
961  * Check if the extent region \a descr is covered by \a child against the
962  * specific \a stripe.
963  */
964 static int lov_lock_stripe_is_matching(const struct lu_env *env,
965                                        struct lov_object *lov, int stripe,
966                                        const struct cl_lock_descr *child,
967                                        const struct cl_lock_descr *descr)
968 {
969         struct lov_stripe_md *lsm = lov_r0(lov)->lo_lsm;
970         obd_off start;
971         obd_off end;
972         int result;
973
974         if (lov_r0(lov)->lo_nr == 1)
975                 return cl_lock_ext_match(child, descr);
976
977         /*
978          * For a multi-stripe object:
979          * - make sure the descr only covers child's stripe, and
980          * - check if extent is matching.
981          */
982         start = cl_offset(&lov->lo_cl, descr->cld_start);
983         end   = cl_offset(&lov->lo_cl, descr->cld_end + 1) - 1;
984         result = end - start <= lsm->lsm_stripe_size &&
985                  stripe == lov_stripe_number(lsm, start) &&
986                  stripe == lov_stripe_number(lsm, end);
987         if (result) {
988                 struct cl_lock_descr *subd = &lov_env_info(env)->lti_ldescr;
989                 obd_off sub_start;
990                 obd_off sub_end;
991
992                 subd->cld_obj  = NULL;   /* don't need sub object at all */
993                 subd->cld_mode = descr->cld_mode;
994                 subd->cld_gid  = descr->cld_gid;
995                 result = lov_stripe_intersects(lsm, stripe, start, end,
996                                                &sub_start, &sub_end);
997                 LASSERT(result);
998                 subd->cld_start = cl_index(child->cld_obj, sub_start);
999                 subd->cld_end   = cl_index(child->cld_obj, sub_end);
1000                 result = cl_lock_ext_match(child, subd);
1001         }
1002         return result;
1003 }
1004
1005 /**
1006  * An implementation of cl_lock_operations::clo_fits_into() method.
1007  *
1008  * Checks whether a lock (given by \a slice) is suitable for \a
1009  * io. Multi-stripe locks can be used only for "quick" io, like truncate, or
1010  * O_APPEND write.
1011  *
1012  * \see ccc_lock_fits_into().
1013  */
1014 static int lov_lock_fits_into(const struct lu_env *env,
1015                               const struct cl_lock_slice *slice,
1016                               const struct cl_lock_descr *need,
1017                               const struct cl_io *io)
1018 {
1019         struct lov_lock   *lov = cl2lov_lock(slice);
1020         struct lov_object *obj = cl2lov(slice->cls_obj);
1021         int result;
1022
1023         LASSERT(cl_object_same(need->cld_obj, slice->cls_obj));
1024         LASSERT(lov->lls_nr > 0);
1025
1026         ENTRY;
1027
1028         if (need->cld_mode == CLM_GROUP)
1029                 /*
1030                  * always allow matching a group lock.
1031                  */
1032                 result = cl_lock_ext_match(&lov->lls_orig, need);
1033         else if (lov->lls_nr == 1) {
1034                 struct cl_lock_descr *got = &lov->lls_sub[0].sub_got;
1035                 result = lov_lock_stripe_is_matching(env,
1036                                                      cl2lov(slice->cls_obj),
1037                                                      lov->lls_sub[0].sub_stripe,
1038                                                      got, need);
1039         } else if (io->ci_type != CIT_SETATTR && io->ci_type != CIT_MISC &&
1040                    !cl_io_is_append(io) && need->cld_mode != CLM_PHANTOM)
1041                 /*
1042                  * Multi-stripe locks are only suitable for `quick' IO and for
1043                  * glimpse.
1044                  */
1045                 result = 0;
1046         else
1047                 /*
1048                  * Most general case: multi-stripe existing lock, and
1049                  * (potentially) multi-stripe @need lock. Check that @need is
1050                  * covered by @lov's sub-locks.
1051                  *
1052                  * For now, ignore lock expansions made by the server, and
1053                  * match against original lock extent.
1054                  */
1055                 result = cl_lock_ext_match(&lov->lls_orig, need);
1056         CDEBUG(D_DLMTRACE, DDESCR"/"DDESCR" %d %d/%d: %d\n",
1057                PDESCR(&lov->lls_orig), PDESCR(&lov->lls_sub[0].sub_got),
1058                lov->lls_sub[0].sub_stripe, lov->lls_nr, lov_r0(obj)->lo_nr,
1059                result);
1060         RETURN(result);
1061 }
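
/*
 * A few illustrative (hypothetical) cases of the policy above:
 *   - a CLM_GROUP request is matched against the original extent in
 *     lls_orig;
 *   - a single-stripe lock is matched against the one stripe it covers
 *     via lov_lock_stripe_is_matching();
 *   - a cached multi-stripe lock may be reused for truncate (CIT_SETATTR),
 *     CIT_MISC, an O_APPEND write, or a glimpse (CLM_PHANTOM), but an
 *     ordinary bounded read/write gets 0 here and is expected to enqueue
 *     per-stripe locks of its own instead.
 */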
1062
1063 void lov_lock_unlink(const struct lu_env *env,
1064                      struct lov_lock_link *link, struct lovsub_lock *sub)
1065 {
1066         struct lov_lock *lck    = link->lll_super;
1067         struct cl_lock  *parent = lck->lls_cl.cls_lock;
1068
1069         LASSERT(cl_lock_is_mutexed(parent));
1070         LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
1071         ENTRY;
1072
1073         cfs_list_del_init(&link->lll_list);
1074         LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
1075         /* yank this sub-lock from parent's array */
1076         lck->lls_sub[link->lll_idx].sub_lock = NULL;
1077         LASSERT(lck->lls_nr_filled > 0);
1078         lck->lls_nr_filled--;
1079         lu_ref_del(&parent->cll_reference, "lov-child", sub->lss_cl.cls_lock);
1080         cl_lock_put(env, parent);
1081         OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
1082         EXIT;
1083 }
1084
1085 struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
1086                                          struct lov_lock *lck,
1087                                          struct lovsub_lock *sub)
1088 {
1089         struct lov_lock_link *scan;
1090
1091         LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
1092         ENTRY;
1093
1094         cfs_list_for_each_entry(scan, &sub->lss_parents, lll_list) {
1095                 if (scan->lll_super == lck)
1096                         RETURN(scan);
1097         }
1098         RETURN(NULL);
1099 }
1100
1101 /**
1102  * An implementation of cl_lock_operations::clo_delete() method. This is
1103  * invoked for "top-to-bottom" delete, when lock destruction starts from the
1104  * top-lock, e.g., as a result of inode destruction.
1105  *
1106  * Unlinks top-lock from all its sub-locks. Sub-locks are not deleted there:
1107  * this is done separately elsewhere:
1108  *
1109  *     - for inode destruction, lov_object_delete() calls cl_object_kill() for
1110  *       each sub-object, purging its locks;
1111  *
1112  *     - in other cases (e.g., a fatal error with a top-lock) sub-locks are
1113  *       left in the cache.
1114  */
1115 static void lov_lock_delete(const struct lu_env *env,
1116                             const struct cl_lock_slice *slice)
1117 {
1118         struct lov_lock        *lck     = cl2lov_lock(slice);
1119         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
1120         struct lov_lock_link   *link;
1121         int                     rc;
1122         int                     i;
1123
1124         LASSERT(slice->cls_lock->cll_state == CLS_FREEING);
1125         ENTRY;
1126
1127         for (i = 0; i < lck->lls_nr; ++i) {
1128                 struct lov_lock_sub *lls = &lck->lls_sub[i];
1129                 struct lovsub_lock  *lsl = lls->sub_lock;
1130
1131                 if (lsl == NULL) /* already removed */
1132                         continue;
1133
1134                 rc = lov_sublock_lock(env, lck, lls, closure, NULL);
1135                 if (rc == CLO_REPEAT) {
1136                         --i;
1137                         continue;
1138                 }
1139
1140                 LASSERT(rc == 0);
1141                 LASSERT(lsl->lss_cl.cls_lock->cll_state < CLS_FREEING);
1142
1143                 if (lls->sub_flags & LSF_HELD)
1144                         lov_sublock_release(env, lck, i, 1, 0);
1145
1146                 link = lov_lock_link_find(env, lck, lsl);
1147                 LASSERT(link != NULL);
1148                 lov_lock_unlink(env, link, lsl);
1149                 LASSERT(lck->lls_sub[i].sub_lock == NULL);
1150
1151                 lov_sublock_unlock(env, lsl, closure, NULL);
1152         }
1153
1154         cl_lock_closure_fini(closure);
1155         EXIT;
1156 }
1157
1158 static int lov_lock_print(const struct lu_env *env, void *cookie,
1159                           lu_printer_t p, const struct cl_lock_slice *slice)
1160 {
1161         struct lov_lock *lck = cl2lov_lock(slice);
1162         int              i;
1163
1164         (*p)(env, cookie, "%d\n", lck->lls_nr);
1165         for (i = 0; i < lck->lls_nr; ++i) {
1166                 struct lov_lock_sub *sub;
1167
1168                 sub = &lck->lls_sub[i];
1169                 (*p)(env, cookie, "    %d %x: ", i, sub->sub_flags);
1170                 if (sub->sub_lock != NULL)
1171                         cl_lock_print(env, cookie, p,
1172                                       sub->sub_lock->lss_cl.cls_lock);
1173                 else
1174                         (*p)(env, cookie, "---\n");
1175         }
1176         return 0;
1177 }
1178
1179 static const struct cl_lock_operations lov_lock_ops = {
1180         .clo_fini      = lov_lock_fini,
1181         .clo_enqueue   = lov_lock_enqueue,
1182         .clo_wait      = lov_lock_wait,
1183         .clo_use       = lov_lock_use,
1184         .clo_unuse     = lov_lock_unuse,
1185         .clo_cancel    = lov_lock_cancel,
1186         .clo_fits_into = lov_lock_fits_into,
1187         .clo_delete    = lov_lock_delete,
1188         .clo_print     = lov_lock_print
1189 };
1190
1191 int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
1192                         struct cl_lock *lock, const struct cl_io *io)
1193 {
1194         struct lov_lock *lck;
1195         int result;
1196
1197         ENTRY;
1198         OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, CFS_ALLOC_IO);
1199         if (lck != NULL) {
1200                 cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
1201                 result = lov_lock_sub_init(env, lck, io);
1202         } else
1203                 result = -ENOMEM;
1204         RETURN(result);
1205 }
1206
1207 static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
1208                                                struct cl_lock *parent)
1209 {
1210         struct cl_lock_closure *closure;
1211
1212         closure = &lov_env_info(env)->lti_closure;
1213         LASSERT(cfs_list_empty(&closure->clc_list));
1214         cl_lock_closure_init(env, closure, parent, 1);
1215         return closure;
1216 }
1217
1218
1219 /** @} lov */