lustre/lov/lov_lock.c  [fs/lustre-release.git]
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Implementation of cl_lock for LOV layer.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_LOV
42
43 #include "lov_cl_internal.h"
44
45 /** \addtogroup lov
46  *  @{
47  */
48
49 static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
50                                                struct cl_lock *parent);
51
52 static int lov_lock_unuse(const struct lu_env *env,
53                           const struct cl_lock_slice *slice);
54 /*****************************************************************************
55  *
56  * Lov lock operations.
57  *
58  */
59
60 static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
61                                                    struct cl_lock *parent,
62                                                    struct lov_lock_sub *lls)
63 {
64         struct lov_sublock_env *subenv;
65         struct lov_io          *lio    = lov_env_io(env);
66         struct cl_io           *io     = lio->lis_cl.cis_io;
67         struct lov_io_sub      *sub;
68
69         subenv = &lov_env_session(env)->ls_subenv;
70
71         /*
72          * FIXME: We tend to use the subio's env & io to call the sublock
73          * lock operations because the osc lock sometimes stores control
74          * variables in the thread's IO information (currently only the
75          * lockless information). However, if the lock's host (object) differs
76          * from the object of the current IO, we have no way to get the subenv
77          * and subio because they are not initialized at all. As a temporary
78          * fix, in this case we still borrow the parent's env for sublock operations.
79          */
80         if (!io || !cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
81                 subenv->lse_env = env;
82                 subenv->lse_io  = io;
83                 subenv->lse_sub = NULL;
84         } else {
85                 sub = lov_sub_get(env, lio, lls->sub_stripe);
86                 if (!IS_ERR(sub)) {
87                         subenv->lse_env = sub->sub_env;
88                         subenv->lse_io  = sub->sub_io;
89                         subenv->lse_sub = sub;
90                 } else {
91                         subenv = (void*)sub;
92                 }
93         }
94         return subenv;
95 }
96
97 static void lov_sublock_env_put(struct lov_sublock_env *subenv)
98 {
99         if (subenv && subenv->lse_sub)
100                 lov_sub_put(subenv->lse_sub);
101 }
102
103 static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck,
104                               struct cl_lock *sublock, int idx,
105                               struct lov_lock_link *link)
106 {
107         struct lovsub_lock *lsl;
108         struct cl_lock     *parent = lck->lls_cl.cls_lock;
109         int                 rc;
110
111         LASSERT(cl_lock_is_mutexed(parent));
112         LASSERT(cl_lock_is_mutexed(sublock));
113         ENTRY;
114
115         lsl = cl2sub_lock(sublock);
116         /*
117          * check that the sub-lock is not yet linked to this top-lock.
118          */
119         LASSERT(lov_lock_link_find(env, lck, lsl) == NULL);
120         LASSERT(idx < lck->lls_nr);
121
122         lck->lls_sub[idx].sub_lock = lsl;
123         lck->lls_nr_filled++;
124         LASSERT(lck->lls_nr_filled <= lck->lls_nr);
125         list_add_tail(&link->lll_list, &lsl->lss_parents);
126         link->lll_idx = idx;
127         link->lll_super = lck;
128         cl_lock_get(parent);
129         lu_ref_add(&parent->cll_reference, "lov-child", sublock);
130         lck->lls_sub[idx].sub_flags |= LSF_HELD;
131         cl_lock_user_add(env, sublock);
132
133         rc = lov_sublock_modify(env, lck, lsl, &sublock->cll_descr, idx);
134         LASSERT(rc == 0); /* there is no way this can fail, currently */
135         EXIT;
136 }
137
138 static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
139                                          const struct cl_io *io,
140                                          struct lov_lock *lck,
141                                          int idx, struct lov_lock_link **out)
142 {
143         struct cl_lock       *sublock;
144         struct cl_lock       *parent;
145         struct lov_lock_link *link;
146
147         LASSERT(idx < lck->lls_nr);
148         ENTRY;
149
150         OBD_SLAB_ALLOC_PTR_GFP(link, lov_lock_link_kmem, CFS_ALLOC_IO);
151         if (link != NULL) {
152                 struct lov_sublock_env *subenv;
153                 struct lov_lock_sub  *lls;
154                 struct cl_lock_descr *descr;
155
156                 parent = lck->lls_cl.cls_lock;
157                 lls    = &lck->lls_sub[idx];
158                 descr  = &lls->sub_descr;
159
160                 subenv = lov_sublock_env_get(env, parent, lls);
161                 if (!IS_ERR(subenv)) {
162                          /* CAVEAT: Don't try to add a field in lov_lock_sub
163                          * to remember the subio. This is because a lock can
164                          * be cached, but an IO cannot. This further means a
165                          * sublock might be referenced from a different IO
166                          * context. -jay */
167
168                         sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
169                                                descr, "lov-parent", parent);
170                         lov_sublock_env_put(subenv);
171                 } else {
172                         /* an error occurred */
173                         sublock = (void*)subenv;
174                 }
175
176                 if (!IS_ERR(sublock))
177                         *out = link;
178                 else
179                         OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
180         } else
181                 sublock = ERR_PTR(-ENOMEM);
182         RETURN(sublock);
183 }
184
185 static void lov_sublock_unlock(const struct lu_env *env,
186                                struct lovsub_lock *lsl,
187                                struct cl_lock_closure *closure,
188                                struct lov_sublock_env *subenv)
189 {
190         ENTRY;
191         lov_sublock_env_put(subenv);
192         lsl->lss_active = NULL;
193         cl_lock_disclosure(env, closure);
194         EXIT;
195 }
196
197 static int lov_sublock_lock(const struct lu_env *env,
198                             struct lov_lock *lck,
199                             struct lov_lock_sub *lls,
200                             struct cl_lock_closure *closure,
201                             struct lov_sublock_env **lsep)
202 {
203         struct lovsub_lock *sublock;
204         struct cl_lock     *child;
205         int                 result = 0;
206         ENTRY;
207
208         LASSERT(list_empty(&closure->clc_list));
209
210         sublock = lls->sub_lock;
211         child = sublock->lss_cl.cls_lock;
212         result = cl_lock_closure_build(env, child, closure);
213         if (result == 0) {
214                 struct cl_lock *parent = closure->clc_origin;
215
216                 LASSERT(cl_lock_is_mutexed(child));
217                 sublock->lss_active = parent;
218
219                 if (unlikely((child->cll_state == CLS_FREEING) ||
220                              (child->cll_flags & CLF_CANCELLED))) {
221                         struct lov_lock_link *link;
222                         /*
223                          * we could race with lock deletion, which temporarily
224                          * puts the lock into the freeing state; see bug 19080.
225                          */
226                         LASSERT(!(lls->sub_flags & LSF_HELD));
227
228                         link = lov_lock_link_find(env, lck, sublock);
229                         LASSERT(link != NULL);
230                         lov_lock_unlink(env, link, sublock);
231                         lov_sublock_unlock(env, sublock, closure, NULL);
232                         lck->lls_cancel_race = 1;
233                         result = CLO_REPEAT;
234                 } else if (lsep) {
235                         struct lov_sublock_env *subenv;
236                         subenv = lov_sublock_env_get(env, parent, lls);
237                         if (IS_ERR(subenv)) {
238                                 lov_sublock_unlock(env, sublock,
239                                                    closure, NULL);
240                                 result = PTR_ERR(subenv);
241                         } else {
242                                 *lsep = subenv;
243                         }
244                 }
245         }
246         RETURN(result);
247 }
248
249 /**
250  * Updates the result of a top-lock operation from a result of sub-lock
251  * sub-operations. Top-operations like lov_lock_{enqueue,use,unuse}() iterate
252  * over sub-locks and lov_subresult() is used to calculate return value of a
253  * top-operation. To this end, possible return values of sub-operations are
254  * ordered as
255  *
256  *     - 0                  success
257  *     - CLO_WAIT           wait for event
258  *     - CLO_REPEAT         repeat top-operation
259  *     - -ve                fundamental error
260  *
261  * Top-level return code can only go down through this list. CLO_REPEAT
262  * overwrites CLO_WAIT, because the lock mutex was released and the sleeping
263  * condition has to be rechecked by the upper layer.
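 *
 * For example (following this ranking): combining 0 with CLO_WAIT yields
 * CLO_WAIT, combining CLO_WAIT with CLO_REPEAT yields CLO_REPEAT, and a
 * negative errno takes precedence over any CLO_* value (the first error
 * recorded is kept).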
264  */
265 static int lov_subresult(int result, int rc)
266 {
267         int result_rank;
268         int rc_rank;
269
270         LASSERT(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT);
271         LASSERT(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT);
272         CLASSERT(CLO_WAIT < CLO_REPEAT);
273
274         ENTRY;
275
276         /* calculate ranks in the ordering above */
277         result_rank = result < 0 ? 1 + CLO_REPEAT : result;
278         rc_rank = rc < 0 ? 1 + CLO_REPEAT : rc;
279
280         if (result_rank < rc_rank)
281                 result = rc;
282         RETURN(result);
283 }
284
285 /**
286  * Creates sub-locks for a given lov_lock for the first time.
287  *
288  * Goes through all sub-objects of top-object, and creates sub-locks on every
289  * sub-object intersecting with top-lock extent. This is complicated by the
290  * fact that top-lock (that is being created) can be accessed concurrently
291  * through already created sub-locks (possibly shared with other top-locks).
292  */
293 static int lov_lock_sub_init(const struct lu_env *env,
294                              struct lov_lock *lck, const struct cl_io *io)
295 {
296         int result = 0;
297         int i;
298         int nr;
299         obd_off start;
300         obd_off end;
301         obd_off file_start;
302         obd_off file_end;
303
304         struct lov_object       *loo    = cl2lov(lck->lls_cl.cls_obj);
305         struct lov_layout_raid0 *r0     = lov_r0(loo);
306         struct cl_lock          *parent = lck->lls_cl.cls_lock;
307
308         ENTRY;
309
310         lck->lls_orig = parent->cll_descr;
311         file_start = cl_offset(lov2cl(loo), parent->cll_descr.cld_start);
312         file_end   = cl_offset(lov2cl(loo), parent->cll_descr.cld_end + 1) - 1;
313
314         for (i = 0, nr = 0; i < r0->lo_nr; i++) {
315                 /*
316                  * XXX for wide striping a smarter algorithm is desirable,
317                  * one that breaks out of the loop early.
318                  */
319                 if (lov_stripe_intersects(r0->lo_lsm, i,
320                                           file_start, file_end, &start, &end))
321                         nr++;
322         }
323         LASSERT(nr > 0);
324         OBD_ALLOC(lck->lls_sub, nr * sizeof lck->lls_sub[0]);
325         if (lck->lls_sub == NULL)
326                 RETURN(-ENOMEM);
327
328         lck->lls_nr = nr;
329         /*
330          * First, fill in sub-lock descriptions in
331          * lck->lls_sub[].sub_descr. They are used by lov_sublock_alloc()
332          * (called below in this function, and by lov_lock_enqueue()) to
333          * create sub-locks. At this moment, no other thread can access
334          * top-lock.
335          */
336         for (i = 0, nr = 0; i < r0->lo_nr; ++i) {
337                 if (lov_stripe_intersects(r0->lo_lsm, i,
338                                           file_start, file_end, &start, &end)) {
339                         struct cl_lock_descr *descr;
340
341                         descr = &lck->lls_sub[nr].sub_descr;
342
343                         LASSERT(descr->cld_obj == NULL);
344                         descr->cld_obj   = lovsub2cl(r0->lo_sub[i]);
345                         descr->cld_start = cl_index(descr->cld_obj, start);
346                         descr->cld_end   = cl_index(descr->cld_obj, end);
347                         descr->cld_mode  = parent->cll_descr.cld_mode;
348                         descr->cld_gid   = parent->cll_descr.cld_gid;
349                         descr->cld_enq_flags   = parent->cll_descr.cld_enq_flags;
350                         /* XXX has no effect */
351                         lck->lls_sub[nr].sub_got = *descr;
352                         lck->lls_sub[nr].sub_stripe = i;
353                         nr++;
354                 }
355         }
356         LASSERT(nr == lck->lls_nr);
357         /*
358          * Then, create sub-locks. Once at least one sub-lock was created,
359          * top-lock can be reached by other threads.
360          */
361         for (i = 0; i < lck->lls_nr; ++i) {
362                 struct cl_lock       *sublock;
363                 struct lov_lock_link *link;
364
365                 if (lck->lls_sub[i].sub_lock == NULL) {
366                         sublock = lov_sublock_alloc(env, io, lck, i, &link);
367                         if (IS_ERR(sublock)) {
368                                 result = PTR_ERR(sublock);
369                                 break;
370                         }
371                         cl_lock_get_trust(sublock);
372                         cl_lock_mutex_get(env, sublock);
373                         cl_lock_mutex_get(env, parent);
374                         /*
375                          * recheck under mutex that sub-lock wasn't created
376                          * concurrently, and that top-lock is still alive.
377                          */
378                         if (lck->lls_sub[i].sub_lock == NULL &&
379                             parent->cll_state < CLS_FREEING) {
380                                 lov_sublock_adopt(env, lck, sublock, i, link);
381                                 cl_lock_mutex_put(env, parent);
382                         } else {
383                                 OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
384                                 cl_lock_mutex_put(env, parent);
385                                 cl_lock_unhold(env, sublock,
386                                                "lov-parent", parent);
387                         }
388                         cl_lock_mutex_put(env, sublock);
389                         cl_lock_put(env, sublock);
390                 }
391         }
392         /*
393          * Some sub-locks can be missing at this point. This is not a problem,
394          * because enqueue will create them anyway. The main duty of this function
395          * is to fill in sub-lock descriptions in a race-free manner.
396          */
397         RETURN(result);
398 }
399
400 static int lov_sublock_release(const struct lu_env *env, struct lov_lock *lck,
401                                int i, int deluser, int rc)
402 {
403         struct cl_lock *parent = lck->lls_cl.cls_lock;
404
405         LASSERT(cl_lock_is_mutexed(parent));
406         ENTRY;
407
408         if (lck->lls_sub[i].sub_flags & LSF_HELD) {
409                 struct cl_lock    *sublock;
410                 int dying;
411
412                 LASSERT(lck->lls_sub[i].sub_lock != NULL);
413                 sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
414                 LASSERT(cl_lock_is_mutexed(sublock));
415
416                 lck->lls_sub[i].sub_flags &= ~LSF_HELD;
417                 if (deluser)
418                         cl_lock_user_del(env, sublock);
419                 /*
420                  * If the last hold is released and cancellation is pending
421                  * for a sub-lock, release the parent mutex to avoid holding
422                  * it while the sub-lock is being paged out.
423                  */
424                 dying = (sublock->cll_descr.cld_mode == CLM_PHANTOM ||
425                          sublock->cll_descr.cld_mode == CLM_GROUP ||
426                          (sublock->cll_flags & (CLF_CANCELPEND|CLF_DOOMED))) &&
427                         sublock->cll_holds == 1;
428                 if (dying)
429                         cl_lock_mutex_put(env, parent);
430                 cl_lock_unhold(env, sublock, "lov-parent", parent);
431                 if (dying) {
432                         cl_lock_mutex_get(env, parent);
433                         rc = lov_subresult(rc, CLO_REPEAT);
434                 }
435                 /*
436                  * From now on lck->lls_sub[i].sub_lock is a "weak" pointer,
437                  * not backed by a reference on a
438                  * sub-lock. lovsub_lock_delete() will clear
439                  * lck->lls_sub[i].sub_lock under semaphores, just before
440                  * sub-lock is destroyed.
441                  */
442         }
443         RETURN(rc);
444 }
445
446 static void lov_sublock_hold(const struct lu_env *env, struct lov_lock *lck,
447                              int i)
448 {
449         struct cl_lock *parent = lck->lls_cl.cls_lock;
450
451         LASSERT(cl_lock_is_mutexed(parent));
452         ENTRY;
453
454         if (!(lck->lls_sub[i].sub_flags & LSF_HELD)) {
455                 struct cl_lock *sublock;
456
457                 LASSERT(lck->lls_sub[i].sub_lock != NULL);
458                 sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
459                 LASSERT(cl_lock_is_mutexed(sublock));
460                 LASSERT(sublock->cll_state != CLS_FREEING);
461
462                 lck->lls_sub[i].sub_flags |= LSF_HELD;
463
464                 cl_lock_get_trust(sublock);
465                 cl_lock_hold_add(env, sublock, "lov-parent", parent);
466                 cl_lock_user_add(env, sublock);
467                 cl_lock_put(env, sublock);
468         }
469         EXIT;
470 }
471
472 static void lov_lock_fini(const struct lu_env *env,
473                           struct cl_lock_slice *slice)
474 {
475         struct lov_lock *lck;
476         int i;
477
478         ENTRY;
479         lck = cl2lov_lock(slice);
480         LASSERT(lck->lls_nr_filled == 0);
481         if (lck->lls_sub != NULL) {
482                 for (i = 0; i < lck->lls_nr; ++i)
483                         /*
484                          * No sub-locks exist at this point, as a sub-lock holds
485                          * a reference on its parent.
486                          */
487                         LASSERT(lck->lls_sub[i].sub_lock == NULL);
488                 OBD_FREE(lck->lls_sub, lck->lls_nr * sizeof lck->lls_sub[0]);
489         }
490         OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
491         EXIT;
492 }
493
494 /**
495  * Cancels a conflicting sub-lock and waits until it is destroyed.
496  * \retval CLO_REPEAT once the conflict is gone and enqueuing can be retried
497  * \retval -ve otherwise.
498  */
499 static int lov_lock_enqueue_wait(const struct lu_env *env,
500                                  struct lov_lock *lck,
501                                  struct cl_lock *sublock)
502 {
503         struct cl_lock *lock     = lck->lls_cl.cls_lock;
504         struct cl_lock *conflict = sublock->cll_conflict;
505         int result = CLO_REPEAT;
506         ENTRY;
507
508         LASSERT(cl_lock_is_mutexed(lock));
509         LASSERT(cl_lock_is_mutexed(sublock));
510         LASSERT(sublock->cll_state == CLS_QUEUING);
511         LASSERT(conflict != NULL);
512
513         sublock->cll_conflict = NULL;
514         cl_lock_mutex_put(env, lock);
515         cl_lock_mutex_put(env, sublock);
516
517         LASSERT(cl_lock_nr_mutexed(env) == 0);
518
519         cl_lock_mutex_get(env, conflict);
520         cl_lock_cancel(env, conflict);
521         cl_lock_delete(env, conflict);
522         while (conflict->cll_state != CLS_FREEING) {
523                 int rc = 0;
524
525                 rc = cl_lock_state_wait(env, conflict);
526                 if (rc == 0)
527                         continue;
528
529                 result = lov_subresult(result, rc);
530                 break;
531         }
532         cl_lock_mutex_put(env, conflict);
533         lu_ref_del(&conflict->cll_reference, "cancel-wait", sublock);
534         cl_lock_put(env, conflict);
535
536         cl_lock_mutex_get(env, lock);
537         RETURN(result);
538 }
539
540 /**
541  * Tries to advance a state machine of a given sub-lock toward enqueuing of
542  * the top-lock.
543  *
544  * \retval 0 if state-transition can proceed
545  * \retval -ve otherwise.
546  */
547 static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck,
548                                 struct cl_lock *sublock,
549                                 struct cl_io *io, __u32 enqflags, int last)
550 {
551         int result;
552         ENTRY;
553
554         /* first, try to enqueue a sub-lock ... */
555         result = cl_enqueue_try(env, sublock, io, enqflags);
556         if (sublock->cll_state == CLS_ENQUEUED)
557                 /* if it is enqueued, try to `wait' on it---maybe it's already
558                  * granted */
559                 result = cl_wait_try(env, sublock);
560         /*
561          * If the CEF_ASYNC flag is set, all sub-locks can be enqueued in
562          * parallel; otherwise enqueue has to wait until the sub-lock is granted
563          * before proceeding to the next one.
564          */
565         if (result == CLO_WAIT && sublock->cll_state <= CLS_HELD &&
566             enqflags & CEF_ASYNC && !last)
567                 result = 0;
568         RETURN(result);
569 }
570
571 /**
572  * Helper function for lov_lock_enqueue() that creates missing sub-lock.
573  */
574 static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
575                             struct cl_io *io, struct lov_lock *lck, int idx)
576 {
577         struct lov_lock_link *link;
578         struct cl_lock       *sublock;
579         int                   result;
580
581         LASSERT(parent->cll_depth == 1);
582         cl_lock_mutex_put(env, parent);
583         sublock = lov_sublock_alloc(env, io, lck, idx, &link);
584         if (!IS_ERR(sublock))
585                 cl_lock_mutex_get(env, sublock);
586         cl_lock_mutex_get(env, parent);
587
588         if (!IS_ERR(sublock)) {
589                 cl_lock_get_trust(sublock);
590                 if (parent->cll_state == CLS_QUEUING &&
591                     lck->lls_sub[idx].sub_lock == NULL) {
592                         lov_sublock_adopt(env, lck, sublock, idx, link);
593                 } else {
594                         OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
595                         /* another thread allocated the sub-lock, or enqueue is
596                          * no longer in progress */
597                         cl_lock_mutex_put(env, parent);
598                         cl_lock_unhold(env, sublock, "lov-parent", parent);
599                         cl_lock_mutex_get(env, parent);
600                 }
601                 cl_lock_mutex_put(env, sublock);
602                 cl_lock_put(env, sublock);
603                 result = CLO_REPEAT;
604         } else
605                 result = PTR_ERR(sublock);
606         return result;
607 }
608
609 /**
610  * Implementation of cl_lock_operations::clo_enqueue() for lov layer. This
611  * function is rather subtle, as it enqueues top-lock (i.e., advances top-lock
612  * state machine from CLS_QUEUING to CLS_ENQUEUED states) by juggling sub-lock
613  * state machines in the face of sub-locks sharing (by multiple top-locks),
614  * and concurrent sub-lock cancellations.
615  */
616 static int lov_lock_enqueue(const struct lu_env *env,
617                             const struct cl_lock_slice *slice,
618                             struct cl_io *io, __u32 enqflags)
619 {
620         struct cl_lock         *lock    = slice->cls_lock;
621         struct lov_lock        *lck     = cl2lov_lock(slice);
622         struct cl_lock_closure *closure = lov_closure_get(env, lock);
623         int i;
624         int result;
625         enum cl_lock_state minstate;
626
627         ENTRY;
628
629         for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
630                 int rc;
631                 struct lovsub_lock     *sub;
632                 struct lov_lock_sub    *lls;
633                 struct cl_lock         *sublock;
634                 struct lov_sublock_env *subenv;
635
636                 if (lock->cll_state != CLS_QUEUING) {
637                         /*
638                          * Lock might have left the QUEUING state if a previous
639                          * iteration released its mutex. Stop enqueuing in this
640                          * case and let the upper layer decide what to do.
641                          */
642                         LASSERT(i > 0 && result != 0);
643                         break;
644                 }
645
646                 lls = &lck->lls_sub[i];
647                 sub = lls->sub_lock;
648                 /*
649                  * Sub-lock might have been canceled while the top-lock was
650                  * cached.
651                  */
652                 if (sub == NULL) {
653                         result = lov_sublock_fill(env, lock, io, lck, i);
654                         /* lov_sublock_fill() released @lock mutex,
655                          * restart. */
656                         break;
657                 }
658                 sublock = sub->lss_cl.cls_lock;
659                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
660                 if (rc == 0) {
661                         lov_sublock_hold(env, lck, i);
662                         rc = lov_lock_enqueue_one(subenv->lse_env, lck, sublock,
663                                                   subenv->lse_io, enqflags,
664                                                   i == lck->lls_nr - 1);
665                         minstate = min(minstate, sublock->cll_state);
666                         if (rc == CLO_WAIT) {
667                                 switch (sublock->cll_state) {
668                                 case CLS_QUEUING:
669                                         /* take the mutex recursively; the lock is
670                                          * released in lov_lock_enqueue_wait().
671                                          */
672                                         cl_lock_mutex_get(env, sublock);
673                                         lov_sublock_unlock(env, sub, closure,
674                                                            subenv);
675                                         rc = lov_lock_enqueue_wait(env, lck,
676                                                                    sublock);
677                                         break;
678                                 case CLS_CACHED:
679                                         rc = lov_sublock_release(env, lck, i,
680                                                                  1, rc);
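                                        /* falls through to the unlock below */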
681                                 default:
682                                         lov_sublock_unlock(env, sub, closure,
683                                                            subenv);
684                                         break;
685                                 }
686                         } else {
687                                 LASSERT(sublock->cll_conflict == NULL);
688                                 lov_sublock_unlock(env, sub, closure, subenv);
689                         }
690                 }
691                 result = lov_subresult(result, rc);
692                 if (result != 0)
693                         break;
694         }
695         cl_lock_closure_fini(closure);
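        /* on success, the top-lock is enqueued only once every sub-lock has
         * reached at least CLS_ENQUEUED; otherwise keep waiting */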
696         RETURN(result ?: minstate >= CLS_ENQUEUED ? 0 : CLO_WAIT);
697 }
698
699 static int lov_lock_unuse(const struct lu_env *env,
700                           const struct cl_lock_slice *slice)
701 {
702         struct lov_lock        *lck     = cl2lov_lock(slice);
703         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
704         int i;
705         int result;
706
707         ENTRY;
708
709         for (result = 0, i = 0; i < lck->lls_nr; ++i) {
710                 int rc;
711                 struct lovsub_lock     *sub;
712                 struct cl_lock         *sublock;
713                 struct lov_lock_sub    *lls;
714                 struct lov_sublock_env *subenv;
715
716                 /* top-lock state cannot change concurrently, because a single
717                  * thread (the one that released the last hold) carries unlocking
718                  * through to completion. */
719                 LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
720                 lls = &lck->lls_sub[i];
721                 sub = lls->sub_lock;
722                 if (sub == NULL)
723                         continue;
724
725                 sublock = sub->lss_cl.cls_lock;
726                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
727                 if (rc == 0) {
728                         if (lls->sub_flags & LSF_HELD) {
729                                 LASSERT(sublock->cll_state == CLS_HELD);
730                                 rc = cl_unuse_try(subenv->lse_env, sublock);
731                                 rc = lov_sublock_release(env, lck, i, 0, rc);
732                         }
733                         lov_sublock_unlock(env, sub, closure, subenv);
734                 }
735                 result = lov_subresult(result, rc);
736         }
737
738         if (result == 0 && lck->lls_cancel_race) {
739                 lck->lls_cancel_race = 0;
740                 result = -ESTALE;
741         }
742         cl_lock_closure_fini(closure);
743         RETURN(result);
744 }
745
746
747 static void lov_lock_cancel(const struct lu_env *env,
748                            const struct cl_lock_slice *slice)
749 {
750         struct lov_lock        *lck     = cl2lov_lock(slice);
751         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
752         int i;
753         int result;
754
755         ENTRY;
756
757         for (result = 0, i = 0; i < lck->lls_nr; ++i) {
758                 int rc;
759                 struct lovsub_lock     *sub;
760                 struct cl_lock         *sublock;
761                 struct lov_lock_sub    *lls;
762                 struct lov_sublock_env *subenv;
763
764                 /* top-lock state cannot change concurrently, because a single
765                  * thread (the one that released the last hold) carries unlocking
766                  * through to completion. */
767                 lls = &lck->lls_sub[i];
768                 sub = lls->sub_lock;
769                 if (sub == NULL)
770                         continue;
771
772                 sublock = sub->lss_cl.cls_lock;
773                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
774                 if (rc == 0) {
775                         if (!(lls->sub_flags & LSF_HELD)) {
776                                 lov_sublock_unlock(env, sub, closure, subenv);
777                                 continue;
778                         }
779
780                         switch(sublock->cll_state) {
781                         case CLS_HELD:
782                                 rc = cl_unuse_try(subenv->lse_env,
783                                                   sublock);
784                                 lov_sublock_release(env, lck, i, 0, 0);
785                                 break;
786                         case CLS_ENQUEUED:
787                                 /* TODO: it's not a good idea to cancel this
788                                  * lock because it's innocent. But it's
789                                  * acceptable. The better way would be to
790                                  * define a new lock method to unhold the
791                                  * dlm lock. */
792                                 cl_lock_cancel(env, sublock);
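                                        /* falls through to the release below */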
793                         default:
794                                 lov_sublock_release(env, lck, i, 1, 0);
795                                 break;
796                         }
797                         lov_sublock_unlock(env, sub, closure, subenv);
798                 }
799
800                 if (rc == CLO_REPEAT) {
801                         --i;
802                         continue;
803                 }
804
805                 result = lov_subresult(result, rc);
806         }
807
808         if (result)
809                 CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
810                               "lov_lock_cancel fails with %d.\n", result);
811
812         cl_lock_closure_fini(closure);
813 }
814
815 static int lov_lock_wait(const struct lu_env *env,
816                          const struct cl_lock_slice *slice)
817 {
818         struct lov_lock        *lck     = cl2lov_lock(slice);
819         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
820         enum cl_lock_state      minstate;
821         int                     result;
822         int                     i;
823
824         ENTRY;
825
826         for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
827                 int rc;
828                 struct lovsub_lock     *sub;
829                 struct cl_lock         *sublock;
830                 struct lov_lock_sub    *lls;
831                 struct lov_sublock_env *subenv;
832
833                 lls = &lck->lls_sub[i];
834                 sub = lls->sub_lock;
835                 LASSERT(sub != NULL);
836                 sublock = sub->lss_cl.cls_lock;
837                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
838                 if (rc == 0) {
839                         LASSERT(sublock->cll_state >= CLS_ENQUEUED);
840                         if (sublock->cll_state < CLS_HELD)
841                                 rc = cl_wait_try(env, sublock);
842
843                         minstate = min(minstate, sublock->cll_state);
844                         lov_sublock_unlock(env, sub, closure, subenv);
845                 }
846                 result = lov_subresult(result, rc);
847                 if (result != 0)
848                         break;
849         }
850         cl_lock_closure_fini(closure);
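        /* on success, the top-lock is granted only once every sub-lock has
         * reached at least CLS_HELD; otherwise keep waiting */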
851         RETURN(result ?: minstate >= CLS_HELD ? 0 : CLO_WAIT);
852 }
853
854 static int lov_lock_use(const struct lu_env *env,
855                         const struct cl_lock_slice *slice)
856 {
857         struct lov_lock        *lck     = cl2lov_lock(slice);
858         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
859         int                     result;
860         int                     i;
861
862         LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
863         ENTRY;
864
865         for (result = 0, i = 0; i < lck->lls_nr; ++i) {
866                 int rc;
867                 struct lovsub_lock     *sub;
868                 struct cl_lock         *sublock;
869                 struct lov_lock_sub    *lls;
870                 struct lov_sublock_env *subenv;
871
872                 LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
873
874                 lls = &lck->lls_sub[i];
875                 sub = lls->sub_lock;
876                 if (sub == NULL) {
877                         /*
878                          * Sub-lock might have been canceled while the top-lock was
879                          * cached.
880                          */
881                         result = -ESTALE;
882                         break;
883                 }
884
885                 sublock = sub->lss_cl.cls_lock;
886                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
887                 if (rc == 0) {
888                         LASSERT(sublock->cll_state != CLS_FREEING);
889                         lov_sublock_hold(env, lck, i);
890                         if (sublock->cll_state == CLS_CACHED) {
891                                 rc = cl_use_try(subenv->lse_env, sublock, 0);
892                                 if (rc != 0)
893                                         rc = lov_sublock_release(env, lck,
894                                                                  i, 1, rc);
895                         }
896                         lov_sublock_unlock(env, sub, closure, subenv);
897                 }
898                 result = lov_subresult(result, rc);
899                 if (result != 0)
900                         break;
901         }
902
903         if (lck->lls_cancel_race) {
904                 /*
905                  * If unlocking happened at the same time, the sub-lock state
906                  * should be FREEING, and lov_sublock_lock() should return
907                  * CLO_REPEAT. In this case, return -ESTALE so that the upper
908                  * layer resets the lock state to NEW.
909                  */
910                 lck->lls_cancel_race = 0;
911                 LASSERT(result != 0);
912                 result = -ESTALE;
913         }
914         cl_lock_closure_fini(closure);
915         RETURN(result);
916 }
917
918 #if 0
919 static int lock_lock_multi_match()
920 {
921         struct cl_lock          *lock    = slice->cls_lock;
922         struct cl_lock_descr    *subneed = &lov_env_info(env)->lti_ldescr;
923         struct lov_object       *loo     = cl2lov(lov->lls_cl.cls_obj);
924         struct lov_layout_raid0 *r0      = lov_r0(loo);
925         struct lov_lock_sub     *sub;
926         struct cl_object        *subobj;
927         obd_off  fstart;
928         obd_off  fend;
929         obd_off  start;
930         obd_off  end;
931         int i;
932
933         fstart = cl_offset(need->cld_obj, need->cld_start);
934         fend   = cl_offset(need->cld_obj, need->cld_end + 1) - 1;
935         subneed->cld_mode = need->cld_mode;
936         cl_lock_mutex_get(env, lock);
937         for (i = 0; i < lov->lls_nr; ++i) {
938                 sub = &lov->lls_sub[i];
939                 if (sub->sub_lock == NULL)
940                         continue;
941                 subobj = sub->sub_descr.cld_obj;
942                 if (!lov_stripe_intersects(r0->lo_lsm, sub->sub_stripe,
943                                            fstart, fend, &start, &end))
944                         continue;
945                 subneed->cld_start = cl_index(subobj, start);
946                 subneed->cld_end   = cl_index(subobj, end);
947                 subneed->cld_obj   = subobj;
948                 if (!cl_lock_ext_match(&sub->sub_got, subneed)) {
949                         result = 0;
950                         break;
951                 }
952         }
953         cl_lock_mutex_put(env, lock);
954 }
955 #endif
956
957 /**
958  * Check whether the extent region \a descr, restricted to the given
959  * \a stripe, is covered by \a child.
960  */
961 static int lov_lock_stripe_is_matching(const struct lu_env *env,
962                                        struct lov_object *lov, int stripe,
963                                        const struct cl_lock_descr *child,
964                                        const struct cl_lock_descr *descr)
965 {
966         struct lov_stripe_md *lsm = lov_r0(lov)->lo_lsm;
967         obd_off start;
968         obd_off end;
969         int result;
970
971         if (lov_r0(lov)->lo_nr == 1)
972                 return cl_lock_ext_match(child, descr);
973
974         /*
975  * For a multi-stripe object:
976          * - make sure the descr only covers child's stripe, and
977          * - check if extent is matching.
978          */
979         start = cl_offset(&lov->lo_cl, descr->cld_start);
980         end   = cl_offset(&lov->lo_cl, descr->cld_end + 1) - 1;
981         result = end - start <= lsm->lsm_stripe_size &&
982                  stripe == lov_stripe_number(lsm, start) &&
983                  stripe == lov_stripe_number(lsm, end);
984         if (result) {
985                 struct cl_lock_descr *subd = &lov_env_info(env)->lti_ldescr;
986                 obd_off sub_start;
987                 obd_off sub_end;
988
989                 subd->cld_obj  = NULL;   /* don't need sub object at all */
990                 subd->cld_mode = descr->cld_mode;
991                 subd->cld_gid  = descr->cld_gid;
992                 result = lov_stripe_intersects(lsm, stripe, start, end,
993                                                &sub_start, &sub_end);
994                 LASSERT(result);
995                 subd->cld_start = cl_index(child->cld_obj, sub_start);
996                 subd->cld_end   = cl_index(child->cld_obj, sub_end);
997                 result = cl_lock_ext_match(child, subd);
998         }
999         return result;
1000 }
1001
1002 /**
1003  * An implementation of cl_lock_operations::clo_fits_into() method.
1004  *
1005  * Checks whether a lock (given by \a slice) is suitable for \a
1006  * io. Multi-stripe locks can be used only for "quick" io, like truncate, or
1007  * O_APPEND write.
1008  *
1009  * \see ccc_lock_fits_into().
1010  */
1011 static int lov_lock_fits_into(const struct lu_env *env,
1012                               const struct cl_lock_slice *slice,
1013                               const struct cl_lock_descr *need,
1014                               const struct cl_io *io)
1015 {
1016         struct lov_lock   *lov = cl2lov_lock(slice);
1017         struct lov_object *obj = cl2lov(slice->cls_obj);
1018         int result;
1019
1020         LASSERT(cl_object_same(need->cld_obj, slice->cls_obj));
1021         LASSERT(lov->lls_nr > 0);
1022
1023         ENTRY;
1024
1025         if (need->cld_mode == CLM_GROUP)
1026                 /*
1027                  * always allow matching a group lock.
1028                  */
1029                 result = cl_lock_ext_match(&lov->lls_orig, need);
1030         else if (lov->lls_nr == 1) {
1031                 struct cl_lock_descr *got = &lov->lls_sub[0].sub_got;
1032                 result = lov_lock_stripe_is_matching(env,
1033                                                      cl2lov(slice->cls_obj),
1034                                                      lov->lls_sub[0].sub_stripe,
1035                                                      got, need);
1036         } else if (io->ci_type != CIT_TRUNC && io->ci_type != CIT_MISC &&
1037                    !cl_io_is_append(io) && need->cld_mode != CLM_PHANTOM)
1038                 /*
1039                  * Multi-stripe locks are only suitable for `quick' IO and for
1040                  * glimpse.
1041                  */
1042                 result = 0;
1043         else
1044                 /*
1045                  * Most general case: multi-stripe existing lock, and
1046                  * (potentially) multi-stripe @need lock. Check that @need is
1047                  * covered by @lov's sub-locks.
1048                  *
1049                  * For now, ignore lock expansions made by the server, and
1050                  * match against original lock extent.
1051                  */
1052                 result = cl_lock_ext_match(&lov->lls_orig, need);
1053         CDEBUG(D_DLMTRACE, DDESCR"/"DDESCR" %i %i/%i: %i\n",
1054                PDESCR(&lov->lls_orig), PDESCR(&lov->lls_sub[0].sub_got),
1055                lov->lls_sub[0].sub_stripe, lov->lls_nr, lov_r0(obj)->lo_nr,
1056                result);
1057         RETURN(result);
1058 }
1059
1060 void lov_lock_unlink(const struct lu_env *env,
1061                      struct lov_lock_link *link, struct lovsub_lock *sub)
1062 {
1063         struct lov_lock *lck    = link->lll_super;
1064         struct cl_lock  *parent = lck->lls_cl.cls_lock;
1065
1066         LASSERT(cl_lock_is_mutexed(parent));
1067         LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
1068         ENTRY;
1069
1070         list_del_init(&link->lll_list);
1071         LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
1072         /* yank this sub-lock from parent's array */
1073         lck->lls_sub[link->lll_idx].sub_lock = NULL;
1074         LASSERT(lck->lls_nr_filled > 0);
1075         lck->lls_nr_filled--;
1076         lu_ref_del(&parent->cll_reference, "lov-child", sub->lss_cl.cls_lock);
1077         cl_lock_put(env, parent);
1078         OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
1079         EXIT;
1080 }
1081
1082 struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
1083                                          struct lov_lock *lck,
1084                                          struct lovsub_lock *sub)
1085 {
1086         struct lov_lock_link *scan;
1087
1088         LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
1089         ENTRY;
1090
1091         list_for_each_entry(scan, &sub->lss_parents, lll_list) {
1092                 if (scan->lll_super == lck)
1093                         RETURN(scan);
1094         }
1095         RETURN(NULL);
1096 }
1097
1098 /**
1099  * An implementation of cl_lock_operations::clo_delete() method. This is
1100  * invoked for "top-to-bottom" delete, when lock destruction starts from the
1101  * top-lock, e.g., as a result of inode destruction.
1102  *
1103  * Unlinks the top-lock from all its sub-locks. Sub-locks are not deleted
1104  * here; that is done separately elsewhere:
1105  *
1106  *     - for inode destruction, lov_object_delete() calls cl_object_kill() for
1107  *       each sub-object, purging its locks;
1108  *
1109  *     - in other cases (e.g., a fatal error with a top-lock) sub-locks are
1110  *       left in the cache.
1111  */
1112 static void lov_lock_delete(const struct lu_env *env,
1113                             const struct cl_lock_slice *slice)
1114 {
1115         struct lov_lock        *lck     = cl2lov_lock(slice);
1116         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
1117         int i;
1118
1119         LASSERT(slice->cls_lock->cll_state == CLS_FREEING);
1120         ENTRY;
1121
1122         for (i = 0; i < lck->lls_nr; ++i) {
1123                 struct lov_lock_sub *lls;
1124                 struct lovsub_lock  *lsl;
1125                 struct cl_lock      *sublock;
1126                 int rc;
1127
1128                 lls = &lck->lls_sub[i];
1129                 lsl = lls->sub_lock;
1130                 if (lsl == NULL)
1131                         continue;
1132
1133                 sublock = lsl->lss_cl.cls_lock;
1134                 rc = lov_sublock_lock(env, lck, lls, closure, NULL);
1135                 if (rc == 0) {
1136                         if (lls->sub_flags & LSF_HELD)
1137                                 lov_sublock_release(env, lck, i, 1, 0);
1138                         if (sublock->cll_state < CLS_FREEING) {
1139                                 struct lov_lock_link *link;
1140
1141                                 link = lov_lock_link_find(env, lck, lsl);
1142                                 LASSERT(link != NULL);
1143                                 lov_lock_unlink(env, link, lsl);
1144                                 LASSERT(lck->lls_sub[i].sub_lock == NULL);
1145                         }
1146                         lov_sublock_unlock(env, lsl, closure, NULL);
1147                 } else if (rc == CLO_REPEAT) {
1148                         --i; /* repeat with this lock */
1149                 } else {
1150                         CL_LOCK_DEBUG(D_ERROR, env, sublock,
1151                                       "Cannot get sub-lock for delete: %i\n",
1152                                       rc);
1153                 }
1154         }
1155         cl_lock_closure_fini(closure);
1156         EXIT;
1157 }
1158
1159 static int lov_lock_print(const struct lu_env *env, void *cookie,
1160                           lu_printer_t p, const struct cl_lock_slice *slice)
1161 {
1162         struct lov_lock *lck = cl2lov_lock(slice);
1163         int              i;
1164
1165         (*p)(env, cookie, "%d\n", lck->lls_nr);
1166         for (i = 0; i < lck->lls_nr; ++i) {
1167                 struct lov_lock_sub *sub;
1168
1169                 sub = &lck->lls_sub[i];
1170                 (*p)(env, cookie, "    %d %x: ", i, sub->sub_flags);
1171                 if (sub->sub_lock != NULL)
1172                         cl_lock_print(env, cookie, p,
1173                                       sub->sub_lock->lss_cl.cls_lock);
1174                 else
1175                         (*p)(env, cookie, "---\n");
1176         }
1177         return 0;
1178 }
1179
1180 static const struct cl_lock_operations lov_lock_ops = {
1181         .clo_fini      = lov_lock_fini,
1182         .clo_enqueue   = lov_lock_enqueue,
1183         .clo_wait      = lov_lock_wait,
1184         .clo_use       = lov_lock_use,
1185         .clo_unuse     = lov_lock_unuse,
1186         .clo_cancel    = lov_lock_cancel,
1187         .clo_fits_into = lov_lock_fits_into,
1188         .clo_delete    = lov_lock_delete,
1189         .clo_print     = lov_lock_print
1190 };
1191
1192 int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
1193                         struct cl_lock *lock, const struct cl_io *io)
1194 {
1195         struct lov_lock *lck;
1196         int result;
1197
1198         ENTRY;
1199         OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, CFS_ALLOC_IO);
1200         if (lck != NULL) {
1201                 cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
1202                 result = lov_lock_sub_init(env, lck, io);
1203         } else
1204                 result = -ENOMEM;
1205         RETURN(result);
1206 }
1207
1208 static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
1209                                                struct cl_lock *parent)
1210 {
1211         struct cl_lock_closure *closure;
1212
1213         closure = &lov_env_info(env)->lti_closure;
1214         LASSERT(list_empty(&closure->clc_list));
1215         cl_lock_closure_init(env, closure, parent, 1);
1216         return closure;
1217 }
1218
1219
1220 /** @} lov */