lustre/lov/lov_lock.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Implementation of cl_lock for LOV layer.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_LOV
42
43 #include "lov_cl_internal.h"
44
45 /** \addtogroup lov lov @{ */
46
47 static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
48                                                struct cl_lock *parent);
49
50 /*****************************************************************************
51  *
52  * Lov lock operations.
53  *
54  */
55
56 static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
57                                                    struct cl_lock *parent,
58                                                    struct lov_lock_sub *lls)
59 {
60         struct lov_sublock_env *subenv;
61         struct lov_io          *lio    = lov_env_io(env);
62         struct cl_io           *io     = lio->lis_cl.cis_io;
63         struct lov_io_sub      *sub;
64
65         subenv = &lov_env_session(env)->ls_subenv;
66
67          * FIXME: we tend to use the sub-io's env & io to call the sub-lock
68          * lock operations, because an osc lock sometimes stores control
69          * variables in the thread's IO information (currently only the
70          * lockless information). However, if the lock's host object differs
71          * from the object of the current IO, there is no way to get the
72          * sub-env and sub-io, because they are not initialized at all. As a
73          * temporary fix, in that case we still borrow the parent's env to
74          * call the sub-lock operations.
75          */
76         if (!cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
77                 subenv->lse_env = env;
78                 subenv->lse_io  = io;
79                 subenv->lse_sub = NULL;
80         } else {
81                 LASSERT(io != NULL);
82                 sub = lov_sub_get(env, lio, lls->sub_stripe);
83                 if (!IS_ERR(sub)) {
84                         subenv->lse_env = sub->sub_env;
85                         subenv->lse_io  = sub->sub_io;
86                         subenv->lse_sub = sub;
87                 } else {
88                         subenv = (void*)sub;
89                 }
90         }
91         return subenv;
92 }
93
94 static void lov_sublock_env_put(struct lov_sublock_env *subenv)
95 {
96         if (subenv && subenv->lse_sub)
97                 lov_sub_put(subenv->lse_sub);
98 }
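/*
 * A typical caller pairs the two helpers above around a sub-lock operation;
 * a minimal usage sketch (error handling elided):
 *
 *      subenv = lov_sublock_env_get(env, parent, lls);
 *      if (!IS_ERR(subenv)) {
 *              ... call sub-lock operations with subenv->lse_env and
 *              ... subenv->lse_io ...
 *              lov_sublock_env_put(subenv);
 *      }
 *
 * lov_sublock_env_put() is a no-op when lse_sub is NULL, i.e. when the
 * parent's environment was borrowed in the object-mismatch case above.
 */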
99
100 static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck,
101                               struct cl_lock *sublock, int idx,
102                               struct lov_lock_link *link)
103 {
104         struct lovsub_lock *lsl;
105         struct cl_lock     *parent = lck->lls_cl.cls_lock;
106         int                 rc;
107
108         LASSERT(cl_lock_is_mutexed(parent));
109         LASSERT(cl_lock_is_mutexed(sublock));
110         ENTRY;
111
112         lsl = cl2sub_lock(sublock);
113         /*
114          * check that the sub-lock has no lock link to this top-lock yet.
115          */
116         LASSERT(lov_lock_link_find(env, lck, lsl) == NULL);
117         LASSERT(idx < lck->lls_nr);
118
119         lck->lls_sub[idx].sub_lock = lsl;
120         lck->lls_nr_filled++;
121         LASSERT(lck->lls_nr_filled <= lck->lls_nr);
122         list_add_tail(&link->lll_list, &lsl->lss_parents);
123         link->lll_idx = idx;
124         link->lll_super = lck;
125         cl_lock_get(parent);
126         lu_ref_add(&parent->cll_reference, "lov-child", sublock);
127         lck->lls_sub[idx].sub_flags |= LSF_HELD;
128         cl_lock_user_add(env, sublock);
129
130         rc = lov_sublock_modify(env, lck, lsl, &sublock->cll_descr, idx);
131         LASSERT(rc == 0); /* there is no way this can fail, currently */
132         EXIT;
133 }
134
135 static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
136                                          const struct cl_io *io,
137                                          struct lov_lock *lck,
138                                          int idx, struct lov_lock_link **out)
139 {
140         struct cl_lock       *sublock;
141         struct cl_lock       *parent;
142         struct lov_lock_link *link;
143
144         LASSERT(idx < lck->lls_nr);
145         ENTRY;
146
147         OBD_SLAB_ALLOC_PTR(link, lov_lock_link_kmem);
148         if (link != NULL) {
149                 struct lov_sublock_env *subenv;
150                 struct lov_lock_sub  *lls;
151                 struct cl_lock_descr *descr;
152
153                 parent = lck->lls_cl.cls_lock;
154                 lls    = &lck->lls_sub[idx];
155                 descr  = &lls->sub_descr;
156
157                 subenv = lov_sublock_env_get(env, parent, lls);
158                 if (!IS_ERR(subenv)) {
159                         /* CAVEAT: don't try to add a field in lov_lock_sub
160                          * to remember the sub-io, because a lock can be
161                          * cached, while an IO cannot. This means a sub-lock
162                          * might be referenced from different IO contexts.
163                          * -jay */
164
165                         sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
166                                                descr, "lov-parent", parent);
167                         lov_sublock_env_put(subenv);
168                 } else {
169                         /* an error occurred */
170                         sublock = (void*)subenv;
171                 }
172
173                 if (!IS_ERR(sublock))
174                         *out = link;
175                 else
176                         OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
177         } else
178                 sublock = ERR_PTR(-ENOMEM);
179         RETURN(sublock);
180 }
181
182 static void lov_sublock_unlock(const struct lu_env *env,
183                                struct lovsub_lock *lsl,
184                                struct cl_lock_closure *closure,
185                                struct lov_sublock_env *subenv)
186 {
187         ENTRY;
188         lov_sublock_env_put(subenv);
189         lsl->lss_active = NULL;
190         cl_lock_disclosure(env, closure);
191         EXIT;
192 }
193
194 static int lov_sublock_lock(const struct lu_env *env,
195                             struct lov_lock_sub *lls,
196                             struct cl_lock_closure *closure,
197                             struct lov_sublock_env **lsep)
198 {
199         struct cl_lock *child;
200         int             result = 0;
201         ENTRY;
202
203         LASSERT(list_empty(&closure->clc_list));
204
205         child = lls->sub_lock->lss_cl.cls_lock;
206         result = cl_lock_closure_build(env, child, closure);
207         if (result == 0) {
208                 struct cl_lock *parent = closure->clc_origin;
209
210                 LASSERT(cl_lock_is_mutexed(child));
211                 lls->sub_lock->lss_active = parent;
212
213                 if (lsep) {
214                         struct lov_sublock_env *subenv;
215                         subenv = lov_sublock_env_get(env, parent, lls);
216                         if (IS_ERR(subenv)) {
217                                 lov_sublock_unlock(env, lls->sub_lock,
218                                                    closure, NULL);
219                                 result = PTR_ERR(subenv);
220                         } else {
221                                 *lsep = subenv;
222                         }
223                 }
224         }
225         RETURN(result);
226 }
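/*
 * lov_sublock_lock() and lov_sublock_unlock() are used in pairs by the
 * top-lock operations below; a minimal sketch of the pattern:
 *
 *      rc = lov_sublock_lock(env, lls, closure, &subenv);
 *      if (rc == 0) {
 *              ... operate on the sub-lock through subenv ...
 *              lov_sublock_unlock(env, lls->sub_lock, closure, subenv);
 *      }
 *
 * On success the sub-lock mutex is held as part of the closure and
 * lss_active points back to the top-lock until the matching unlock.
 */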
227
228 /**
229  * Updates the result of a top-lock operation from a result of sub-lock
230  * sub-operations. Top-operations like lov_lock_{enqueue,use,unuse}() iterate
231  * over sub-locks and lov_subresult() is used to calculate return value of a
232  * top-operation. To this end, possible return values of sub-operations are
233  * ordered as
234  *
235  *     - 0                  success
236  *     - CLO_WAIT           wait for event
237  *     - CLO_REPEAT         repeat top-operation
238  *     - -ve                fundamental error
239  *
240  * The top-level return code can only move down this list. CLO_REPEAT
241  * overrides CLO_WAIT, because the lock mutex was released and the sleeping
242  * condition has to be rechecked by the upper layer.
243  */
244 static int lov_subresult(int result, int rc)
245 {
246         int result_rank;
247         int rc_rank;
248
249         LASSERT(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT);
250         LASSERT(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT);
251         CLASSERT(CLO_WAIT < CLO_REPEAT);
252
253         ENTRY;
254
255         /* calculate ranks in the ordering above */
256         result_rank = result < 0 ? 1 + CLO_REPEAT : result;
257         rc_rank = rc < 0 ? 1 + CLO_REPEAT : rc;
258
259         if (result_rank < rc_rank)
260                 result = rc;
261         RETURN(result);
262 }
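/*
 * A worked example of the ordering above (return codes are symbolic, only
 * their relative ranks matter): starting from result == 0,
 *
 *      result = lov_subresult(result, CLO_WAIT);    -> CLO_WAIT
 *      result = lov_subresult(result, CLO_REPEAT);  -> CLO_REPEAT
 *      result = lov_subresult(result, 0);           -> CLO_REPEAT (unchanged)
 *      result = lov_subresult(result, -EIO);        -> -EIO
 *
 * i.e. an error from any sub-lock dominates, CLO_REPEAT dominates CLO_WAIT,
 * and a successful sub-operation never masks an earlier condition.
 */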
263
264 /**
265  * Creates sub-locks for a given lov_lock for the first time.
266  *
267  * Goes through all sub-objects of the top-object and creates sub-locks on
268  * every sub-object intersecting with the top-lock extent. This is complicated
269  * by the fact that the top-lock being created can be accessed concurrently
270  * through already created sub-locks (possibly shared with other top-locks).
271  */
272 static int lov_lock_sub_init(const struct lu_env *env,
273                              struct lov_lock *lck, const struct cl_io *io)
274 {
275         int result = 0;
276         int i;
277         int j;
278         int nr;
279         int stripe;
280         int start_stripe;
281         obd_off start;
282         obd_off end;
283         obd_off file_start;
284         obd_off file_end;
285
286         struct lov_object       *loo    = cl2lov(lck->lls_cl.cls_obj);
287         struct lov_layout_raid0 *r0     = lov_r0(loo);
288         struct cl_lock          *parent = lck->lls_cl.cls_lock;
289
290         ENTRY;
291
292         lck->lls_orig = parent->cll_descr;
293         file_start = cl_offset(lov2cl(loo), parent->cll_descr.cld_start);
294         file_end   = cl_offset(lov2cl(loo), parent->cll_descr.cld_end + 1) - 1;
295
296         start_stripe = lov_stripe_number(r0->lo_lsm, file_start);
297         for (i = 0, nr = 0; i < r0->lo_nr; i++) {
298                 /*
299                  * XXX for wide striping, a smarter algorithm that breaks
300                  * out of the loop early is desirable.
301                  */
302                 stripe = (start_stripe + i) % r0->lo_nr;
303                 if (lov_stripe_intersects(r0->lo_lsm, stripe,
304                                           file_start, file_end, &start, &end))
305                         nr++;
306         }
307         LASSERT(nr > 0);
308         OBD_ALLOC(lck->lls_sub, nr * sizeof lck->lls_sub[0]);
309         if (lck->lls_sub == NULL)
310                 RETURN(-ENOMEM);
311
312         lck->lls_nr = nr;
313         /*
314          * First, fill in sub-lock descriptions in
315          * lck->lls_sub[].sub_descr. They are used by lov_sublock_alloc()
316          * (called below in this function, and by lov_lock_enqueue()) to
317          * create sub-locks. At this moment, no other thread can access
318          * top-lock.
319          */
320         for (j = 0, nr = 0; j < i; ++j) {
321                 stripe = (start_stripe + j) % r0->lo_nr;
322                 if (lov_stripe_intersects(r0->lo_lsm, stripe,
323                                           file_start, file_end, &start, &end)) {
324                         struct cl_lock_descr *descr;
325
326                         descr = &lck->lls_sub[nr].sub_descr;
327
328                         LASSERT(descr->cld_obj == NULL);
329                         descr->cld_obj   = lovsub2cl(r0->lo_sub[stripe]);
330                         descr->cld_start = cl_index(descr->cld_obj, start);
331                         descr->cld_end   = cl_index(descr->cld_obj, end);
332                         descr->cld_mode  = parent->cll_descr.cld_mode;
333                         lck->lls_sub[nr].sub_got = *descr;
334                         lck->lls_sub[nr].sub_stripe = stripe;
335                         nr++;
336                 }
337         }
338         LASSERT(nr == lck->lls_nr);
339         /*
340          * Then, create sub-locks. Once at least one sub-lock has been
341          * created, the top-lock can be reached by other threads.
342          */
343         for (i = 0; i < lck->lls_nr; ++i) {
344                 struct cl_lock       *sublock;
345                 struct lov_lock_link *link;
346
347                 if (lck->lls_sub[i].sub_lock == NULL) {
348                         sublock = lov_sublock_alloc(env, io, lck, i, &link);
349                         if (IS_ERR(sublock)) {
350                                 result = PTR_ERR(sublock);
351                                 break;
352                         }
353                         cl_lock_mutex_get(env, sublock);
354                         cl_lock_mutex_get(env, parent);
355                         /*
356                          * recheck under mutex that sub-lock wasn't created
357                          * concurrently, and that top-lock is still alive.
358                          */
359                         if (lck->lls_sub[i].sub_lock == NULL &&
360                             parent->cll_state < CLS_FREEING) {
361                                 lov_sublock_adopt(env, lck, sublock, i, link);
362                                 cl_lock_mutex_put(env, parent);
363                         } else {
364                                 cl_lock_mutex_put(env, parent);
365                                 cl_lock_unhold(env, sublock,
366                                                "lov-parent", parent);
367                         }
368                         cl_lock_mutex_put(env, sublock);
369                 }
370         }
371         /*
372          * Some sub-locks can be missing at this point. This is not a problem,
373          * because enqueue will create them anyway. The main duty of this
374          * function is to fill in sub-lock descriptions in a race-free manner.
375          */
376         RETURN(result);
377 }
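/*
 * As a concrete illustration (the numbers are hypothetical): with three
 * 1MB stripes and a top-lock extent covering file offsets [512K, 2.5M],
 * all three sub-objects intersect the extent, so lls_nr == 3 and three
 * sub-lock descriptions are filled in; with an extent of [0, 512K] only
 * the first stripe intersects and lls_nr == 1.
 */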
378
379 static int lov_sublock_release(const struct lu_env *env, struct lov_lock *lck,
380                                int i, int deluser, int rc)
381 {
382         struct cl_lock *parent = lck->lls_cl.cls_lock;
383
384         LASSERT(cl_lock_is_mutexed(parent));
385         ENTRY;
386
387         if (lck->lls_sub[i].sub_flags & LSF_HELD) {
388                 struct cl_lock    *sublock;
389                 int dying;
390
391                 LASSERT(lck->lls_sub[i].sub_lock != NULL);
392                 sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
393                 LASSERT(cl_lock_is_mutexed(sublock));
394
395                 lck->lls_sub[i].sub_flags &= ~LSF_HELD;
396                 if (deluser)
397                         cl_lock_user_del(env, sublock);
398                 /*
399                  * If the last hold is released and cancellation is pending
400                  * for a sub-lock, release the parent mutex to avoid holding
401                  * it while the sub-lock is being paged out.
402                  */
403                 dying = (sublock->cll_descr.cld_mode == CLM_PHANTOM ||
404                          (sublock->cll_flags & (CLF_CANCELPEND|CLF_DOOMED))) &&
405                         sublock->cll_holds == 1;
406                 if (dying)
407                         cl_lock_mutex_put(env, parent);
408                 cl_lock_unhold(env, sublock, "lov-parent", parent);
409                 if (dying) {
410                         cl_lock_mutex_get(env, parent);
411                         rc = lov_subresult(rc, CLO_REPEAT);
412                 }
413                 /*
414                  * From now on lck->lls_sub[i].sub_lock is a "weak" pointer,
415                  * not backed by a reference on a
416                  * sub-lock. lovsub_lock_delete() will clear
417                  * lck->lls_sub[i].sub_lock under semaphores, just before
418                  * sub-lock is destroyed.
419                  */
420         }
421         RETURN(rc);
422 }
423
424 static void lov_sublock_hold(const struct lu_env *env, struct lov_lock *lck,
425                              int i)
426 {
427         struct cl_lock *parent = lck->lls_cl.cls_lock;
428
429         LASSERT(cl_lock_is_mutexed(parent));
430         ENTRY;
431
432         if (!(lck->lls_sub[i].sub_flags & LSF_HELD)) {
433                 struct cl_lock *sublock;
434
435                 LASSERT(lck->lls_sub[i].sub_lock != NULL);
436                 sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
437                 LASSERT(cl_lock_is_mutexed(sublock));
438                 LASSERT(sublock->cll_state != CLS_FREEING);
439
440                 lck->lls_sub[i].sub_flags |= LSF_HELD;
441
442                 cl_lock_get_trust(sublock);
443                 cl_lock_hold_add(env, sublock, "lov-parent", parent);
444                 cl_lock_user_add(env, sublock);
445                 cl_lock_put(env, sublock);
446         }
447         EXIT;
448 }
449
450 static void lov_lock_fini(const struct lu_env *env,
451                           struct cl_lock_slice *slice)
452 {
453         struct lov_lock *lck;
454         int i;
455
456         ENTRY;
457         lck = cl2lov_lock(slice);
458         LASSERT(lck->lls_nr_filled == 0);
459         if (lck->lls_sub != NULL) {
460                 for (i = 0; i < lck->lls_nr; ++i)
461                         /*
462                          * No sub-locks exist at this point, as a sub-lock
463                          * holds a reference on its parent.
464                          */
465                         LASSERT(lck->lls_sub[i].sub_lock == NULL);
466                 OBD_FREE(lck->lls_sub, lck->lls_nr * sizeof lck->lls_sub[0]);
467         }
468         OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
469         EXIT;
470 }
471
472 /**
473  * Tries to advance a state machine of a given sub-lock toward enqueuing of
474  * the top-lock.
475  *
476  * \retval 0 if state-transition can proceed
477  * \retval -ve otherwise.
478  */
479 static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck,
480                                 struct cl_lock *sublock,
481                                 struct cl_io *io, __u32 enqflags, int last)
482 {
483         int result;
484         ENTRY;
485
486         /* first, try to enqueue a sub-lock ... */
487         result = cl_enqueue_try(env, sublock, io, enqflags);
488         if (sublock->cll_state == CLS_ENQUEUED)
489                 /* if it is enqueued, try to `wait' on it---maybe it's already
490                  * granted */
491                 result = cl_wait_try(env, sublock);
492         /*
493          * If the CEF_ASYNC flag is set, all sub-locks can be enqueued in
494          * parallel; otherwise enqueue has to wait until the sub-lock is
495          * granted before proceeding to the next one.
496          */
497         if (result == CLO_WAIT && sublock->cll_state <= CLS_HELD &&
498             enqflags & CEF_ASYNC && !last)
499                 result = 0;
500         RETURN(result);
501 }
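/*
 * Under CEF_ASYNC a CLO_WAIT from any sub-lock but the last is thus reported
 * as success, so lov_lock_enqueue() keeps issuing the remaining enqueues in
 * parallel; the last sub-lock's CLO_WAIT (and any error) is still propagated
 * to the caller.
 */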
502
503 /**
504  * Helper function for lov_lock_enqueue() that creates missing sub-lock.
505  */
506 static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
507                             struct cl_io *io, struct lov_lock *lck, int idx)
508 {
509         struct lov_lock_link *link;
510         struct cl_lock       *sublock;
511         int                   result;
512
513         LASSERT(parent->cll_depth == 1);
514         cl_lock_mutex_put(env, parent);
515         sublock = lov_sublock_alloc(env, io, lck, idx, &link);
516         if (!IS_ERR(sublock))
517                 cl_lock_mutex_get(env, sublock);
518         cl_lock_mutex_get(env, parent);
519
520         if (!IS_ERR(sublock)) {
521                 if (parent->cll_state == CLS_QUEUING &&
522                     lck->lls_sub[idx].sub_lock == NULL)
523                         lov_sublock_adopt(env, lck, sublock, idx, link);
524                 else {
525                         /* another thread allocated the sub-lock, or enqueue
526                          * is no longer going on */
527                         cl_lock_mutex_put(env, parent);
528                         cl_lock_unhold(env, sublock, "lov-parent", parent);
529                         cl_lock_mutex_get(env, parent);
530                 }
531                 cl_lock_mutex_put(env, sublock);
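                /*
                 * The parent mutex was dropped and re-acquired above, so ask
                 * the caller to restart the enqueue loop from a consistent
                 * state.
                 */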
532                 result = CLO_REPEAT;
533         } else
534                 result = PTR_ERR(sublock);
535         return result;
536 }
537
538 /**
539  * Implementation of cl_lock_operations::clo_enqueue() for lov layer. This
540  * function is rather subtle, as it enqueues top-lock (i.e., advances top-lock
541  * state machine from CLS_QUEUING to CLS_ENQUEUED states) by juggling sub-lock
542  * state machines in the face of sub-locks sharing (by multiple top-locks),
543  * and concurrent sub-lock cancellations.
544  */
545 static int lov_lock_enqueue(const struct lu_env *env,
546                             const struct cl_lock_slice *slice,
547                             struct cl_io *io, __u32 enqflags)
548 {
549         struct cl_lock         *lock    = slice->cls_lock;
550         struct lov_lock        *lck     = cl2lov_lock(slice);
551         struct cl_lock_closure *closure = lov_closure_get(env, lock);
552         int i;
553         int result;
554         enum cl_lock_state minstate;
555
556         ENTRY;
557
558         for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
559                 int rc;
560                 struct lovsub_lock     *sub;
561                 struct lov_lock_sub    *lls;
562                 struct cl_lock         *sublock;
563                 struct lov_sublock_env *subenv;
564
565                 if (lock->cll_state != CLS_QUEUING) {
566                         /*
567                          * Lock might have left QUEUING state if previous
568                          * iteration released its mutex. Stop enqueuing in this
569                          * case and let the upper layer decide what to do.
570                          */
571                         LASSERT(i > 0 && result != 0);
572                         break;
573                 }
574
575                 lls = &lck->lls_sub[i];
576                 sub = lls->sub_lock;
577                 /*
578                  * The sub-lock might have been canceled while the top-lock
579                  * was cached.
580                  */
581                 if (sub == NULL) {
582                         result = lov_sublock_fill(env, lock, io, lck, i);
583                         /* lov_sublock_fill() released @lock mutex,
584                          * restart. */
585                         break;
586                 }
587                 sublock = sub->lss_cl.cls_lock;
588                 rc = lov_sublock_lock(env, lls, closure, &subenv);
589                 if (rc == 0) {
590                         lov_sublock_hold(env, lck, i);
591                         rc = lov_lock_enqueue_one(subenv->lse_env, lck, sublock,
592                                                   subenv->lse_io, enqflags,
593                                                   i == lck->lls_nr - 1);
594                         minstate = min(minstate, sublock->cll_state);
595                         /*
596                          * Don't hold a sub-lock in CLS_CACHED state, see
597                          * description for lov_lock::lls_sub.
598                          */
599                         if (sublock->cll_state > CLS_HELD)
600                                 rc = lov_sublock_release(env, lck, i, 1, rc);
601                         lov_sublock_unlock(env, sub, closure, subenv);
602                 }
603                 result = lov_subresult(result, rc);
604                 if (result < 0)
605                         break;
606         }
607         cl_lock_closure_fini(closure);
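        /*
         * The top-lock counts as enqueued only once every sub-lock has
         * reached at least CLS_ENQUEUED; otherwise return CLO_WAIT, unless
         * an error or CLO_REPEAT has already been accumulated in result.
         */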
608         RETURN(result ?: minstate >= CLS_ENQUEUED ? 0 : CLO_WAIT);
609 }
610
611 static int lov_lock_unuse(const struct lu_env *env,
612                           const struct cl_lock_slice *slice)
613 {
614         struct lov_lock        *lck     = cl2lov_lock(slice);
615         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
616         int i;
617         int result;
618
619         ENTRY;
620
621         for (result = 0, i = 0; i < lck->lls_nr; ++i) {
622                 int rc;
623                 struct lovsub_lock     *sub;
624                 struct cl_lock         *sublock;
625                 struct lov_lock_sub    *lls;
626                 struct lov_sublock_env *subenv;
627
628                 /* top-lock state cannot change concurrently, because a single
629                  * thread (the one that released the last hold) carries the
630                  * unlocking through to completion. */
631                 LASSERT(slice->cls_lock->cll_state == CLS_UNLOCKING);
632                 lls = &lck->lls_sub[i];
633                 sub = lls->sub_lock;
634                 if (sub == NULL)
635                         continue;
636
637                 sublock = sub->lss_cl.cls_lock;
638                 rc = lov_sublock_lock(env, lls, closure, &subenv);
639                 if (rc == 0) {
640                         if (lck->lls_sub[i].sub_flags & LSF_HELD) {
641                                 LASSERT(sublock->cll_state == CLS_HELD);
642                                 rc = cl_unuse_try(subenv->lse_env, sublock);
643                                 if (rc != CLO_WAIT)
644                                         rc = lov_sublock_release(env, lck,
645                                                                  i, 0, rc);
646                         }
647                         lov_sublock_unlock(env, sub, closure, subenv);
648                 }
649                 result = lov_subresult(result, rc);
650                 if (result < 0)
651                         break;
652         }
653         if (result == 0 && lck->lls_unuse_race) {
654                 lck->lls_unuse_race = 0;
655                 result = -ESTALE;
656         }
657         cl_lock_closure_fini(closure);
658         RETURN(result);
659 }
660
661 static int lov_lock_wait(const struct lu_env *env,
662                          const struct cl_lock_slice *slice)
663 {
664         struct lov_lock        *lck     = cl2lov_lock(slice);
665         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
666         enum cl_lock_state      minstate;
667         int                     result;
668         int                     i;
669
670         ENTRY;
671
672         for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
673                 int rc;
674                 struct lovsub_lock     *sub;
675                 struct cl_lock         *sublock;
676                 struct lov_lock_sub    *lls;
677                 struct lov_sublock_env *subenv;
678
679                 lls = &lck->lls_sub[i];
680                 sub = lls->sub_lock;
681                 LASSERT(sub != NULL);
682                 sublock = sub->lss_cl.cls_lock;
683                 rc = lov_sublock_lock(env, lls, closure, &subenv);
684                 if (rc == 0) {
685                         LASSERT(sublock->cll_state >= CLS_ENQUEUED);
686                         if (sublock->cll_state < CLS_HELD)
687                                 rc = cl_wait_try(env, sublock);
688
689                         minstate = min(minstate, sublock->cll_state);
690                         lov_sublock_unlock(env, sub, closure, subenv);
691                 }
692                 result = lov_subresult(result, rc);
693                 if (result < 0)
694                         break;
695         }
696         cl_lock_closure_fini(closure);
697         RETURN(result ?: minstate >= CLS_HELD ? 0 : CLO_WAIT);
698 }
699
700 static int lov_lock_use(const struct lu_env *env,
701                         const struct cl_lock_slice *slice)
702 {
703         struct lov_lock        *lck     = cl2lov_lock(slice);
704         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
705         int                     result;
706         int                     i;
707
708         LASSERT(slice->cls_lock->cll_state == CLS_CACHED);
709         ENTRY;
710
711         for (result = 0, i = 0; i < lck->lls_nr; ++i) {
712                 int rc;
713                 struct lovsub_lock     *sub;
714                 struct cl_lock         *sublock;
715                 struct lov_lock_sub    *lls;
716                 struct lov_sublock_env *subenv;
717
718                 if (slice->cls_lock->cll_state != CLS_CACHED) {
719                         /* see comment in lov_lock_enqueue(). */
720                         LASSERT(i > 0 && result != 0);
721                         break;
722                 }
723                 /*
724                  * if a sub-lock was destroyed while top-lock was in
725                  * CLS_CACHED state, top-lock would have been moved into
726                  * CLS_NEW state, so all sub-locks have to be in place.
727                  */
728                 lls = &lck->lls_sub[i];
729                 sub = lls->sub_lock;
730                 LASSERT(sub != NULL);
731                 sublock = sub->lss_cl.cls_lock;
732                 rc = lov_sublock_lock(env, lls, closure, &subenv);
733                 if (rc == 0) {
734                         LASSERT(sublock->cll_state != CLS_FREEING);
735                         lov_sublock_hold(env, lck, i);
736                         if (sublock->cll_state == CLS_CACHED) {
737                                 rc = cl_use_try(subenv->lse_env, sublock);
738                                 if (rc != 0)
739                                         rc = lov_sublock_release(env, lck,
740                                                                  i, 1, rc);
741                         } else
742                                 rc = 0;
743                         lov_sublock_unlock(env, sub, closure, subenv);
744                 }
745                 result = lov_subresult(result, rc);
746                 if (result < 0)
747                         break;
748         }
749         cl_lock_closure_fini(closure);
750         RETURN(result);
751 }
752
753 #if 0
754 static int lock_lock_multi_match()
755 {
756         struct cl_lock          *lock    = slice->cls_lock;
757         struct cl_lock_descr    *subneed = &lov_env_info(env)->lti_ldescr;
758         struct lov_object       *loo     = cl2lov(lov->lls_cl.cls_obj);
759         struct lov_layout_raid0 *r0      = lov_r0(loo);
760         struct lov_lock_sub     *sub;
761         struct cl_object        *subobj;
762         obd_off  fstart;
763         obd_off  fend;
764         obd_off  start;
765         obd_off  end;
766         int i;
767
768         fstart = cl_offset(need->cld_obj, need->cld_start);
769         fend   = cl_offset(need->cld_obj, need->cld_end + 1) - 1;
770         subneed->cld_mode = need->cld_mode;
771         cl_lock_mutex_get(env, lock);
772         for (i = 0; i < lov->lls_nr; ++i) {
773                 sub = &lov->lls_sub[i];
774                 if (sub->sub_lock == NULL)
775                         continue;
776                 subobj = sub->sub_descr.cld_obj;
777                 if (!lov_stripe_intersects(r0->lo_lsm, sub->sub_stripe,
778                                            fstart, fend, &start, &end))
779                         continue;
780                 subneed->cld_start = cl_index(subobj, start);
781                 subneed->cld_end   = cl_index(subobj, end);
782                 subneed->cld_obj   = subobj;
783                 if (!cl_lock_ext_match(&sub->sub_got, subneed)) {
784                         result = 0;
785                         break;
786                 }
787         }
788         cl_lock_mutex_put(env, lock);
789 }
790 #endif
791
792 static int lov_is_same_stripe(struct lov_object *lov, int stripe,
793                               const struct cl_lock_descr *descr)
794 {
795         struct lov_stripe_md *lsm = lov_r0(lov)->lo_lsm;
796         obd_off start;
797         obd_off end;
798
799         start = cl_offset(&lov->lo_cl, descr->cld_start);
800         end   = cl_offset(&lov->lo_cl, descr->cld_end + 1) - 1;
801         return
802                 end - start <= lsm->lsm_stripe_size &&
803                 stripe == lov_stripe_number(lsm, start) &&
804                 stripe == lov_stripe_number(lsm, end);
805 }
806
807 /**
808  * An implementation of cl_lock_operations::clo_fits_into() method.
809  *
810  * Checks whether a lock (given by \a slice) is suitable for \a
811  * io. Multi-stripe locks can be used only for "quick" io, like truncate, or
812  * O_APPEND write.
813  *
814  * \see ccc_lock_fits_into().
815  */
816 static int lov_lock_fits_into(const struct lu_env *env,
817                               const struct cl_lock_slice *slice,
818                               const struct cl_lock_descr *need,
819                               const struct cl_io *io)
820 {
821         struct lov_lock   *lov = cl2lov_lock(slice);
822         struct lov_object *obj = cl2lov(slice->cls_obj);
823         int result;
824
825         LASSERT(cl_object_same(need->cld_obj, slice->cls_obj));
826         LASSERT(lov->lls_nr > 0);
827
828         ENTRY;
829
830         if (lov->lls_nr == 1) {
831                 /*
832                  * If a lock is on a single stripe, it's enough to check that
833                  * @need lock matches the actually granted stripe lock, and...
834                  */
835                 result = cl_lock_ext_match(&lov->lls_sub[0].sub_got, need);
836                 if (result && lov_r0(obj)->lo_nr > 1)
837                         /*
838                          * ... @need is on the same stripe, if multiple
839                          * stripes are possible at all for this object.
840                          */
841                         result = lov_is_same_stripe(cl2lov(slice->cls_obj),
842                                                     lov->lls_sub[0].sub_stripe,
843                                                     need);
844         } else if (io->ci_type != CIT_TRUNC && io->ci_type != CIT_MISC &&
845                    !cl_io_is_append(io) && need->cld_mode != CLM_PHANTOM)
846                 /*
847                  * Multi-stripe locks are only suitable for `quick' IO and for
848                  * glimpse.
849                  */
850                 result = 0;
851         else
852                 /*
853                  * Most general case: multi-stripe existing lock, and
854                  * (potentially) multi-stripe @need lock. Check that @need is
855                  * covered by @lov's sub-locks.
856                  *
857                  * For now, ignore lock expansions made by the server, and
858                  * match against original lock extent.
859                  */
860                 result = cl_lock_ext_match(&lov->lls_orig, need);
861         CDEBUG(D_DLMTRACE, DDESCR"/"DDESCR" %i %i/%i: %i\n",
862                PDESCR(&lov->lls_orig), PDESCR(&lov->lls_sub[0].sub_got),
863                lov->lls_sub[0].sub_stripe, lov->lls_nr, lov_r0(obj)->lo_nr,
864                result);
865         RETURN(result);
866 }
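/*
 * For instance (hypothetical layout): on an object striped over four 1MB
 * stripes, a cached single-stripe lock whose granted extent on stripe 0 is
 * [0, 1M), with a matching mode, fits a read of [128K, 256K] on that
 * stripe; a multi-stripe lock taken for truncate, however, does not fit an
 * ordinary CIT_READ or CIT_WRITE io even if its extent covers the need.
 */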
867
868 void lov_lock_unlink(const struct lu_env *env,
869                      struct lov_lock_link *link, struct lovsub_lock *sub)
870 {
871         struct lov_lock *lck    = link->lll_super;
872         struct cl_lock  *parent = lck->lls_cl.cls_lock;
873
874         LASSERT(cl_lock_is_mutexed(parent));
875         LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
876         ENTRY;
877
878         list_del_init(&link->lll_list);
879         LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
880         /* yank this sub-lock from parent's array */
881         lck->lls_sub[link->lll_idx].sub_lock = NULL;
882         LASSERT(lck->lls_nr_filled > 0);
883         lck->lls_nr_filled--;
884         lu_ref_del(&parent->cll_reference, "lov-child", sub->lss_cl.cls_lock);
885         cl_lock_put(env, parent);
886         OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
887         EXIT;
888 }
889
890 struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
891                                          struct lov_lock *lck,
892                                          struct lovsub_lock *sub)
893 {
894         struct lov_lock_link *scan;
895
896         LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
897         ENTRY;
898
899         list_for_each_entry(scan, &sub->lss_parents, lll_list) {
900                 if (scan->lll_super == lck)
901                         RETURN(scan);
902         }
903         RETURN(NULL);
904 }
905
906 /**
907  * An implementation of cl_lock_operations::clo_delete() method. This is
908  * invoked for "top-to-bottom" delete, when lock destruction starts from the
909  * top-lock, e.g., as a result of inode destruction.
910  *
911  * Unlinks the top-lock from all its sub-locks. Sub-locks themselves are not
912  * deleted by this method; that is done separately:
913  *
914  *     - for inode destruction, lov_object_delete() calls cl_object_kill() for
915  *       each sub-object, purging its locks;
916  *
917  *     - in other cases (e.g., a fatal error with a top-lock) sub-locks are
918  *       left in the cache.
919  */
920 static void lov_lock_delete(const struct lu_env *env,
921                             const struct cl_lock_slice *slice)
922 {
923         struct lov_lock        *lck     = cl2lov_lock(slice);
924         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
925         int i;
926
927         LASSERT(slice->cls_lock->cll_state == CLS_FREEING);
928         ENTRY;
929
930         for (i = 0; i < lck->lls_nr; ++i) {
931                 struct lov_lock_sub *lls;
932                 struct lovsub_lock  *lsl;
933                 struct cl_lock      *sublock;
934                 int rc;
935
936                 lls = &lck->lls_sub[i];
937                 lsl = lls->sub_lock;
938                 if (lsl == NULL)
939                         continue;
940
941                 sublock = lsl->lss_cl.cls_lock;
942                 rc = lov_sublock_lock(env, lls, closure, NULL);
943                 if (rc == 0) {
944                         if (lck->lls_sub[i].sub_flags & LSF_HELD)
945                                 lov_sublock_release(env, lck, i, 1, 0);
946                         if (sublock->cll_state < CLS_FREEING) {
947                                 struct lov_lock_link *link;
948
949                                 link = lov_lock_link_find(env, lck, lsl);
950                                 LASSERT(link != NULL);
951                                 lov_lock_unlink(env, link, lsl);
952                                 LASSERT(lck->lls_sub[i].sub_lock == NULL);
953                         }
954                         lov_sublock_unlock(env, lsl, closure, NULL);
955                 } else if (rc == CLO_REPEAT) {
956                         --i; /* repeat with this lock */
957                 } else {
958                         CL_LOCK_DEBUG(D_ERROR, env, sublock,
959                                       "Cannot get sub-lock for delete: %i\n",
960                                       rc);
961                 }
962         }
963         cl_lock_closure_fini(closure);
964         EXIT;
965 }
966
967 static int lov_lock_print(const struct lu_env *env, void *cookie,
968                           lu_printer_t p, const struct cl_lock_slice *slice)
969 {
970         struct lov_lock *lck = cl2lov_lock(slice);
971         int              i;
972
973         (*p)(env, cookie, "%d\n", lck->lls_nr);
974         for (i = 0; i < lck->lls_nr; ++i) {
975                 struct lov_lock_sub *sub;
976
977                 sub = &lck->lls_sub[i];
978                 (*p)(env, cookie, "    %d %x: ", i, sub->sub_flags);
979                 if (sub->sub_lock != NULL)
980                         cl_lock_print(env, cookie, p,
981                                       sub->sub_lock->lss_cl.cls_lock);
982                 else
983                         (*p)(env, cookie, "---\n");
984         }
985         return 0;
986 }
987
988 static const struct cl_lock_operations lov_lock_ops = {
989         .clo_fini      = lov_lock_fini,
990         .clo_enqueue   = lov_lock_enqueue,
991         .clo_wait      = lov_lock_wait,
992         .clo_use       = lov_lock_use,
993         .clo_unuse     = lov_lock_unuse,
994         .clo_fits_into = lov_lock_fits_into,
995         .clo_delete    = lov_lock_delete,
996         .clo_print     = lov_lock_print
997 };
998
999 int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
1000                         struct cl_lock *lock, const struct cl_io *io)
1001 {
1002         struct lov_lock *lck;
1003         int result;
1004
1005         ENTRY;
1006         OBD_SLAB_ALLOC_PTR(lck, lov_lock_kmem);
1007         if (lck != NULL) {
1008                 cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
1009                 result = lov_lock_sub_init(env, lck, io);
1010         } else
1011                 result = -ENOMEM;
1012         RETURN(result);
1013 }
1014
1015 static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
1016                                                struct cl_lock *parent)
1017 {
1018         struct cl_lock_closure *closure;
1019
1020         closure = &lov_env_info(env)->lti_closure;
1021         LINVRNT(list_empty(&closure->clc_list));
1022         cl_lock_closure_init(env, closure, parent, 1);
1023         return closure;
1024 }
1025
1026
1027 /** @} lov */