lustre/lov/lov_lock.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_lock for LOV layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LOV

#include "lov_cl_internal.h"

/** \addtogroup lov
 *  @{
 */

static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
                                               struct cl_lock *parent);

/*****************************************************************************
 *
 * Lov lock operations.
 *
 */

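/**
 * Returns the environment and IO to be used for operations on a sub-lock:
 * normally the sub-IO state of stripe \a lls->sub_stripe, but the parent's
 * environment when the current IO targets a different object (see the FIXME
 * comment below).
 */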
static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
                                                   struct cl_lock *parent,
                                                   struct lov_lock_sub *lls)
{
        struct lov_sublock_env *subenv;
        struct lov_io          *lio    = lov_env_io(env);
        struct cl_io           *io     = lio->lis_cl.cis_io;
        struct lov_io_sub      *sub;

        subenv = &lov_env_session(env)->ls_subenv;

        /*
         * FIXME: We tend to use the subio's env & io to call the sublock
         * lock operations because an osc lock sometimes stores some control
         * variables in the thread's IO information (currently only the
         * lockless information). However, if the lock's host (object) is
         * different from the object of the current IO, we have no way to get
         * the subenv and subio because they are not initialized at all. As a
         * temporary fix, in this case, we still borrow the parent's env to
         * call the sublock operations.
         */
        if (!cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
                subenv->lse_env = env;
                subenv->lse_io  = io;
                subenv->lse_sub = NULL;
        } else {
                sub = lov_sub_get(env, lio, lls->sub_stripe);
                if (!IS_ERR(sub)) {
                        subenv->lse_env = sub->sub_env;
                        subenv->lse_io  = sub->sub_io;
                        subenv->lse_sub = sub;
                } else {
                        subenv = (void *)sub;
                }
        }
        return subenv;
}

static void lov_sublock_env_put(struct lov_sublock_env *subenv)
{
        if (subenv && subenv->lse_sub)
                lov_sub_put(subenv->lse_sub);
}

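/**
 * Links a newly created sub-lock into the top-lock at slot \a idx: records it
 * in lck->lls_sub[], adds \a link to the sub-lock's list of parent links,
 * takes a reference on the parent, marks the slot as held and adds a lock
 * user to the sub-lock.
 */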
static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck,
                              struct cl_lock *sublock, int idx,
                              struct lov_lock_link *link)
{
        struct lovsub_lock *lsl;
        struct cl_lock     *parent = lck->lls_cl.cls_lock;
        int                 rc;

        LASSERT(cl_lock_is_mutexed(parent));
        LASSERT(cl_lock_is_mutexed(sublock));
        ENTRY;

        lsl = cl2sub_lock(sublock);
        /*
         * Check that the sub-lock doesn't already have a lock link to this
         * top-lock.
         */
        LASSERT(lov_lock_link_find(env, lck, lsl) == NULL);
        LASSERT(idx < lck->lls_nr);

        lck->lls_sub[idx].sub_lock = lsl;
        lck->lls_nr_filled++;
        LASSERT(lck->lls_nr_filled <= lck->lls_nr);
        list_add_tail(&link->lll_list, &lsl->lss_parents);
        link->lll_idx = idx;
        link->lll_super = lck;
        cl_lock_get(parent);
        lu_ref_add(&parent->cll_reference, "lov-child", sublock);
        lck->lls_sub[idx].sub_flags |= LSF_HELD;
        cl_lock_user_add(env, sublock);

        rc = lov_sublock_modify(env, lck, lsl, &sublock->cll_descr, idx);
        LASSERT(rc == 0); /* there is no way this can fail, currently */
        EXIT;
}

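/**
 * Allocates a lov_lock_link and creates the sub-lock described by
 * lck->lls_sub[idx].sub_descr, taking a hold on it on behalf of the parent
 * top-lock. Returns the new sub-lock, or an ERR_PTR() value on failure.
 */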
static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
                                         const struct cl_io *io,
                                         struct lov_lock *lck,
                                         int idx, struct lov_lock_link **out)
{
        struct cl_lock       *sublock;
        struct cl_lock       *parent;
        struct lov_lock_link *link;

        LASSERT(idx < lck->lls_nr);
        ENTRY;

        OBD_SLAB_ALLOC_PTR_GFP(link, lov_lock_link_kmem, CFS_ALLOC_IO);
        if (link != NULL) {
                struct lov_sublock_env *subenv;
                struct lov_lock_sub  *lls;
                struct cl_lock_descr *descr;

                parent = lck->lls_cl.cls_lock;
                lls    = &lck->lls_sub[idx];
                descr  = &lls->sub_descr;

                subenv = lov_sublock_env_get(env, parent, lls);
                if (!IS_ERR(subenv)) {
                        /* CAVEAT: Don't try to add a field in lov_lock_sub
                         * to remember the subio. This is because a lock can
                         * be cached, but an IO cannot. This further means a
                         * sub-lock might be referenced in a different IO
                         * context. -jay */

                        sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
                                               descr, "lov-parent", parent);
                        lov_sublock_env_put(subenv);
                } else {
                        /* an error occurred */
                        sublock = (void *)subenv;
                }

                if (!IS_ERR(sublock))
                        *out = link;
                else
                        OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
        } else
                sublock = ERR_PTR(-ENOMEM);
        RETURN(sublock);
}

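/**
 * Undoes lov_sublock_lock(): puts the sub-lock environment, clears the
 * sub-lock's active parent pointer, and dissolves the closure built around
 * the sub-lock.
 */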
static void lov_sublock_unlock(const struct lu_env *env,
                               struct lovsub_lock *lsl,
                               struct cl_lock_closure *closure,
                               struct lov_sublock_env *subenv)
{
        ENTRY;
        lov_sublock_env_put(subenv);
        lsl->lss_active = NULL;
        cl_lock_disclosure(env, closure);
        EXIT;
}

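/**
 * Mutexes the sub-lock of \a lls by adding it to the closure of the
 * top-lock, and optionally sets up the sub-lock environment in \a *lsep.
 * Returns CLO_REPEAT if the sub-lock is found in the CLS_FREEING state,
 * i.e. a deletion race was detected (see bug 19080 below).
 */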
static int lov_sublock_lock(const struct lu_env *env,
                            struct lov_lock *lck,
                            struct lov_lock_sub *lls,
                            struct cl_lock_closure *closure,
                            struct lov_sublock_env **lsep)
{
        struct lovsub_lock *sublock;
        struct cl_lock     *child;
        int                 result = 0;
        ENTRY;

        LASSERT(list_empty(&closure->clc_list));

        sublock = lls->sub_lock;
        child = sublock->lss_cl.cls_lock;
        result = cl_lock_closure_build(env, child, closure);
        if (result == 0) {
                struct cl_lock *parent = closure->clc_origin;

                LASSERT(cl_lock_is_mutexed(child));
                sublock->lss_active = parent;

                if (unlikely(child->cll_state == CLS_FREEING)) {
                        struct lov_lock_link *link;
                        /*
                         * We could race with lock deletion which temporarily
                         * puts the lock in the freeing state; see bug 19080.
                         */
                        LASSERT(!(lls->sub_flags & LSF_HELD));

                        link = lov_lock_link_find(env, lck, sublock);
                        LASSERT(link != NULL);
                        lov_lock_unlink(env, link, sublock);
                        lov_sublock_unlock(env, sublock, closure, NULL);
                        result = CLO_REPEAT;
                } else if (lsep) {
                        struct lov_sublock_env *subenv;
                        subenv = lov_sublock_env_get(env, parent, lls);
                        if (IS_ERR(subenv)) {
                                lov_sublock_unlock(env, sublock,
                                                   closure, NULL);
                                result = PTR_ERR(subenv);
                        } else {
                                *lsep = subenv;
                        }
                }
        }
        RETURN(result);
}

/**
 * Updates the result of a top-lock operation from a result of sub-lock
 * sub-operations. Top-operations like lov_lock_{enqueue,use,unuse}() iterate
 * over sub-locks and lov_subresult() is used to calculate the return value
 * of the top-operation. To this end, possible return values of
 * sub-operations are ordered as
 *
 *     - 0                  success
 *     - CLO_WAIT           wait for event
 *     - CLO_REPEAT         repeat top-operation
 *     - -ve                fundamental error
 *
 * Top-level return code can only go down through this list. CLO_REPEAT
 * overwrites CLO_WAIT, because the lock mutex was released and the sleeping
 * condition has to be rechecked by the upper layer.
 */
static int lov_subresult(int result, int rc)
{
        int result_rank;
        int rc_rank;

        LASSERT(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT);
        LASSERT(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT);
        CLASSERT(CLO_WAIT < CLO_REPEAT);

        ENTRY;

        /* calculate ranks in the ordering above */
        result_rank = result < 0 ? 1 + CLO_REPEAT : result;
        rc_rank = rc < 0 ? 1 + CLO_REPEAT : rc;

        if (result_rank < rc_rank)
                result = rc;
        RETURN(result);
}

/**
 * Creates sub-locks for a given lov_lock for the first time.
 *
 * Goes through all sub-objects of the top-object, and creates sub-locks on
 * every sub-object intersecting with the top-lock extent. This is
 * complicated by the fact that the top-lock (that is being created) can be
 * accessed concurrently through already created sub-locks (possibly shared
 * with other top-locks).
 */
static int lov_lock_sub_init(const struct lu_env *env,
                             struct lov_lock *lck, const struct cl_io *io)
{
        int result = 0;
        int i;
        int nr;
        obd_off start;
        obd_off end;
        obd_off file_start;
        obd_off file_end;

        struct lov_object       *loo    = cl2lov(lck->lls_cl.cls_obj);
        struct lov_layout_raid0 *r0     = lov_r0(loo);
        struct cl_lock          *parent = lck->lls_cl.cls_lock;

        ENTRY;

        lck->lls_orig = parent->cll_descr;
        file_start = cl_offset(lov2cl(loo), parent->cll_descr.cld_start);
        file_end   = cl_offset(lov2cl(loo), parent->cll_descr.cld_end + 1) - 1;

        for (i = 0, nr = 0; i < r0->lo_nr; i++) {
                /*
                 * XXX for wide striping a smarter algorithm is desirable,
                 * breaking out of the loop early.
                 */
                if (lov_stripe_intersects(r0->lo_lsm, i,
                                          file_start, file_end, &start, &end))
                        nr++;
        }
        LASSERT(nr > 0);
        OBD_ALLOC(lck->lls_sub, nr * sizeof lck->lls_sub[0]);
        if (lck->lls_sub == NULL)
                RETURN(-ENOMEM);

        lck->lls_nr = nr;
        /*
         * First, fill in sub-lock descriptions in
         * lck->lls_sub[].sub_descr. They are used by lov_sublock_alloc()
         * (called below in this function, and by lov_lock_enqueue()) to
         * create sub-locks. At this moment, no other thread can access the
         * top-lock.
         */
        for (i = 0, nr = 0; i < r0->lo_nr; ++i) {
                if (lov_stripe_intersects(r0->lo_lsm, i,
                                          file_start, file_end, &start, &end)) {
                        struct cl_lock_descr *descr;

                        descr = &lck->lls_sub[nr].sub_descr;

                        LASSERT(descr->cld_obj == NULL);
                        descr->cld_obj   = lovsub2cl(r0->lo_sub[i]);
                        descr->cld_start = cl_index(descr->cld_obj, start);
                        descr->cld_end   = cl_index(descr->cld_obj, end);
                        descr->cld_mode  = parent->cll_descr.cld_mode;
                        descr->cld_gid   = parent->cll_descr.cld_gid;
                        /* XXX has no effect */
                        lck->lls_sub[nr].sub_got = *descr;
                        lck->lls_sub[nr].sub_stripe = i;
                        nr++;
                }
        }
        LASSERT(nr == lck->lls_nr);
        /*
         * Then, create sub-locks. Once at least one sub-lock was created,
         * the top-lock can be reached by other threads.
         */
        for (i = 0; i < lck->lls_nr; ++i) {
                struct cl_lock       *sublock;
                struct lov_lock_link *link;

                if (lck->lls_sub[i].sub_lock == NULL) {
                        sublock = lov_sublock_alloc(env, io, lck, i, &link);
                        if (IS_ERR(sublock)) {
                                result = PTR_ERR(sublock);
                                break;
                        }
                        cl_lock_mutex_get(env, sublock);
                        cl_lock_mutex_get(env, parent);
                        /*
                         * Recheck under mutex that the sub-lock wasn't
                         * created concurrently, and that the top-lock is
                         * still alive.
                         */
                        if (lck->lls_sub[i].sub_lock == NULL &&
                            parent->cll_state < CLS_FREEING) {
                                lov_sublock_adopt(env, lck, sublock, i, link);
                                cl_lock_mutex_put(env, parent);
                        } else {
                                OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
                                cl_lock_mutex_put(env, parent);
                                cl_lock_unhold(env, sublock,
                                               "lov-parent", parent);
                        }
                        cl_lock_mutex_put(env, sublock);
                }
        }
        /*
         * Some sub-locks can be missing at this point. This is not a
         * problem, because enqueue will create them anyway. The main duty of
         * this function is to fill in sub-lock descriptions in a race-free
         * manner.
         */
        RETURN(result);
}

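/**
 * Releases the hold (and, if \a deluser is set, the lock user) that the
 * top-lock \a lck keeps on its \a i-th sub-lock. May temporarily drop the
 * parent mutex when the sub-lock is about to be cancelled, in which case
 * CLO_REPEAT is folded into the returned \a rc.
 */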
static int lov_sublock_release(const struct lu_env *env, struct lov_lock *lck,
                               int i, int deluser, int rc)
{
        struct cl_lock *parent = lck->lls_cl.cls_lock;

        LASSERT(cl_lock_is_mutexed(parent));
        ENTRY;

        if (lck->lls_sub[i].sub_flags & LSF_HELD) {
                struct cl_lock    *sublock;
                int dying;

                LASSERT(lck->lls_sub[i].sub_lock != NULL);
                sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
                LASSERT(cl_lock_is_mutexed(sublock));

                lck->lls_sub[i].sub_flags &= ~LSF_HELD;
                if (deluser)
                        cl_lock_user_del(env, sublock);
                /*
                 * If the last hold is released, and cancellation is pending
                 * for a sub-lock, release parent mutex, to avoid keeping it
                 * while sub-lock is being paged out.
                 */
                dying = (sublock->cll_descr.cld_mode == CLM_PHANTOM ||
                         sublock->cll_descr.cld_mode == CLM_GROUP ||
                         (sublock->cll_flags & (CLF_CANCELPEND|CLF_DOOMED))) &&
                        sublock->cll_holds == 1;
                if (dying)
                        cl_lock_mutex_put(env, parent);
                cl_lock_unhold(env, sublock, "lov-parent", parent);
                if (dying) {
                        cl_lock_mutex_get(env, parent);
                        rc = lov_subresult(rc, CLO_REPEAT);
                }
                /*
                 * From now on lck->lls_sub[i].sub_lock is a "weak" pointer,
                 * not backed by a reference on a
                 * sub-lock. lovsub_lock_delete() will clear
                 * lck->lls_sub[i].sub_lock under semaphores, just before
                 * sub-lock is destroyed.
                 */
        }
        RETURN(rc);
}

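/**
 * Re-acquires the hold and lock-user references on the \a i-th sub-lock on
 * behalf of the top-lock \a lck, if they are not held already (LSF_HELD is
 * clear).
 */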
static void lov_sublock_hold(const struct lu_env *env, struct lov_lock *lck,
                             int i)
{
        struct cl_lock *parent = lck->lls_cl.cls_lock;

        LASSERT(cl_lock_is_mutexed(parent));
        ENTRY;

        if (!(lck->lls_sub[i].sub_flags & LSF_HELD)) {
                struct cl_lock *sublock;

                LASSERT(lck->lls_sub[i].sub_lock != NULL);
                sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
                LASSERT(cl_lock_is_mutexed(sublock));
                LASSERT(sublock->cll_state != CLS_FREEING);

                lck->lls_sub[i].sub_flags |= LSF_HELD;

                cl_lock_get_trust(sublock);
                cl_lock_hold_add(env, sublock, "lov-parent", parent);
                cl_lock_user_add(env, sublock);
                cl_lock_put(env, sublock);
        }
        EXIT;
}

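/**
 * An implementation of cl_lock_operations::clo_fini() for the lov layer:
 * frees the array of sub-lock slots. All sub-locks must have been unlinked
 * by this point, since each sub-lock holds a reference on its parent.
 */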
static void lov_lock_fini(const struct lu_env *env,
                          struct cl_lock_slice *slice)
{
        struct lov_lock *lck;
        int i;

        ENTRY;
        lck = cl2lov_lock(slice);
        LASSERT(lck->lls_nr_filled == 0);
        if (lck->lls_sub != NULL) {
                for (i = 0; i < lck->lls_nr; ++i)
                        /*
                         * No sub-locks exist at this point, as a sub-lock
                         * holds a reference on its parent.
                         */
                        LASSERT(lck->lls_sub[i].sub_lock == NULL);
                OBD_FREE(lck->lls_sub, lck->lls_nr * sizeof lck->lls_sub[0]);
        }
        OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
        EXIT;
}

/**
 * Tries to advance a state machine of a given sub-lock toward enqueuing of
 * the top-lock.
 *
 * \retval 0 if state-transition can proceed
 * \retval -ve otherwise.
 */
static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck,
                                struct cl_lock *sublock,
                                struct cl_io *io, __u32 enqflags, int last)
{
        int result;
        ENTRY;

        /* first, try to enqueue a sub-lock ... */
        result = cl_enqueue_try(env, sublock, io, enqflags);
        if (sublock->cll_state == CLS_ENQUEUED)
                /* if it is enqueued, try to `wait' on it---maybe it's already
                 * granted */
                result = cl_wait_try(env, sublock);
        /*
         * If the CEF_ASYNC flag is set, then all sub-locks can be enqueued in
         * parallel, otherwise---enqueue has to wait until the sub-lock is
         * granted before proceeding to the next one.
         */
        if (result == CLO_WAIT && sublock->cll_state <= CLS_HELD &&
            enqflags & CEF_ASYNC && !last)
                result = 0;
        RETURN(result);
}

/**
 * Helper function for lov_lock_enqueue() that creates a missing sub-lock.
 */
static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
                            struct cl_io *io, struct lov_lock *lck, int idx)
{
        struct lov_lock_link *link;
        struct cl_lock       *sublock;
        int                   result;

        LASSERT(parent->cll_depth == 1);
        cl_lock_mutex_put(env, parent);
        sublock = lov_sublock_alloc(env, io, lck, idx, &link);
        if (!IS_ERR(sublock))
                cl_lock_mutex_get(env, sublock);
        cl_lock_mutex_get(env, parent);

        if (!IS_ERR(sublock)) {
                if (parent->cll_state == CLS_QUEUING &&
                    lck->lls_sub[idx].sub_lock == NULL)
                        lov_sublock_adopt(env, lck, sublock, idx, link);
                else {
                        OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
                        /* another thread allocated the sub-lock, or enqueue
                         * is no longer going on */
                        cl_lock_mutex_put(env, parent);
                        cl_lock_unhold(env, sublock, "lov-parent", parent);
                        cl_lock_mutex_get(env, parent);
                }
                cl_lock_mutex_put(env, sublock);
                result = CLO_REPEAT;
        } else
                result = PTR_ERR(sublock);
        return result;
}

/**
 * Implementation of cl_lock_operations::clo_enqueue() for the lov layer. This
 * function is rather subtle, as it enqueues the top-lock (i.e., advances the
 * top-lock state machine from the CLS_QUEUING to the CLS_ENQUEUED state) by
 * juggling sub-lock state machines in the face of sub-lock sharing (by
 * multiple top-locks) and concurrent sub-lock cancellations.
 */
static int lov_lock_enqueue(const struct lu_env *env,
                            const struct cl_lock_slice *slice,
                            struct cl_io *io, __u32 enqflags)
{
        struct cl_lock         *lock    = slice->cls_lock;
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env, lock);
        int i;
        int result;
        enum cl_lock_state minstate;

        ENTRY;

        for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct lov_lock_sub    *lls;
                struct cl_lock         *sublock;
                struct lov_sublock_env *subenv;

                if (lock->cll_state != CLS_QUEUING) {
                        /*
                         * The lock might have left the QUEUING state if a
                         * previous iteration released its mutex. Stop
                         * enqueuing in this case and let the upper layer
                         * decide what to do.
                         */
                        LASSERT(i > 0 && result != 0);
                        break;
                }

                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                /*
                 * The sub-lock might have been canceled, while the top-lock
                 * was cached.
                 */
                if (sub == NULL) {
                        result = lov_sublock_fill(env, lock, io, lck, i);
                        /* lov_sublock_fill() released @lock mutex,
                         * restart. */
                        break;
                }
                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
                if (rc == 0) {
                        lov_sublock_hold(env, lck, i);
                        rc = lov_lock_enqueue_one(subenv->lse_env, lck, sublock,
                                                  subenv->lse_io, enqflags,
                                                  i == lck->lls_nr - 1);
                        minstate = min(minstate, sublock->cll_state);
                        /*
                         * Don't hold a sub-lock in CLS_CACHED state, see
                         * description for lov_lock::lls_sub.
                         */
                        if (sublock->cll_state > CLS_HELD)
                                rc = lov_sublock_release(env, lck, i, 1, rc);
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                result = lov_subresult(result, rc);
                if (result != 0)
                        break;
        }
        cl_lock_closure_fini(closure);
        RETURN(result ?: minstate >= CLS_ENQUEUED ? 0 : CLO_WAIT);
}

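/**
 * An implementation of cl_lock_operations::clo_unuse() for the lov layer:
 * calls cl_unuse_try() on every sub-lock that is held on behalf of the
 * top-lock and releases the corresponding holds.
 */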
static int lov_lock_unuse(const struct lu_env *env,
                          const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
        int i;
        int result;

        ENTRY;

        for (result = 0, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct cl_lock         *sublock;
                struct lov_lock_sub    *lls;
                struct lov_sublock_env *subenv;

                /* top-lock state cannot change concurrently, because a single
                 * thread (the one that released the last hold) carries the
                 * unlocking to completion. */
                LASSERT(slice->cls_lock->cll_state == CLS_UNLOCKING);
                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                if (sub == NULL)
                        continue;

                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
                if (rc == 0) {
                        if (lck->lls_sub[i].sub_flags & LSF_HELD) {
                                LASSERT(sublock->cll_state == CLS_HELD);
                                rc = cl_unuse_try(subenv->lse_env, sublock);
                                if (rc != CLO_WAIT)
                                        rc = lov_sublock_release(env, lck,
                                                                 i, 0, rc);
                        }
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                result = lov_subresult(result, rc);
                if (result < 0)
                        break;
        }
        if (result == 0 && lck->lls_unuse_race) {
                lck->lls_unuse_race = 0;
                result = -ESTALE;
        }
        cl_lock_closure_fini(closure);
        RETURN(result);
}

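/**
 * An implementation of cl_lock_operations::clo_wait() for the lov layer:
 * calls cl_wait_try() on every sub-lock that has not reached CLS_HELD yet,
 * and returns CLO_WAIT until all sub-locks are at least in that state.
 */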
static int lov_lock_wait(const struct lu_env *env,
                         const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
        enum cl_lock_state      minstate;
        int                     result;
        int                     i;

        ENTRY;

        for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct cl_lock         *sublock;
                struct lov_lock_sub    *lls;
                struct lov_sublock_env *subenv;

                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                LASSERT(sub != NULL);
                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
                if (rc == 0) {
                        LASSERT(sublock->cll_state >= CLS_ENQUEUED);
                        if (sublock->cll_state < CLS_HELD)
                                rc = cl_wait_try(env, sublock);

                        minstate = min(minstate, sublock->cll_state);
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                result = lov_subresult(result, rc);
                if (result != 0)
                        break;
        }
        cl_lock_closure_fini(closure);
        RETURN(result ?: minstate >= CLS_HELD ? 0 : CLO_WAIT);
}

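/**
 * An implementation of cl_lock_operations::clo_use() for the lov layer:
 * re-activates a cached top-lock by taking holds on its sub-locks and
 * calling cl_use_try() on those still in the CLS_CACHED state.
 */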
static int lov_lock_use(const struct lu_env *env,
                        const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
        int                     result;
        int                     i;

        LASSERT(slice->cls_lock->cll_state == CLS_CACHED);
        ENTRY;

        for (result = 0, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct cl_lock         *sublock;
                struct lov_lock_sub    *lls;
                struct lov_sublock_env *subenv;

                if (slice->cls_lock->cll_state != CLS_CACHED) {
                        /* see comment in lov_lock_enqueue(). */
                        LASSERT(i > 0 && result != 0);
                        break;
                }
                /*
                 * if a sub-lock was destroyed while top-lock was in
                 * CLS_CACHED state, top-lock would have been moved into
                 * CLS_NEW state, so all sub-locks have to be in place.
                 */
                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                LASSERT(sub != NULL);
                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
                if (rc == 0) {
                        LASSERT(sublock->cll_state != CLS_FREEING);
                        lov_sublock_hold(env, lck, i);
                        if (sublock->cll_state == CLS_CACHED) {
                                rc = cl_use_try(subenv->lse_env, sublock);
                                if (rc != 0)
                                        rc = lov_sublock_release(env, lck,
                                                                 i, 1, rc);
                        } else
                                rc = 0;
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                result = lov_subresult(result, rc);
                if (result != 0)
                        break;
        }
        cl_lock_closure_fini(closure);
        RETURN(result);
}

#if 0
static int lock_lock_multi_match()
{
        struct cl_lock          *lock    = slice->cls_lock;
        struct cl_lock_descr    *subneed = &lov_env_info(env)->lti_ldescr;
        struct lov_object       *loo     = cl2lov(lov->lls_cl.cls_obj);
        struct lov_layout_raid0 *r0      = lov_r0(loo);
        struct lov_lock_sub     *sub;
        struct cl_object        *subobj;
        obd_off  fstart;
        obd_off  fend;
        obd_off  start;
        obd_off  end;
        int i;

        fstart = cl_offset(need->cld_obj, need->cld_start);
        fend   = cl_offset(need->cld_obj, need->cld_end + 1) - 1;
        subneed->cld_mode = need->cld_mode;
        cl_lock_mutex_get(env, lock);
        for (i = 0; i < lov->lls_nr; ++i) {
                sub = &lov->lls_sub[i];
                if (sub->sub_lock == NULL)
                        continue;
                subobj = sub->sub_descr.cld_obj;
                if (!lov_stripe_intersects(r0->lo_lsm, sub->sub_stripe,
                                           fstart, fend, &start, &end))
                        continue;
                subneed->cld_start = cl_index(subobj, start);
                subneed->cld_end   = cl_index(subobj, end);
                subneed->cld_obj   = subobj;
                if (!cl_lock_ext_match(&sub->sub_got, subneed)) {
                        result = 0;
                        break;
                }
        }
        cl_lock_mutex_put(env, lock);
}
#endif

/**
 * Check if the extent region \a descr is covered by \a child against the
 * specific \a stripe.
 */
static int lov_lock_stripe_is_matching(const struct lu_env *env,
                                       struct lov_object *lov, int stripe,
                                       const struct cl_lock_descr *child,
                                       const struct cl_lock_descr *descr)
{
        struct lov_stripe_md *lsm = lov_r0(lov)->lo_lsm;
        obd_off start;
        obd_off end;
        int result;

        if (lov_r0(lov)->lo_nr == 1)
                return cl_lock_ext_match(child, descr);

        /*
         * For a multi-stripe object:
         * - make sure the descr only covers the child's stripe, and
         * - check if the extent is matching.
         */
        start = cl_offset(&lov->lo_cl, descr->cld_start);
        end   = cl_offset(&lov->lo_cl, descr->cld_end + 1) - 1;
        result = end - start <= lsm->lsm_stripe_size &&
                 stripe == lov_stripe_number(lsm, start) &&
                 stripe == lov_stripe_number(lsm, end);
        if (result) {
                struct cl_lock_descr *subd = &lov_env_info(env)->lti_ldescr;
                obd_off sub_start;
                obd_off sub_end;

                subd->cld_obj  = NULL;   /* don't need sub object at all */
                subd->cld_mode = descr->cld_mode;
                subd->cld_gid  = descr->cld_gid;
                result = lov_stripe_intersects(lsm, stripe, start, end,
                                               &sub_start, &sub_end);
                LASSERT(result);
                subd->cld_start = cl_index(child->cld_obj, sub_start);
                subd->cld_end   = cl_index(child->cld_obj, sub_end);
                result = cl_lock_ext_match(child, subd);
        }
        return result;
}

/**
 * An implementation of cl_lock_operations::clo_fits_into() method.
 *
 * Checks whether a lock (given by \a slice) is suitable for \a
 * io. Multi-stripe locks can be used only for "quick" io, like truncate, or
 * O_APPEND write.
 *
 * \see ccc_lock_fits_into().
 */
static int lov_lock_fits_into(const struct lu_env *env,
                              const struct cl_lock_slice *slice,
                              const struct cl_lock_descr *need,
                              const struct cl_io *io)
{
        struct lov_lock   *lov = cl2lov_lock(slice);
        struct lov_object *obj = cl2lov(slice->cls_obj);
        int result;

        LASSERT(cl_object_same(need->cld_obj, slice->cls_obj));
        LASSERT(lov->lls_nr > 0);

        ENTRY;

        if (need->cld_mode == CLM_GROUP)
                /*
                 * always allow a group lock to match.
                 */
                result = cl_lock_ext_match(&lov->lls_orig, need);
        else if (lov->lls_nr == 1) {
                struct cl_lock_descr *got = &lov->lls_sub[0].sub_got;
                result = lov_lock_stripe_is_matching(env,
                                                     cl2lov(slice->cls_obj),
                                                     lov->lls_sub[0].sub_stripe,
                                                     got, need);
        } else if (io->ci_type != CIT_TRUNC && io->ci_type != CIT_MISC &&
                   !cl_io_is_append(io) && need->cld_mode != CLM_PHANTOM)
                /*
                 * Multi-stripe locks are only suitable for `quick' IO and for
                 * glimpse.
                 */
                result = 0;
        else
                /*
                 * Most general case: multi-stripe existing lock, and
                 * (potentially) multi-stripe @need lock. Check that @need is
                 * covered by @lov's sub-locks.
                 *
                 * For now, ignore lock expansions made by the server, and
                 * match against the original lock extent.
                 */
                result = cl_lock_ext_match(&lov->lls_orig, need);
        CDEBUG(D_DLMTRACE, DDESCR"/"DDESCR" %i %i/%i: %i\n",
               PDESCR(&lov->lls_orig), PDESCR(&lov->lls_sub[0].sub_got),
               lov->lls_sub[0].sub_stripe, lov->lls_nr, lov_r0(obj)->lo_nr,
               result);
        RETURN(result);
}

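/**
 * Removes \a link between a top-lock and the sub-lock \a sub: clears the
 * corresponding slot in the parent's sub-lock array and drops the reference
 * on the parent that was taken when the sub-lock was adopted. Both locks
 * must be mutexed.
 */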
void lov_lock_unlink(const struct lu_env *env,
                     struct lov_lock_link *link, struct lovsub_lock *sub)
{
        struct lov_lock *lck    = link->lll_super;
        struct cl_lock  *parent = lck->lls_cl.cls_lock;

        LASSERT(cl_lock_is_mutexed(parent));
        LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
        ENTRY;

        list_del_init(&link->lll_list);
        LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
        /* yank this sub-lock from parent's array */
        lck->lls_sub[link->lll_idx].sub_lock = NULL;
        LASSERT(lck->lls_nr_filled > 0);
        lck->lls_nr_filled--;
        lu_ref_del(&parent->cll_reference, "lov-child", sub->lss_cl.cls_lock);
        cl_lock_put(env, parent);
        OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
        EXIT;
}

struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
                                         struct lov_lock *lck,
                                         struct lovsub_lock *sub)
{
        struct lov_lock_link *scan;

        LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
        ENTRY;

        list_for_each_entry(scan, &sub->lss_parents, lll_list) {
                if (scan->lll_super == lck)
                        RETURN(scan);
        }
        RETURN(NULL);
}

/**
 * An implementation of cl_lock_operations::clo_delete() method. This is
 * invoked for "top-to-bottom" delete, when lock destruction starts from the
 * top-lock, e.g., as a result of inode destruction.
 *
 * Unlinks top-lock from all its sub-locks. Sub-locks are not deleted there:
 * this is done separately elsewhere:
 *
 *     - for inode destruction, lov_object_delete() calls cl_object_kill() for
 *       each sub-object, purging its locks;
 *
 *     - in other cases (e.g., a fatal error with a top-lock) sub-locks are
 *       left in the cache.
 */
static void lov_lock_delete(const struct lu_env *env,
                            const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
        int i;

        LASSERT(slice->cls_lock->cll_state == CLS_FREEING);
        ENTRY;

        for (i = 0; i < lck->lls_nr; ++i) {
                struct lov_lock_sub *lls;
                struct lovsub_lock  *lsl;
                struct cl_lock      *sublock;
                int rc;

                lls = &lck->lls_sub[i];
                lsl = lls->sub_lock;
                if (lsl == NULL)
                        continue;

                sublock = lsl->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lck, lls, closure, NULL);
                if (rc == 0) {
                        if (lck->lls_sub[i].sub_flags & LSF_HELD)
                                lov_sublock_release(env, lck, i, 1, 0);
                        if (sublock->cll_state < CLS_FREEING) {
                                struct lov_lock_link *link;

                                link = lov_lock_link_find(env, lck, lsl);
                                LASSERT(link != NULL);
                                lov_lock_unlink(env, link, lsl);
                                LASSERT(lck->lls_sub[i].sub_lock == NULL);
                        }
                        lov_sublock_unlock(env, lsl, closure, NULL);
                } else if (rc == CLO_REPEAT) {
                        --i; /* repeat with this lock */
                } else {
                        CL_LOCK_DEBUG(D_ERROR, env, sublock,
                                      "Cannot get sub-lock for delete: %i\n",
                                      rc);
                }
        }
        cl_lock_closure_fini(closure);
        EXIT;
}

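/**
 * An implementation of cl_lock_operations::clo_print(): prints the number of
 * sub-locks followed by one line per sub-lock slot (its flags and, when the
 * sub-lock exists, the sub-lock itself).
 */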
static int lov_lock_print(const struct lu_env *env, void *cookie,
                          lu_printer_t p, const struct cl_lock_slice *slice)
{
        struct lov_lock *lck = cl2lov_lock(slice);
        int              i;

        (*p)(env, cookie, "%d\n", lck->lls_nr);
        for (i = 0; i < lck->lls_nr; ++i) {
                struct lov_lock_sub *sub;

                sub = &lck->lls_sub[i];
                (*p)(env, cookie, "    %d %x: ", i, sub->sub_flags);
                if (sub->sub_lock != NULL)
                        cl_lock_print(env, cookie, p,
                                      sub->sub_lock->lss_cl.cls_lock);
                else
                        (*p)(env, cookie, "---\n");
        }
        return 0;
}

static const struct cl_lock_operations lov_lock_ops = {
        .clo_fini      = lov_lock_fini,
        .clo_enqueue   = lov_lock_enqueue,
        .clo_wait      = lov_lock_wait,
        .clo_use       = lov_lock_use,
        .clo_unuse     = lov_lock_unuse,
        .clo_fits_into = lov_lock_fits_into,
        .clo_delete    = lov_lock_delete,
        .clo_print     = lov_lock_print
};

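/**
 * Allocates the lov_lock slice for a raid0 layout, attaches it to \a lock
 * with lov_lock_ops, and fills in the sub-lock descriptions via
 * lov_lock_sub_init().
 */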
int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
                        struct cl_lock *lock, const struct cl_io *io)
{
        struct lov_lock *lck;
        int result;

        ENTRY;
        OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, CFS_ALLOC_IO);
        if (lck != NULL) {
                cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
                result = lov_lock_sub_init(env, lck, io);
        } else
                result = -ENOMEM;
        RETURN(result);
}

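/**
 * Returns the lock closure stored in the lov environment info
 * (lti_closure), initialized with \a parent as its origin.
 */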
static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
                                               struct cl_lock *parent)
{
        struct cl_lock_closure *closure;

        closure = &lov_env_info(env)->lti_closure;
        LASSERT(list_empty(&closure->clc_list));
        cl_lock_closure_init(env, closure, parent, 1);
        return closure;
}


/** @} lov */