fs/lustre-release.git: lustre/lov/lovsub_lock.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_lock for LOVSUB layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LOV

#include "lov_cl_internal.h"

/** \addtogroup lov
 *  @{
 */

/*****************************************************************************
 *
 * Lovsub lock operations.
 *
 */

static void lovsub_lock_fini(const struct lu_env *env,
                             struct cl_lock_slice *slice)
{
        struct lovsub_lock   *lsl;

        ENTRY;
        lsl = cl2lovsub_lock(slice);
        LASSERT(list_empty(&lsl->lss_parents));
        OBD_SLAB_FREE_PTR(lsl, lovsub_lock_kmem);
        EXIT;
}

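/**
 * Helper that takes a reference on the top-lock of a parent lov_lock and
 * acquires its mutex; lovsub_parent_unlock() below undoes both steps.
 */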
static void lovsub_parent_lock(const struct lu_env *env, struct lov_lock *lov)
{
        struct cl_lock *parent;

        ENTRY;
        parent = lov->lls_cl.cls_lock;
        cl_lock_get(parent);
        lu_ref_add(&parent->cll_reference, "lovsub-parent", cfs_current());
        cl_lock_mutex_get(env, parent);
        EXIT;
}

static void lovsub_parent_unlock(const struct lu_env *env, struct lov_lock *lov)
{
        struct cl_lock *parent;

        ENTRY;
        parent = lov->lls_cl.cls_lock;
        cl_lock_mutex_put(env, lov->lls_cl.cls_lock);
        lu_ref_del(&parent->cll_reference, "lovsub-parent", cfs_current());
        cl_lock_put(env, parent);
        EXIT;
}

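/**
 * Propagates a state change of the sub-lock to a single parent top-lock.
 * Returns 1 (restart) when the sub-lock mutex had to be released to report
 * an error to the parent, so the caller must re-take it and re-scan the
 * parent list.
 */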
static int lovsub_lock_state_one(const struct lu_env *env,
                                 const struct lovsub_lock *lovsub,
                                 struct lov_lock *lov)
{
        struct cl_lock *parent;
        struct cl_lock *child;
        int             restart = 0;

        ENTRY;
        parent = lov->lls_cl.cls_lock;
        child  = lovsub->lss_cl.cls_lock;

        if (lovsub->lss_active != parent) {
                lovsub_parent_lock(env, lov);
                if (child->cll_error != 0 && parent->cll_error == 0) {
                        /*
                         * This is a deadlock case:
                         * cl_lock_error(for the parent lock)
                         *   -> cl_lock_delete
                         *     -> lov_lock_delete
                         *       -> cl_lock_enclosure
                         *         -> cl_lock_mutex_try(for the child lock)
                         */
                        cl_lock_mutex_put(env, child);
                        cl_lock_error(env, parent, child->cll_error);
                        restart = 1;
                } else {
                        cl_lock_signal(env, parent);
                }
                lovsub_parent_unlock(env, lov);
        }
        RETURN(restart);
}

/**
 * Implements the cl_lock_operations::clo_state() method for the lovsub
 * layer. This method is called whenever a sub-lock state changes, and
 * propagates the state change to the top-locks.
 */
static void lovsub_lock_state(const struct lu_env *env,
                              const struct cl_lock_slice *slice,
                              enum cl_lock_state state)
{
        struct lovsub_lock   *sub = cl2lovsub_lock(slice);
        struct lov_lock_link *scan;
        int                   restart = 0;

        LASSERT(cl_lock_is_mutexed(slice->cls_lock));
        ENTRY;

        do {
                restart = 0;
                list_for_each_entry(scan, &sub->lss_parents, lll_list) {
                        restart = lovsub_lock_state_one(env, sub,
                                                        scan->lll_super);
                        if (restart) {
                                cl_lock_mutex_get(env, slice->cls_lock);
                                break;
                        }
                }
        } while (restart);
        EXIT;
}

/**
 * Implementation of cl_lock_operations::clo_weigh() that estimates lock
 * weight by asking the parent lock.
 */
static unsigned long lovsub_lock_weigh(const struct lu_env *env,
                                       const struct cl_lock_slice *slice)
{
        struct lovsub_lock *lock = cl2lovsub_lock(slice);
        struct lov_lock    *lov;
        unsigned long       dumbbell;

        ENTRY;

        LASSERT(cl_lock_is_mutexed(slice->cls_lock));

        if (!list_empty(&lock->lss_parents)) {
                /*
                 * It is not clear whether all parents have to be asked and
                 * their estimations summed, or whether asking one is enough.
                 * For the current usages, one is always enough.
                 */
                lov = container_of(lock->lss_parents.next,
                                   struct lov_lock_link, lll_list)->lll_super;

                lovsub_parent_lock(env, lov);
                dumbbell = cl_lock_weigh(env, lov->lls_cl.cls_lock);
                lovsub_parent_unlock(env, lov);
        } else
                dumbbell = 0;

        RETURN(dumbbell);
}

/**
 * Maps start/end offsets within a stripe to offsets within a file.
 */
static void lovsub_lock_descr_map(const struct cl_lock_descr *in,
                                  struct lov_object *obj,
                                  int stripe, struct cl_lock_descr *out)
{
        struct lov_stripe_md *lsm = lov_r0(obj)->lo_lsm;
        pgoff_t size; /* stripe size in pages */
        pgoff_t skip; /* how many pages in every stripe are occupied by
                       * "other" stripes */
        pgoff_t start;
        pgoff_t end;

        ENTRY;
        start = in->cld_start;
        end   = in->cld_end;

        if (lsm->lsm_stripe_count > 1) {
                size = cl_index(lov2cl(obj), lsm->lsm_stripe_size);
                skip = (lsm->lsm_stripe_count - 1) * size;

                /* XXX overflow check here? */
                start += start/size * skip + stripe * size;

                if (end != CL_PAGE_EOF) {
                        end += end/size * skip + stripe * size;
                        /*
                         * And check for overflow...
                         */
                        if (end < in->cld_end)
                                end = CL_PAGE_EOF;
                }
        }
        out->cld_start = start;
        out->cld_end   = end;
        EXIT;
}
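/*
 * Worked example for lovsub_lock_descr_map() (illustrative values only):
 * with lsm_stripe_count == 3 and a stripe size of 256 pages, a lock that
 * starts at page 300 of stripe 1 maps to file page
 * 300 + (300/256) * 512 + 1 * 256 == 1068, i.e. page 44 of the second
 * stripe unit that stripe 1 owns within the file.
 */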

/**
 * Adjusts the parent lock extent when a sub-lock is attached to a parent.
 * This is called in two ways:
 *
 *     - as part of the receive call-back, when the server returns a granted
 *       extent to the client, and
 *
 *     - when a top-lock finds an existing sub-lock in the cache.
 *
 * Note that the lock mode is not propagated to the parent: i.e., if a
 * CLM_READ top-lock matches a CLM_WRITE sub-lock, the top-lock is still
 * CLM_READ.
 */
int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov,
                       struct lovsub_lock *sublock,
                       const struct cl_lock_descr *d, int idx)
{
        struct cl_lock       *parent;
        struct cl_lock       *child;
        struct lovsub_object *subobj;
        struct cl_lock_descr *pd;
        struct cl_lock_descr *parent_descr;
        int                   result;

        parent       = lov->lls_cl.cls_lock;
        parent_descr = &parent->cll_descr;
        LASSERT(cl_lock_mode_match(d->cld_mode, parent_descr->cld_mode));

        child  = sublock->lss_cl.cls_lock;
        subobj = cl2lovsub(sublock->lss_cl.cls_obj);
        pd     = &lov_env_info(env)->lti_ldescr;

        pd->cld_obj  = parent_descr->cld_obj;
        pd->cld_mode = parent_descr->cld_mode;
        pd->cld_gid  = parent_descr->cld_gid;
        lovsub_lock_descr_map(d, subobj->lso_super, subobj->lso_index, pd);
        lov->lls_sub[idx].sub_got = *d;
        /*
         * Notify top-lock about modification, if lock description changes
         * materially.
         */
        if (!cl_lock_ext_match(parent_descr, pd))
                result = cl_lock_modify(env, parent, pd);
        else
                result = 0;
        return result;
}

static int lovsub_lock_modify(const struct lu_env *env,
                              const struct cl_lock_slice *s,
                              const struct cl_lock_descr *d)
{
        struct lovsub_lock   *lock   = cl2lovsub_lock(s);
        struct lov_lock_link *scan;
        struct lov_lock      *lov;
        int result                   = 0;

        ENTRY;

        LASSERT(cl_lock_mode_match(d->cld_mode,
                                   s->cls_lock->cll_descr.cld_mode));
        list_for_each_entry(scan, &lock->lss_parents, lll_list) {
                int rc;

                lov = scan->lll_super;
                lovsub_parent_lock(env, lov);
                rc = lov_sublock_modify(env, lov, lock, d, scan->lll_idx);
                lovsub_parent_unlock(env, lov);
                result = result ?: rc;
        }
        RETURN(result);
}

static int lovsub_lock_closure(const struct lu_env *env,
                               const struct cl_lock_slice *slice,
                               struct cl_lock_closure *closure)
{
        struct lovsub_lock   *sub;
        struct cl_lock       *parent;
        struct lov_lock_link *scan;
        int                   result;

        LASSERT(cl_lock_is_mutexed(slice->cls_lock));
        ENTRY;

        sub    = cl2lovsub_lock(slice);
        result = 0;

        list_for_each_entry(scan, &sub->lss_parents, lll_list) {
                parent = scan->lll_super->lls_cl.cls_lock;
                result = cl_lock_closure_build(env, parent, closure);
                if (result != 0)
                        break;
        }
        RETURN(result);
}

/**
 * A helper function for lovsub_lock_delete() that deals with a given parent
 * top-lock.
 */
static int lovsub_lock_delete_one(const struct lu_env *env,
                                  struct cl_lock *child, struct lov_lock *lov)
{
        struct cl_lock *parent;
        int             result;
        ENTRY;

        parent  = lov->lls_cl.cls_lock;
        result = 0;

        switch (parent->cll_state) {
        case CLS_NEW:
        case CLS_QUEUING:
        case CLS_ENQUEUED:
        case CLS_FREEING:
                cl_lock_signal(env, parent);
                break;
        case CLS_INTRANSIT:
                /*
                 * Here lies a problem: a sub-lock is canceled while top-lock
                 * is being unlocked. Top-lock cannot be moved into CLS_NEW
                 * state, because unlocking has to succeed eventually by
                 * placing lock into CLS_CACHED (or failing it), see
                 * cl_unuse_try(). Nor can top-lock be left in CLS_CACHED
                 * state, because lov maintains an invariant that all
                 * sub-locks exist in CLS_CACHED (this allows cached top-lock
                 * to be reused immediately). Nor can we wait for top-lock
                 * state to change, because this can be synchronous to the
                 * current thread.
                 *
                 * We know for sure that lov_lock_unuse() will be called at
                 * least one more time to finish un-using, so leave a mark on
                 * the top-lock, that will be seen by the next call to
                 * lov_lock_unuse().
                 */
                if (cl_lock_is_intransit(parent))
                        lov->lls_cancel_race = 1;
                break;
        case CLS_CACHED:
                /*
                 * if a sub-lock is canceled, move its top-lock into CLS_NEW
                 * state to preserve an invariant that a top-lock in
                 * CLS_CACHED is immediately ready for re-use (i.e., has all
                 * sub-locks), and so that next attempt to re-use the top-lock
                 * enqueues missing sub-lock.
                 */
                cl_lock_state_set(env, parent, CLS_NEW);
                /*
                 * if last sub-lock is canceled, destroy the top-lock (which
                 * is now `empty') proactively.
                 */
                if (lov->lls_nr_filled == 0) {
                        /* ... but unfortunately, this cannot be done easily,
                         * as cancellation of a top-lock might acquire mutexes
                         * of its other sub-locks, violating lock ordering,
                         * see cl_lock_{cancel,delete}() preconditions.
                         *
                         * To work around this, the mutex of this sub-lock is
                         * released, top-lock is destroyed, and sub-lock mutex
                         * acquired again. The list of parents has to be
                         * re-scanned from the beginning after this.
                         *
                         * Only do this if no mutexes other than on @child and
                         * @parent are held by the current thread.
                         *
                         * TODO: The locking model here is too complex, because
                         * the lock may be canceled and deleted voluntarily:
                         *    cl_lock_request
                         *      -> osc_lock_enqueue_wait
                         *        -> osc_lock_cancel_wait
                         *          -> cl_lock_delete
                         *            -> lovsub_lock_delete
                         *              -> cl_lock_cancel/delete
                         *                -> ...
                         *
                         * The better choice is to spawn a kernel thread for
                         * this purpose. -jay
                         */
                        if (cl_lock_nr_mutexed(env) == 2) {
                                cl_lock_mutex_put(env, child);
                                cl_lock_cancel(env, parent);
                                cl_lock_delete(env, parent);
                                result = 1;
                        }
                }
                break;
        case CLS_HELD:
        default:
                LASSERTF(parent->cll_error != 0, "cll state %d is wrong!\n",
                         parent->cll_state);
                break;
        }

        RETURN(result);
}

/**
 * An implementation of the cl_lock_operations::clo_delete() method. This is
 * invoked in "bottom-to-top" delete, when lock destruction starts from the
 * sub-lock (e.g., as a result of ldlm lock LRU policy).
 */
static void lovsub_lock_delete(const struct lu_env *env,
                               const struct cl_lock_slice *slice)
{
        struct cl_lock     *child = slice->cls_lock;
        struct lovsub_lock *sub   = cl2lovsub_lock(slice);
        int restart;

        LASSERT(cl_lock_is_mutexed(child));

        ENTRY;
        /*
         * Destruction of a sub-lock might take multiple iterations, because
         * when the last sub-lock of a given top-lock is deleted, the top-lock
         * is canceled proactively, and this requires releasing the sub-lock
         * mutex. Once the sub-lock mutex has been released, the list of its
         * parents has to be re-scanned from the beginning.
         */
        do {
                struct lov_lock      *lov;
                struct lov_lock_link *scan;
                struct lov_lock_link *temp;
                struct lov_lock_sub  *subdata;

                restart = 0;
                list_for_each_entry_safe(scan, temp,
                                         &sub->lss_parents, lll_list) {
                        lov     = scan->lll_super;
                        subdata = &lov->lls_sub[scan->lll_idx];
                        lovsub_parent_lock(env, lov);
                        subdata->sub_got = subdata->sub_descr;
                        lov_lock_unlink(env, scan, sub);
                        restart = lovsub_lock_delete_one(env, child, lov);
                        lovsub_parent_unlock(env, lov);

                        if (restart) {
                                cl_lock_mutex_get(env, child);
                                break;
                        }
                }
        } while (restart);
        EXIT;
}

static int lovsub_lock_print(const struct lu_env *env, void *cookie,
                             lu_printer_t p, const struct cl_lock_slice *slice)
{
        struct lovsub_lock   *sub = cl2lovsub_lock(slice);
        struct lov_lock      *lov;
        struct lov_lock_link *scan;

        list_for_each_entry(scan, &sub->lss_parents, lll_list) {
                lov = scan->lll_super;
                (*p)(env, cookie, "[%d %p ", scan->lll_idx, lov);
                if (lov != NULL)
                        cl_lock_descr_print(env, cookie, p,
                                            &lov->lls_cl.cls_lock->cll_descr);
                (*p)(env, cookie, "] ");
        }
        return 0;
}

static const struct cl_lock_operations lovsub_lock_ops = {
        .clo_fini    = lovsub_lock_fini,
        .clo_state   = lovsub_lock_state,
        .clo_delete  = lovsub_lock_delete,
        .clo_modify  = lovsub_lock_modify,
        .clo_closure = lovsub_lock_closure,
        .clo_weigh   = lovsub_lock_weigh,
        .clo_print   = lovsub_lock_print
};

int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj,
                     struct cl_lock *lock, const struct cl_io *io)
{
        struct lovsub_lock *lsk;
        int result;

        ENTRY;
        OBD_SLAB_ALLOC_PTR_GFP(lsk, lovsub_lock_kmem, CFS_ALLOC_IO);
        if (lsk != NULL) {
                CFS_INIT_LIST_HEAD(&lsk->lss_parents);
                cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops);
                result = 0;
        } else
                result = -ENOMEM;
        RETURN(result);
}
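
/*
 * Note: lovsub_lock_init() is expected to be registered as the
 * cl_object_operations::coo_lock_init() method of the lovsub object (see
 * lovsub_object.c), so that a lovsub slice is stacked onto every sub-lock
 * created below a LOV top-lock.
 */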

/** @} lov */