4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2014, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * Implementation of cl_lock for LOV layer.
38 * Author: Nikita Danilov <nikita.danilov@sun.com>
41 #define DEBUG_SUBSYSTEM S_LOV
43 #include "lov_cl_internal.h"
49 /*****************************************************************************
51 * Lov lock operations.
/*
 * Select the env/io pair to use when calling lock operations on one
 * sub-lock (one stripe) of a striped top-lock.
 *
 * Returns the per-session scratch lov_sublock_env; when lse_sub is
 * non-NULL the caller must release it with lov_sublock_env_put().
 *
 * NOTE(review): this listing is garbled — lines carry stray leading
 * numbers and several lines (braces, else branch, return) appear to be
 * missing; verify against the original file before relying on details.
 */
55 static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
56 const struct cl_lock *parent,
57 struct lov_lock_sub *lls)
59 struct lov_sublock_env *subenv;
60 struct lov_io *lio = lov_env_io(env);
61 struct cl_io *io = lio->lis_cl.cis_io;
62 struct lov_io_sub *sub;
/* Per-session scratch area, reused for every sub-lock call. */
64 subenv = &lov_env_session(env)->ls_subenv;
67 * FIXME: We tend to use the subio's env & io to call the sublock
68 * lock operations because osc lock sometimes stores some control
69 * variables in thread's IO information (now only lockless information).
70 * However, if the lock's host(object) is different from the object
71 * for current IO, we have no way to get the subenv and subio because
72 * they are not initialized at all. As a temp fix, in this case,
73 * we still borrow the parent's env to call sublock operations.
/* No current io, or io targets a different object: borrow parent env. */
75 if (!io || !cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
76 subenv->lse_env = env;
78 subenv->lse_sub = NULL;
/* Otherwise use the per-stripe sub-io's own env/io (takes a ref). */
80 sub = lov_sub_get(env, lio, lls->sub_stripe);
82 subenv->lse_env = sub->sub_env;
83 subenv->lse_io = sub->sub_io;
84 subenv->lse_sub = sub;
/*
 * Release the sub-io reference taken by lov_sublock_env_get(), if any.
 * Safe to call with NULL or with lse_sub == NULL (parent-env fallback).
 */
92 static void lov_sublock_env_put(struct lov_sublock_env *subenv)
94 if (subenv && subenv->lse_sub)
95 lov_sub_put(subenv->lse_sub);
/*
 * Initialize one sub-lock of @parent using the env of its own stripe
 * (or the parent's env as a fallback, see lov_sublock_env_get()).
 * Returns 0 on success or a negative errno from lov_sublock_env_get().
 *
 * NOTE(review): listing appears truncated (braces/return missing).
 */
98 static int lov_sublock_init(const struct lu_env *env,
99 const struct cl_lock *parent,
100 struct lov_lock_sub *lls)
102 struct lov_sublock_env *subenv;
106 subenv = lov_sublock_env_get(env, parent, lls);
107 if (!IS_ERR(subenv)) {
108 result = cl_lock_init(subenv->lse_env, &lls->sub_lock,
/* drop the sub-io reference taken above */
110 lov_sublock_env_put(subenv);
113 result = PTR_ERR(subenv);
119 * Creates sub-locks for a given lov_lock for the first time.
121 * Goes through all sub-objects of top-object, and creates sub-locks on every
122 * sub-object intersecting with top-lock extent. This is complicated by the
123 * fact that top-lock (that is being created) can be accessed concurrently
124 * through already created sub-locks (possibly shared with other top-locks).
/*
 * Returns the new lov_lock, or an ERR_PTR on allocation/init failure.
 *
 * NOTE(review): listing is garbled (stray leading numbers, missing
 * braces/declarations/returns); comments describe visible code only.
 */
126 static struct lov_lock *lov_lock_sub_init(const struct lu_env *env,
127 const struct cl_object *obj,
128 struct cl_lock *lock)
138 struct lov_object *loo = cl2lov(obj);
139 struct lov_layout_raid0 *r0 = lov_r0(loo);
140 struct lov_lock *lovlck;
144 CDEBUG(D_INODE, "%p: lock/io FID "DFID"/"DFID", lock/io clobj %p/%p\n",
145 loo, PFID(lu_object_fid(lov2lu(loo))),
146 PFID(lu_object_fid(&obj->co_lu)),
/* Convert the top-lock page-index extent into a byte extent. */
149 file_start = cl_offset(lov2cl(loo), lock->cll_descr.cld_start);
150 file_end = cl_offset(lov2cl(loo), lock->cll_descr.cld_end + 1) - 1;
/* First pass: count stripes whose extent intersects the lock extent. */
152 for (i = 0, nr = 0; i < r0->lo_nr; i++) {
154 * XXX for wide striping smarter algorithm is desirable,
155 * breaking out of the loop, early.
157 if (likely(r0->lo_sub[i] != NULL) && /* spare layout */
158 lov_stripe_intersects(loo->lo_lsm, i,
159 file_start, file_end, &start, &end))
/* Allocate top-lock with a flexible array of 'nr' sub-lock slots. */
164 OBD_ALLOC_LARGE(lovlck, offsetof(struct lov_lock, lls_sub[nr]));
166 RETURN(ERR_PTR(-ENOMEM));
/* Second pass: build a descriptor and init a sub-lock per stripe. */
169 for (i = 0, nr = 0; i < r0->lo_nr; ++i) {
170 if (likely(r0->lo_sub[i] != NULL) &&
171 lov_stripe_intersects(loo->lo_lsm, i,
172 file_start, file_end, &start, &end)) {
173 struct lov_lock_sub *lls = &lovlck->lls_sub[nr];
174 struct cl_lock_descr *descr;
176 descr = &lls->sub_lock.cll_descr;
178 LASSERT(descr->cld_obj == NULL);
179 descr->cld_obj = lovsub2cl(r0->lo_sub[i]);
/* Map stripe byte extent back to page indices on the sub-object. */
180 descr->cld_start = cl_index(descr->cld_obj, start);
181 descr->cld_end = cl_index(descr->cld_obj, end);
/* Sub-lock inherits mode, group id and enqueue flags from top-lock. */
182 descr->cld_mode = lock->cll_descr.cld_mode;
183 descr->cld_gid = lock->cll_descr.cld_gid;
184 descr->cld_enq_flags = lock->cll_descr.cld_enq_flags;
188 /* initialize sub lock */
189 result = lov_sublock_init(env, lock, lls);
193 lls->sub_initialized = 1;
/* Both passes must have found the same number of stripes. */
197 LASSERT(ergo(result == 0, nr == lovlck->lls_nr));
/* Error path: fini sub-locks created so far, free the allocation. */
200 for (i = 0; i < nr; ++i) {
201 if (!lovlck->lls_sub[i].sub_initialized)
204 cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
207 OBD_FREE_LARGE(lovlck,
208 offsetof(struct lov_lock, lls_sub[nr]));
209 lovlck = ERR_PTR(result);
/*
 * cl_lock_operations::clo_fini() for lov: finalize every initialized
 * sub-lock, then free the flexible-array allocation made in
 * lov_lock_sub_init().
 */
215 static void lov_lock_fini(const struct lu_env *env,
216 struct cl_lock_slice *slice)
218 struct lov_lock *lovlck;
222 lovlck = cl2lov_lock(slice);
223 for (i = 0; i < lovlck->lls_nr; ++i) {
/* enqueued sub-locks must have been cancelled before fini */
224 LASSERT(!lovlck->lls_sub[i].sub_is_enqueued);
225 if (lovlck->lls_sub[i].sub_initialized)
226 cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
228 OBD_FREE_LARGE(lovlck,
229 offsetof(struct lov_lock, lls_sub[lovlck->lls_nr]));
234 * Implementation of cl_lock_operations::clo_enqueue() for lov layer. This
235 * function is rather subtle, as it enqueues top-lock (i.e., advances top-lock
236 * state machine from CLS_QUEUING to CLS_ENQUEUED states) by juggling sub-lock
237 * state machines in the face of sub-locks sharing (by multiple top-locks),
238 * and concurrent sub-lock cancellations.
/*
 * NOTE(review): listing looks truncated (loop body exits, return and
 * 'i'/'rc' declarations are not visible); confirm against original.
 */
240 static int lov_lock_enqueue(const struct lu_env *env,
241 const struct cl_lock_slice *slice,
242 struct cl_io *io, struct cl_sync_io *anchor)
244 struct cl_lock *lock = slice->cls_lock;
245 struct lov_lock *lovlck = cl2lov_lock(slice);
/* Enqueue each sub-lock under its own stripe env/io. */
251 for (i = 0; i < lovlck->lls_nr; ++i) {
252 struct lov_lock_sub *lls = &lovlck->lls_sub[i];
253 struct lov_sublock_env *subenv;
255 subenv = lov_sublock_env_get(env, lock, lls);
256 if (IS_ERR(subenv)) {
257 rc = PTR_ERR(subenv);
/* anchor lets the caller wait for all sub-enqueues to complete */
261 rc = cl_lock_enqueue(subenv->lse_env, subenv->lse_io,
262 &lls->sub_lock, anchor);
263 lov_sublock_env_put(subenv);
/* mark so lov_lock_cancel() knows this sub-lock needs cancelling */
267 lls->sub_is_enqueued = 1;
/*
 * cl_lock_operations::clo_cancel() for lov: cancel every sub-lock that
 * was marked enqueued by lov_lock_enqueue(), clearing the flag first.
 *
 * NOTE(review): listing looks truncated ('continue', braces and the
 * error-branch argument are not visible); confirm against original.
 */
272 static void lov_lock_cancel(const struct lu_env *env,
273 const struct cl_lock_slice *slice)
275 struct cl_lock *lock = slice->cls_lock;
276 struct lov_lock *lovlck = cl2lov_lock(slice);
281 for (i = 0; i < lovlck->lls_nr; ++i) {
282 struct lov_lock_sub *lls = &lovlck->lls_sub[i];
283 struct cl_lock *sublock = &lls->sub_lock;
284 struct lov_sublock_env *subenv;
/* skip sub-locks that were never (or are no longer) enqueued */
286 if (!lls->sub_is_enqueued)
289 lls->sub_is_enqueued = 0;
290 subenv = lov_sublock_env_get(env, lock, lls);
291 if (!IS_ERR(subenv)) {
292 cl_lock_cancel(subenv->lse_env, sublock);
293 lov_sublock_env_put(subenv);
/* env lookup failed: log and keep cancelling remaining sub-locks */
295 CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
296 "lov_lock_cancel fails with %ld.\n",
/*
 * cl_lock_operations::clo_print() for lov: print the sub-lock count,
 * then each sub-lock's index, enqueued flag, and nested lock state.
 */
302 static int lov_lock_print(const struct lu_env *env, void *cookie,
303 lu_printer_t p, const struct cl_lock_slice *slice)
305 struct lov_lock *lck = cl2lov_lock(slice);
308 (*p)(env, cookie, "%d\n", lck->lls_nr);
309 for (i = 0; i < lck->lls_nr; ++i) {
310 struct lov_lock_sub *sub;
312 sub = &lck->lls_sub[i];
313 (*p)(env, cookie, " %d %x: ", i, sub->sub_is_enqueued);
314 cl_lock_print(env, cookie, p, &sub->sub_lock);
/* cl_lock operations vector for striped (RAID0) lov objects. */
319 static const struct cl_lock_operations lov_lock_ops = {
320 .clo_fini = lov_lock_fini,
321 .clo_enqueue = lov_lock_enqueue,
322 .clo_cancel = lov_lock_cancel,
323 .clo_print = lov_lock_print
/*
 * Initialize the lov slice of a top-lock on a RAID0 (striped) object:
 * create the sub-locks and attach this layer's slice to @lock.
 * Returns 0 on success or the errno from lov_lock_sub_init().
 *
 * NOTE(review): listing appears truncated (braces/return missing).
 */
326 int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
327 struct cl_lock *lock, const struct cl_io *io)
329 struct lov_lock *lck;
333 lck = lov_lock_sub_init(env, obj, lock);
335 cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
337 result = PTR_ERR(lck);
/*
 * clo_fini() for a lock on an empty (stripe-less) lov object: the
 * slab-allocated lov_lock has no sub-locks, just free it.
 */
341 static void lov_empty_lock_fini(const struct lu_env *env,
342 struct cl_lock_slice *slice)
344 struct lov_lock *lck = cl2lov_lock(slice);
345 OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
/* clo_print() for a lock on an empty lov object: nothing to show. */
348 static int lov_empty_lock_print(const struct lu_env *env, void *cookie,
349 lu_printer_t p, const struct cl_lock_slice *slice)
351 (*p)(env, cookie, "empty\n");
355 /* XXX: more methods will be added later. */
/* Minimal cl_lock operations vector for empty (stripe-less) objects. */
356 static const struct cl_lock_operations lov_empty_lock_ops = {
357 .clo_fini = lov_empty_lock_fini,
358 .clo_print = lov_empty_lock_print
361 int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
362 struct cl_lock *lock, const struct cl_io *io)
364 struct lov_lock *lck;
365 int result = -ENOMEM;
368 OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, GFP_NOFS);
370 cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);