/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_lock for LOV layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LOV

#include "lov_cl_internal.h"

/*****************************************************************************
 *
 * Lov lock operations.
 *
 */
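
/*
 * Selects the environment and IO that sub-lock operations should be called
 * with for \a lls. The result lives in per-session storage and, depending
 * on whether the current IO targets the lock's object, points either at the
 * sub-IO's private env/io or at the parent's (see the FIXME below).
 */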
static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
                                                   const struct cl_lock *parent,
                                                   struct lov_lock_sub *lls)
{
        struct lov_sublock_env *subenv;
        struct lov_io *lio = lov_env_io(env);
        struct cl_io *io = lio->lis_cl.cis_io;
        struct lov_io_sub *sub;

        subenv = &lov_env_session(env)->ls_subenv;

        /*
         * FIXME: We tend to use the subio's env & io to call the sublock
         * lock operations because osc lock sometimes stores some control
         * variables in the thread's IO information (now only lockless
         * information). However, if the lock's host (object) is different
         * from the object for the current IO, we have no way to get the
         * subenv and subio because they are not initialized at all. As a
         * temporary fix, in this case we still borrow the parent's env to
         * call sublock operations.
         */
        if (!io || !cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
                subenv->lse_env = env;
                subenv->lse_io = io;
        } else {
                sub = lov_sub_get(env, lio, lls->sub_index);
                if (!IS_ERR(sub)) {
                        subenv->lse_env = sub->sub_env;
                        subenv->lse_io = &sub->sub_io;
                } else {
                        subenv = (void *)sub;
                }
        }
        return subenv;
}
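
/*
 * Initializes the cl_lock embedded in \a lls, using the env/io pair chosen
 * by lov_sublock_env_get(); on failure the error is propagated instead.
 */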
static int lov_sublock_init(const struct lu_env *env,
                            const struct cl_lock *parent,
                            struct lov_lock_sub *lls)
{
        struct lov_sublock_env *subenv;
        int result;

        subenv = lov_sublock_env_get(env, parent, lls);
        if (!IS_ERR(subenv)) {
                result = cl_lock_init(subenv->lse_env, &lls->sub_lock,
                                      subenv->lse_io);
        } else {
                result = PTR_ERR(subenv);
        }
        RETURN(result);
}

/**
 * Creates sub-locks for a given lov_lock for the first time.
 *
 * Goes through all sub-objects of top-object, and creates sub-locks on every
 * sub-object intersecting with top-lock extent. This is complicated by the
 * fact that top-lock (that is being created) can be accessed concurrently
 * through already created sub-locks (possibly shared with other top-locks).
 */
static struct lov_lock *lov_lock_sub_init(const struct lu_env *env,
                                          const struct cl_object *obj,
                                          struct cl_lock *lock)
{
        struct lov_object *lov = cl2lov(obj);
        struct lov_lock *lovlck;
        struct lu_extent ext;
        loff_t start;
        loff_t end;
        int result = 0;
        int index;
        int nr;
        int i;

        ENTRY;

        ext.e_start = cl_offset(obj, lock->cll_descr.cld_start);
        if (lock->cll_descr.cld_end == CL_PAGE_EOF)
                ext.e_end = OBD_OBJECT_EOF;
        else
                ext.e_end = cl_offset(obj, lock->cll_descr.cld_end + 1);

        nr = 0;
        lov_foreach_io_layout(index, lov_env_io(env), &ext) {
                struct lov_layout_raid0 *r0 = lov_r0(lov, index);

                for (i = 0; i < r0->lo_nr; i++) {
                        if (likely(r0->lo_sub[i]) && /* spare layout */
                            lov_stripe_intersects(lov->lo_lsm, index, i,
                                                  &ext, &start, &end))
                                nr++;
                }
        }
        /*
         * Aggressive lock request (from cl_setattr_ost) which asks for
         * [eof, -1) lock, could come across uninstantiated layout extent,
         * hence a 0 nr is possible.
         */
        OBD_ALLOC_LARGE(lovlck, offsetof(struct lov_lock, lls_sub[nr]));
        if (!lovlck)
                RETURN(ERR_PTR(-ENOMEM));

        lovlck->lls_nr = nr;
        nr = 0;
        lov_foreach_io_layout(index, lov_env_io(env), &ext) {
                struct lov_layout_raid0 *r0 = lov_r0(lov, index);

                for (i = 0; i < r0->lo_nr; ++i) {
                        struct lov_lock_sub *lls = &lovlck->lls_sub[nr];
                        struct cl_lock_descr *descr = &lls->sub_lock.cll_descr;

                        if (unlikely(!r0->lo_sub[i]) ||
                            !lov_stripe_intersects(lov->lo_lsm, index, i,
                                                   &ext, &start, &end))
                                continue;

                        LASSERT(descr->cld_obj == NULL);
                        descr->cld_obj = lovsub2cl(r0->lo_sub[i]);
                        descr->cld_start = cl_index(descr->cld_obj, start);
                        descr->cld_end = cl_index(descr->cld_obj, end);
                        descr->cld_mode = lock->cll_descr.cld_mode;
                        descr->cld_gid = lock->cll_descr.cld_gid;
                        descr->cld_enq_flags = lock->cll_descr.cld_enq_flags;
                        lls->sub_index = lov_comp_index(index, i);

                        /* initialize sub lock */
                        result = lov_sublock_init(env, lock, lls);
                        if (result < 0)
                                break;

                        lls->sub_initialized = 1;
                        nr++;
                }
                if (result < 0)
                        break;
        }
        LASSERT(ergo(result == 0, nr == lovlck->lls_nr));

        if (result != 0) {
                for (i = 0; i < nr; ++i) {
                        if (!lovlck->lls_sub[i].sub_initialized)
                                break;
                        cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
                }
                /* free with the allocated size, not the filled count */
                OBD_FREE_LARGE(lovlck, offsetof(struct lov_lock,
                                                lls_sub[lovlck->lls_nr]));
                lovlck = ERR_PTR(result);
        }
        RETURN(lovlck);
}
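
/*
 * Implementation of cl_lock_operations::clo_fini(): finalizes all
 * initialized sub-locks and frees the variable-sized lov_lock itself.
 */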
static void lov_lock_fini(const struct lu_env *env,
                          struct cl_lock_slice *slice)
{
        struct lov_lock *lovlck;
        int i;

        ENTRY;
        lovlck = cl2lov_lock(slice);
        for (i = 0; i < lovlck->lls_nr; ++i) {
                LASSERT(!lovlck->lls_sub[i].sub_is_enqueued);
                if (lovlck->lls_sub[i].sub_initialized)
                        cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
        }
        OBD_FREE_LARGE(lovlck,
                       offsetof(struct lov_lock, lls_sub[lovlck->lls_nr]));
        EXIT;
}

/**
 * Implementation of cl_lock_operations::clo_enqueue() for lov layer. This
 * function is rather subtle, as it enqueues top-lock (i.e., advances top-lock
 * state machine from CLS_QUEUING to CLS_ENQUEUED states) by juggling sub-lock
 * state machines in the face of sub-locks sharing (by multiple top-locks),
 * and concurrent sub-lock cancellations.
 */
static int lov_lock_enqueue(const struct lu_env *env,
                            const struct cl_lock_slice *slice,
                            struct cl_io *io, struct cl_sync_io *anchor)
{
        struct cl_lock *lock = slice->cls_lock;
        struct lov_lock *lovlck = cl2lov_lock(slice);
        int rc = 0;
        int i;

        ENTRY;
        for (i = 0; i < lovlck->lls_nr; ++i) {
                struct lov_lock_sub *lls = &lovlck->lls_sub[i];
                struct lov_sublock_env *subenv;

                subenv = lov_sublock_env_get(env, lock, lls);
                if (IS_ERR(subenv)) {
                        rc = PTR_ERR(subenv);
                        break;
                }
                rc = cl_lock_enqueue(subenv->lse_env, subenv->lse_io,
                                     &lls->sub_lock, anchor);
                if (rc != 0)
                        break;

                lls->sub_is_enqueued = 1;
        }
        RETURN(rc);
}
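
/*
 * Implementation of cl_lock_operations::clo_cancel(): cancels every
 * sub-lock that lov_lock_enqueue() marked as enqueued.
 */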
static void lov_lock_cancel(const struct lu_env *env,
                            const struct cl_lock_slice *slice)
{
        struct cl_lock *lock = slice->cls_lock;
        struct lov_lock *lovlck = cl2lov_lock(slice);
        int i;

        ENTRY;
        for (i = 0; i < lovlck->lls_nr; ++i) {
                struct lov_lock_sub *lls = &lovlck->lls_sub[i];
                struct cl_lock *sublock = &lls->sub_lock;
                struct lov_sublock_env *subenv;

                if (!lls->sub_is_enqueued)
                        continue;

                lls->sub_is_enqueued = 0;
                subenv = lov_sublock_env_get(env, lock, lls);
                if (!IS_ERR(subenv)) {
                        cl_lock_cancel(subenv->lse_env, sublock);
                } else {
                        CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
                                      "lov_lock_cancel fails with %ld.\n",
                                      PTR_ERR(subenv));
                }
        }
        EXIT;
}
static int lov_lock_print(const struct lu_env *env, void *cookie,
                          lu_printer_t p, const struct cl_lock_slice *slice)
{
        struct lov_lock *lck = cl2lov_lock(slice);
        int i;

        (*p)(env, cookie, "%d\n", lck->lls_nr);
        for (i = 0; i < lck->lls_nr; ++i) {
                struct lov_lock_sub *sub;

                sub = &lck->lls_sub[i];
                (*p)(env, cookie, "    %d %x: ", i, sub->sub_is_enqueued);
                cl_lock_print(env, cookie, p, &sub->sub_lock);
        }
        return 0;
}

static const struct cl_lock_operations lov_lock_ops = {
        .clo_fini       = lov_lock_fini,
        .clo_enqueue    = lov_lock_enqueue,
        .clo_cancel     = lov_lock_cancel,
        .clo_print      = lov_lock_print
};
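
/*
 * Lock initializer used for striped (composite) objects: builds the
 * sub-lock array and adds the lov slice to the top-lock.
 */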
int lov_lock_init_composite(const struct lu_env *env, struct cl_object *obj,
                            struct cl_lock *lock, const struct cl_io *io)
{
        struct lov_lock *lck;
        int result = 0;

        lck = lov_lock_sub_init(env, obj, lock);
        if (!IS_ERR(lck))
                cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
        else
                result = PTR_ERR(lck);
        RETURN(result);
}
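
/* Finalizer for "empty" locks, i.e. locks on objects without stripes. */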
static void lov_empty_lock_fini(const struct lu_env *env,
                                struct cl_lock_slice *slice)
{
        struct lov_lock *lck = cl2lov_lock(slice);

        OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
}
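
/* clo_print() for empty locks; there are no sub-locks to show. */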
static int lov_empty_lock_print(const struct lu_env *env, void *cookie,
                                lu_printer_t p, const struct cl_lock_slice *slice)
{
        (*p)(env, cookie, "empty\n");
        return 0;
}

/* XXX: more methods will be added later. */
static const struct cl_lock_operations lov_empty_lock_ops = {
        .clo_fini       = lov_empty_lock_fini,
        .clo_print      = lov_empty_lock_print
};
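
/*
 * Lock initializer used for objects without stripes: attaches an "empty"
 * lov slice that carries no sub-locks.
 */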
int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
                        struct cl_lock *lock, const struct cl_io *io)
{
        struct lov_lock *lck;
        int result = -ENOMEM;

        OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, GFP_NOFS);
        if (lck) {
                cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);
                result = 0;
        }
        RETURN(result);
}