/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_lock for LOV layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */
#define DEBUG_SUBSYSTEM S_LOV

#include "lov_cl_internal.h"
/*****************************************************************************
 *
 * Lov lock operations.
 *
 */
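/*
 * Returns the environment and IO context that sub-lock operations should
 * run under. When the current IO targets the same object as the parent
 * lock, the sub-IO's private env is used; otherwise the parent's env is
 * borrowed (see the FIXME below).
 */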
static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
						   const struct cl_lock *parent,
						   struct lov_lock_sub *lls)
{
	struct lov_sublock_env *subenv;
	struct lov_io *lio = lov_env_io(env);
	struct cl_io *io = lio->lis_cl.cis_io;
	struct lov_io_sub *sub;

	subenv = &lov_env_session(env)->ls_subenv;

	/*
	 * FIXME: We tend to use the subio's env & io to call the sublock
	 * operations because an osc lock sometimes stores control variables
	 * in the thread's IO information (currently only the lockless
	 * information). However, if the lock's host (object) differs from
	 * the object of the current IO, we have no way to get the subenv
	 * and subio because they are not initialized at all. As a temporary
	 * fix, in this case we still borrow the parent's env to call the
	 * sublock operations.
	 */
	if (!io || !cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
		subenv->lse_env = env;
		subenv->lse_io = io;
	} else {
		sub = lov_sub_get(env, lio, lls->sub_index);
		if (!IS_ERR(sub)) {
			subenv->lse_env = sub->sub_env;
			subenv->lse_io = &sub->sub_io;
		} else {
			subenv = (void *)sub;
		}
	}
	return subenv;
}
static int lov_sublock_init(const struct lu_env *env,
			    const struct cl_lock *parent,
			    struct lov_lock_sub *lls)
{
	struct lov_sublock_env *subenv;
	int result;

	subenv = lov_sublock_env_get(env, parent, lls);
	if (!IS_ERR(subenv))
		result = cl_lock_init(subenv->lse_env, &lls->sub_lock,
				      subenv->lse_io);
	else
		result = PTR_ERR(subenv);
	return result;
}
/**
 * Creates sub-locks for a given lov_lock for the first time.
 *
 * Goes through all sub-objects of the top-object, and creates sub-locks on
 * every sub-object intersecting with the top-lock extent. This is
 * complicated by the fact that the top-lock (that is being created) can be
 * accessed concurrently through already created sub-locks (possibly shared
 * with other top-locks).
 */
static struct lov_lock *lov_lock_sub_init(const struct lu_env *env,
					  const struct cl_io *io,
					  const struct cl_object *obj,
					  struct cl_lock *lock)
{
	struct lov_object *lov = cl2lov(obj);
	struct lov_lock *lovlck;
	struct lu_extent ext;
	loff_t start;
	loff_t end;
	int result = 0;
	int index;
	int nr = 0;
	int i;

	ENTRY;

	ext.e_start = cl_offset(obj, lock->cll_descr.cld_start);
	if (lock->cll_descr.cld_end == CL_PAGE_EOF)
		ext.e_end = OBD_OBJECT_EOF;
	else
		ext.e_end = cl_offset(obj, lock->cll_descr.cld_end + 1);
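	/*
	 * First pass: count the stripes that intersect the lock extent (or
	 * that a truncate targets), to size the sub-lock array.
	 */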
	lov_foreach_io_layout(index, lov_env_io(env), &ext) {
		struct lov_layout_raid0 *r0 = lov_r0(lov, index);

		for (i = 0; i < r0->lo_nr; i++) {
			if (likely(r0->lo_sub[i])) { /* sparse layout */
				if (lov_stripe_intersects(lov->lo_lsm, index, i,
							  &ext, &start, &end))
					nr++;
				else if (cl_io_is_trunc(io) &&
					 r0->lo_trunc_stripeno == i)
					nr++;
			}
		}
	}
	/*
	 * An aggressive lock request (from cl_setattr_ost()) asking for an
	 * [eof, -1) lock may come across an uninstantiated layout extent,
	 * hence nr == 0 is possible.
	 */
	OBD_ALLOC_LARGE(lovlck, offsetof(struct lov_lock, lls_sub[nr]));
	if (!lovlck)
		RETURN(ERR_PTR(-ENOMEM));

	lovlck->lls_nr = nr;
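	/*
	 * Second pass: walk the same stripes again and fill in a sub-lock
	 * descriptor for each stripe counted above.
	 */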
	nr = 0;
	lov_foreach_io_layout(index, lov_env_io(env), &ext) {
		struct lov_layout_raid0 *r0 = lov_r0(lov, index);

		for (i = 0; i < r0->lo_nr; ++i) {
			struct lov_lock_sub *lls = &lovlck->lls_sub[nr];
			struct cl_lock_descr *descr = &lls->sub_lock.cll_descr;
			bool intersect = false;

			if (unlikely(!r0->lo_sub[i]))
				continue;

			intersect = lov_stripe_intersects(lov->lo_lsm, index, i,
							  &ext, &start, &end);
			if (!intersect &&
			    !(cl_io_is_trunc(io) && i == r0->lo_trunc_stripeno))
				continue;

			LASSERT(descr->cld_obj == NULL);
			descr->cld_obj = lovsub2cl(r0->lo_sub[i]);
			descr->cld_start = cl_index(descr->cld_obj, start);
			descr->cld_end = cl_index(descr->cld_obj, end);
			descr->cld_mode = lock->cll_descr.cld_mode;
			descr->cld_gid = lock->cll_descr.cld_gid;
			descr->cld_enq_flags = lock->cll_descr.cld_enq_flags;

			lls->sub_index = lov_comp_index(index, i);

			/* initialize sub lock */
			result = lov_sublock_init(env, lock, lls);
			if (result < 0)
				break;

			lls->sub_initialized = 1;
			nr++;
		}
		if (result < 0)
			break;
	}
	LASSERT(ergo(result == 0, nr == lovlck->lls_nr));

	if (result != 0) {
		for (i = 0; i < nr; ++i) {
			if (!lovlck->lls_sub[i].sub_initialized)
				break;
			cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
		}
		OBD_FREE_LARGE(lovlck,
			       offsetof(struct lov_lock, lls_sub[nr]));
		lovlck = ERR_PTR(result);
	}
	RETURN(lovlck);
}
static void lov_lock_fini(const struct lu_env *env,
			  struct cl_lock_slice *slice)
{
	struct lov_lock *lovlck;
	int i;

	lovlck = cl2lov_lock(slice);
	for (i = 0; i < lovlck->lls_nr; ++i) {
		LASSERT(!lovlck->lls_sub[i].sub_is_enqueued);
		if (lovlck->lls_sub[i].sub_initialized)
			cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
	}
	OBD_FREE_LARGE(lovlck,
		       offsetof(struct lov_lock, lls_sub[lovlck->lls_nr]));
}
/**
 * Implementation of cl_lock_operations::clo_enqueue() for the lov layer.
 * This function is rather subtle, as it enqueues the top-lock (i.e.,
 * advances the top-lock state machine from CLS_QUEUING to CLS_ENQUEUED) by
 * juggling sub-lock state machines in the face of sub-lock sharing (by
 * multiple top-locks) and concurrent sub-lock cancellations.
 */
static int lov_lock_enqueue(const struct lu_env *env,
			    const struct cl_lock_slice *slice,
			    struct cl_io *io, struct cl_sync_io *anchor)
{
	struct cl_lock *lock = slice->cls_lock;
	struct lov_lock *lovlck = cl2lov_lock(slice);
	int i;
	int rc = 0;

	for (i = 0; i < lovlck->lls_nr; ++i) {
		struct lov_lock_sub *lls = &lovlck->lls_sub[i];
		struct lov_sublock_env *subenv;

		subenv = lov_sublock_env_get(env, lock, lls);
		if (IS_ERR(subenv)) {
			rc = PTR_ERR(subenv);
			break;
		}

		rc = cl_lock_enqueue(subenv->lse_env, subenv->lse_io,
				     &lls->sub_lock, anchor);
		if (rc != 0)
			break;

		lls->sub_is_enqueued = 1;
	}
	return rc;
}
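/**
 * Implementation of cl_lock_operations::clo_cancel() for the lov layer:
 * cancels every sub-lock that is still enqueued and clears its
 * sub_is_enqueued flag, so that lov_lock_fini() can later assert that no
 * sub-lock is left enqueued.
 */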
static void lov_lock_cancel(const struct lu_env *env,
			    const struct cl_lock_slice *slice)
{
	struct cl_lock *lock = slice->cls_lock;
	struct lov_lock *lovlck = cl2lov_lock(slice);
	int i;

	for (i = 0; i < lovlck->lls_nr; ++i) {
		struct lov_lock_sub *lls = &lovlck->lls_sub[i];
		struct cl_lock *sublock = &lls->sub_lock;
		struct lov_sublock_env *subenv;

		if (!lls->sub_is_enqueued)
			continue;

		lls->sub_is_enqueued = 0;
		subenv = lov_sublock_env_get(env, lock, lls);
		if (!IS_ERR(subenv)) {
			cl_lock_cancel(subenv->lse_env, sublock);
		} else {
			CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
				      "lov_lock_cancel fails with %ld.\n",
				      PTR_ERR(subenv));
		}
	}
}
static int lov_lock_print(const struct lu_env *env, void *cookie,
			  lu_printer_t p, const struct cl_lock_slice *slice)
{
	struct lov_lock *lck = cl2lov_lock(slice);
	int i;

	(*p)(env, cookie, "%d\n", lck->lls_nr);
	for (i = 0; i < lck->lls_nr; ++i) {
		struct lov_lock_sub *sub;

		sub = &lck->lls_sub[i];
		(*p)(env, cookie, "    %d %x: ", i, sub->sub_is_enqueued);
		cl_lock_print(env, cookie, p, &sub->sub_lock);
	}
	return 0;
}
static const struct cl_lock_operations lov_lock_ops = {
	.clo_fini    = lov_lock_fini,
	.clo_enqueue = lov_lock_enqueue,
	.clo_cancel  = lov_lock_cancel,
	.clo_print   = lov_lock_print
};
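/*
 * Creates a lov_lock, with all of its sub-locks, for a composite (striped)
 * object and adds it as a slice of the top-level cl_lock.
 */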
int lov_lock_init_composite(const struct lu_env *env, struct cl_object *obj,
			    struct cl_lock *lock, const struct cl_io *io)
{
	struct lov_lock *lck;
	int result = 0;

	lck = lov_lock_sub_init(env, io, obj, lock);
	if (!IS_ERR(lck))
		cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
	else
		result = PTR_ERR(lck);
	return result;
}
static void lov_empty_lock_fini(const struct lu_env *env,
				struct cl_lock_slice *slice)
{
	struct lov_lock *lck = cl2lov_lock(slice);

	OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
}
static int lov_empty_lock_print(const struct lu_env *env, void *cookie,
				lu_printer_t p, const struct cl_lock_slice *slice)
{
	(*p)(env, cookie, "empty\n");
	return 0;
}
/* XXX: more methods will be added later. */
static const struct cl_lock_operations lov_empty_lock_ops = {
	.clo_fini  = lov_empty_lock_fini,
	.clo_print = lov_empty_lock_print
};
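/*
 * Lock initializer used for objects that have no sub-objects to lock:
 * attaches an "empty" slice that only supports fini and print.
 */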
int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
			struct cl_lock *lock, const struct cl_io *io)
{
	struct lov_lock *lck;
	int result = -ENOMEM;

	OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, GFP_NOFS);
	if (lck) {
		cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);
		result = 0;
	}
	return result;
}