4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
31 * Implementation of cl_lock for LOV layer.
33 * Author: Nikita Danilov <nikita.danilov@sun.com>
36 #define DEBUG_SUBSYSTEM S_LOV
38 #include "lov_cl_internal.h"
45 * Lov lock operations.
/*
 * Pick the environment and IO context to use for operations on the
 * sub-lock described by @lls under top-lock @parent.  Returns the
 * per-session scratch lov_sublock_env; callers check IS_ERR() on the
 * result, so the (not visible here) tail presumably propagates a failed
 * lov_sub_get() as an ERR_PTR — TODO confirm against the full file.
 */
47 static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
48 const struct cl_lock *parent,
49 struct lov_lock_sub *lls)
51 struct lov_sublock_env *subenv;
52 struct lov_io *lio = lov_env_io(env);
53 struct cl_io *io = lio->lis_cl.cis_io;
54 struct lov_io_sub *sub;
/* Scratch sub-env lives in the per-thread lov session. */
56 subenv = &lov_env_session(env)->ls_subenv;
59 * FIXME: We tend to use the subio's env & io to call the sublock
60 * lock operations because osc lock sometimes stores some control
61 * variables in thread's IO information (now only lockless information).
62 * However, if the lock's host(object) is different from the object
63 * for current IO, we have no way to get the subenv and subio because
64 * they are not initialized at all. As a temp fix, in this case,
65 * we still borrow the parent's env to call sublock operations.
67 if (!io || !cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
/* No matching IO in flight for this object: borrow the parent's env. */
68 subenv->lse_env = env;
/* Otherwise use the env/io of the sub-IO for this stripe index. */
71 sub = lov_sub_get(env, lio, lls->sub_index);
73 subenv->lse_env = sub->sub_env;
74 subenv->lse_io = &sub->sub_io;
/*
 * Initialize the cl_lock embedded in @lls as a child of top-lock
 * @parent, using the (sub-)environment chosen by lov_sublock_env_get().
 * Returns 0 on success, or the PTR_ERR() of a failed sub-env lookup.
 */
82 static int lov_sublock_init(const struct lu_env *env,
83 const struct cl_lock *parent,
84 struct lov_lock_sub *lls)
86 struct lov_sublock_env *subenv;
91 subenv = lov_sublock_env_get(env, parent, lls)
92 if (!IS_ERR(subenv)) {
93 result = cl_lock_init(subenv->lse_env, &lls->sub_lock,
/* Sub-env lookup failed: propagate the error code. */
97 result = PTR_ERR(subenv);
103 * Creates sub-locks for a given lov_lock for the first time.
105 * Goes through all sub-objects of top-object, and creates sub-locks on every
106 * sub-object intersecting with top-lock extent. This is complicated by the
107 * fact that top-lock (that is being created) can be accessed concurrently
108 * through already created sub-locks (possibly shared with other top-locks).
110 static struct lov_lock *lov_lock_sub_init(const struct lu_env *env,
111 const struct cl_io *io,
112 const struct cl_object *obj,
113 struct cl_lock *lock)
115 struct lov_object *lov = cl2lov(obj);
116 struct lov_io *lio = lov_env_io(env);
117 bool is_trunc = cl_io_is_trunc(io);
118 struct lov_lock *lovlck;
119 struct lu_extent ext;
/* Truncate IOs must carry a per-component target stripe index. */
129 LASSERT(ergo(is_trunc, lio->lis_trunc_stripe_index != NULL));
/* Convert the page-granular lock descriptor into a byte extent. */
131 ext.e_start = lock->cll_descr.cld_start << PAGE_SHIFT;
132 if (lock->cll_descr.cld_end == CL_PAGE_EOF)
133 ext.e_end = OBD_OBJECT_EOF;
135 ext.e_end = (lock->cll_descr.cld_end + 1) << PAGE_SHIFT;
/*
 * First pass: count the stripes that either intersect the lock extent
 * or are the truncate target stripe (count presumably accumulated in
 * 'nr', declared on lines not visible in this chunk — verify).
 */
138 lov_foreach_io_layout(index, lio, &ext) {
139 struct lov_layout_raid0 *r0 = lov_r0(lov, index);
141 for (i = 0; i < r0->lo_nr; i++) {
142 if (likely(r0->lo_sub[i])) {/* spare layout */
143 if (lov_stripe_intersects(lov->lo_lsm, index, i, &ext, &start, &end) ||
144 (is_trunc && i == lio->lis_trunc_stripe_index[index]))
150 * Aggressive lock request (from cl_setattr_ost) which asks for
151 * [eof, -1) lock, could come across uninstantiated layout extent,
152 * hence a 0 nr is possible.
/* Allocate the variable-sized lov_lock with 'nr' sub-lock slots. */
155 OBD_ALLOC_LARGE(lovlck, offsetof(struct lov_lock, lls_sub[nr]));
157 RETURN(ERR_PTR(-ENOMEM));
/*
 * Second pass: fill in a cl_lock_descr for every matching stripe and
 * initialize the corresponding sub-lock via lov_sublock_init().
 */
161 lov_foreach_io_layout(index, lov_env_io(env), &ext) {
162 struct lov_layout_raid0 *r0 = lov_r0(lov, index);
164 for (i = 0; i < r0->lo_nr; ++i) {
165 struct lov_lock_sub *lls;
166 struct cl_lock_descr *descr;
168 if (unlikely(!r0->lo_sub[i]))
171 if (lov_stripe_intersects(lov->lo_lsm, index, i, &ext, &start, &end) ||
172 (is_trunc && i == lio->lis_trunc_stripe_index[index]))
177 LASSERT(nr < lovlck->lls_nr);
178 lls = &lovlck->lls_sub[nr];
179 descr = &lls->sub_lock.cll_descr;
180 LASSERT(descr->cld_obj == NULL);
/* Sub-lock descriptor: stripe object plus stripe-local page range. */
181 descr->cld_obj = lovsub2cl(r0->lo_sub[i]);
182 descr->cld_start = start >> PAGE_SHIFT;
183 descr->cld_end = end >> PAGE_SHIFT;
/* Mode, group id and enqueue flags are inherited from the top-lock. */
184 descr->cld_mode = lock->cll_descr.cld_mode;
185 descr->cld_gid = lock->cll_descr.cld_gid;
186 descr->cld_enq_flags = lock->cll_descr.cld_enq_flags;
188 lls->sub_index = lov_comp_index(index, i);
190 /* initialize sub lock */
191 result = lov_sublock_init(env, lock, lls);
195 lls->sub_initialized = 1;
/* On success, every counted slot must have been filled. */
201 LASSERT(ergo(result == 0, nr == lovlck->lls_nr));
/* Error path: finalize the sub-locks already initialized, free all. */
204 for (i = 0; i < nr; ++i) {
205 if (!lovlck->lls_sub[i].sub_initialized)
208 cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
211 OBD_FREE_LARGE(lovlck,
212 offsetof(struct lov_lock, lls_sub[nr]));
213 lovlck = ERR_PTR(result);
/*
 * Implementation of cl_lock_operations::clo_fini() for the lov layer:
 * finalize every initialized sub-lock and free the variable-sized
 * lov_lock allocation made in lov_lock_sub_init().
 */
219 static void lov_lock_fini(const struct lu_env *env,
220 struct cl_lock_slice *slice)
222 struct lov_lock *lovlck;
226 lovlck = cl2lov_lock(slice);
227 for (i = 0; i < lovlck->lls_nr; ++i) {
/* Every sub-lock must already have been cancelled (dequeued). */
228 LASSERT(!lovlck->lls_sub[i].sub_is_enqueued);
229 if (lovlck->lls_sub[i].sub_initialized)
230 cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
232 OBD_FREE_LARGE(lovlck,
233 offsetof(struct lov_lock, lls_sub[lovlck->lls_nr]));
238 * Implementation of cl_lock_operations::clo_enqueue() for lov layer. This
239 * function is rather subtle, as it enqueues top-lock (i.e., advances top-lock
240 * state machine from CLS_QUEUING to CLS_ENQUEUED states) by juggling sub-lock
241 * state machines in the face of sub-locks sharing (by multiple top-locks),
242 * and concurrent sub-lock cancellations.
244 static int lov_lock_enqueue(const struct lu_env *env,
245 const struct cl_lock_slice *slice,
246 struct cl_io *io, struct cl_sync_io *anchor)
248 struct cl_lock *lock = slice->cls_lock;
249 struct lov_lock *lovlck = cl2lov_lock(slice);
/* Enqueue each sub-lock under its own (sub-)environment. */
255 for (i = 0; i < lovlck->lls_nr; ++i) {
256 struct lov_lock_sub *lls = &lovlck->lls_sub[i];
257 struct lov_sublock_env *subenv;
259 subenv = lov_sublock_env_get(env, lock, lls);
260 if (IS_ERR(subenv)) {
261 rc = PTR_ERR(subenv);
265 rc = cl_lock_enqueue(subenv->lse_env, subenv->lse_io,
266 &lls->sub_lock, anchor);
/* Record success so lov_lock_cancel() knows to cancel this one. */
270 lls->sub_is_enqueued = 1;
/*
 * Implementation of cl_lock_operations::clo_cancel() for the lov layer:
 * cancel every sub-lock that lov_lock_enqueue() marked as enqueued.
 */
275 static void lov_lock_cancel(const struct lu_env *env,
276 const struct cl_lock_slice *slice)
278 struct cl_lock *lock = slice->cls_lock;
279 struct lov_lock *lovlck = cl2lov_lock(slice);
284 for (i = 0; i < lovlck->lls_nr; ++i) {
285 struct lov_lock_sub *lls = &lovlck->lls_sub[i];
286 struct cl_lock *sublock = &lls->sub_lock;
287 struct lov_sublock_env *subenv;
/* Skip sub-locks that never made it through enqueue. */
289 if (!lls->sub_is_enqueued)
292 lls->sub_is_enqueued = 0;
293 subenv = lov_sublock_env_get(env, lock, lls);
294 if (!IS_ERR(subenv)) {
295 cl_lock_cancel(subenv->lse_env, sublock);
/* Sub-env lookup failed: log the error and keep cancelling the rest. */
297 CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
298 "lov_lock_cancel fails with %ld.\n",
/*
 * Implementation of cl_lock_operations::clo_print(): emit the number of
 * sub-locks, then each sub-lock's index, enqueued flag, and state.
 */
304 static int lov_lock_print(const struct lu_env *env, void *cookie,
305 lu_printer_t p, const struct cl_lock_slice *slice)
307 struct lov_lock *lck = cl2lov_lock(slice);
310 (*p)(env, cookie, "%d\n", lck->lls_nr);
311 for (i = 0; i < lck->lls_nr; ++i) {
312 struct lov_lock_sub *sub;
314 sub = &lck->lls_sub[i];
315 (*p)(env, cookie, " %d %x: ", i, sub->sub_is_enqueued);
316 cl_lock_print(env, cookie, p, &sub->sub_lock);
/* cl_lock operations for a striped (composite) lov lock. */
321 static const struct cl_lock_operations lov_lock_ops = {
322 .clo_fini = lov_lock_fini,
323 .clo_enqueue = lov_lock_enqueue,
324 .clo_cancel = lov_lock_cancel,
325 .clo_print = lov_lock_print
/*
 * Create the lov slice of @lock for a composite (striped) object and
 * attach it to the lock with lov_lock_ops.  Returns 0 on success or
 * the PTR_ERR() of a failed lov_lock_sub_init().
 */
328 int lov_lock_init_composite(const struct lu_env *env, struct cl_object *obj,
329 struct cl_lock *lock, const struct cl_io *io)
331 struct lov_lock *lck;
335 lck = lov_lock_sub_init(env, io, obj, lock);
337 cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
339 result = PTR_ERR(lck);
/*
 * clo_fini for the "empty" lock (object with no stripes): simply return
 * the lov_lock to its slab cache.
 */
343 static void lov_empty_lock_fini(const struct lu_env *env,
344 struct cl_lock_slice *slice)
346 struct lov_lock *lck = cl2lov_lock(slice);
348 OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
/* clo_print for the "empty" lock: nothing to show beyond a marker. */
351 static int lov_empty_lock_print(const struct lu_env *env, void *cookie,
352 lu_printer_t p, const struct cl_lock_slice *slice)
354 (*p)(env, cookie, "empty\n");
/* cl_lock operations for the stripeless ("empty") lov lock. */
358 /* XXX: more methods will be added later. */
359 static const struct cl_lock_operations lov_empty_lock_ops = {
360 .clo_fini = lov_empty_lock_fini,
361 .clo_print = lov_empty_lock_print
364 int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
365 struct cl_lock *lock, const struct cl_io *io)
367 struct lov_lock *lck;
368 int result = -ENOMEM;
371 OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, GFP_NOFS);
373 cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);