 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
const struct cl_lock *parent,
- struct lov_lock_sub *lls)
+ struct lov_lock_sub *lls)
{
- struct lov_sublock_env *subenv;
- struct lov_io *lio = lov_env_io(env);
- struct cl_io *io = lio->lis_cl.cis_io;
- struct lov_io_sub *sub;
-
- subenv = &lov_env_session(env)->ls_subenv;
-
- /*
- * FIXME: We tend to use the subio's env & io to call the sublock
- * lock operations because osc lock sometimes stores some control
- * variables in thread's IO infomation(Now only lockless information).
- * However, if the lock's host(object) is different from the object
- * for current IO, we have no way to get the subenv and subio because
- * they are not initialized at all. As a temp fix, in this case,
- * we still borrow the parent's env to call sublock operations.
- */
- if (!io || !cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
- subenv->lse_env = env;
- subenv->lse_io = io;
- subenv->lse_sub = NULL;
- } else {
- sub = lov_sub_get(env, lio, lls->sub_stripe);
- if (!IS_ERR(sub)) {
- subenv->lse_env = sub->sub_env;
- subenv->lse_io = sub->sub_io;
- subenv->lse_sub = sub;
- } else {
- subenv = (void*)sub;
- }
- }
- return subenv;
-}
-
-static void lov_sublock_env_put(struct lov_sublock_env *subenv)
-{
- if (subenv && subenv->lse_sub)
- lov_sub_put(subenv->lse_sub);
+ struct lov_sublock_env *subenv;
+ struct lov_io *lio = lov_env_io(env);
+ struct cl_io *io = lio->lis_cl.cis_io;
+ struct lov_io_sub *sub;
+
+ subenv = &lov_env_session(env)->ls_subenv;
+
+ /*
+ * FIXME: We tend to use the subio's env & io to call the sublock
+ * lock operations because osc lock sometimes stores some control
+ * variables in thread's IO infomation(Now only lockless information).
+ * However, if the lock's host(object) is different from the object
+ * for current IO, we have no way to get the subenv and subio because
+ * they are not initialized at all. As a temp fix, in this case,
+ * we still borrow the parent's env to call sublock operations.
+ */
+ if (!io || !cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
+ subenv->lse_env = env;
+ subenv->lse_io = io;
+ } else {
+ sub = lov_sub_get(env, lio, lls->sub_index);
+ if (!IS_ERR(sub)) {
+ subenv->lse_env = sub->sub_env;
+ subenv->lse_io = &sub->sub_io;
+ } else {
+ subenv = (void *)sub;
+ }
+ }
+ return subenv;
}
static int lov_sublock_init(const struct lu_env *env,
{
struct lov_sublock_env *subenv;
int result;
+
ENTRY;
subenv = lov_sublock_env_get(env, parent, lls);
if (!IS_ERR(subenv)) {
result = cl_lock_init(subenv->lse_env, &lls->sub_lock,
subenv->lse_io);
- lov_sublock_env_put(subenv);
} else {
/* error occurs. */
result = PTR_ERR(subenv);
* through already created sub-locks (possibly shared with other top-locks).
*/
static struct lov_lock *lov_lock_sub_init(const struct lu_env *env,
+ const struct cl_io *io,
const struct cl_object *obj,
struct cl_lock *lock)
{
+ struct lov_object *lov = cl2lov(obj);
+ struct lov_lock *lovlck;
+ struct lu_extent ext;
+ loff_t start;
+ loff_t end;
int result = 0;
int i;
+ int index;
int nr;
- loff_t start;
- loff_t end;
- loff_t file_start;
- loff_t file_end;
-
- struct lov_object *loo = cl2lov(obj);
- struct lov_layout_raid0 *r0 = lov_r0(loo);
- struct lov_lock *lovlck;
ENTRY;
- file_start = cl_offset(lov2cl(loo), lock->cll_descr.cld_start);
- file_end = cl_offset(lov2cl(loo), lock->cll_descr.cld_end + 1) - 1;
-
- for (i = 0, nr = 0; i < r0->lo_nr; i++) {
- /*
- * XXX for wide striping smarter algorithm is desirable,
- * breaking out of the loop, early.
- */
- if (likely(r0->lo_sub[i] != NULL) && /* spare layout */
- lov_stripe_intersects(loo->lo_lsm, i,
- file_start, file_end, &start, &end))
- nr++;
+ ext.e_start = cl_offset(obj, lock->cll_descr.cld_start);
+ if (lock->cll_descr.cld_end == CL_PAGE_EOF)
+ ext.e_end = OBD_OBJECT_EOF;
+ else
+ ext.e_end = cl_offset(obj, lock->cll_descr.cld_end + 1);
+
+ nr = 0;
+ lov_foreach_io_layout(index, lov_env_io(env), &ext) {
+ struct lov_layout_raid0 *r0 = lov_r0(lov, index);
+
+ for (i = 0; i < r0->lo_nr; i++) {
+ if (likely(r0->lo_sub[i])) {/* spare layout */
+ if (lov_stripe_intersects(lov->lo_lsm, index, i,
+ &ext, &start, &end))
+ nr++;
+ else if (cl_io_is_trunc(io) &&
+ r0->lo_trunc_stripeno == i)
+ nr++;
+ }
+ }
}
- LASSERT(nr > 0);
+ /**
+ * Aggressive lock request (from cl_setattr_ost) which asks for
+ * [eof, -1) lock, could come across uninstantiated layout extent,
+ * hence a 0 nr is possible.
+ */
OBD_ALLOC_LARGE(lovlck, offsetof(struct lov_lock, lls_sub[nr]));
- if (lovlck == NULL)
+ if (!lovlck)
RETURN(ERR_PTR(-ENOMEM));
lovlck->lls_nr = nr;
- for (i = 0, nr = 0; i < r0->lo_nr; ++i) {
- if (likely(r0->lo_sub[i] != NULL) &&
- lov_stripe_intersects(loo->lo_lsm, i,
- file_start, file_end, &start, &end)) {
+ nr = 0;
+ lov_foreach_io_layout(index, lov_env_io(env), &ext) {
+ struct lov_layout_raid0 *r0 = lov_r0(lov, index);
+
+ for (i = 0; i < r0->lo_nr; ++i) {
struct lov_lock_sub *lls = &lovlck->lls_sub[nr];
- struct cl_lock_descr *descr;
+ struct cl_lock_descr *descr = &lls->sub_lock.cll_descr;
+ bool intersect = false;
+
+ if (unlikely(!r0->lo_sub[i]))
+ continue;
+
+ intersect = lov_stripe_intersects(lov->lo_lsm, index, i,
+ &ext, &start, &end);
+ if (intersect)
+ goto init_sublock;
- descr = &lls->sub_lock.cll_descr;
+ if (cl_io_is_trunc(io) && i == r0->lo_trunc_stripeno)
+ goto init_sublock;
+ continue;
+
+init_sublock:
LASSERT(descr->cld_obj == NULL);
descr->cld_obj = lovsub2cl(r0->lo_sub[i]);
descr->cld_start = cl_index(descr->cld_obj, start);
descr->cld_gid = lock->cll_descr.cld_gid;
descr->cld_enq_flags = lock->cll_descr.cld_enq_flags;
- lls->sub_stripe = i;
+ lls->sub_index = lov_comp_index(index, i);
/* initialize sub lock */
result = lov_sublock_init(env, lock, lls);
const struct cl_lock_slice *slice,
struct cl_io *io, struct cl_sync_io *anchor)
{
- struct cl_lock *lock = slice->cls_lock;
- struct lov_lock *lovlck = cl2lov_lock(slice);
- int i;
- int rc = 0;
+ struct cl_lock *lock = slice->cls_lock;
+ struct lov_lock *lovlck = cl2lov_lock(slice);
+ int i;
+ int rc = 0;
ENTRY;
rc = cl_lock_enqueue(subenv->lse_env, subenv->lse_io,
&lls->sub_lock, anchor);
- lov_sublock_env_put(subenv);
if (rc != 0)
break;
static void lov_lock_cancel(const struct lu_env *env,
const struct cl_lock_slice *slice)
{
- struct cl_lock *lock = slice->cls_lock;
+ struct cl_lock *lock = slice->cls_lock;
struct lov_lock *lovlck = cl2lov_lock(slice);
int i;
ENTRY;
for (i = 0; i < lovlck->lls_nr; ++i) {
- struct lov_lock_sub *lls = &lovlck->lls_sub[i];
- struct cl_lock *sublock = &lls->sub_lock;
- struct lov_sublock_env *subenv;
+ struct lov_lock_sub *lls = &lovlck->lls_sub[i];
+ struct cl_lock *sublock = &lls->sub_lock;
+ struct lov_sublock_env *subenv;
if (!lls->sub_is_enqueued)
continue;
subenv = lov_sublock_env_get(env, lock, lls);
if (!IS_ERR(subenv)) {
cl_lock_cancel(subenv->lse_env, sublock);
- lov_sublock_env_put(subenv);
} else {
CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
"lov_lock_cancel fails with %ld.\n",
}
static int lov_lock_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct cl_lock_slice *slice)
+ lu_printer_t p, const struct cl_lock_slice *slice)
{
- struct lov_lock *lck = cl2lov_lock(slice);
- int i;
+ struct lov_lock *lck = cl2lov_lock(slice);
+ int i;
- (*p)(env, cookie, "%d\n", lck->lls_nr);
- for (i = 0; i < lck->lls_nr; ++i) {
- struct lov_lock_sub *sub;
+ (*p)(env, cookie, "%d\n", lck->lls_nr);
+ for (i = 0; i < lck->lls_nr; ++i) {
+ struct lov_lock_sub *sub;
- sub = &lck->lls_sub[i];
+ sub = &lck->lls_sub[i];
(*p)(env, cookie, " %d %x: ", i, sub->sub_is_enqueued);
cl_lock_print(env, cookie, p, &sub->sub_lock);
- }
- return 0;
+ }
+ return 0;
}
static const struct cl_lock_operations lov_lock_ops = {
- .clo_fini = lov_lock_fini,
- .clo_enqueue = lov_lock_enqueue,
- .clo_cancel = lov_lock_cancel,
- .clo_print = lov_lock_print
+ .clo_fini = lov_lock_fini,
+ .clo_enqueue = lov_lock_enqueue,
+ .clo_cancel = lov_lock_cancel,
+ .clo_print = lov_lock_print
};
-int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io)
+int lov_lock_init_composite(const struct lu_env *env, struct cl_object *obj,
+ struct cl_lock *lock, const struct cl_io *io)
{
struct lov_lock *lck;
int result = 0;
ENTRY;
- lck = lov_lock_sub_init(env, obj, lock);
+ lck = lov_lock_sub_init(env, io, obj, lock);
if (!IS_ERR(lck))
cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
else
struct cl_lock_slice *slice)
{
struct lov_lock *lck = cl2lov_lock(slice);
+
OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
}
ENTRY;
OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, GFP_NOFS);
- if (lck != NULL) {
+ if (lck) {
cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);
result = 0;
}