* (struct cl_lock) and a list of layers (struct cl_lock_slice), linked to
* cl_lock::cll_layers list through cl_lock_slice::cls_linkage.
*
- * Typical cl_lock consists of the two layers:
+ * Typical cl_lock consists of a single layer:
*
- * - vvp_lock (vvp specific data), and
* - lov_lock (lov specific data).
*
* lov_lock contains an array of sub-locks. Each of these sub-locks is a
* normal cl_lock: it has a header (struct cl_lock) and a list of layers:
*
- * - lovsub_lock, and
* - osc_lock
*
* Each sub-lock is associated with a cl_object (representing stripe
/**
* Per-layer part of cl_lock
*
- * \see vvp_lock, lov_lock, lovsub_lock, osc_lock
+ * \see lov_lock, osc_lock
*/
struct cl_lock_slice {
struct cl_lock *cls_lock;
/**
*
- * \see vvp_lock_ops, lov_lock_ops, lovsub_lock_ops, osc_lock_ops
+ * \see lov_lock_ops, osc_lock_ops
*/
struct cl_lock_operations {
/** @{ */
* @anchor for resources
* \retval -ve failure
*
- * \see vvp_lock_enqueue(), lov_lock_enqueue(), lovsub_lock_enqueue(),
- * \see osc_lock_enqueue()
+ * \see lov_lock_enqueue(), osc_lock_enqueue()
*/
int (*clo_enqueue)(const struct lu_env *env,
const struct cl_lock_slice *slice,
/**
* Destructor. Frees resources and the slice.
*
- * \see vvp_lock_fini(), lov_lock_fini(), lovsub_lock_fini(),
- * \see osc_lock_fini()
+ * \see lov_lock_fini(), osc_lock_fini()
*/
void (*clo_fini)(const struct lu_env *env, struct cl_lock_slice *slice);
/**
lustre-objs += glimpse.o
lustre-objs += lcommon_cl.o
lustre-objs += lcommon_misc.o
-lustre-objs += vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o
+lustre-objs += vvp_dev.o vvp_page.o vvp_io.o vvp_object.o
lustre-objs += range_lock.o
EXTRA_DIST := $(lustre-objs:.o=.c) llite_internal.h rw26.c super25.c
*/
static struct kmem_cache *ll_thread_kmem;
-struct kmem_cache *vvp_lock_kmem;
struct kmem_cache *vvp_object_kmem;
static struct kmem_cache *vvp_session_kmem;
static struct kmem_cache *vvp_thread_kmem;
.ckd_size = sizeof(struct ll_thread_info),
},
{
- .ckd_cache = &vvp_lock_kmem,
- .ckd_name = "vvp_lock_kmem",
- .ckd_size = sizeof(struct vvp_lock),
- },
- {
.ckd_cache = &vvp_object_kmem,
.ckd_name = "vvp_object_kmem",
.ckd_size = sizeof(struct vvp_object),
extern struct lu_context_key vvp_session_key;
extern struct lu_context_key vvp_thread_key;
-extern struct kmem_cache *vvp_lock_kmem;
extern struct kmem_cache *vvp_object_kmem;
struct vvp_thread_info {
struct cl_device *vdv_next;
};
-struct vvp_lock {
- struct cl_lock_slice vlk_cl;
-};
-
static inline struct lu_device *vvp2lu_dev(struct vvp_device *vdv)
{
return &vdv->vdv_cl.cd_lu_dev;
return cl2vvp_page(slice)->vpg_page;
}
-static inline struct vvp_lock *cl2vvp_lock(const struct cl_lock_slice *slice)
-{
- return container_of(slice, struct vvp_lock, vlk_cl);
-}
-
#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
# define CLOBINVRNT(env, clob, expr) \
do { \
int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
struct cl_io *io);
int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io);
-int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io);
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
struct cl_page *page, pgoff_t index);
struct lu_object *vvp_object_alloc(const struct lu_env *env,
+++ /dev/null
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2014, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_lock for VVP layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include <obd_support.h>
-#include "vvp_internal.h"
-
-/*****************************************************************************
- *
- * Vvp lock functions.
- *
- */
-
-static void vvp_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice)
-{
- struct vvp_lock *vlk = cl2vvp_lock(slice);
-
- OBD_SLAB_FREE_PTR(vlk, vvp_lock_kmem);
-}
-
-static int vvp_lock_enqueue(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- struct cl_io *unused, struct cl_sync_io *anchor)
-{
- CLOBINVRNT(env, slice->cls_obj, vvp_object_invariant(slice->cls_obj));
-
- return 0;
-}
-
-static const struct cl_lock_operations vvp_lock_ops = {
- .clo_fini = vvp_lock_fini,
- .clo_enqueue = vvp_lock_enqueue,
-};
-
-int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *unused)
-{
- struct vvp_lock *vlk;
- int result;
-
- CLOBINVRNT(env, obj, vvp_object_invariant(obj));
-
- OBD_SLAB_ALLOC_PTR_GFP(vlk, vvp_lock_kmem, GFP_NOFS);
- if (vlk != NULL) {
- cl_lock_slice_add(lock, &vlk->vlk_cl, obj, &vvp_lock_ops);
- result = 0;
- } else {
- result = -ENOMEM;
- }
-
- return result;
-}
static const struct cl_object_operations vvp_ops = {
.coo_page_init = vvp_page_init,
- .coo_lock_init = vvp_lock_init,
.coo_io_init = vvp_io_init,
.coo_attr_get = vvp_attr_get,
.coo_attr_update = vvp_attr_update,
lov_pool.o \
lov_request.o \
lovsub_dev.o \
- lovsub_lock.o \
lovsub_object.o \
lovsub_page.o \
lproc_lov.o
struct lovsub_device;
struct lovsub_object;
-struct lovsub_lock;
enum lov_device_flags {
LOV_DEV_INITIALIZED = 1 << 0
};
/**
- * Lock state at lovsub layer.
- */
-struct lovsub_lock {
- struct cl_lock_slice lss_cl;
-};
-
-/**
* Describe the environment settings for sublocks.
*/
struct lov_sublock_env {
extern struct kmem_cache *lov_thread_kmem;
extern struct kmem_cache *lov_session_kmem;
-extern struct kmem_cache *lovsub_lock_kmem;
extern struct kmem_cache *lovsub_object_kmem;
int lov_object_init (const struct lu_env *env, struct lu_object *obj,
struct cl_lock *lock, const struct cl_io *io);
int lov_io_init (const struct lu_env *env, struct cl_object *obj,
struct cl_io *io);
-int lovsub_lock_init (const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io);
int lov_lock_init_composite(const struct lu_env *env, struct cl_object *obj,
struct cl_lock *lock, const struct cl_io *io);
return container_of0(obj, struct lovsub_object, lso_cl.co_lu);
}
-static inline struct lovsub_lock *
-cl2lovsub_lock(const struct cl_lock_slice *slice)
-{
- LINVRNT(lovsub_is_object(&slice->cls_obj->co_lu));
- return container_of(slice, struct lovsub_lock, lss_cl);
-}
-
-static inline struct lovsub_lock *cl2sub_lock(const struct cl_lock *lock)
-{
- const struct cl_lock_slice *slice;
-
- slice = cl_lock_at(lock, &lovsub_device_type);
- LASSERT(slice != NULL);
- return cl2lovsub_lock(slice);
-}
-
static inline struct lov_lock *cl2lov_lock(const struct cl_lock_slice *slice)
{
LINVRNT(lov_is_object(&slice->cls_obj->co_lu));
struct kmem_cache *lov_thread_kmem;
struct kmem_cache *lov_session_kmem;
-struct kmem_cache *lovsub_lock_kmem;
struct kmem_cache *lovsub_object_kmem;
struct lu_kmem_descr lov_caches[] = {
.ckd_size = sizeof (struct lov_session)
},
{
- .ckd_cache = &lovsub_lock_kmem,
- .ckd_name = "lovsub_lock_kmem",
- .ckd_size = sizeof (struct lovsub_lock)
- },
- {
.ckd_cache = &lovsub_object_kmem,
.ckd_name = "lovsub_object_kmem",
.ckd_size = sizeof (struct lovsub_object)
+++ /dev/null
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2016, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Implementation of cl_lock for LOVSUB layer.
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LOV
-
-#include "lov_cl_internal.h"
-
-/** \addtogroup lov
- * @{
- */
-
-/*****************************************************************************
- *
- * Lovsub lock operations.
- *
- */
-
-static void lovsub_lock_fini(const struct lu_env *env,
- struct cl_lock_slice *slice)
-{
- struct lovsub_lock *lsl;
-
- ENTRY;
- lsl = cl2lovsub_lock(slice);
- OBD_SLAB_FREE_PTR(lsl, lovsub_lock_kmem);
- EXIT;
-}
-
-static const struct cl_lock_operations lovsub_lock_ops = {
- .clo_fini = lovsub_lock_fini,
-};
-
-int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io)
-{
- struct lovsub_lock *lsk;
- int result;
-
- ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(lsk, lovsub_lock_kmem, GFP_NOFS);
- if (lsk != NULL) {
- cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops);
- result = 0;
- } else
- result = -ENOMEM;
- RETURN(result);
-}
-
-/** @} lov */
below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
if (below != NULL) {
lu_object_add(obj, below);
- cl_object_page_init(lu2cl(obj), sizeof(struct lovsub_page));
+ cl_object_page_init(lu2cl(obj), 0);
result = 0;
} else
result = -ENOMEM;
}
static const struct cl_object_operations lovsub_ops = {
- .coo_page_init = lovsub_page_init,
- .coo_lock_init = lovsub_lock_init,
.coo_attr_update = lovsub_attr_update,
.coo_glimpse = lovsub_object_glimpse,
.coo_req_attr_set = lovsub_req_attr_set
INIT_LIST_HEAD(&lock->cll_layers);
list_for_each_entry(scan, &obj->co_lu.lo_header->loh_layers,
co_lu.lo_linkage) {
- result = scan->co_ops->coo_lock_init(env, scan, lock, io);
+ if (scan->co_ops->coo_lock_init != NULL)
+ result = scan->co_ops->coo_lock_init(env, scan, lock,
+ io);
+
if (result != 0) {
cl_lock_fini(env, lock);
break;
int cl_lock_enqueue(const struct lu_env *env, struct cl_io *io,
struct cl_lock *lock, struct cl_sync_io *anchor)
{
- const struct cl_lock_slice *slice;
- int rc = -ENOSYS;
+ const struct cl_lock_slice *slice;
+ int rc = 0;
ENTRY;