# 2.6.12
# ~2.6.12 merge patch from oracle to convert tree_lock from spinlock to rwlock
+# yet tree_lock is converted from rwlock to spin_lock since v2.6.26
AC_DEFUN([LC_RW_TREE_LOCK],
[AC_MSG_CHECKING([if kernel has tree_lock as rwlock])
tmp_flags="$EXTRA_KCFLAGS"
EXTRA_KCFLAGS="$tmp_flags"
])
-# 2.6.15
-
-# LC_INODE_I_MUTEX
-# after 2.6.15 inode have i_mutex intead of i_sem
-AC_DEFUN([LC_INODE_I_MUTEX],
-[AC_MSG_CHECKING([if inode has i_mutex ])
-LB_LINUX_TRY_COMPILE([
- #include <linux/mutex.h>
- #include <linux/fs.h>
- #undef i_mutex
-],[
- struct inode i;
-
- mutex_unlock(&i.i_mutex);
-],[
- AC_MSG_RESULT(yes)
- AC_DEFINE(HAVE_INODE_I_MUTEX, 1,
- [after 2.6.15 inode have i_mutex intead of i_sem])
-],[
- AC_MSG_RESULT(no)
-])
-])
-
-# 2.6.16
-
-# LC_SECURITY_PLUG # for SLES10 SP2
-# check security plug in sles10 sp2 kernel
-AC_DEFUN([LC_SECURITY_PLUG],
-[AC_MSG_CHECKING([If kernel has security plug support])
-LB_LINUX_TRY_COMPILE([
- #include <linux/fs.h>
- #include <linux/stddef.h>
-],[
- notify_change(NULL, NULL, NULL);
-],[
- AC_MSG_RESULT(yes)
- AC_DEFINE(HAVE_SECURITY_PLUG, 1,
- [SLES10 SP2 use extra parameter in vfs])
-],[
- AC_MSG_RESULT(no)
-])
-])
-
-# 2.6.17
-
-# inode have i_private field since 2.6.17
-AC_DEFUN([LC_INODE_IPRIVATE],
-[AC_MSG_CHECKING([if inode has a i_private field])
-LB_LINUX_TRY_COMPILE([
-#include <linux/fs.h>
-],[
- struct inode i;
- i.i_private = NULL;
-],[
- AC_MSG_RESULT(yes)
- AC_DEFINE(HAVE_INODE_IPRIVATE, 1,
- [struct inode has i_private field])
-],[
- AC_MSG_RESULT(no)
-])
-])
-
-# LC_DQUOTOFF_MUTEX
-# after 2.6.17 dquote use mutex instead if semaphore
-AC_DEFUN([LC_DQUOTOFF_MUTEX],
-[AC_MSG_CHECKING([use dqonoff_mutex])
-LB_LINUX_TRY_COMPILE([
- #include <linux/mutex.h>
- #include <linux/fs.h>
- #include <linux/quota.h>
-],[
- struct quota_info dq;
-
- mutex_unlock(&dq.dqonoff_mutex);
-],[
- AC_MSG_RESULT(yes)
- AC_DEFINE(HAVE_DQUOTOFF_MUTEX, 1,
- [after 2.6.17 dquote use mutex instead if semaphore])
-],[
- AC_MSG_RESULT(no)
-])
-])
-
# 2.6.18
# LC_NR_PAGECACHE
#
# 2.6.27
#
+
+# LC_SECURITY_PLUG # for SLES10 SP2 (2.6.27)
+# check security plug in sles10 sp2 kernel
+AC_DEFUN([LC_SECURITY_PLUG],
+[AC_MSG_CHECKING([If kernel has security plug support])
+LB_LINUX_TRY_COMPILE([
+ #include <linux/fs.h>
+ #include <linux/stddef.h>
+],[
+ notify_change(NULL, NULL, NULL);
+],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_SECURITY_PLUG, 1,
+ [SLES10 SP2 use extra parameter in vfs])
+],[
+ AC_MSG_RESULT(no)
+])
+])
+
AC_DEFUN([LC_PGMKWRITE_USE_VMFAULT],
[AC_MSG_CHECKING([kernel .page_mkwrite uses struct vm_fault *])
tmp_flags="$EXTRA_KCFLAGS"
# 2.6.12
LC_RW_TREE_LOCK
- # 2.6.15
- LC_INODE_I_MUTEX
-
- # 2.6.16
- LC_SECURITY_PLUG # for SLES10 SP2
-
- # 2.6.17
- LC_INODE_IPRIVATE
- LC_DQUOTOFF_MUTEX
-
# 2.6.18
LC_NR_PAGECACHE
LC_STATFS_DENTRY_PARAM
LC_FS_STRUCT_USE_PATH
# 2.6.27
+ LC_SECURITY_PLUG # for SLES10 SP2
LC_PGMKWRITE_USE_VMFAULT
LC_INODE_PERMISION_2ARGS
LC_FILE_REMOVE_SUID
*/
#define ATTR_BLOCKS (1 << 27)
-#if HAVE_INODE_I_MUTEX
-#define UNLOCK_INODE_MUTEX(inode) \
-do {cfs_mutex_unlock(&(inode)->i_mutex); } while(0)
-#define LOCK_INODE_MUTEX(inode) \
-do {cfs_mutex_lock(&(inode)->i_mutex); } while(0)
-#define LOCK_INODE_MUTEX_PARENT(inode) \
-do {cfs_mutex_lock_nested(&(inode)->i_mutex, I_MUTEX_PARENT); } while(0)
-#define TRYLOCK_INODE_MUTEX(inode) cfs_mutex_trylock(&(inode)->i_mutex)
-#else
-#define UNLOCK_INODE_MUTEX(inode) do cfs_up(&(inode)->i_sem); } while(0)
-#define LOCK_INODE_MUTEX(inode) do cfs_down(&(inode)->i_sem); } while(0)
-#define TRYLOCK_INODE_MUTEX(inode) (!down_trylock(&(inode)->i_sem))
-#define LOCK_INODE_MUTEX_PARENT(inode) LOCK_INODE_MUTEX(inode)
-#endif /* HAVE_INODE_I_MUTEX */
-
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
#define d_child d_u.d_child
#define d_rcu d_u.d_rcu
#endif
-#ifdef HAVE_DQUOTOFF_MUTEX
-#define UNLOCK_DQONOFF_MUTEX(dqopt) cfs_mutex_unlock(&(dqopt)->dqonoff_mutex)
-#define LOCK_DQONOFF_MUTEX(dqopt) cfs_mutex_lock(&(dqopt)->dqonoff_mutex)
-#else
-#define UNLOCK_DQONOFF_MUTEX(dqopt) cfs_up(&(dqopt)->dqonoff_sem)
-#define LOCK_DQONOFF_MUTEX(dqopt) cfs_down(&(dqopt)->dqonoff_sem)
-#endif /* HAVE_DQUOTOFF_MUTEX */
-
#define current_ngroups current_cred()->group_info->ngroups
#define current_groups current_cred()->group_info->small_block
#define invalidate_mapping_pages(mapping,s,e) invalidate_inode_pages(mapping)
#endif
-#ifdef HAVE_INODE_IPRIVATE
-#define INODE_PRIVATE_DATA(inode) ((inode)->i_private)
-#else
-#define INODE_PRIVATE_DATA(inode) ((inode)->u.generic_ip)
-#endif
-
#ifndef HAVE_SIMPLE_SETATTR
#define simple_setattr(dentry, ops) inode_setattr((dentry)->d_inode, ops)
#endif
struct dentry *dparent,
int fid_namelen)
{
- struct dentry *dchild;
-
- LOCK_INODE_MUTEX(dparent->d_inode);
- dchild = lookup_one_len(fid_name, dparent, fid_namelen);
- UNLOCK_INODE_MUTEX(dparent->d_inode);
-
- if (IS_ERR(dchild) || dchild->d_inode == NULL)
- return dchild;
-
- if (is_bad_inode(dchild->d_inode)) {
- CERROR("bad inode returned %lu/%u\n",
- dchild->d_inode->i_ino, dchild->d_inode->i_generation);
- dput(dchild);
- dchild = ERR_PTR(-ENOENT);
- }
- return dchild;
+ struct dentry *dchild;
+
+ mutex_lock(&dparent->d_inode->i_mutex);
+ dchild = lookup_one_len(fid_name, dparent, fid_namelen);
+ mutex_unlock(&dparent->d_inode->i_mutex);
+
+ if (IS_ERR(dchild) || dchild->d_inode == NULL)
+ return dchild;
+
+ if (is_bad_inode(dchild->d_inode)) {
+ CERROR("bad inode returned %lu/%u\n",
+ dchild->d_inode->i_ino, dchild->d_inode->i_generation);
+ dput(dchild);
+ dchild = ERR_PTR(-ENOENT);
+ }
+ return dchild;
}
static inline void ll_sleep(int t)
if (op_data == NULL)
RETURN(-ENOMEM);
- if (!S_ISDIR(inode->i_mode)) {
- if (ia_valid & ATTR_SIZE)
- UP_WRITE_I_ALLOC_SEM(inode);
- UNLOCK_INODE_MUTEX(inode);
- cfs_down_write(&lli->lli_trunc_sem);
- LOCK_INODE_MUTEX(inode);
- if (ia_valid & ATTR_SIZE)
- DOWN_WRITE_I_ALLOC_SEM(inode);
- }
+ if (!S_ISDIR(inode->i_mode)) {
+ if (ia_valid & ATTR_SIZE)
+ UP_WRITE_I_ALLOC_SEM(inode);
+ mutex_unlock(&inode->i_mutex);
+ cfs_down_write(&lli->lli_trunc_sem);
+ mutex_lock(&inode->i_mutex);
+ if (ia_valid & ATTR_SIZE)
+ DOWN_WRITE_I_ALLOC_SEM(inode);
+ }
/* We need a steady stripe configuration for setattr to avoid
* confusion. */
pvec->ldp_size = page_count << PAGE_CACHE_SHIFT;
pvec->ldp_nr = page_count;
- /* FIXME: in ll_direct_rw_pages, it has to allocate many cl_page{}s to
- * write those pages into OST. Even worse case is that more pages
- * would be asked to write out to swap space, and then finally get here
- * again.
- * Unfortunately this is NOT easy to fix.
- * Thoughts on solution:
- * 0. Define a reserved pool for cl_pages, which could be a list of
- * pre-allocated cl_pages from cl_page_kmem;
- * 1. Define a new operation in cl_object_operations{}, says clo_depth,
- * which measures how many layers for this lustre object. Generally
- * speaking, the depth would be 2, one for llite, and one for lovsub.
- * However, for SNS, there will be more since we need additional page
- * to store parity;
- * 2. Reserve the # of (page_count * depth) cl_pages from the reserved
- * pool. Afterwards, the clio would allocate the pages from reserved
- * pool, this guarantees we neeedn't allocate the cl_pages from
- * generic cl_page slab cache.
- * Of course, if there is NOT enough pages in the pool, we might
- * be asked to write less pages once, this purely depends on
- * implementation. Anyway, we should be careful to avoid deadlocking.
- */
- LOCK_INODE_MUTEX(inode);
- bytes = ll_direct_rw_pages(env, io, rw, inode, pvec);
- UNLOCK_INODE_MUTEX(inode);
- cl_io_fini(env, io);
- return (bytes == pvec->ldp_size) ? 0 : (int)bytes;
+ /* FIXME: in ll_direct_rw_pages, it has to allocate many cl_page{}s to
+ * write those pages into OST. Even worse case is that more pages
+ * would be asked to write out to swap space, and then finally get here
+ * again.
+ * Unfortunately this is NOT easy to fix.
+ * Thoughts on solution:
+ * 0. Define a reserved pool for cl_pages, which could be a list of
+ * pre-allocated cl_pages from cl_page_kmem;
+ * 1. Define a new operation in cl_object_operations{}, says clo_depth,
+ * which measures how many layers for this lustre object. Generally
+ * speaking, the depth would be 2, one for llite, and one for lovsub.
+ * However, for SNS, there will be more since we need additional page
+ * to store parity;
+ * 2. Reserve the # of (page_count * depth) cl_pages from the reserved
+ * pool. Afterwards, the clio would allocate the pages from reserved
+ *    pool, this guarantees we needn't allocate the cl_pages from
+ * generic cl_page slab cache.
+ * Of course, if there is NOT enough pages in the pool, we might
+ * be asked to write less pages once, this purely depends on
+ * implementation. Anyway, we should be careful to avoid deadlocking.
+ */
+ mutex_lock(&inode->i_mutex);
+ bytes = ll_direct_rw_pages(env, io, rw, inode, pvec);
+ mutex_unlock(&inode->i_mutex);
+ cl_io_fini(env, io);
+ return (bytes == pvec->ldp_size) ? 0 : (int)bytes;
}
/*
struct inode *inode = vmpage->mapping->host;
loff_t pos;
- if (TRYLOCK_INODE_MUTEX(inode)) {
- UNLOCK_INODE_MUTEX(inode);
+ if (mutex_trylock(&inode->i_mutex)) {
+ mutex_unlock(&(inode)->i_mutex);
/* this is too bad. Someone is trying to write the
* page w/o holding inode mutex. This means we can
io = ccc_env_io(env)->cui_cl.cis_io;
LASSERT(io != NULL);
- /* 0. Need locking between buffered and direct access. and race with
- * size changing by concurrent truncates and writes.
- * 1. Need inode sem to operate transient pages. */
- if (rw == READ)
- LOCK_INODE_MUTEX(inode);
+ /* 0. Need locking between buffered and direct access. and race with
+ * size changing by concurrent truncates and writes.
+ * 1. Need inode mutex to operate transient pages.
+ */
+ if (rw == READ)
+ mutex_lock(&inode->i_mutex);
LASSERT(obj->cob_transient_pages == 0);
for (seg = 0; seg < nr_segs; seg++) {
}
}
out:
- LASSERT(obj->cob_transient_pages == 0);
- if (rw == READ)
- UNLOCK_INODE_MUTEX(inode);
+ LASSERT(obj->cob_transient_pages == 0);
+ if (rw == READ)
+ mutex_unlock(&inode->i_mutex);
if (tot_bytes > 0) {
if (rw == WRITE) {
}
static int vvp_io_setattr_iter_init(const struct lu_env *env,
- const struct cl_io_slice *ios)
+ const struct cl_io_slice *ios)
{
- struct ccc_io *cio = ccc_env_io(env);
- struct inode *inode = ccc_object_inode(ios->cis_obj);
+ struct ccc_io *cio = ccc_env_io(env);
+ struct inode *inode = ccc_object_inode(ios->cis_obj);
- /*
- * We really need to get our PW lock before we change inode->i_size.
- * If we don't we can race with other i_size updaters on our node,
- * like ll_file_read. We can also race with i_size propogation to
- * other nodes through dirtying and writeback of final cached pages.
- * This last one is especially bad for racing o_append users on other
- * nodes.
- */
- UNLOCK_INODE_MUTEX(inode);
- if (cl_io_is_trunc(ios->cis_io))
- UP_WRITE_I_ALLOC_SEM(inode);
- cio->u.setattr.cui_locks_released = 1;
- return 0;
+ /*
+ * We really need to get our PW lock before we change inode->i_size.
+ * If we don't we can race with other i_size updaters on our node,
+	 * like ll_file_read. We can also race with i_size propagation to
+ * other nodes through dirtying and writeback of final cached pages.
+ * This last one is especially bad for racing o_append users on other
+ * nodes.
+ */
+ mutex_unlock(&inode->i_mutex);
+ if (cl_io_is_trunc(ios->cis_io))
+ UP_WRITE_I_ALLOC_SEM(inode);
+ cio->u.setattr.cui_locks_released = 1;
+ return 0;
}
/**
}
static int vvp_io_setattr_start(const struct lu_env *env,
- const struct cl_io_slice *ios)
+ const struct cl_io_slice *ios)
{
- struct ccc_io *cio = cl2ccc_io(env, ios);
- struct cl_io *io = ios->cis_io;
- struct inode *inode = ccc_object_inode(io->ci_obj);
+ struct ccc_io *cio = cl2ccc_io(env, ios);
+ struct cl_io *io = ios->cis_io;
+ struct inode *inode = ccc_object_inode(io->ci_obj);
- LASSERT(cio->u.setattr.cui_locks_released);
+ LASSERT(cio->u.setattr.cui_locks_released);
- LOCK_INODE_MUTEX(inode);
- cio->u.setattr.cui_locks_released = 0;
+ mutex_lock(&inode->i_mutex);
+ cio->u.setattr.cui_locks_released = 0;
- if (cl_io_is_trunc(io))
- return vvp_io_setattr_trunc(env, ios, inode,
- io->u.ci_setattr.sa_attr.lvb_size);
- else
- return vvp_io_setattr_time(env, ios);
+ if (cl_io_is_trunc(io))
+ return vvp_io_setattr_trunc(env, ios, inode,
+ io->u.ci_setattr.sa_attr.lvb_size);
+ else
+ return vvp_io_setattr_time(env, ios);
}
static void vvp_io_setattr_end(const struct lu_env *env,
}
static void vvp_io_setattr_fini(const struct lu_env *env,
- const struct cl_io_slice *ios)
+ const struct cl_io_slice *ios)
{
- struct ccc_io *cio = ccc_env_io(env);
- struct cl_io *io = ios->cis_io;
- struct inode *inode = ccc_object_inode(ios->cis_io->ci_obj);
-
- if (cio->u.setattr.cui_locks_released) {
- LOCK_INODE_MUTEX(inode);
- if (cl_io_is_trunc(io))
- DOWN_WRITE_I_ALLOC_SEM(inode);
- cio->u.setattr.cui_locks_released = 0;
- }
- vvp_io_fini(env, ios);
+ struct ccc_io *cio = ccc_env_io(env);
+ struct cl_io *io = ios->cis_io;
+ struct inode *inode = ccc_object_inode(ios->cis_io->ci_obj);
+
+ if (cio->u.setattr.cui_locks_released) {
+ mutex_lock(&inode->i_mutex);
+ if (cl_io_is_trunc(io))
+ DOWN_WRITE_I_ALLOC_SEM(inode);
+ cio->u.setattr.cui_locks_released = 0;
+ }
+ vvp_io_fini(env, ios);
}
#ifdef HAVE_FILE_READV
static void vvp_transient_page_verify(const struct cl_page *page)
{
- struct inode *inode = ccc_object_inode(page->cp_obj);
+ struct inode *inode = ccc_object_inode(page->cp_obj);
- LASSERT(!TRYLOCK_INODE_MUTEX(inode));
- /* LASSERT_SEM_LOCKED(&inode->i_alloc_sem); */
+ LASSERT(!mutex_trylock(&inode->i_mutex));
+ /* LASSERT_SEM_LOCKED(&inode->i_alloc_sem); */
}
static int vvp_transient_page_own(const struct lu_env *env,
}
static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
- const struct cl_page_slice *slice)
+ const struct cl_page_slice *slice)
{
- struct inode *inode = ccc_object_inode(slice->cpl_obj);
- int locked;
+ struct inode *inode = ccc_object_inode(slice->cpl_obj);
+ int locked;
- locked = !TRYLOCK_INODE_MUTEX(inode);
- if (!locked)
- UNLOCK_INODE_MUTEX(inode);
- return locked ? -EBUSY : -ENODATA;
+ locked = !mutex_trylock(&inode->i_mutex);
+ if (!locked)
+ mutex_unlock(&inode->i_mutex);
+ return locked ? -EBUSY : -ENODATA;
}
static void
}
static void vvp_transient_page_fini(const struct lu_env *env,
- struct cl_page_slice *slice)
+ struct cl_page_slice *slice)
{
- struct ccc_page *cp = cl2ccc_page(slice);
- struct cl_page *clp = slice->cpl_page;
- struct ccc_object *clobj = cl2ccc(clp->cp_obj);
+ struct ccc_page *cp = cl2ccc_page(slice);
+ struct cl_page *clp = slice->cpl_page;
+ struct ccc_object *clobj = cl2ccc(clp->cp_obj);
- vvp_page_fini_common(cp);
- LASSERT(!TRYLOCK_INODE_MUTEX(clobj->cob_inode));
- clobj->cob_transient_pages--;
+ vvp_page_fini_common(cp);
+ LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
+ clobj->cob_transient_pages--;
}
static const struct cl_page_operations vvp_transient_page_ops = {
};
struct cl_page *vvp_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage)
+ struct cl_page *page, cfs_page_t *vmpage)
{
- struct ccc_page *cpg;
- int result;
-
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
-
- OBD_SLAB_ALLOC_PTR_GFP(cpg, vvp_page_kmem, CFS_ALLOC_IO);
- if (cpg != NULL) {
- cpg->cpg_page = vmpage;
- page_cache_get(vmpage);
-
- CFS_INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
- if (page->cp_type == CPT_CACHEABLE) {
- SetPagePrivate(vmpage);
- vmpage->private = (unsigned long)page;
- cl_page_slice_add(page, &cpg->cpg_cl, obj,
- &vvp_page_ops);
- } else {
- struct ccc_object *clobj = cl2ccc(obj);
-
- LASSERT(!TRYLOCK_INODE_MUTEX(clobj->cob_inode));
- cl_page_slice_add(page, &cpg->cpg_cl, obj,
- &vvp_transient_page_ops);
- clobj->cob_transient_pages++;
- }
- result = 0;
- } else
- result = -ENOMEM;
- return ERR_PTR(result);
+ struct ccc_page *cpg;
+ int result;
+
+ CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+
+ OBD_SLAB_ALLOC_PTR_GFP(cpg, vvp_page_kmem, CFS_ALLOC_IO);
+ if (cpg != NULL) {
+ cpg->cpg_page = vmpage;
+ page_cache_get(vmpage);
+
+ CFS_INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
+ if (page->cp_type == CPT_CACHEABLE) {
+ SetPagePrivate(vmpage);
+ vmpage->private = (unsigned long)page;
+ cl_page_slice_add(page, &cpg->cpg_cl, obj,
+ &vvp_page_ops);
+ } else {
+ struct ccc_object *clobj = cl2ccc(obj);
+
+ LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
+ cl_page_slice_add(page, &cpg->cpg_cl, obj,
+ &vvp_transient_page_ops);
+ clobj->cob_transient_pages++;
+ }
+ result = 0;
+ } else
+ result = -ENOMEM;
+ return ERR_PTR(result);
}
}
static int fsfilt_ext3_set_md(struct inode *inode, void *handle,
- void *lmm, int lmm_size, const char *name)
+ void *lmm, int lmm_size, const char *name)
{
- int rc;
-
- LASSERT(TRYLOCK_INODE_MUTEX(inode) == 0);
+ int rc;
- rc = ext3_xattr_set_handle(handle, inode, EXT3_XATTR_INDEX_TRUSTED,
- name, lmm, lmm_size, XATTR_NO_CTIME);
+ LASSERT(mutex_trylock(&inode->i_mutex) == 0);
+ rc = ext3_xattr_set_handle(handle, inode, EXT3_XATTR_INDEX_TRUSTED,
+ name, lmm, lmm_size, XATTR_NO_CTIME);
- if (rc && rc != -EROFS)
- CERROR("error adding MD data to inode %lu: rc = %d\n",
- inode->i_ino, rc);
- return rc;
+ if (rc && rc != -EROFS)
+ CERROR("error adding MD data to inode %lu: rc = %d\n",
+ inode->i_ino, rc);
+ return rc;
}
/* Must be called with i_mutex held */
static int fsfilt_ext3_get_md(struct inode *inode, void *lmm, int lmm_size,
- const char *name)
+ const char *name)
{
- int rc;
+ int rc;
- LASSERT(TRYLOCK_INODE_MUTEX(inode) == 0);
+ LASSERT(mutex_trylock(&inode->i_mutex) == 0);
- rc = ext3_xattr_get(inode, EXT3_XATTR_INDEX_TRUSTED,
- name, lmm, lmm_size);
+ rc = ext3_xattr_get(inode, EXT3_XATTR_INDEX_TRUSTED,
+ name, lmm, lmm_size);
- /* This gives us the MD size */
- if (lmm == NULL)
- return (rc == -ENODATA) ? 0 : rc;
+ /* This gives us the MD size */
+ if (lmm == NULL)
+ return (rc == -ENODATA) ? 0 : rc;
- if (rc < 0) {
- CDEBUG(D_INFO, "error getting EA %d/%s from inode %lu: rc %d\n",
- EXT3_XATTR_INDEX_TRUSTED, name,
- inode->i_ino, rc);
- memset(lmm, 0, lmm_size);
- return (rc == -ENODATA) ? 0 : rc;
- }
+ if (rc < 0) {
+ CDEBUG(D_INFO, "error getting EA %d/%s from inode %lu: rc %d\n",
+ EXT3_XATTR_INDEX_TRUSTED, name,
+ inode->i_ino, rc);
+ memset(lmm, 0, lmm_size);
+ return (rc == -ENODATA) ? 0 : rc;
+ }
- return rc;
+ return rc;
}
static int fsfilt_ext3_send_bio(int rw, struct inode *inode, struct bio *bio)
EXPORT_SYMBOL(l_readdir);
int l_notify_change(struct vfsmount *mnt, struct dentry *dchild,
- struct iattr *newattrs)
+ struct iattr *newattrs)
{
- int rc;
+ int rc;
- LOCK_INODE_MUTEX(dchild->d_inode);
+ mutex_lock(&dchild->d_inode->i_mutex);
#ifdef HAVE_SECURITY_PLUG
- rc = notify_change(dchild, mnt, newattrs);
+ rc = notify_change(dchild, mnt, newattrs);
#else
- rc = notify_change(dchild, newattrs);
+ rc = notify_change(dchild, newattrs);
#endif
- UNLOCK_INODE_MUTEX(dchild->d_inode);
- return rc;
+ mutex_unlock(&dchild->d_inode->i_mutex);
+ return rc;
}
EXPORT_SYMBOL(l_notify_change);
char *name = test_quotafile[i];
int namelen = strlen(name);
- /* remove the stale test quotafile */
- LOCK_INODE_MUTEX_PARENT(parent_inode);
- de = lookup_one_len(name, tgt->obd_lvfs_ctxt.pwd, namelen);
- if (!IS_ERR(de) && de->d_inode)
- ll_vfs_unlink(parent_inode, de,
- tgt->obd_lvfs_ctxt.pwdmnt);
- if (!IS_ERR(de))
- dput(de);
- UNLOCK_INODE_MUTEX(parent_inode);
+ /* remove the stale test quotafile */
+ mutex_lock_nested(&parent_inode->i_mutex, I_MUTEX_PARENT);
+ de = lookup_one_len(name, tgt->obd_lvfs_ctxt.pwd, namelen);
+ if (!IS_ERR(de) && de->d_inode)
+ ll_vfs_unlink(parent_inode, de,
+ tgt->obd_lvfs_ctxt.pwdmnt);
+ if (!IS_ERR(de))
+ dput(de);
+ mutex_unlock(&parent_inode->i_mutex);
/* create quota file */
fp = filp_open(name, O_CREAT | O_EXCL, 0644);
/* close quota file */
filp_close(lqi->qi_files[i], 0);
- /* unlink quota file */
- LOCK_INODE_MUTEX_PARENT(parent_inode);
+ /* unlink quota file */
+ mutex_lock_nested(&parent_inode->i_mutex, I_MUTEX_PARENT);
de = lookup_one_len(name, tgt->obd_lvfs_ctxt.pwd, namelen);
if (IS_ERR(de) || de->d_inode == NULL) {
if (rc)
CERROR("error unlink quotafile %s (rc = %d)\n",
name, rc);
- dput:
- if (!IS_ERR(de))
- dput(de);
- UNLOCK_INODE_MUTEX(parent_inode);
+dput:
+ if (!IS_ERR(de))
+ dput(de);
+ mutex_unlock(&parent_inode->i_mutex);
}
pop_ctxt(saved, &tgt->obd_lvfs_ctxt, NULL);
oa->o_generation = filp->f_dentry->d_inode->i_generation;
namelen = ll_fid2str(fidname, oa->o_id, oa->o_generation);
- LOCK_INODE_MUTEX_PARENT(parent_inode);
- new_child = lookup_one_len(fidname, mds->mds_objects_dir, namelen);
+ mutex_lock_nested(&parent_inode->i_mutex, I_MUTEX_PARENT);
+ new_child = lookup_one_len(fidname, mds->mds_objects_dir, namelen);
if (IS_ERR(new_child)) {
CERROR("getting neg dentry for obj rename: %d\n", rc);
out_dput:
dput(new_child);
out_close:
- UNLOCK_INODE_MUTEX(parent_inode);
- err = filp_close(filp, 0);
- if (err) {
- CERROR("closing tmpfile %u: rc %d\n", tmpname, rc);
- if (!rc)
- rc = err;
- }
+ mutex_unlock(&parent_inode->i_mutex);
+ err = filp_close(filp, 0);
+ if (err) {
+ CERROR("closing tmpfile %u: rc %d\n", tmpname, rc);
+ if (!rc)
+ rc = err;
+ }
out_pop:
pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, &ucred);
RETURN(rc);
namelen = ll_fid2str(fidname, oa->o_id, oa->o_generation);
- LOCK_INODE_MUTEX_PARENT(parent_inode);
- de = lookup_one_len(fidname, mds->mds_objects_dir, namelen);
- if (IS_ERR(de)) {
- rc = IS_ERR(de);
- de = NULL;
- CERROR("error looking up object "LPU64" %s: rc %d\n",
- oa->o_id, fidname, rc);
- GOTO(out_dput, rc);
- }
+ mutex_lock_nested(&parent_inode->i_mutex, I_MUTEX_PARENT);
+ de = lookup_one_len(fidname, mds->mds_objects_dir, namelen);
+ if (IS_ERR(de)) {
+ rc = IS_ERR(de);
+ de = NULL;
+ CERROR("error looking up object "LPU64" %s: rc %d\n",
+ oa->o_id, fidname, rc);
+ GOTO(out_dput, rc);
+ }
if (de->d_inode == NULL) {
CERROR("destroying non-existent object "LPU64" %s: rc %d\n",
oa->o_id, fidname, rc);
if (err && !rc)
rc = err;
out_dput:
- if (de != NULL)
- l_dput(de);
- UNLOCK_INODE_MUTEX(parent_inode);
+ if (de != NULL)
+ l_dput(de);
+ mutex_unlock(&parent_inode->i_mutex);
if (inode)
iput(inode);
dget(fdentry);
rc = llog_lvfs_close(handle);
- if (rc == 0) {
- LOCK_INODE_MUTEX_PARENT(inode);
- rc = ll_vfs_unlink(inode, fdentry, mnt);
- UNLOCK_INODE_MUTEX(inode);
- }
- mntput(mnt);
+ if (rc == 0) {
+ mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
+ rc = ll_vfs_unlink(inode, fdentry, mnt);
+ mutex_unlock(&inode->i_mutex);
+ }
+ mntput(mnt);
dput(fdentry);
pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
static int filter_lock_dentry(struct obd_device *obd, struct dentry *dparent)
{
- LOCK_INODE_MUTEX_PARENT(dparent->d_inode);
- return 0;
+ mutex_lock_nested(&dparent->d_inode->i_mutex, I_MUTEX_PARENT);
+ return 0;
}
/* We never dget the object parent, so DON'T dput it either */
/* We never dget the object parent, so DON'T dput it either */
static void filter_parent_unlock(struct dentry *dparent)
{
- UNLOCK_INODE_MUTEX(dparent->d_inode);
+ mutex_unlock(&dparent->d_inode->i_mutex);
}
/* How to get files, dentries, inodes from object id's.
/* don't need dir->i_zombie for 2.4, it is for rename/unlink of dir
* itself we already hold dir->i_mutex for child create/unlink ops */
- LASSERT(dentry->d_inode != NULL);
- LASSERT(TRYLOCK_INODE_MUTEX(dir) == 0);
- LASSERT(TRYLOCK_INODE_MUTEX(dentry->d_inode) == 0);
+ LASSERT(dentry->d_inode != NULL);
+ LASSERT(mutex_trylock(&dir->i_mutex) == 0);
+ LASSERT(mutex_trylock(&dentry->d_inode->i_mutex) == 0);
/* may_delete() */
rc = dir->i_op->unlink(dir, dentry);
out:
- /* need to drop i_mutex before we lose inode reference */
- UNLOCK_INODE_MUTEX(dentry->d_inode);
- if (rc == 0)
- d_delete(dentry);
+ /* need to drop i_mutex before we lose inode reference */
+ mutex_unlock(&dentry->d_inode->i_mutex);
+ if (rc == 0)
+ d_delete(dentry);
- RETURN(rc);
+ RETURN(rc);
}
/* Caller must hold LCK_PW on parent and push us into kernel context.
if (fcc != NULL)
*fcc = oa->o_lcookie;
}
- if (ia_valid & (ATTR_SIZE | ATTR_UID | ATTR_GID)) {
- unsigned long now = jiffies;
- /* Filter truncates and writes are serialized by
- * i_alloc_sem, see the comment in
- * filter_preprw_write.*/
- if (ia_valid & ATTR_SIZE)
- down_write(&inode->i_alloc_sem);
- LOCK_INODE_MUTEX(inode);
- fsfilt_check_slow(exp->exp_obd, now, "i_alloc_sem and i_mutex");
- old_size = i_size_read(inode);
- }
+ if (ia_valid & (ATTR_SIZE | ATTR_UID | ATTR_GID)) {
+ unsigned long now = jiffies;
+ /* Filter truncates and writes are serialized by
+ * i_alloc_sem, see the comment in
+ * filter_preprw_write.*/
+ if (ia_valid & ATTR_SIZE)
+ down_write(&inode->i_alloc_sem);
+ mutex_lock(&inode->i_mutex);
+ fsfilt_check_slow(exp->exp_obd, now, "i_alloc_sem and i_mutex");
+ old_size = i_size_read(inode);
+ }
/* VBR: version recovery check */
rc = filter_version_get_check(exp, oti, inode);
if (page)
page_cache_release(page);
- if (ia_valid & (ATTR_SIZE | ATTR_UID | ATTR_GID))
- UNLOCK_INODE_MUTEX(inode);
- if (ia_valid & ATTR_SIZE)
- up_write(&inode->i_alloc_sem);
- if (fcc)
- OBD_FREE(fcc, sizeof(*fcc));
+ if (ia_valid & (ATTR_SIZE | ATTR_UID | ATTR_GID))
+ mutex_unlock(&inode->i_mutex);
+ if (ia_valid & ATTR_SIZE)
+ up_write(&inode->i_alloc_sem);
+ if (fcc)
+ OBD_FREE(fcc, sizeof(*fcc));
/* trigger quota release */
if (ia_valid & (ATTR_SIZE | ATTR_UID | ATTR_GID)) {
* between page lock, i_mutex & starting new journal handle.
* (see bug 20321) -johann
*/
- now = jiffies;
- down_write(&dchild->d_inode->i_alloc_sem);
- LOCK_INODE_MUTEX(dchild->d_inode);
- fsfilt_check_slow(exp->exp_obd, now, "i_alloc_sem and i_mutex");
+ now = jiffies;
+ down_write(&dchild->d_inode->i_alloc_sem);
+ mutex_lock(&dchild->d_inode->i_mutex);
+ fsfilt_check_slow(exp->exp_obd, now, "i_alloc_sem and i_mutex");
+
+ /* VBR: version recovery check */
+ rc = filter_version_get_check(exp, oti, dchild->d_inode);
+ if (rc) {
+ mutex_unlock(&dchild->d_inode->i_mutex);
+ up_write(&dchild->d_inode->i_alloc_sem);
+ GOTO(cleanup, rc);
+ }
- /* VBR: version recovery check */
- rc = filter_version_get_check(exp, oti, dchild->d_inode);
- if (rc) {
- UNLOCK_INODE_MUTEX(dchild->d_inode);
- up_write(&dchild->d_inode->i_alloc_sem);
- GOTO(cleanup, rc);
- }
+ handle = fsfilt_start_log(obd, dchild->d_inode, FSFILT_OP_SETATTR,
+ NULL, 1);
+ if (IS_ERR(handle)) {
+ mutex_unlock(&dchild->d_inode->i_mutex);
+ up_write(&dchild->d_inode->i_alloc_sem);
+ GOTO(cleanup, rc = PTR_ERR(handle));
+ }
- handle = fsfilt_start_log(obd, dchild->d_inode, FSFILT_OP_SETATTR,
- NULL, 1);
- if (IS_ERR(handle)) {
- UNLOCK_INODE_MUTEX(dchild->d_inode);
- up_write(&dchild->d_inode->i_alloc_sem);
- GOTO(cleanup, rc = PTR_ERR(handle));
- }
+ /* Locking order: i_mutex -> journal_lock -> dqptr_sem. LU-952 */
+ ll_vfs_dq_init(dchild->d_inode);
- /* Locking order: i_mutex -> journal_lock -> dqptr_sem. LU-952 */
- ll_vfs_dq_init(dchild->d_inode);
-
- iattr.ia_valid = ATTR_SIZE;
- iattr.ia_size = 0;
- rc = fsfilt_setattr(obd, dchild, handle, &iattr, 1);
- rc2 = fsfilt_commit(obd, dchild->d_inode, handle, 0);
- UNLOCK_INODE_MUTEX(dchild->d_inode);
- up_write(&dchild->d_inode->i_alloc_sem);
- if (rc)
- GOTO(cleanup, rc);
- if (rc2)
- GOTO(cleanup, rc = rc2);
+ iattr.ia_valid = ATTR_SIZE;
+ iattr.ia_size = 0;
+ rc = fsfilt_setattr(obd, dchild, handle, &iattr, 1);
+ rc2 = fsfilt_commit(obd, dchild->d_inode, handle, 0);
+ mutex_unlock(&dchild->d_inode->i_mutex);
+ up_write(&dchild->d_inode->i_alloc_sem);
+ if (rc)
+ GOTO(cleanup, rc);
+ if (rc2)
+ GOTO(cleanup, rc = rc2);
/* We don't actually need to lock the parent until we are unlinking
* here, and not while truncating above. That avoids holding the
GOTO(cleanup, rc = PTR_ERR(dparent));
cleanup_phase = 3; /* filter_parent_unlock */
- LOCK_INODE_MUTEX(dchild->d_inode);
- handle = fsfilt_start_log(obd, dparent->d_inode,FSFILT_OP_UNLINK,oti,1);
- if (IS_ERR(handle)) {
- UNLOCK_INODE_MUTEX(dchild->d_inode);
- GOTO(cleanup, rc = PTR_ERR(handle));
- }
- cleanup_phase = 4; /* fsfilt_commit */
+ mutex_lock(&dchild->d_inode->i_mutex);
+ handle = fsfilt_start_log(obd, dparent->d_inode,
+ FSFILT_OP_UNLINK, oti, 1);
+ if (IS_ERR(handle)) {
+ mutex_unlock(&dchild->d_inode->i_mutex);
+ GOTO(cleanup, rc = PTR_ERR(handle));
+ }
+ cleanup_phase = 4; /* fsfilt_commit */
/* Quota release need uid/gid of inode */
obdo_from_inode(oa, dchild->d_inode, OBD_MD_FLUID | OBD_MD_FLGID);
push_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
- LOCK_INODE_MUTEX(dentry->d_inode);
+ mutex_lock(&dentry->d_inode->i_mutex);
- rc = filemap_fdatawrite(dentry->d_inode->i_mapping);
- if (rc == 0) {
- /* just any file to grab fsync method - "file" arg unused */
- struct file *file = obt->obt_rcvd_filp;
+ rc = filemap_fdatawrite(dentry->d_inode->i_mapping);
+ if (rc == 0) {
+ /* just any file to grab fsync method - "file" arg unused */
+ struct file *file = obt->obt_rcvd_filp;
- if (file->f_op && file->f_op->fsync)
- rc = file->f_op->fsync(NULL, dentry, 1);
+ if (file->f_op && file->f_op->fsync)
+ rc = file->f_op->fsync(NULL, dentry, 1);
- rc2 = filemap_fdatawait(dentry->d_inode->i_mapping);
- if (!rc)
- rc = rc2;
- }
- UNLOCK_INODE_MUTEX(dentry->d_inode);
+ rc2 = filemap_fdatawait(dentry->d_inode->i_mapping);
+ if (!rc)
+ rc = rc2;
+ }
+ mutex_unlock(&dentry->d_inode->i_mutex);
oinfo->oi_oa->o_valid = OBD_MD_FLID;
obdo_from_inode(oinfo->oi_oa, dentry->d_inode, FILTER_VALID_FLAGS);
oti->oti_handle, attr, 0);
}
- UNLOCK_INODE_MUTEX(inode);
+ mutex_unlock(&inode->i_mutex);
/* Force commit to make the just-deleted blocks
* reusable. LU-456 */
fsfilt_check_slow(obd, now, "quota init");
retry:
- LOCK_INODE_MUTEX(inode);
- fsfilt_check_slow(obd, now, "i_mutex");
- oti->oti_handle = fsfilt_brw_start(obd, objcount, &fso, niocount, res,
- oti);
- if (IS_ERR(oti->oti_handle)) {
- UNLOCK_INODE_MUTEX(inode);
- rc = PTR_ERR(oti->oti_handle);
- CDEBUG(rc == -ENOSPC ? D_INODE : D_ERROR,
- "error starting transaction: rc = %d\n", rc);
- oti->oti_handle = NULL;
- GOTO(cleanup, rc);
- }
- /* have to call fsfilt_commit() from this point on */
+ mutex_lock(&inode->i_mutex);
+ fsfilt_check_slow(obd, now, "i_mutex");
+ oti->oti_handle = fsfilt_brw_start(obd, objcount, &fso, niocount, res,
+ oti);
+ if (IS_ERR(oti->oti_handle)) {
+ mutex_unlock(&inode->i_mutex);
+ rc = PTR_ERR(oti->oti_handle);
+ CDEBUG(rc == -ENOSPC ? D_INODE : D_ERROR,
+ "error starting transaction: rc = %d\n", rc);
+ oti->oti_handle = NULL;
+ GOTO(cleanup, rc);
+ }
+ /* have to call fsfilt_commit() from this point on */
fsfilt_check_slow(obd, now, "brw_start");
struct ost_filterdata *ofd;
ENTRY;
- LOCK_INODE_MUTEX(inode);
- ofd = INODE_PRIVATE_DATA(inode);
-
- if (ofd && ofd->ofd_epoch >= ioepoch) {
- if (ofd->ofd_epoch > ioepoch)
- CERROR("client sent old epoch %d for obj ino %ld\n",
- ioepoch, inode->i_ino);
- UNLOCK_INODE_MUTEX(inode);
- RETURN(0);
- }
-
- if (ofd && ofd->ofd_epoch < ioepoch) {
- ofd->ofd_epoch = ioepoch;
- } else if (!ofd) {
- OBD_ALLOC(ofd, sizeof(*ofd));
- if (!ofd)
- GOTO(out, rc = -ENOMEM);
- igrab(inode);
- INODE_PRIVATE_DATA(inode) = ofd;
- ofd->ofd_epoch = ioepoch;
- }
- /* the decision to write a record is now made, unlock */
- UNLOCK_INODE_MUTEX(inode);
+ mutex_lock(&inode->i_mutex);
+ ofd = inode->i_private;
+
+ if (ofd && ofd->ofd_epoch >= ioepoch) {
+ if (ofd->ofd_epoch > ioepoch)
+ CERROR("client sent old epoch %d for obj ino %ld\n",
+ ioepoch, inode->i_ino);
+ mutex_unlock(&inode->i_mutex);
+ RETURN(0);
+ }
+
+ if (ofd && ofd->ofd_epoch < ioepoch) {
+ ofd->ofd_epoch = ioepoch;
+ } else if (!ofd) {
+ OBD_ALLOC(ofd, sizeof(*ofd));
+ if (!ofd)
+ GOTO(out, rc = -ENOMEM);
+ igrab(inode);
+ inode->i_private = ofd;
+ ofd->ofd_epoch = ioepoch;
+ }
+ /* the decision to write a record is now made, unlock */
+ mutex_unlock(&inode->i_mutex);
OBD_ALLOC(lsc, sizeof(*lsc));
if (lsc == NULL)
child->d_parent = dird;
child->d_inode = NULL;
- LOCK_INODE_MUTEX(dir);
- rc = -ENOENT;
- bh = osd_ldiskfs_find_entry(dir, child, &de, NULL);
- if (bh) {
- rc = ldiskfs_delete_entry(oh->ot_handle, dir, de, bh);
- brelse(bh);
- }
- UNLOCK_INODE_MUTEX(dir);
+ mutex_lock(&dir->i_mutex);
+ rc = -ENOENT;
+ bh = osd_ldiskfs_find_entry(dir, child, &de, NULL);
+ if (bh) {
+ rc = ldiskfs_delete_entry(oh->ot_handle, dir, de, bh);
+ brelse(bh);
+ }
+ mutex_unlock(&dir->i_mutex);
- RETURN(rc);
+ RETURN(rc);
}
int osd_compat_add_entry(struct osd_thread_info *info, struct osd_device *osd,
child->d_parent = dir;
child->d_inode = inode;
- LOCK_INODE_MUTEX(dir->d_inode);
- rc = osd_ldiskfs_add_entry(oh->ot_handle, child, inode, NULL);
- UNLOCK_INODE_MUTEX(dir->d_inode);
+ mutex_lock(&dir->d_inode->i_mutex);
+ rc = osd_ldiskfs_add_entry(oh->ot_handle, child, inode, NULL);
+ mutex_unlock(&dir->d_inode->i_mutex);
- RETURN(rc);
+ RETURN(rc);
}
int osd_compat_objid_lookup(struct osd_thread_info *info,
/* XXX: we can use rc from sprintf() instead of strlen() */
d_seq->d_name.len = strlen(name);
- dir = d->d_inode;
- LOCK_INODE_MUTEX(dir);
- bh = osd_ldiskfs_find_entry(dir, d_seq, &de, NULL);
- UNLOCK_INODE_MUTEX(dir);
+ dir = d->d_inode;
+ mutex_lock(&dir->i_mutex);
+ bh = osd_ldiskfs_find_entry(dir, d_seq, &de, NULL);
+ mutex_unlock(&dir->i_mutex);
if (bh == NULL)
RETURN(-ENOENT);
static int osd_object_sync(const struct lu_env *env, struct dt_object *dt)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct inode *inode = obj->oo_inode;
- struct osd_thread_info *info = osd_oti_get(env);
- struct dentry *dentry = &info->oti_obj_dentry;
- struct file *file = &info->oti_file;
- int rc;
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct inode *inode = obj->oo_inode;
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct dentry *dentry = &info->oti_obj_dentry;
+ struct file *file = &info->oti_file;
+ int rc;
- ENTRY;
+ ENTRY;
- dentry->d_inode = inode;
- file->f_dentry = dentry;
- file->f_mapping = inode->i_mapping;
- file->f_op = inode->i_fop;
- LOCK_INODE_MUTEX(inode);
- rc = file->f_op->fsync(file, dentry, 0);
- UNLOCK_INODE_MUTEX(inode);
- RETURN(rc);
+ dentry->d_inode = inode;
+ file->f_dentry = dentry;
+ file->f_mapping = inode->i_mapping;
+ file->f_op = inode->i_fop;
+ mutex_lock(&inode->i_mutex);
+ rc = file->f_op->fsync(file, dentry, 0);
+ mutex_unlock(&inode->i_mutex);
+ RETURN(rc);
}
static int osd_data_get(const struct lu_env *env, struct dt_object *dt,
struct dquot_id *dqid, *tmp;
int ret;
- LOCK_DQONOFF_MUTEX(dqopt);
- if (!ll_sb_has_quota_active(qctxt->lqc_sb, type)) {
- UNLOCK_DQONOFF_MUTEX(dqopt);
- break;
- }
+ mutex_lock(&dqopt->dqonoff_mutex);
+ if (!ll_sb_has_quota_active(qctxt->lqc_sb, type)) {
+ mutex_unlock(&dqopt->dqonoff_mutex);
+ break;
+ }
LASSERT(dqopt->files[type] != NULL);
CFS_INIT_LIST_HEAD(&id_list);
rc = fsfilt_qids(obd, NULL, dqopt->files[type], type, &id_list);
- UNLOCK_DQONOFF_MUTEX(dqopt);
+ mutex_unlock(&dqopt->dqonoff_mutex);
if (rc)
CERROR("Get ids from quota file failed. (rc:%d)\n", rc);