]) # LC_HAVE_FILE_DENTRY
#
+# LC_HAVE_INODE_LOCK
+#
+# Kernel 4.5 introduced the inode_lock()/inode_unlock()/inode_trylock()
+# helpers in <linux/fs.h>, wrapping what used to be direct operations on
+# inode->i_mutex.  Probe for inode_lock() so HAVE_INODE_LOCK can gate a
+# compatibility shim on older kernels.
+#
+AC_DEFUN([LC_HAVE_INODE_LOCK], [
+LB_CHECK_COMPILE([if 'inode_lock' is defined],
+inode_lock, [
+	#include <linux/fs.h>
+],[
+	inode_lock(NULL);
+], [
+	AC_DEFINE(HAVE_INODE_LOCK, 1,
+		[inode_lock is defined])
+])
+]) # LC_HAVE_INODE_LOCK
+
+#
# LC_PROG_LINUX
#
# Lustre linux kernel checks
# 4.5
LC_HAVE_FILE_DENTRY
+ # 4.5
+ LC_HAVE_INODE_LOCK
+
#
AS_IF([test "x$enable_server" != xno], [
LC_FUNC_DEV_SET_RDONLY
#define ll_vfs_unlink(a, b) vfs_unlink(a, b)
#endif
+/* Kernels before 4.5 do not provide inode_lock()/inode_unlock()/
+ * inode_trylock(); map them onto the old inode->i_mutex field so the
+ * rest of the code can use the new names unconditionally.
+ * HAVE_INODE_LOCK is set by the LC_HAVE_INODE_LOCK configure check. */
+#ifndef HAVE_INODE_LOCK
+# define inode_lock(inode) mutex_lock(&(inode)->i_mutex)
+# define inode_unlock(inode) mutex_unlock(&(inode)->i_mutex)
+# define inode_trylock(inode) mutex_trylock(&(inode)->i_mutex)
+#endif
+
#ifndef HAVE_RADIX_EXCEPTION_ENTRY
static inline int radix_tree_exceptional_entry(void *arg)
{
{
struct dentry *dchild;
- mutex_lock(&dparent->d_inode->i_mutex);
+ inode_lock(dparent->d_inode);
dchild = lookup_one_len(fid_name, dparent, fid_namelen);
- mutex_unlock(&dparent->d_inode->i_mutex);
+ inode_unlock(dparent->d_inode);
if (IS_ERR(dchild) || dchild->d_inode == NULL)
return dchild;
loff_t ret = -EINVAL;
ENTRY;
- mutex_lock(&inode->i_mutex);
+ inode_lock(inode);
switch (origin) {
case SEEK_SET:
break;
GOTO(out, ret);
out:
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
return ret;
}
ATTR_MTIME | ATTR_MTIME_SET |
ATTR_ATIME | ATTR_ATIME_SET;
- mutex_lock(&inode->i_mutex);
+ inode_lock(inode);
rc = ll_setattr_raw(file_dentry(file), attr, true);
if (rc == -ENODATA)
rc = 0;
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
out:
if (hss != NULL)
if (!S_ISREG(inode->i_mode))
RETURN(-EINVAL);
- mutex_lock(&inode->i_mutex);
+ inode_lock(inode);
rc = ll_setattr_raw(file_dentry(file), &ia, false);
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
RETURN(rc);
}
* SEEK_CURs. Note that parallel writes and reads behave
* like SEEK_SET.
*/
- mutex_lock(&inode->i_mutex);
+ inode_lock(inode);
offset = llseek_execute(file, file->f_pos + offset, maxsize);
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
return offset;
case SEEK_DATA:
/*
#ifdef HAVE_FILE_FSYNC_4ARGS
rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
- mutex_lock(&inode->i_mutex);
+ inode_lock(inode);
#else
/* fsync's caller has already called _fdata{sync,write}, we want
* that IO to finish before calling the osc and mdc sync methods */
}
#ifdef HAVE_FILE_FSYNC_4ARGS
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
#endif
RETURN(rc);
}
if (child_inode == parent->i_sb->s_root->d_inode)
GOTO(out_iput, rc = -EINVAL);
- mutex_lock(&child_inode->i_mutex);
+ inode_lock(child_inode);
op_data->op_fid3 = *ll_inode2fid(child_inode);
if (!fid_is_sane(&op_data->op_fid3)) {
CERROR("%s: migrate %s, but FID "DFID" is insane\n",
if (rc == 0)
clear_nlink(child_inode);
out_unlock:
- mutex_unlock(&child_inode->i_mutex);
+ inode_unlock(child_inode);
out_iput:
iput(child_inode);
out_free:
struct list_head fd_lccs; /* list of ll_cl_context */
};
-extern spinlock_t inode_lock;
-
extern struct proc_dir_entry *proc_lustre_fs_root;
static inline struct inode *ll_info2i(struct ll_inode_info *lli)
* cache is not cleared yet. */
op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
if (S_ISREG(inode->i_mode))
- mutex_lock(&inode->i_mutex);
+ inode_lock(inode);
rc = simple_setattr(dentry, &op_data->op_attr);
if (S_ISREG(inode->i_mode))
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
op_data->op_attr.ia_valid = ia_valid;
rc = ll_update_inode(inode, &md);
if (S_ISREG(inode->i_mode)) {
if (attr->ia_valid & ATTR_SIZE)
inode_dio_write_done(inode);
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
}
/* We always do an MDS RPC, even if we're only changing the size;
ll_finish_md_op_data(op_data);
if (S_ISREG(inode->i_mode)) {
- mutex_lock(&inode->i_mutex);
+ inode_lock(inode);
if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
inode_dio_wait(inode);
}
GOTO(out, rc = PTR_ERR(op_data));
op_data->op_max_pages = ll_i2sbi(dir)->ll_md_brw_pages;
- mutex_lock(&dir->i_mutex);
+ inode_lock(dir);
#ifdef HAVE_DIR_CONTEXT
rc = ll_dir_read(dir, &pos, op_data, &lgd.ctx);
#else
rc = ll_dir_read(dir, &pos, op_data, &lgd, ll_nfs_get_name_filldir);
#endif
- mutex_unlock(&dir->i_mutex);
+ inode_unlock(dir);
ll_finish_md_op_data(op_data);
if (!rc && !lgd.lgd_found)
rc = -ENOENT;
* be asked to write less pages once, this purely depends on
* implementation. Anyway, we should be careful to avoid deadlocking.
*/
- mutex_lock(&inode->i_mutex);
+ inode_lock(inode);
bytes = ll_direct_rw_pages(env, io, rw, inode, pvec);
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
cl_io_fini(env, io);
return (bytes == pvec->ldp_size) ? 0 : (int)bytes;
}
* 1. Need inode mutex to operate transient pages.
*/
if (iov_iter_rw(iter) == READ)
- mutex_lock(&inode->i_mutex);
+ inode_lock(inode);
while (iov_iter_count(iter)) {
struct page **pages;
}
out:
if (iov_iter_rw(iter) == READ)
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
if (tot_bytes > 0) {
struct vvp_io *vio = vvp_env_io(env);
struct inode *inode = vvp_object_inode(io->ci_obj);
struct ll_inode_info *lli = ll_i2info(inode);
- mutex_lock(&inode->i_mutex);
+ inode_lock(inode);
if (cl_io_is_trunc(io)) {
down_write(&lli->lli_trunc_sem);
inode_dio_wait(inode);
inode_dio_write_done(inode);
up_write(&lli->lli_trunc_sem);
}
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
}
static void vvp_io_setattr_fini(const struct lu_env *env,
struct inode *inode = vvp_object_inode(slice->cpl_obj);
int locked;
- locked = !mutex_trylock(&inode->i_mutex);
+ locked = !inode_trylock(inode);
if (!locked)
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
return locked ? -EBUSY : -ENODATA;
}