const struct cl_object_conf *conf);
void cl_object_prune (const struct lu_env *env, struct cl_object *obj);
void cl_object_kill (const struct lu_env *env, struct cl_object *obj);
+int cl_object_has_locks (struct cl_object *obj);
/**
* Returns true, iff \a o0 and \a o1 are slices of the same object.
void ll_io_init(struct cl_io *io, const struct file *file, int write)
{
- struct inode *inode = file->f_dentry->d_inode;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct inode *inode = file->f_dentry->d_inode;
- LASSERT(fd != NULL);
memset(io, 0, sizeof *io);
io->u.ci_rw.crw_nonblock = file->f_flags & O_NONBLOCK;
if (write)
io->u.ci_wr.wr_append = file->f_flags & O_APPEND;
io->ci_obj = ll_i2info(inode)->lli_clob;
io->ci_lockreq = CILR_MAYBE;
- if (fd->fd_flags & LL_FILE_IGNORE_LOCK ||
- sbi->ll_flags & LL_SBI_NOLCK) {
+ if (ll_file_nolock(file)) {
io->ci_lockreq = CILR_NEVER;
io->ci_no_srvlock = 1;
} else if (file->f_flags & O_APPEND) {
int rc;
ENTRY;
+ if (ll_file_nolock(file))
+ RETURN(-EOPNOTSUPP);
+
spin_lock(&lli->lli_lock);
if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
CERROR("group lock already existed with gid %lu\n",
RETURN(-EINVAL);
}
- fd->fd_flags |= (LL_FILE_GROUP_LOCKED | LL_FILE_IGNORE_LOCK);
+ fd->fd_flags |= LL_FILE_GROUP_LOCKED;
fd->fd_grouplock = grouplock;
spin_unlock(&lli->lli_lock);
fd->fd_grouplock.cg_env = NULL;
fd->fd_grouplock.cg_lock = NULL;
fd->fd_grouplock.cg_gid = 0;
- fd->fd_flags &= ~(LL_FILE_GROUP_LOCKED | LL_FILE_IGNORE_LOCK);
+ fd->fd_flags &= ~LL_FILE_GROUP_LOCKED;
spin_unlock(&lli->lli_lock);
cl_put_grouplock(&grouplock);
int rw, struct inode *inode,
struct ll_dio_pages *pv);
+/*
+ * Return non-zero iff all locking should be skipped for \a file: either the
+ * file descriptor was opened with LL_FILE_IGNORE_LOCK set in its fd_flags,
+ * or the whole mount carries the LL_SBI_NOLCK superblock flag.
+ *
+ * NOTE(review): callers use this both to pick CILR_NEVER lock requirements
+ * and to reject operations (group locks, mmap) that require DLM locking.
+ */
+static inline int ll_file_nolock(const struct file *file)
+{
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct inode *inode = file->f_dentry->d_inode;
+
+ LASSERT(fd != NULL);
+ return ((fd->fd_flags & LL_FILE_IGNORE_LOCK) ||
+ (ll_i2sbi(inode)->ll_flags & LL_SBI_NOLCK));
+}
#endif /* LLITE_INTERNAL_H */
ENTRY;
+ if (ll_file_nolock(file))
+ RETURN(ERR_PTR(-EOPNOTSUPP));
+
/*
* vm_operations_struct::nopage() can be called when lustre IO is
* already active for the current thread, e.g., when doing read/write
struct vvp_io *vio = vvp_env_io(env);
struct ccc_io *cio = ccc_env_io(env);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
LASSERT(cio->cui_cl.cis_io == io);
- /* mmap lock should be MANDATORY or NEVER. */
- if (fd->fd_flags & LL_FILE_IGNORE_LOCK ||
- sbi->ll_flags & LL_SBI_NOLCK) {
- io->ci_lockreq = CILR_NEVER;
- io->ci_no_srvlock = 1;
- } else {
- io->ci_lockreq = CILR_MANDATORY;
- }
-
+ /* mmap lock must be MANDATORY. */
+ io->ci_lockreq = CILR_MANDATORY;
vio->u.fault.ft_vma = vma;
vio->u.fault.ft_address = address;
vio->u.fault.ft_type = type;
.populate = ll_populate,
};
-int ll_file_mmap(struct file * file, struct vm_area_struct * vma)
+int ll_file_mmap(struct file *file, struct vm_area_struct * vma)
{
+ struct inode *inode = file->f_dentry->d_inode;
int rc;
ENTRY;
- ll_stats_ops_tally(ll_i2sbi(file->f_dentry->d_inode), LPROC_LL_MAP, 1);
+ if (ll_file_nolock(file))
+ RETURN(-EOPNOTSUPP);
+
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1);
rc = generic_file_mmap(file, vma);
if (rc == 0) {
#if !defined(HAVE_FILEMAP_POPULATE)
vma->vm_ops = &ll_file_vm_ops;
vma->vm_ops->open(vma);
/* update the inode's size and mtime */
- rc = cl_glimpse_size(file->f_dentry->d_inode);
+ rc = cl_glimpse_size(inode);
}
RETURN(rc);
struct vm_area_struct *vma;
struct cl_lock_descr *descr = &cti->cti_descr;
ldlm_policy_data_t policy;
- struct inode *inode;
unsigned long addr;
unsigned long seg;
ssize_t count;
count += addr & (~CFS_PAGE_MASK);
addr &= CFS_PAGE_MASK;
while((vma = our_vma(addr, count)) != NULL) {
- struct file *file = vma->vm_file;
- struct ll_file_data *fd;
+ struct inode *inode = vma->vm_file->f_dentry->d_inode;
+ int flags = CEF_MUST;
- LASSERT(file);
- fd = LUSTRE_FPRIVATE(file);
-
- inode = file->f_dentry->d_inode;
- if (!(fd->fd_flags & LL_FILE_IGNORE_LOCK ||
- ll_i2sbi(inode)->ll_flags & LL_SBI_NOLCK))
- goto cont;
+ if (ll_file_nolock(vma->vm_file)) {
+ /*
+ * For no lock case, a lockless lock will be
+ * generated.
+ */
+ flags = CEF_NEVER;
+ }
/*
* XXX: Required lock mode can be weakened: CIT_WRITE
policy.l_extent.start);
descr->cld_end = cl_index(descr->cld_obj,
policy.l_extent.end);
- result = cl_io_lock_alloc_add(env, io, descr, CEF_MUST);
+ result = cl_io_lock_alloc_add(env, io, descr, flags);
if (result < 0)
RETURN(result);
- cont:
if (vma->vm_end - addr >= count)
break;
+
count -= vma->vm_end - addr;
addr = vma->vm_end;
}
struct ll_readahead_state *ras = &fd->fd_ras;
cfs_page_t *vmpage = cp->cpg_page;
struct cl_2queue *queue = &io->ci_queue;
+ int rc;
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
LASSERT(cl2vvp_io(env, ios)->cui_oneshot == 0);
cp->cpg_defer_uptodate);
/* Sanity check whether the page is protected by a lock. */
- if (likely(!(fd->fd_flags & LL_FILE_IGNORE_LOCK))) {
- int rc;
-
- rc = cl_page_is_under_lock(env, io, page);
- if (rc != -EBUSY) {
- CL_PAGE_HEADER(D_WARNING, env, page, "%s: %i\n",
- rc == -ENODATA ? "without a lock" :
- "match failed", rc);
- if (rc != -ENODATA)
- RETURN(rc);
- }
+ rc = cl_page_is_under_lock(env, io, page);
+ if (rc != -EBUSY) {
+ CL_PAGE_HEADER(D_WARNING, env, page, "%s: %i\n",
+ rc == -ENODATA ? "without a lock" :
+ "match failed", rc);
+ if (rc != -ENODATA)
+ RETURN(rc);
}
if (cp->cpg_defer_uptodate) {
}
EXPORT_SYMBOL(cl_object_prune);
+/**
+ * Check if the object has locks.
+ *
+ * Samples the object's ->coh_locks list under ->coh_lock_guard and returns
+ * 1 iff at least one lock was attached at the moment of the check, 0
+ * otherwise.  Because the spinlock is dropped before returning, the answer
+ * can be stale by the time the caller acts on it; the caller must provide
+ * its own serialization if it needs a stable result.
+ */
+int cl_object_has_locks(struct cl_object *obj)
+{
+ struct cl_object_header *head = cl_object_header(obj);
+ int has;
+
+ spin_lock(&head->coh_lock_guard);
+ has = list_empty(&head->coh_locks); /* 1 when NO locks are attached */
+ spin_unlock(&head->coh_lock_guard);
+
+ return (has == 0); /* invert: non-empty list => "has locks" */
+}
+EXPORT_SYMBOL(cl_object_has_locks);
+
void cache_stats_init(struct cache_stats *cs, const char *name)
{
cs->cs_name = name;