#endif
if ((iot == CIT_WRITE) &&
!(cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- cfs_down(&lli->lli_write_sem);
+ if(cfs_down_interruptible(&lli->lli_write_sem))
+ GOTO(out, result = -ERESTARTSYS);
write_sem_locked = 1;
+ } else if (iot == CIT_READ) {
+ cfs_down_read(&lli->lli_trunc_sem);
}
break;
case IO_SENDFILE:
result = cl_io_loop(env, io);
if (write_sem_locked)
cfs_up(&lli->lli_write_sem);
+ else if (args->via_io_subtype == IO_NORMAL && iot == CIT_READ)
+ cfs_up_read(&lli->lli_trunc_sem);
} else {
/* cl_io_rw_init() handled IO */
result = io->ci_result;
result = io->ci_nob;
*ppos = io->u.ci_wr.wr.crw_pos;
}
+ GOTO(out, result);
+out:
cl_io_fini(env, io);
- RETURN(result);
+ return result;
}
RETURN(retval);
}
+/*
+ * ll_flush(): implementation of the VFS ->flush() file operation.
+ *
+ * Surfaces asynchronous writeback errors that were recorded against this
+ * inode (and, if striped, against its LOV stripes) so that the caller of
+ * close()/flush sees -EIO instead of losing the error silently.
+ *
+ * HAVE_FLUSH_OWNER_ID selects between the two kernel prototypes of
+ * ->flush (with/without the fl_owner_t argument); the owner id is not
+ * used by this implementation.
+ *
+ * Returns 0 if no async error was pending, -EIO otherwise.
+ */
+#ifdef HAVE_FLUSH_OWNER_ID
+int ll_flush(struct file *file, fl_owner_t id)
+#else
+int ll_flush(struct file *file)
+#endif
+{
+        struct inode *inode = file->f_dentry->d_inode;
+        struct ll_inode_info *lli = ll_i2info(inode);
+        struct lov_stripe_md *lsm = lli->lli_smd;
+        int rc, err;
+
+        /* catch async errors that were recorded back when async writeback
+         * failed for pages in this mapping. */
+        /* NOTE(review): read-then-clear of lli_async_rc is not atomic here;
+         * presumably concurrent flushers are tolerable (worst case one of
+         * them misses the error) — confirm locking expectations at callers. */
+        rc = lli->lli_async_rc;
+        lli->lli_async_rc = 0;
+        if (lsm) {
+                /* merge per-stripe async error, keeping the first error seen;
+                 * helper name suggests it also clears the recorded rc —
+                 * TODO confirm against lov_test_and_clear_async_rc(). */
+                err = lov_test_and_clear_async_rc(lsm);
+                if (rc == 0)
+                        rc = err;
+        }
+
+        /* any recorded error is reported uniformly as -EIO */
+        return rc ? -EIO : 0;
+}
+
int ll_fsync(struct file *file, struct dentry *dentry, int data)
{
struct inode *inode = dentry->d_inode;
RETURN(-ENOSYS);
}
-int ll_have_md_lock(struct inode *inode, __u64 bits)
+int ll_have_md_lock(struct inode *inode, __u64 bits, ldlm_mode_t l_req_mode)
{
struct lustre_handle lockh;
ldlm_policy_data_t policy = { .l_inodebits = {bits}};
+ ldlm_mode_t mode = (l_req_mode == LCK_MINMODE) ?
+ (LCK_CR|LCK_CW|LCK_PR|LCK_PW) : l_req_mode;
struct lu_fid *fid;
int flags;
ENTRY;
RETURN(0);
fid = &ll_i2info(inode)->lli_fid;
- CDEBUG(D_INFO, "trying to match res "DFID"\n", PFID(fid));
+ CDEBUG(D_INFO, "trying to match res "DFID" mode %s\n", PFID(fid),
+ ldlm_lockname[mode]);
flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING | LDLM_FL_TEST_LOCK;
if (md_lock_match(ll_i2mdexp(inode), flags, fid, LDLM_IBITS, &policy,
- LCK_CR|LCK_CW|LCK_PR|LCK_PW, &lockh)) {
+ mode, &lockh)) {
RETURN(1);
}
RETURN(0);
}
ll_lookup_finish_locks(&oit, dentry);
- } else if (!ll_have_md_lock(dentry->d_inode, ibits)) {
+ } else if (!ll_have_md_lock(dentry->d_inode, ibits, LCK_MINMODE)) {
struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
obd_valid valid = OBD_MD_FLGETATTR;
struct md_op_data *op_data;
return -EROFS;
if ((mask & MAY_WRITE) && IS_IMMUTABLE(inode))
return -EACCES;
- if (current->fsuid == inode->i_uid) {
+ if (cfs_curproc_fsuid() == inode->i_uid) {
mode >>= 6;
} else if (1) {
if (((mode >> 3) & mask & S_IRWXO) != mask)
.splice_read = ll_file_splice_read,
#endif
.fsync = ll_fsync,
+ .flush = ll_flush
};
struct file_operations ll_file_operations_flock = {
.splice_read = ll_file_splice_read,
#endif
.fsync = ll_fsync,
+ .flush = ll_flush,
#ifdef HAVE_F_OP_FLOCK
.flock = ll_file_flock,
#endif
.splice_read = ll_file_splice_read,
#endif
.fsync = ll_fsync,
+ .flush = ll_flush,
#ifdef HAVE_F_OP_FLOCK
.flock = ll_file_noflock,
#endif