struct address_space *mapping = inode->i_mapping;
struct page *vmpage;
struct niobuf_remote *rnb;
+ struct mdt_body *body;
char *data;
unsigned long index, start;
struct niobuf_local lnb;
if (rnb->rnb_offset % PAGE_SIZE)
RETURN_EXIT;
- /* Server returns whole file or just file tail if it fills in
- * reply buffer, in both cases total size should be inode size.
+ /* Server returns whole file or just file tail if it fills in reply
+ * buffer, in both cases total size should be equal to the file size.
*/
- if (rnb->rnb_offset + rnb->rnb_len < i_size_read(inode)) {
- CERROR("%s: server returns off/len %llu/%u < i_size %llu\n",
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ if (rnb->rnb_offset + rnb->rnb_len != body->mbo_dom_size) {
+ CERROR("%s: server returns off/len %llu/%u but size %llu\n",
ll_i2sbi(inode)->ll_fsname, rnb->rnb_offset,
- rnb->rnb_len, i_size_read(inode));
+ rnb->rnb_len, body->mbo_dom_size);
RETURN_EXIT;
}
- CDEBUG(D_INFO, "Get data along with open at %llu len %i, i_size %llu\n",
- rnb->rnb_offset, rnb->rnb_len, i_size_read(inode));
+ CDEBUG(D_INFO, "Get data along with open at %llu len %i, size %llu\n",
+ rnb->rnb_offset, rnb->rnb_len, body->mbo_dom_size);
data = (char *)rnb + sizeof(*rnb);
return false;
}
-static void ll_io_init(struct cl_io *io, struct file *file, enum cl_io_type iot)
+void ll_io_init(struct cl_io *io, struct file *file, enum cl_io_type iot)
{
struct inode *inode = file_inode(file);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
io->ci_lockreq = CILR_MANDATORY;
}
io->ci_noatime = file_is_noatime(file);
+ io->ci_async_readahead = false;
/* FLR: only use non-delay I/O for read as there is only one
 * available mirror for write. */
__u16 refcheck;
bool cached;
+ if (!iov_iter_count(to))
+ return 0;
+
/**
* Currently when PCC read failed, we do not fall back to the
* normal read path, just return the error.
ENTRY;
+ if (!iov_iter_count(from))
+ GOTO(out, rc_normal = 0);
+
/**
* When PCC write failed, we usually do not fall back to the normal
* write path, just return the error. But there is a special case when
if (result)
RETURN(result);
+ if (!iov_count)
+ RETURN(0);
+
# ifdef HAVE_IOV_ITER_INIT_DIRECTION
iov_iter_init(&to, READ, iov, nr_segs, iov_count);
# else /* !HAVE_IOV_ITER_INIT_DIRECTION */
struct iovec iov = { .iov_base = buf, .iov_len = count };
struct kiocb kiocb;
ssize_t result;
+
ENTRY;
+ if (!count)
+ RETURN(0);
+
init_sync_kiocb(&kiocb, file);
kiocb.ki_pos = *ppos;
#ifdef HAVE_KIOCB_KI_LEFT
if (result)
RETURN(result);
+ if (!iov_count)
+ RETURN(0);
+
# ifdef HAVE_IOV_ITER_INIT_DIRECTION
iov_iter_init(&from, WRITE, iov, nr_segs, iov_count);
# else /* !HAVE_IOV_ITER_INIT_DIRECTION */
ENTRY;
+ if (!count)
+ RETURN(0);
+
init_sync_kiocb(&kiocb, file);
kiocb.ki_pos = *ppos;
#ifdef HAVE_KIOCB_KI_LEFT
if (!(exp_connect_flags2(ll_i2sbi(parent)->ll_md_exp) &
OBD_CONNECT2_DIR_MIGRATE)) {
if (le32_to_cpu(lum->lum_stripe_count) > 1 ||
- ll_i2info(child_inode)->lli_lsm_md) {
+ ll_dir_striped(child_inode)) {
CERROR("%s: MDT doesn't support stripe directory "
"migration!\n", ll_i2sbi(parent)->ll_fsname);
GOTO(out_iput, rc = -EOPNOTSUPP);
static int
ll_file_noflock(struct file *file, int cmd, struct file_lock *file_lock)
{
- ENTRY;
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ ENTRY;
- RETURN(-ENOSYS);
+ /*
+ * In order to avoid flood of warning messages, only print one message
+ * for one file. And the entire message rate on the client is limited
+ * by CDEBUG_LIMIT too.
+ */
+ if (!(fd->fd_flags & LL_FILE_FLOCK_WARNING)) {
+ fd->fd_flags |= LL_FILE_FLOCK_WARNING;
+ CDEBUG_LIMIT(D_TTY | D_CONSOLE,
+ "flock disabled, mount with '-o [local]flock' to enable\r\n");
+ }
+ RETURN(-ENOSYS);
}
/**
/* If it is striped directory, and there is bad stripe
* Let's revalidate the dentry again, instead of returning
* error */
- if (S_ISDIR(inode->i_mode) &&
- ll_i2info(inode)->lli_lsm_md != NULL)
+ if (ll_dir_striped(inode))
return 0;
/* This path cannot be hit for regular files unless in
LASSERT(lli->lli_lsm_md != NULL);
- /* foreign dir is not striped dir */
- if (lli->lli_lsm_md->lsm_md_magic == LMV_MAGIC_FOREIGN)
+ if (!lmv_dir_striped(lli->lli_lsm_md))
RETURN(0);
down_read(&lli->lli_lsm_sem);
}
} else {
/* If object isn't regular a file then don't validate size. */
- if (S_ISDIR(inode->i_mode) &&
- lli->lli_lsm_md != NULL) {
+ if (ll_dir_striped(inode)) {
rc = ll_merge_md_attr(inode);
if (rc < 0)
RETURN(rc);
/* mostly layout lock is caching on the local side, so try to
* match it before grabbing layout lock mutex. */
mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0,
- LCK_CR | LCK_CW | LCK_PR | LCK_PW);
+ LCK_CR | LCK_CW | LCK_PR |
+ LCK_PW | LCK_EX);
if (mode != 0) { /* hit cached lock */
rc = ll_layout_lock_set(&lockh, mode, inode);
if (rc == -EAGAIN)