ast_flags |= CEF_NONBLOCK;
if (io->ci_lock_no_expand)
ast_flags |= CEF_LOCK_NO_EXPAND;
+ if (vio->vui_fd) {
+ /* A group lock is held, so lockless I/O can no longer be used */
+ if (vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)
+ io->ci_ignore_lockless = 1;
+
+ if (ll_file_nolock(vio->vui_fd->fd_file) ||
+ (vio->vui_fd->fd_flags & LL_FILE_LOCKLESS_IO &&
+ !io->ci_ignore_lockless))
+ ast_flags |= CEF_NEVER;
+ }
result = vvp_mmap_locks(env, vio, io);
if (result == 0)
struct ll_inode_info *lli = ll_i2info(inode);
if (cl_io_is_trunc(io)) {
- down_write(&lli->lli_trunc_sem);
+ trunc_sem_down_write(&lli->lli_trunc_sem);
inode_lock(inode);
inode_dio_wait(inode);
} else {
vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
inode_dio_write_done(inode);
inode_unlock(inode);
- up_write(&lli->lli_trunc_sem);
+ trunc_sem_up_write(&lli->lli_trunc_sem);
} else {
inode_unlock(inode);
}
static int vvp_io_read_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct cl_io *io = ios->cis_io;
- struct cl_object *obj = io->ci_obj;
- struct inode *inode = vvp_object_inode(obj);
- struct ll_inode_info *lli = ll_i2info(inode);
- struct file *file = vio->vui_fd->fd_file;
- loff_t pos = io->u.ci_rd.rd.crw_pos;
- long cnt = io->u.ci_rd.rd.crw_count;
- long tot = vio->vui_tot_count;
- int exceed = 0;
- int result;
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+ struct cl_io *io = ios->cis_io;
+ struct cl_object *obj = io->ci_obj;
+ struct inode *inode = vvp_object_inode(obj);
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct file *file = vio->vui_fd->fd_file;
+ loff_t pos = io->u.ci_rd.rd.crw_pos;
+ size_t cnt = io->u.ci_rd.rd.crw_count;
+ size_t tot = vio->vui_tot_count;
+ int exceed = 0;
+ int result;
ENTRY;
CLOBINVRNT(env, obj, vvp_object_invariant(obj));
pos, pos + cnt);
if (vio->vui_io_subtype == IO_NORMAL)
- down_read(&lli->lli_trunc_sem);
+ trunc_sem_down_read(&lli->lli_trunc_sem);
if (io->ci_async_readahead) {
file_accessed(file);
/* initialize read-ahead window once per syscall */
if (!vio->vui_ra_valid) {
vio->vui_ra_valid = true;
- vio->vui_ra_start = cl_index(obj, pos);
- vio->vui_ra_count = cl_index(obj, tot + PAGE_SIZE - 1);
+ vio->vui_ra_start_idx = cl_index(obj, pos);
+ vio->vui_ra_pages = cl_index(obj, tot + PAGE_SIZE - 1);
/* If both start and end are unaligned, we read one more page
* than the index math suggests. */
- if (pos % PAGE_SIZE != 0 && (pos + tot) % PAGE_SIZE != 0)
- vio->vui_ra_count++;
+ if ((pos & ~PAGE_MASK) != 0 && ((pos + tot) & ~PAGE_MASK) != 0)
+ vio->vui_ra_pages++;
- CDEBUG(D_READA, "tot %ld, ra_start %lu, ra_count %lu\n", tot,
- vio->vui_ra_start, vio->vui_ra_count);
+ CDEBUG(D_READA, "tot %zu, ra_start %lu, ra_count %lu\n",
+ tot, vio->vui_ra_start_idx, vio->vui_ra_pages);
}
/* BUG: 5972 */
/*
* Kernels 4.2 - 4.5 pass memcg argument to account_page_dirtied()
+ * Since kernel v5.2-5678-gac1c3e4, account_page_dirtied() is no longer
+ * exported to modules
*/
static inline void ll_account_page_dirtied(struct page *page,
struct address_space *mapping)
account_page_dirtied(page, mapping, memcg);
mem_cgroup_end_page_stat(memcg);
+#elif defined HAVE_ACCOUNT_PAGE_DIRTIED
+ account_page_dirtied(page, mapping, memcg);
#else
- account_page_dirtied(page, mapping);
+ typedef unsigned int (dirtied_t)(struct page *pg,
+ struct address_space *as);
+ const char *symbol = "account_page_dirtied";
+ static dirtied_t *dirtied = NULL;
+
+ if (!dirtied)
+ dirtied = (dirtied_t *)symbol_get(symbol);
+
+ if (dirtied)
+ dirtied(page, mapping);
#endif
}
ENTRY;
if (vio->vui_io_subtype == IO_NORMAL)
- down_read(&lli->lli_trunc_sem);
+ trunc_sem_down_read(&lli->lli_trunc_sem);
if (!can_populate_pages(env, io, inode))
RETURN(0);
struct ll_inode_info *lli = ll_i2info(inode);
if (vio->vui_io_subtype == IO_NORMAL)
- up_read(&lli->lli_trunc_sem);
+ trunc_sem_up_read(&lli->lli_trunc_sem);
}
static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
pgoff_t last_index;
ENTRY;
- down_read(&lli->lli_trunc_sem);
+ trunc_sem_down_read_nowait(&lli->lli_trunc_sem);
/* offset of the last byte on the page */
offset = cl_offset(obj, fio->ft_index + 1) - 1;
CLOBINVRNT(env, ios->cis_io->ci_obj,
vvp_object_invariant(ios->cis_io->ci_obj));
- up_read(&lli->lli_trunc_sem);
+ trunc_sem_up_read(&lli->lli_trunc_sem);
}
static int vvp_io_fsync_start(const struct lu_env *env,
struct vvp_io *vio = cl2vvp_io(env, ios);
if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- ra->cra_end = CL_PAGE_EOF;
+ ra->cra_end_idx = CL_PAGE_EOF;
result = +1; /* no need to call down */
}
}