const struct cl_page_slice *slice,
struct cl_io *io)
{
- struct ccc_io *vio = ccc_env_io(env);
+ struct ccc_io *cio = ccc_env_io(env);
struct cl_lock_descr *desc = &ccc_env_info(env)->cti_descr;
struct cl_page *page = slice->cpl_page;
if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
io->ci_type == CIT_FAULT) {
- if (vio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)
+ if (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)
result = -EBUSY;
else {
desc->cld_start = page->cp_index;
*
*/
+/* Delete callback of the ccc lock slice: only checks the invariant of
+ * the lock's backing cl_object.  Nothing is freed here — the slice
+ * itself is released later in ccc_lock_fini(). */
+void ccc_lock_delete(const struct lu_env *env,
+ const struct cl_lock_slice *slice)
+{
+ CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
+}
+
+/* Finalizer of the ccc lock slice: frees the slab-allocated ccc_lock
+ * back to ccc_lock_kmem.  The object-invariant check that used to live
+ * here is removed by this patch (it is performed at delete time
+ * instead). */
void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice)
{
struct ccc_lock *clk = cl2ccc_lock(slice);
-
- CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
OBD_SLAB_FREE_PTR(clk, ccc_lock_kmem);
}
obj = slice->cls_obj;
inode = ccc_object_inode(obj);
- attr = &ccc_env_info(env)->cti_attr;
+ attr = ccc_env_thread_attr(env);
/* vmtruncate()->ll_truncate() first sets the i_size and then
* the kms under both a DLM lock and the
__u32 enqflags, enum cl_lock_mode mode,
pgoff_t start, pgoff_t end)
{
- struct ccc_io *vio = ccc_env_io(env);
- struct cl_lock_descr *descr = &vio->cui_link.cill_descr;
+ struct ccc_io *cio = ccc_env_io(env);
+ struct cl_lock_descr *descr = &cio->cui_link.cill_descr;
struct cl_object *obj = io->ci_obj;
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
CDEBUG(D_VFSTRACE, "lock: %i [%lu, %lu]\n", mode, start, end);
- memset(&vio->cui_link, 0, sizeof vio->cui_link);
+ memset(&cio->cui_link, 0, sizeof cio->cui_link);
descr->cld_mode = mode;
descr->cld_obj = obj;
descr->cld_start = start;
descr->cld_end = end;
- vio->cui_link.cill_enq_flags = enqflags;
- cl_io_lock_add(env, io, &vio->cui_link);
+ cio->cui_link.cill_enq_flags = enqflags;
+ cl_io_lock_add(env, io, &cio->cui_link);
RETURN(0);
}
+/* Trim the iovec in @cio so that it spans exactly the crw_count bytes
+ * of the current read/write.  If the last segment needed is only
+ * partially used, its original length is saved in cui_iov_olen so that
+ * ccc_io_advance() can restore the unconsumed tail for the next round.
+ * No-op for sendfile ios and when the request already covers the whole
+ * buffer (size == cui_tot_count). */
+void ccc_io_update_iov(const struct lu_env *env,
+ struct ccc_io *cio, struct cl_io *io)
+{
+ int i;
+ size_t size = io->u.ci_rw.crw_count;
+
+ cio->cui_iov_olen = 0;
+ if (cl_io_is_sendfile(io) || size == cio->cui_tot_count)
+ return;
+
+ /* cui_tot_nrsegs == 0 means it has not been seeded yet; take the
+ * current segment count as the total. */
+ if (cio->cui_tot_nrsegs == 0)
+ cio->cui_tot_nrsegs = cio->cui_nrsegs;
+
+ /* walk segments until `size` bytes are accounted for */
+ for (i = 0; i < cio->cui_tot_nrsegs; i++) {
+ struct iovec *iv = &cio->cui_iov[i];
+
+ if (iv->iov_len < size)
+ size -= iv->iov_len;
+ else {
+ if (iv->iov_len > size) {
+ /* segment straddles the end of the request:
+ * remember its full length, then clip it */
+ cio->cui_iov_olen = iv->iov_len;
+ iv->iov_len = size;
+ }
+ break;
+ }
+ }
+
+ /* number of segments actually used by this io round */
+ cio->cui_nrsegs = i + 1;
+}
+
+/* Take one cl_lock covering the byte range [start, end] of @io's
+ * object: convert the byte offsets to page indices via cl_index() and
+ * delegate to ccc_io_one_lock_index(). */
int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
__u32 enqflags, enum cl_lock_mode mode,
loff_t start, loff_t end)
{
struct cl_object *obj = io->ci_obj;
-
return ccc_io_one_lock_index(env, io, enqflags, mode,
cl_index(obj, start), cl_index(obj, end));
}
ccc_object_invariant(ios->cis_io->ci_obj));
}
+/* Per-round io advance callback: after @nob bytes of a read/write have
+ * been processed, step cui_iov past the segments consumed in this round
+ * and shrink the outstanding totals so a continuing (ci_continue) io
+ * resumes at the right place.  Sendfile ios do not use the iovec and
+ * are skipped. */
+void ccc_io_advance(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ size_t nob)
+{
+ struct ccc_io *cio = cl2ccc_io(env, ios);
+ struct cl_io *io = ios->cis_io;
+ struct cl_object *obj = ios->cis_io->ci_obj;
+
+ CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+
+ if (!cl_io_is_sendfile(io) && io->ci_continue) {
+ /* update the iov */
+ LASSERT(cio->cui_tot_nrsegs >= cio->cui_nrsegs);
+ LASSERT(cio->cui_tot_count >= nob);
+
+ cio->cui_iov += cio->cui_nrsegs;
+ cio->cui_tot_nrsegs -= cio->cui_nrsegs;
+ cio->cui_tot_count -= nob;
+
+ if (cio->cui_iov_olen) {
+ /* The last segment was clipped by ccc_io_update_iov();
+ * hand its unconsumed tail back as the first segment
+ * of the remaining iov. */
+ struct iovec *iv;
+
+ cio->cui_iov--;
+ cio->cui_tot_nrsegs++;
+ iv = &cio->cui_iov[0];
+ iv->iov_base += iv->iov_len;
+ LASSERT(cio->cui_iov_olen > iv->iov_len);
+ iv->iov_len = cio->cui_iov_olen - iv->iov_len;
+ }
+ }
+}
+
static void ccc_object_size_lock(struct cl_object *obj, int vfslock)
{
struct inode *inode = ccc_object_inode(obj);
* the resulting races.
*/
int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io, loff_t pos, int vfslock)
+ struct cl_io *io, loff_t start, size_t count, int vfslock,
+ int *exceed)
{
- struct cl_attr *attr = &ccc_env_info(env)->cti_attr;
+ struct cl_attr *attr = ccc_env_thread_attr(env);
struct inode *inode = ccc_object_inode(obj);
+ loff_t pos = start + count - 1;
loff_t kms;
int result;
* of the buffer (C)
*/
ccc_object_size_unlock(obj, vfslock);
- return cl_glimpse_lock(env, io, inode, obj);
+ result = cl_glimpse_lock(env, io, inode, obj);
+ if (result == 0 && exceed != NULL) {
+ /* If objective page index exceed end-of-file
+ * page index, return directly. Do not expect
+ * kernel will check such case correctly.
+ * linux-2.6.18-128.1.1 miss to do that.
+ * --bug 17336 */
+ loff_t size = cl_isize_read(inode);
+ unsigned long cur_index = start >> CFS_PAGE_SHIFT;
+
+ if ((size == 0 && cur_index != 0) ||
+ (((size - 1) >> CFS_PAGE_SHIFT) < cur_index))
+ *exceed = 1;
+ }
+ return result;
} else {
/*
* region is within kms and, hence, within real file
int emergency;
if (clob != NULL) {
- struct lu_object_header *head = clob->co_lu.lo_header;
void *cookie;
cookie = cl_env_reenter();
*/
cl_object_kill(env, clob);
lu_object_ref_del(&clob->co_lu, "inode", inode);
- /* XXX temporary: this is racy */
- LASSERT(atomic_read(&head->loh_ref) == 1);
cl_object_put(env, clob);
lli->lli_clob = NULL;
if (emergency) {