static void slp_page_completion_common(const struct lu_env *env,
struct ccc_page *cp, int ioret)
{
- struct cl_sync_io *anchor = cp->cpg_sync_io;
-
- if (anchor) {
- cp->cpg_sync_io = NULL;
- cl_sync_io_note(anchor, ioret);
- } else {
- LBUG();
- }
+ /* NOTE(review): per-page completion bookkeeping moves from the private
+  * cpg_sync_io field to the generic cl_page::cp_sync_io; presumably
+  * cl_sync_io_note() is now invoked by common CLIO completion code, so
+  * only the invariant is asserted here — confirm against the matching
+  * cl_page completion path in this patch series. */
+ LASSERT(cp->cpg_cl.cpl_page->cp_sync_io != NULL);
}
static void slp_page_completion_read(const struct lu_env *env,
static int slp_lock_enqueue(const struct lu_env *env,
const struct cl_lock_slice *slice,
- struct cl_io *_, __u32 enqflags)
+ struct cl_io *unused, __u32 enqflags)
{
CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
}
static const struct cl_lock_operations slp_lock_ops = {
+ .clo_delete = ccc_lock_delete,
.clo_fini = ccc_lock_fini,
.clo_enqueue = slp_lock_enqueue,
.clo_wait = ccc_lock_wait,
*/
static int slp_io_rw_lock(const struct lu_env *env,
- const struct cl_io_slice *ios)
+ const struct cl_io_slice *ios)
{
- struct cl_io *io = ios->cis_io;
+ struct ccc_io *cio = ccc_env_io(env);
+ struct cl_io *io = ios->cis_io;
loff_t start;
loff_t end;
start = io->u.ci_wr.wr.crw_pos;
end = start + io->u.ci_wr.wr.crw_count - 1;
}
+
+ ccc_io_update_iov(env, cio, io);
+
/*
* This acquires real DLM lock only in O_APPEND case, because of
* the io->ci_lockreq setting in llu_io_init().
struct intnl_stat *st = llu_i2stat(inode);
struct obd_export *exp = llu_i2obdexp(inode);
struct page *page;
- int rc = 0, npages = 0, ret_bytes = 0;
+ int rc = 0, ret_bytes = 0;
int local_lock;
struct cl_page *clp;
- struct ccc_page *clup;
struct cl_2queue *queue;
- struct cl_sync_io *anchor = &ccc_env_info(env)->cti_sync_io;
ENTRY;
if (!exp)
break;
}
- clup = cl2ccc_page(cl_page_at(clp, &slp_device_type));
- clup->cpg_sync_io = anchor;
cl_2queue_add(queue, clp);
/* drop the reference count for cl_page_find, so that the page
cl_page_clip(env, clp, offset, offset+bytes);
- npages++;
count -= bytes;
pos += bytes;
buf += bytes;
page++;
} while (count);
- cl_sync_io_init(anchor, npages);
- /* printk("Inited anchor with %d pages\n", npages); */
-
if (rc == 0) {
- rc = cl_io_submit_rw(env, io,
- io->ci_type == CIT_READ ? CRT_READ :
- CRT_WRITE,
- queue);
- if (rc == 0) {
- /* If some pages weren't sent for any reason, count
- * then as completed, to avoid infinite wait. */
- cl_page_list_for_each(clp, &queue->c2_qin) {
- CL_PAGE_DEBUG(D_ERROR, env, clp,
- "not completed\n");
- cl_sync_io_note(anchor, +1);
- }
- /* wait for the IO to be finished. */
- rc = cl_sync_io_wait(env, io, &queue->c2_qout, anchor);
- }
+ enum cl_req_type iot;
+ iot = io->ci_type == CIT_READ ? CRT_READ : CRT_WRITE;
+ rc = cl_io_submit_sync(env, io, iot, queue, CRP_NORMAL, 0);
}
group->lig_rc = rc;
struct llu_inode_info *lli = llu_i2info(inode);
struct llu_io_session *session = cl2slp_io(env, ios)->sio_session;
int write = io->ci_type == CIT_WRITE;
+ int exceed = 0;
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
if (IS_ERR(iogroup))
RETURN(PTR_ERR(iogroup));
- err = ccc_prep_size(env, obj, io, pos + cnt - 1, 0);
- if (err != 0)
+ err = ccc_prep_size(env, obj, io, pos, cnt, 0, &exceed);
+ if (err != 0 || (write == 0 && exceed != 0))
GOTO(out, err);
CDEBUG(D_INODE,
}
LASSERT(cnt == 0 || io->ci_type == CIT_READ); /* libsysio should guarantee this */
- session->lis_groups[session->lis_ngroups++] = iogroup;
+ if (!iogroup->lig_rc)
+ session->lis_rwcount += iogroup->lig_rwcount;
+ else if (!session->lis_rc)
+ session->lis_rc = iogroup->lig_rc;
+ err = 0;
- return 0;
out:
put_io_group(iogroup);
return err;
.cio_fini = ccc_io_fini,
.cio_lock = slp_io_rw_lock,
.cio_start = slp_io_start,
- .cio_end = ccc_io_end
+ .cio_end = ccc_io_end,
+ .cio_advance = ccc_io_advance
},
[CIT_WRITE] = {
.cio_fini = ccc_io_fini,
.cio_lock = slp_io_rw_lock,
.cio_start = slp_io_start,
- .cio_end = ccc_io_end
+ .cio_end = ccc_io_end,
+ .cio_advance = ccc_io_advance
},
[CIT_TRUNC] = {
.cio_fini = ccc_io_fini,