*
* \param[in] env execution environment
* \param[in] dt object to be locked
- * \param[in] role lock role from MDD layer, see mdd_object_role().
+ * \param[in] role lock role from MDD layer, see dt_object_role().
*/
static void osp_md_read_lock(const struct lu_env *env, struct dt_object *dt,
unsigned role)
*
* \param[in] env execution environment
* \param[in] dt object to be locked
- * \param[in] role lock role from MDD layer, see mdd_object_role().
+ * \param[in] role lock role from MDD layer, see dt_object_role().
*/
static void osp_md_write_lock(const struct lu_env *env, struct dt_object *dt,
unsigned role)
OUT_UPDATE_REPLY_SIZE);
if (reply->ourp_magic != UPDATE_REPLY_MAGIC) {
DEBUG_REQ(D_ERROR, req,
- "%s: Wrong version %x expected %x "DFID": rc = %d\n",
+ "%s: Wrong version %x expected %x "DFID": rc = %d",
dname, reply->ourp_magic, UPDATE_REPLY_MAGIC,
PFID(lu_object_fid(&dt->do_lu)), -EPROTO);
return 0;
}
+/**
+ * Interpreter (completion callback) for an OSP write update request.
+ *
+ * Called when the write RPC completes. On error, drop the locally cached
+ * attributes (la_valid = 0) and mark the object stale under opo_lock, so
+ * the next access refetches state from the remote target instead of
+ * trusting attributes that no longer match what was (not) written.
+ *
+ * \param[in] env	execution environment
+ * \param[in] reply	update reply (unused here)
+ * \param[in] req	the ptlrpc request (unused here)
+ * \param[in] obj	OSP object the write was targeting
+ * \param[in] data	opaque callback data (unused here)
+ * \param[in] index	index of this update in the request (unused here)
+ * \param[in] rc	result of the write update; non-zero on failure
+ *
+ * \retval 0		always; the error is absorbed after invalidation
+ */
+static int osp_write_interpreter(const struct lu_env *env,
+				 struct object_update_reply *reply,
+				 struct ptlrpc_request *req,
+				 struct osp_object *obj,
+				 void *data, int index, int rc)
+{
+	if (rc) {
+		CDEBUG(D_HA, "error "DFID": rc = %d\n",
+		       PFID(lu_object_fid(&obj->opo_obj.do_lu)), rc);
+		spin_lock(&obj->opo_lock);
+		obj->opo_attr.la_valid = 0;
+		obj->opo_stale = 1;
+		spin_unlock(&obj->opo_lock);
+	}
+	return 0;
+}
+
+
/**
* Implementation of dt_body_operations::dbo_write
*
if (rc < 0)
RETURN(rc);
+ /* to be able to invalidate object's state in case of an error */
+ rc = osp_insert_update_callback(env, update, obj, NULL,
+ osp_write_interpreter);
+ if (rc < 0)
+ RETURN(rc);
+
/* XXX: how about the write error happened later? */
*pos += buf->lb_len;
struct out_read_reply *orr;
struct ptlrpc_bulk_desc *desc;
struct object_update_reply *reply;
- __u32 left_size;
- int nbufs;
- int i;
+ int pages;
int rc;
ENTRY;
if (rc != 0)
GOTO(out_update, rc);
- nbufs = (rbuf->lb_len + OUT_BULK_BUFFER_SIZE - 1) /
- OUT_BULK_BUFFER_SIZE;
+ /* First *and* last might be partial pages, hence +1 */
+ pages = DIV_ROUND_UP(rbuf->lb_len, PAGE_SIZE) + 1;
+
/* allocate bulk descriptor */
- desc = ptlrpc_prep_bulk_imp(req, nbufs, 1,
- PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KVEC,
- MDS_BULK_PORTAL, &ptlrpc_bulk_kvec_ops);
+ desc = ptlrpc_prep_bulk_imp(req, pages, 1,
+ PTLRPC_BULK_PUT_SINK,
+ MDS_BULK_PORTAL,
+ &ptlrpc_bulk_kiov_nopin_ops);
if (desc == NULL)
GOTO(out, rc = -ENOMEM);
- /* split the buffer into small chunk size */
- left_size = rbuf->lb_len;
- for (i = 0; i < nbufs; i++) {
- int read_size;
-
- read_size = left_size > OUT_BULK_BUFFER_SIZE ?
- OUT_BULK_BUFFER_SIZE : left_size;
- desc->bd_frag_ops->add_iov_frag(desc, ptr, read_size);
-
- ptr += read_size;
- }
+ desc->bd_frag_ops->add_iov_frag(desc, ptr, rbuf->lb_len);
osp_set_req_replay(osp, req);
req->rq_bulk_read = 1;