#
# This code is issued under the GNU General Public License.
# See the file COPYING in this distribution
-LYX2PDF = lyx --export pdf
+LYX2PDF = tex2pdf -overwrite
LYX2PS = lyx --export ps
LYX2TEX = lyx --export latex
LYX2TXT = lyx --export text
SUFFIXES = .lin .lyx .pdf .ps .sgml .html .txt .tex .fig .eps .dvi
DOCS = lustre.pdf lustre-HOWTO.txt
-IMAGES = sys_open.eps sys_write.eps sys_mount.eps cache1.eps cache2.eps intermezzocache.eps intermezzofilesys.eps meta.eps metadata1.eps networklayer.eps bigpicture.eps intermezzo.eps mds.eps portals.eps client.eps layering.eps metadata.eps sb.eps cow.eps lockacq.eps obdfs.eps snapsetup.eps dirbodyapi.eps loraid.eps ost.eps updates.eps hotmigrate.eps lustreclusters.eps osthw.eps portals-lib.eps lockqueues.eps lockexample1.eps lockexample2.eps lockexample3.eps lockexample4.eps lockseverity.eps gssapi.eps lovextents.eps cpsd.eps setup-accept.eps authenticate.eps login.eps
+IMAGES = sys_stat.eps sys_odirect.eps sys_open.eps sys_write.eps sys_mount.eps cache1.eps cache2.eps intermezzocache.eps intermezzofilesys.eps meta.eps metadata1.eps networklayer.eps bigpicture.eps intermezzo.eps mds.eps portals.eps client.eps layering.eps metadata.eps sb.eps cow.eps lockacq.eps obdfs.eps snapsetup.eps dirbodyapi.eps loraid.eps ost.eps updates.eps hotmigrate.eps lustreclusters.eps osthw.eps portals-lib.eps lockqueues.eps lockexample1.eps lockexample2.eps lockexample3.eps lockexample4.eps lockseverity.eps gssapi.eps lovextents.eps cpsd.eps setup-accept.eps authenticate.eps login.eps
LYXFILES= lustre.lin evolution.lyx llocks.lyx mgmt.lyx uncertain.lyx\
fs.lyx lustre-HOWTO.lin namespace.lyx\
glossary.lyx lustre-debugging.lyx network.lyx\
};
struct ptlrpc_bulk_page {
- struct ptlrpc_bulk_desc *b_desc;
- struct list_head b_link;
- void *b_buf;
- int b_buflen;
- struct page *b_page;
- __u32 b_xid;
- __u32 b_flags;
- struct dentry *b_dentry;
- int (*b_cb)(struct ptlrpc_bulk_page *);
+ struct ptlrpc_bulk_desc *bp_desc;
+ struct list_head bp_link;
+ void *bp_buf;
+ int bp_buflen;
+ struct page *bp_page;
+ __u32 bp_xid;
+ __u32 bp_flags;
+ struct dentry *bp_dentry;
+ int (*bp_cb)(struct ptlrpc_bulk_page *);
};
struct ptlrpc_bulk_desc {
- int b_flags;
- struct ptlrpc_connection *b_connection;
- struct ptlrpc_client *b_client;
- __u32 b_portal;
- struct lustre_handle b_conn;
- void (*b_cb)(struct ptlrpc_bulk_desc *, void *);
- void *b_cb_data;
-
- wait_queue_head_t b_waitq;
- struct list_head b_page_list;
- __u32 b_page_count;
- atomic_t b_refcount;
- void *b_desc_private;
- struct tq_struct b_queue;
-
- ptl_md_t b_md;
- ptl_handle_md_t b_md_h;
- ptl_handle_me_t b_me_h;
-
- struct iovec b_iov[16]; /* self-sized pre-allocated iov */
+ int bd_flags;
+ struct ptlrpc_connection *bd_connection;
+ struct ptlrpc_client *bd_client;
+ __u32 bd_portal;
+ struct lustre_handle bd_conn;
+ void (*bd_cb)(struct ptlrpc_bulk_desc *, void *);
+ void *bd_cb_data;
+
+ wait_queue_head_t bd_waitq;
+ struct list_head bd_page_list;
+ __u32 bd_page_count;
+ atomic_t bd_refcount;
+ void *bd_desc_private;
+ struct tq_struct bd_queue;
+
+ ptl_md_t bd_md;
+ ptl_handle_md_t bd_md_h;
+ ptl_handle_me_t bd_me_h;
+
+ struct iovec bd_iov[16]; /* self-sized pre-allocated iov */
};
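With this rename the field prefixes identify the owning structure at a glance: bp_ marks a ptlrpc_bulk_page field, bd_ a ptlrpc_bulk_desc field. A minimal sketch of how the new names compose, following the prep calls used later in this patch (error handling and cleanup elided; connection, addr, and req stand in for the caller's state):
/* Sketch only: a one-page bulk descriptor under the new naming.
 * Mirrors the mds/osc hunks below; not a drop-in function. */
struct ptlrpc_bulk_desc *desc = ptlrpc_prep_bulk(connection);
struct ptlrpc_bulk_page *bulk = ptlrpc_prep_bulk_page(desc);
int rc;

desc->bd_portal = MDS_BULK_PORTAL;  /* where the peer listens */
bulk->bp_buf    = addr;             /* kmapped page address */
bulk->bp_buflen = PAGE_SIZE;
bulk->bp_xid    = req->rq_xid;      /* one xid shared by every page */

rc = ptlrpc_register_bulk(desc);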
struct ptlrpc_thread {
static inline void ptlrpc_bulk_decref(struct ptlrpc_bulk_desc *desc)
{
- if (atomic_dec_and_test(&desc->b_refcount)) {
+ if (atomic_dec_and_test(&desc->bd_refcount)) {
CDEBUG(D_PAGE, "Released last ref on %p, freeing\n", desc);
ptlrpc_free_bulk(desc);
} else {
CDEBUG(D_PAGE, "%p -> %d\n", desc,
- atomic_read(&desc->b_refcount));
+ atomic_read(&desc->bd_refcount));
}
}
static inline void ptlrpc_bulk_addref(struct ptlrpc_bulk_desc *desc)
{
- atomic_inc(&desc->b_refcount);
+ atomic_inc(&desc->bd_refcount);
CDEBUG(D_PAGE, "Set refcount of %p to %d\n", desc,
- atomic_read(&desc->b_refcount));
+ atomic_read(&desc->bd_refcount));
}
#endif
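bd_refcount governs the descriptor's lifetime: ptlrpc_prep_bulk() initializes it to 1, and the final ptlrpc_bulk_decref() frees the descriptor through ptlrpc_free_bulk(). A hedged sketch of the intended pairing:
/* Sketch: hold a reference across an asynchronous window. */
ptlrpc_bulk_addref(desc);   /* e.g. before queueing a completion */
/* ... event callback or bottom half runs ... */
ptlrpc_bulk_decref(desc);   /* may free desc; don't touch it after */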
struct ptlrpc_bulk_desc *desc = cbd->desc;
ENTRY;
- desc->b_connection->c_level = LUSTRE_CONN_RECOVD;
- desc->b_flags |= PTL_RPC_FL_TIMEOUT;
- if (desc->b_connection && class_signal_connection_failure) {
+ if (desc->bd_connection)
+ desc->bd_connection->c_level = LUSTRE_CONN_RECOVD;
+ desc->bd_flags |= PTL_RPC_FL_TIMEOUT;
+ if (desc->bd_connection && class_signal_connection_failure) {
+
/* XXXshaver Do we need a resend strategy, or do we just
* XXXshaver return -ERESTARTSYS and punt it?
*/
- CERROR("signalling failure of conn %p\n", desc->b_connection);
- class_signal_connection_failure(desc->b_connection);
+ CERROR("signalling failure of conn %p\n", desc->bd_connection);
+ class_signal_connection_failure(desc->bd_connection);
/* We go back to sleep, until we're resumed or interrupted. */
RETURN(0);
struct ptlrpc_bulk_desc *desc = cbd->desc;
ENTRY;
- desc->b_flags |= PTL_RPC_FL_INTR;
+ desc->bd_flags |= PTL_RPC_FL_INTR;
RETURN(1); /* ignored, as of this writing */
}
GOTO(out2, rc = -ENOMEM);
bulk = ptlrpc_prep_bulk_page(desc);
- bulk->b_buflen = PAGE_SIZE;
- bulk->b_buf = addr;
- bulk->b_xid = req->rq_xid;
- desc->b_portal = MDS_BULK_PORTAL;
+ bulk->bp_buflen = PAGE_SIZE;
+ bulk->bp_buf = addr;
+ bulk->bp_xid = req->rq_xid;
+ desc->bd_portal = MDS_BULK_PORTAL;
rc = ptlrpc_register_bulk(desc);
if (rc) {
struct ptlrpc_bulk_desc *desc = data;
ENTRY;
- CERROR("(not yet) starting recovery of client %p\n", desc->b_client);
+ CERROR("(not yet) starting recovery of client %p\n", desc->bd_client);
RETURN(1);
}
if (rc != PAGE_SIZE)
GOTO(cleanup_buf, rc = -EIO);
- bulk->b_xid = req->rq_xid;
- bulk->b_buf = buf;
- bulk->b_buflen = PAGE_SIZE;
- desc->b_portal = MDS_BULK_PORTAL;
+ bulk->bp_xid = req->rq_xid;
+ bulk->bp_buf = buf;
+ bulk->bp_buflen = PAGE_SIZE;
+ desc->bd_portal = MDS_BULK_PORTAL;
rc = ptlrpc_send_bulk(desc);
if (rc)
}
lwi = LWI_TIMEOUT(obd_timeout * HZ, mds_bulk_timeout, desc);
- rc = l_wait_event(desc->b_waitq, desc->b_flags & PTL_BULK_FL_SENT, &lwi);
+ rc = l_wait_event(desc->bd_waitq, desc->bd_flags & PTL_BULK_FL_SENT, &lwi);
if (rc) {
if (rc != -ETIMEDOUT)
LBUG();
ENTRY;
/* This feels wrong to me. */
- list_for_each(tmp, &desc->b_page_list) {
+ list_for_each(tmp, &desc->bd_page_list) {
struct ptlrpc_bulk_page *bulk;
- bulk = list_entry(tmp, struct ptlrpc_bulk_page, b_link);
+ bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);
- kunmap(bulk->b_page);
+ kunmap(bulk->bp_page);
}
ptlrpc_bulk_decref(desc);
int err = 0;
ENTRY;
- if (desc->b_flags & PTL_RPC_FL_TIMEOUT) {
- err = (desc->b_flags & PTL_RPC_FL_INTR ? -ERESTARTSYS :
+ if (desc->bd_flags & PTL_RPC_FL_TIMEOUT) {
+ err = (desc->bd_flags & PTL_RPC_FL_INTR ? -ERESTARTSYS :
-ETIMEDOUT);
}
/* We can't kunmap the desc from interrupt context, so we do it from
* the bottom half above. */
- INIT_TQUEUE(&desc->b_queue, 0, 0);
- PREPARE_TQUEUE(&desc->b_queue, unmap_and_decref_bulk_desc, desc);
- schedule_task(&desc->b_queue);
+ INIT_TQUEUE(&desc->bd_queue, 0, 0);
+ PREPARE_TQUEUE(&desc->bd_queue, unmap_and_decref_bulk_desc, desc);
+ schedule_task(&desc->bd_queue);
EXIT;
}
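kunmap() may sleep and so cannot be called from interrupt context; the timeout path above therefore only queues the teardown, and the task-queue handler later runs in process context. Reconstructed from the hunks above (the exact signature in the tree may differ), the handler unmaps each page and drops the descriptor reference:
/* Sketch of the deferred half, reconstructed from this patch. */
static void unmap_and_decref_bulk_desc(void *data)
{
        struct ptlrpc_bulk_desc *desc = data;
        struct list_head *tmp;

        list_for_each(tmp, &desc->bd_page_list) {
                struct ptlrpc_bulk_page *bulk =
                        list_entry(tmp, struct ptlrpc_bulk_page, bp_link);
                kunmap(bulk->bp_page);          /* safe here, may sleep */
        }
        ptlrpc_bulk_decref(desc);               /* frees on last ref */
}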
desc = ptlrpc_prep_bulk(connection);
if (!desc)
GOTO(out_req, rc = -ENOMEM);
- desc->b_portal = OST_BULK_PORTAL;
- desc->b_cb = brw_finish;
+ desc->bd_portal = OST_BULK_PORTAL;
+ desc->bd_cb = brw_finish;
OBD_ALLOC(cb_data, sizeof(*cb_data));
if (!cb_data)
GOTO(out_desc, rc = -ENOMEM);
cb_data->callback = callback;
cb_data->cb_data = data;
data->desc = desc;
- desc->b_cb_data = cb_data;
+ desc->bd_cb_data = cb_data;
iooptr = lustre_msg_buf(request->rq_reqmsg, 1);
nioptr = lustre_msg_buf(request->rq_reqmsg, 2);
if (bulk == NULL)
GOTO(out_unmap, rc = -ENOMEM);
- bulk->b_xid = xid; /* single xid for all pages */
+ bulk->bp_xid = xid; /* single xid for all pages */
- bulk->b_buf = kmap(pga[mapped].pg);
- bulk->b_page = pga[mapped].pg;
- bulk->b_buflen = PAGE_SIZE;
+ bulk->bp_buf = kmap(pga[mapped].pg);
+ bulk->bp_page = pga[mapped].pg;
+ bulk->bp_buflen = PAGE_SIZE;
ost_pack_niobuf(&nioptr, pga[mapped].off, pga[mapped].count,
- pga[mapped].flag, bulk->b_xid);
+ pga[mapped].flag, bulk->bp_xid);
}
/*
desc = ptlrpc_prep_bulk(connection);
if (!desc)
GOTO(out_req, rc = -ENOMEM);
- desc->b_portal = OSC_BULK_PORTAL;
- desc->b_cb = brw_finish;
+ desc->bd_portal = OSC_BULK_PORTAL;
+ desc->bd_cb = brw_finish;
OBD_ALLOC(cb_data, sizeof(*cb_data));
if (!cb_data)
GOTO(out_desc, rc = -ENOMEM);
cb_data->callback = callback;
cb_data->cb_data = data;
data->desc = desc;
- desc->b_cb_data = cb_data;
+ desc->bd_cb_data = cb_data;
iooptr = lustre_msg_buf(request->rq_reqmsg, 1);
nioptr = lustre_msg_buf(request->rq_reqmsg, 2);
if (!bulk)
GOTO(out_unmap, rc = -ENOMEM);
- bulk->b_buf = (void *)(unsigned long)local[j].addr;
- bulk->b_buflen = local[j].len;
- bulk->b_xid = remote->xid;
- bulk->b_page = pga[j].pg;
+ bulk->bp_buf = (void *)(unsigned long)local[j].addr;
+ bulk->bp_buflen = local[j].len;
+ bulk->bp_xid = remote->xid;
+ bulk->bp_page = pga[j].pg;
}
- if (desc->b_page_count != page_count)
+ if (desc->bd_page_count != page_count)
LBUG();
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_WRITE_BULK))
struct ptlrpc_bulk_desc *desc = data;
ENTRY;
- CERROR("(not yet) starting recovery of client %p\n", desc->b_client);
+ CERROR("(not yet) starting recovery of client %p\n", desc->bd_client);
RETURN(1);
}
desc = ptlrpc_prep_bulk(req->rq_connection);
if (desc == NULL)
GOTO(out_local, rc = -ENOMEM);
- desc->b_portal = OST_BULK_PORTAL;
+ desc->bd_portal = OST_BULK_PORTAL;
for (i = 0; i < niocount; i++) {
struct ptlrpc_bulk_page *bulk = ptlrpc_prep_bulk_page(desc);
if (bulk == NULL)
GOTO(out_bulk, rc = -ENOMEM);
- bulk->b_xid = remote_nb[i].xid;
- bulk->b_buf = local_nb[i].addr;
- bulk->b_buflen = remote_nb[i].len;
+ bulk->bp_xid = remote_nb[i].xid;
+ bulk->bp_buf = local_nb[i].addr;
+ bulk->bp_buflen = remote_nb[i].len;
}
rc = ptlrpc_send_bulk(desc);
GOTO(out_bulk, rc);
lwi = LWI_TIMEOUT(obd_timeout * HZ, ost_bulk_timeout, desc);
- rc = l_wait_event(desc->b_waitq, desc->b_flags &PTL_BULK_FL_SENT, &lwi);
+ rc = l_wait_event(desc->bd_waitq, desc->bd_flags & PTL_BULK_FL_SENT, &lwi);
if (rc) {
LASSERT(rc == -ETIMEDOUT);
GOTO(out_bulk, rc);
desc = ptlrpc_prep_bulk(req->rq_connection);
if (desc == NULL)
GOTO(fail_preprw, rc = -ENOMEM);
- desc->b_cb = NULL;
- desc->b_portal = OSC_BULK_PORTAL;
- desc->b_desc_private = desc_priv;
- memcpy(&(desc->b_conn), &conn, sizeof(conn));
+ desc->bd_cb = NULL;
+ desc->bd_portal = OSC_BULK_PORTAL;
+ desc->bd_desc_private = desc_priv;
+ memcpy(&(desc->bd_conn), &conn, sizeof(conn));
srv = req->rq_obd->u.ost.ost_service;
spin_lock(&srv->srv_lock);
if (bulk == NULL)
GOTO(fail_bulk, rc = -ENOMEM);
- bulk->b_xid = xid; /* single xid for all pages */
+ bulk->bp_xid = xid; /* single xid for all pages */
- bulk->b_buf = lnb->addr;
- bulk->b_page = lnb->page;
- bulk->b_flags = lnb->flags;
- bulk->b_dentry = lnb->dentry;
- bulk->b_buflen = lnb->len;
- bulk->b_cb = NULL;
+ bulk->bp_buf = lnb->addr;
+ bulk->bp_page = lnb->page;
+ bulk->bp_flags = lnb->flags;
+ bulk->bp_dentry = lnb->dentry;
+ bulk->bp_buflen = lnb->len;
+ bulk->bp_cb = NULL;
/* this advances remote_nb */
ost_pack_niobuf((void **)&remote_nb, lnb->offset, lnb->len, 0,
- bulk->b_xid);
+ bulk->bp_xid);
}
rc = ptlrpc_register_bulk(desc);
ptlrpc_reply(req->rq_svc, req);
lwi = LWI_TIMEOUT(obd_timeout * HZ, ost_bulk_timeout, desc);
- rc = l_wait_event(desc->b_waitq, desc->b_flags & PTL_BULK_FL_RCVD, &lwi);
+ rc = l_wait_event(desc->bd_waitq, desc->bd_flags & PTL_BULK_FL_RCVD, &lwi);
if (rc) {
if (rc != -ETIMEDOUT)
LBUG();
}
rc = obd_commitrw(cmd, conn, objcount, tmp1, niocount, local_nb,
- desc->b_desc_private);
+ desc->bd_desc_private);
ptlrpc_free_bulk(desc);
EXIT;
out_free:
OBD_ALLOC(desc, sizeof(*desc));
if (desc != NULL) {
- desc->b_connection = ptlrpc_connection_addref(conn);
- atomic_set(&desc->b_refcount, 1);
- init_waitqueue_head(&desc->b_waitq);
- INIT_LIST_HEAD(&desc->b_page_list);
- ptl_set_inv_handle(&desc->b_md_h);
- ptl_set_inv_handle(&desc->b_me_h);
+ desc->bd_connection = ptlrpc_connection_addref(conn);
+ atomic_set(&desc->bd_refcount, 1);
+ init_waitqueue_head(&desc->bd_waitq);
+ INIT_LIST_HEAD(&desc->bd_page_list);
+ ptl_set_inv_handle(&desc->bd_md_h);
+ ptl_set_inv_handle(&desc->bd_me_h);
}
return desc;
OBD_ALLOC(bulk, sizeof(*bulk));
if (bulk != NULL) {
- bulk->b_desc = desc;
- list_add_tail(&bulk->b_link, &desc->b_page_list);
- desc->b_page_count++;
+ bulk->bp_desc = desc;
+ list_add_tail(&bulk->bp_link, &desc->bd_page_list);
+ desc->bd_page_count++;
}
return bulk;
}
return;
}
- list_for_each_safe(tmp, next, &desc->b_page_list) {
+ list_for_each_safe(tmp, next, &desc->bd_page_list) {
struct ptlrpc_bulk_page *bulk;
- bulk = list_entry(tmp, struct ptlrpc_bulk_page, b_link);
+ bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);
ptlrpc_free_bulk_page(bulk);
}
- ptlrpc_put_connection(desc->b_connection);
+ ptlrpc_put_connection(desc->bd_connection);
OBD_FREE(desc, sizeof(*desc));
EXIT;
return;
}
- list_del(&bulk->b_link);
- bulk->b_desc->b_page_count--;
+ list_del(&bulk->bp_link);
+ bulk->bp_desc->bd_page_count--;
OBD_FREE(bulk, sizeof(*bulk));
EXIT;
}
ENTRY;
/* 1 fragment for each page always */
- LASSERT (ev->mem_desc.niov == desc->b_page_count);
+ LASSERT (ev->mem_desc.niov == desc->bd_page_count);
if (ev->type == PTL_EVENT_SENT) {
CDEBUG(D_NET, "got SENT event\n");
} else if (ev->type == PTL_EVENT_ACK) {
CDEBUG(D_NET, "got ACK event\n");
- list_for_each_safe(tmp, next, &desc->b_page_list) {
- bulk = list_entry(tmp, struct ptlrpc_bulk_page, b_link);
+ list_for_each_safe(tmp, next, &desc->bd_page_list) {
+ bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);
- if (bulk->b_cb != NULL)
- bulk->b_cb(bulk);
+ if (bulk->bp_cb != NULL)
+ bulk->bp_cb(bulk);
}
- desc->b_flags |= PTL_BULK_FL_SENT;
- wake_up(&desc->b_waitq);
- if (desc->b_cb != NULL)
- desc->b_cb(desc, desc->b_cb_data);
+ desc->bd_flags |= PTL_BULK_FL_SENT;
+ wake_up(&desc->bd_waitq);
+ if (desc->bd_cb != NULL)
+ desc->bd_cb(desc, desc->bd_cb_data);
} else {
CERROR("Unexpected event type!\n");
LBUG();
/* used iovs */
LASSERT ((ev->mem_desc.options & PTL_MD_IOV) != 0);
/* 1 fragment for each page always */
- LASSERT (ev->mem_desc.niov == desc->b_page_count);
+ LASSERT (ev->mem_desc.niov == desc->bd_page_count);
- list_for_each_safe (tmp, next, &desc->b_page_list) {
- bulk = list_entry(tmp, struct ptlrpc_bulk_page, b_link);
+ list_for_each_safe (tmp, next, &desc->bd_page_list) {
+ bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);
- total += bulk->b_buflen;
+ total += bulk->bp_buflen;
- if (bulk->b_cb != NULL)
- bulk->b_cb(bulk);
+ if (bulk->bp_cb != NULL)
+ bulk->bp_cb(bulk);
}
LASSERT (ev->mem_desc.length == total);
- desc->b_flags |= PTL_BULK_FL_RCVD;
- wake_up(&desc->b_waitq);
- if (desc->b_cb != NULL)
- desc->b_cb(desc, desc->b_cb_data);
+ desc->bd_flags |= PTL_BULK_FL_RCVD;
+ wake_up(&desc->bd_waitq);
+ if (desc->bd_cb != NULL)
+ desc->bd_cb(desc, desc->bd_cb_data);
} else {
CERROR("Unexpected event type!\n");
LBUG();
{
struct iovec *iov;
- if (desc->b_page_count <= sizeof (desc->b_iov)/sizeof (struct iovec))
- return (desc->b_iov);
+ if (desc->bd_page_count <= sizeof (desc->bd_iov)/sizeof (struct iovec))
+ return (desc->bd_iov);
- OBD_ALLOC (iov, desc->b_page_count * sizeof (struct iovec));
+ OBD_ALLOC (iov, desc->bd_page_count * sizeof (struct iovec));
if (iov == NULL)
LBUG();
static inline void
ptlrpc_put_bulk_iov (struct ptlrpc_bulk_desc *desc, struct iovec *iov)
{
- if (desc->b_page_count <= sizeof (desc->b_iov)/sizeof (struct iovec))
+ if (desc->bd_page_count <= sizeof (desc->bd_iov)/sizeof (struct iovec))
return;
- OBD_FREE (iov, desc->b_page_count * sizeof (struct iovec));
+ OBD_FREE (iov, desc->bd_page_count * sizeof (struct iovec));
}
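These two helpers implement a small-vector optimization: a descriptor of at most 16 pages (the size of the embedded bd_iov array) costs no allocation, and anything larger falls back to OBD_ALLOC/OBD_FREE. Assuming the getter is named ptlrpc_get_bulk_iov (its name is cut off in the hunk above), callers pair them like this:
/* Sketch: get/put must pair; put is a no-op for the embedded array. */
struct iovec *iov = ptlrpc_get_bulk_iov(desc);   /* name assumed */
if (iov == NULL)
        return -ENOMEM;
/* ... point iov[] entries at each page's bp_buf/bp_buflen ... */
ptlrpc_put_bulk_iov(desc, iov);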
int ptlrpc_send_bulk(struct ptlrpc_bulk_desc *desc)
if (iov == NULL)
RETURN (-ENOMEM);
- desc->b_md.start = iov;
- desc->b_md.niov = 0;
- desc->b_md.length = 0;
- desc->b_md.eventq = bulk_source_eq;
- desc->b_md.threshold = 2; /* SENT and ACK */
- desc->b_md.options = PTL_MD_OP_PUT | PTL_MD_IOV;
- desc->b_md.user_ptr = desc;
+ desc->bd_md.start = iov;
+ desc->bd_md.niov = 0;
+ desc->bd_md.length = 0;
+ desc->bd_md.eventq = bulk_source_eq;
+ desc->bd_md.threshold = 2; /* SENT and ACK */
+ desc->bd_md.options = PTL_MD_OP_PUT | PTL_MD_IOV;
+ desc->bd_md.user_ptr = desc;
- list_for_each_safe(tmp, next, &desc->b_page_list) {
+ list_for_each_safe(tmp, next, &desc->bd_page_list) {
struct ptlrpc_bulk_page *bulk;
- bulk = list_entry(tmp, struct ptlrpc_bulk_page, b_link);
+ bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);
- LASSERT (desc->b_md.niov < desc->b_page_count);
+ LASSERT (desc->bd_md.niov < desc->bd_page_count);
- if (desc->b_md.niov == 0)
- xid = bulk->b_xid;
- LASSERT (xid == bulk->b_xid); /* should all be the same */
+ if (desc->bd_md.niov == 0)
+ xid = bulk->bp_xid;
+ LASSERT (xid == bulk->bp_xid); /* should all be the same */
- iov[desc->b_md.niov].iov_base = bulk->b_buf;
- iov[desc->b_md.niov].iov_len = bulk->b_buflen;
- desc->b_md.niov++;
- desc->b_md.length += bulk->b_buflen;
+ iov[desc->bd_md.niov].iov_base = bulk->bp_buf;
+ iov[desc->bd_md.niov].iov_len = bulk->bp_buflen;
+ desc->bd_md.niov++;
+ desc->bd_md.length += bulk->bp_buflen;
}
- LASSERT (desc->b_md.niov == desc->b_page_count);
- LASSERT (desc->b_md.niov != 0);
+ LASSERT (desc->bd_md.niov == desc->bd_page_count);
+ LASSERT (desc->bd_md.niov != 0);
- rc = PtlMDBind(desc->b_connection->c_peer.peer_ni, desc->b_md,
- &desc->b_md_h);
+ rc = PtlMDBind(desc->bd_connection->c_peer.peer_ni, desc->bd_md,
+ &desc->bd_md_h);
ptlrpc_put_bulk_iov (desc, iov); /* move down to reduce latency to send */
RETURN(rc);
}
- remote_id.nid = desc->b_connection->c_peer.peer_nid;
+ remote_id.nid = desc->bd_connection->c_peer.peer_nid;
remote_id.pid = 0;
CDEBUG(D_NET, "Sending %u pages %u bytes to portal %d nid %Lx pid %d xid %d\n",
- desc->b_md.niov, desc->b_md.length,
- desc->b_portal, remote_id.nid, remote_id.pid, xid);
+ desc->bd_md.niov, desc->bd_md.length,
+ desc->bd_portal, remote_id.nid, remote_id.pid, xid);
- rc = PtlPut(desc->b_md_h, PTL_ACK_REQ, remote_id,
- desc->b_portal, 0, xid, 0, 0);
+ rc = PtlPut(desc->bd_md_h, PTL_ACK_REQ, remote_id,
+ desc->bd_portal, 0, xid, 0, 0);
if (rc != PTL_OK) {
CERROR("PtlPut(%Lu, %d, %d) failed: %d\n",
- remote_id.nid, desc->b_portal, xid, rc);
- PtlMDUnlink(desc->b_md_h);
+ remote_id.nid, desc->bd_portal, xid, rc);
+ PtlMDUnlink(desc->bd_md_h);
LBUG();
RETURN(rc);
}
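The source-side send is a two-call Portals sequence: bind an MD over the assembled iovec, with threshold 2 so the MD stays armed for both the SENT and the ACK events, then PtlPut to the peer's bulk portal using the shared xid as match bits. Condensed from the code above:
/* Sketch: the send sequence, condensed from ptlrpc_send_bulk(). */
desc->bd_md.threshold = 2;       /* stay armed for SENT and ACK */
rc = PtlMDBind(desc->bd_connection->c_peer.peer_ni, desc->bd_md,
               &desc->bd_md_h);
rc = PtlPut(desc->bd_md_h, PTL_ACK_REQ, remote_id,
            desc->bd_portal, 0, xid, 0, 0);   /* xid = match bits */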
if (iov == NULL)
return (-ENOMEM);
- desc->b_md.start = iov;
- desc->b_md.niov = 0;
- desc->b_md.length = 0;
- desc->b_md.threshold = 1;
- desc->b_md.options = PTL_MD_OP_PUT | PTL_MD_IOV;
- desc->b_md.user_ptr = desc;
- desc->b_md.eventq = bulk_sink_eq;
+ desc->bd_md.start = iov;
+ desc->bd_md.niov = 0;
+ desc->bd_md.length = 0;
+ desc->bd_md.threshold = 1;
+ desc->bd_md.options = PTL_MD_OP_PUT | PTL_MD_IOV;
+ desc->bd_md.user_ptr = desc;
+ desc->bd_md.eventq = bulk_sink_eq;
- list_for_each_safe(tmp, next, &desc->b_page_list) {
+ list_for_each_safe(tmp, next, &desc->bd_page_list) {
struct ptlrpc_bulk_page *bulk;
- bulk = list_entry(tmp, struct ptlrpc_bulk_page, b_link);
+ bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);
- LASSERT (desc->b_md.niov < desc->b_page_count);
+ LASSERT (desc->bd_md.niov < desc->bd_page_count);
- if (desc->b_md.niov == 0)
- xid = bulk->b_xid;
- LASSERT (xid == bulk->b_xid); /* should all be the same */
+ if (desc->bd_md.niov == 0)
+ xid = bulk->bp_xid;
+ LASSERT (xid == bulk->bp_xid); /* should all be the same */
- iov[desc->b_md.niov].iov_base = bulk->b_buf;
- iov[desc->b_md.niov].iov_len = bulk->b_buflen;
- desc->b_md.niov++;
- desc->b_md.length += bulk->b_buflen;
+ iov[desc->bd_md.niov].iov_base = bulk->bp_buf;
+ iov[desc->bd_md.niov].iov_len = bulk->bp_buflen;
+ desc->bd_md.niov++;
+ desc->bd_md.length += bulk->bp_buflen;
}
- LASSERT (desc->b_md.niov == desc->b_page_count);
- LASSERT (desc->b_md.niov != 0);
+ LASSERT (desc->bd_md.niov == desc->bd_page_count);
+ LASSERT (desc->bd_md.niov != 0);
- rc = PtlMEAttach(desc->b_connection->c_peer.peer_ni,
- desc->b_portal, local_id, xid, 0,
- PTL_UNLINK, PTL_INS_AFTER, &desc->b_me_h);
+ rc = PtlMEAttach(desc->bd_connection->c_peer.peer_ni,
+ desc->bd_portal, local_id, xid, 0,
+ PTL_UNLINK, PTL_INS_AFTER, &desc->bd_me_h);
ptlrpc_put_bulk_iov (desc, iov);
GOTO(cleanup, rc);
}
- rc = PtlMDAttach(desc->b_me_h, desc->b_md, PTL_UNLINK,
- &desc->b_md_h);
+ rc = PtlMDAttach(desc->bd_me_h, desc->bd_md, PTL_UNLINK,
+ &desc->bd_md_h);
if (rc != PTL_OK) {
CERROR("PtlMDAttach failed: %d\n", rc);
LBUG();
}
CDEBUG(D_NET, "Setup bulk sink buffers: %u pages %u bytes, xid %u, "
- "portal %u\n", desc->b_md.niov, desc->b_md.length,
- xid, desc->b_portal);
+ "portal %u\n", desc->bd_md.niov, desc->bd_md.length,
+ xid, desc->bd_portal);
RETURN(0);
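Registration on the sink mirrors the send: attach a match entry on bd_portal keyed by the same xid, then hang an MD off it with threshold 1, so the peer's single PtlPut lands directly in the preposted pages. Condensed from the code above:
/* Sketch: sink-side registration, condensed from this hunk. */
desc->bd_md.threshold = 1;       /* expect exactly one PUT */
rc = PtlMEAttach(desc->bd_connection->c_peer.peer_ni,
                 desc->bd_portal, local_id, xid, 0,
                 PTL_UNLINK, PTL_INS_AFTER, &desc->bd_me_h);
rc = PtlMDAttach(desc->bd_me_h, desc->bd_md, PTL_UNLINK,
                 &desc->bd_md_h);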
{
/* This should be safe: these handles are initialized to be
* invalid in ptlrpc_prep_bulk() */
- PtlMDUnlink(desc->b_md_h);
- PtlMEUnlink(desc->b_me_h);
+ PtlMDUnlink(desc->bd_md_h);
+ PtlMEUnlink(desc->bd_me_h);
return 0;
}