* one must start on a page boundary, and all but the last must end on
* a page boundary.
*/
- void *start;
- unsigned int length;
+ void *umd_start;
+ unsigned int umd_length;
/**
* Specifies the maximum number of operations that can be performed
* on the memory descriptor. An operation is any action that could
* possibly generate an event. In the usual case, the threshold value
* is decremented for each operation on the MD. When the threshold
* drops to zero, the MD becomes inactive and does not respond to
* operations. A threshold of LNET_MD_THRESH_INF indicates that
* there is no bound on the number of operations that may be applied
* to a MD.
*/
- int threshold;
+ int umd_threshold;
/**
* Specifies the largest incoming request that the memory descriptor
* should respond to. When the unused portion of a MD (length -
* local offset) falls below this value, the MD becomes inactive and
* does not respond to further operations. This value is only used
* if the LNET_MD_MAX_SIZE option is set.
*/
- int max_size;
+ int umd_max_size;
+
/**
* Specifies the behavior of the memory descriptor. A bitwise OR
* of the LNET_MD_* option values can be used. When the
* LNET_MD_MAX_SIZE option is set, the total length of the memory
* region (i.e. sum of all fragment lengths) must not be less than
* \a max_size.
*/
- unsigned int options;
+ unsigned int umd_options;
/**
* A user-specified value that is associated with the memory
* descriptor. The value does not need to be a pointer, but must fit
* in the space used by a pointer. This value is recorded in events
* associated with operations on this MD.
*/
- void *user_ptr;
+ void *umd_user_ptr;
/**
* The event handler used to log the operations performed on
* the memory region. If this argument is NULL, operations
* performed on this memory descriptor are not logged.
*/
- lnet_handler_t handler;
+ lnet_handler_t umd_handler;
/**
* The bulk MD handle which was registered to describe the buffers
* either to be used to transfer data to the peer or receive data
* from the peer. This allows LNet to properly determine the NUMA
* node on which the memory was allocated and use that to select the
* nearest local network interface. This value is only used
* if the LNET_MD_BULK_HANDLE option is set.
*/
- struct lnet_handle_md bulk_handle;
+ struct lnet_handle_md umd_bulk_handle;
};
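With every member now carrying the umd_ prefix, call sites can use designated
initializers, and a stale field name fails at compile time instead of silently
matching a positional slot. A minimal sketch under the new names (buffer,
handler, and cookie identifiers are illustrative, not taken from the patch):

static void example_md_init(void *buf, unsigned int buf_len,
			    lnet_handler_t my_handler, void *my_cookie)
{
	struct lnet_md md = {
		.umd_start     = buf,                 /* contiguous buffer */
		.umd_length    = buf_len,             /* length in bytes */
		.umd_threshold = LNET_MD_THRESH_INF,  /* never auto-retires */
		.umd_max_size  = 0,                   /* LNET_MD_MAX_SIZE unset */
		.umd_options   = LNET_MD_OP_GET | LNET_MD_TRUNCATE,
		.umd_user_ptr  = my_cookie,           /* echoed back in events */
		.umd_handler   = my_handler,          /* NULL disables logging */
	};
	/* md is now ready to hand to LNetMDAttach() or LNetMDBind() */
	(void)md;
}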
/** \addtogroup lnet_fault_simulation
 * @{ */
enum {
- LNET_CTL_DROP_ADD,
- LNET_CTL_DROP_DEL,
- LNET_CTL_DROP_RESET,
- LNET_CTL_DROP_LIST,
- LNET_CTL_DELAY_ADD,
- LNET_CTL_DELAY_DEL,
- LNET_CTL_DELAY_RESET,
- LNET_CTL_DELAY_LIST,
+ LNET_CTL_DROP_ADD = 0,
+ LNET_CTL_DROP_DEL = 1,
+ LNET_CTL_DROP_RESET = 2,
+ LNET_CTL_DROP_LIST = 3,
+ LNET_CTL_DELAY_ADD = 4,
+ LNET_CTL_DELAY_DEL = 5,
+ LNET_CTL_DELAY_RESET = 6,
+ LNET_CTL_DELAY_LIST = 7,
};
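Pinning the opcodes to explicit values makes the userspace-facing ABI visible
in the source: reordering the enum can no longer silently renumber the
constants an ioctl caller passes in. A hypothetical dispatcher over these
opcodes (the helper functions are placeholders, not part of this patch):

static int fault_ctl_dispatch(int opc, struct lnet_fault_attr *attr)
{
	switch (opc) {
	case LNET_CTL_DROP_ADD:			/* == 0, fixed by the ABI */
		return drop_rule_add(attr);	/* hypothetical helper */
	case LNET_CTL_DROP_DEL:
		return drop_rule_del(attr);	/* hypothetical helper */
	case LNET_CTL_DELAY_ADD:
		return delay_rule_add(attr);	/* hypothetical helper */
	default:
		return -EINVAL;
	}
}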
-#define LNET_ACK_BIT (1 << 0)
-#define LNET_PUT_BIT (1 << 1)
-#define LNET_GET_BIT (1 << 2)
-#define LNET_REPLY_BIT (1 << 3)
+enum {
+ LNET_ACK_BIT = (1 << 0),
+ LNET_PUT_BIT = (1 << 1),
+ LNET_GET_BIT = (1 << 2),
+ LNET_REPLY_BIT = (1 << 3),
+};
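These bits select message types in fault-injection rules, so a single mask can
match several types at once. A small sketch of the intended usage (the rule
mask variable is assumed, not shown in this patch):

/* e.g. a rule that acts on PUT messages and their ACKs */
unsigned int rule_mask = LNET_PUT_BIT | LNET_ACK_BIT;

static bool msg_type_matches(unsigned int mask, unsigned int type_bit)
{
	return (mask & type_bit) != 0;	/* true when the type is selected */
}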
-#define HSTATUS_END 11
-#define HSTATUS_LOCAL_INTERRUPT_BIT (1 << 1)
-#define HSTATUS_LOCAL_DROPPED_BIT (1 << 2)
-#define HSTATUS_LOCAL_ABORTED_BIT (1 << 3)
-#define HSTATUS_LOCAL_NO_ROUTE_BIT (1 << 4)
-#define HSTATUS_LOCAL_ERROR_BIT (1 << 5)
-#define HSTATUS_LOCAL_TIMEOUT_BIT (1 << 6)
-#define HSTATUS_REMOTE_ERROR_BIT (1 << 7)
-#define HSTATUS_REMOTE_DROPPED_BIT (1 << 8)
-#define HSTATUS_REMOTE_TIMEOUT_BIT (1 << 9)
-#define HSTATUS_NETWORK_TIMEOUT_BIT (1 << 10)
-#define HSTATUS_RANDOM 0xffffffff
+enum {
+ HSTATUS_END = 11,
+ HSTATUS_LOCAL_INTERRUPT_BIT = (1 << 1),
+ HSTATUS_LOCAL_DROPPED_BIT = (1 << 2),
+ HSTATUS_LOCAL_ABORTED_BIT = (1 << 3),
+ HSTATUS_LOCAL_NO_ROUTE_BIT = (1 << 4),
+ HSTATUS_LOCAL_ERROR_BIT = (1 << 5),
+ HSTATUS_LOCAL_TIMEOUT_BIT = (1 << 6),
+ HSTATUS_REMOTE_ERROR_BIT = (1 << 7),
+ HSTATUS_REMOTE_DROPPED_BIT = (1 << 8),
+ HSTATUS_REMOTE_TIMEOUT_BIT = (1 << 9),
+ HSTATUS_NETWORK_TIMEOUT_BIT = (1 << 10),
+ HSTATUS_RANDOM = 0xffffffff,
+};
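Two encodings share this enum: the HSTATUS_*_BIT entries are single-bit masks
whose bit positions track the health-status codes (bit 0 is skipped because
status 0 means success), HSTATUS_END is the first unused bit position, and
HSTATUS_RANDOM is a sentinel requesting a randomly chosen status. A sketch of
the code-to-mask mapping under that reading:

/* Map a health-status code (1 .. HSTATUS_END - 1) to its mask bit;
 * returns 0 for "success" or out-of-range codes. Assumes status codes
 * and bit positions line up, as the enum layout above suggests. */
static unsigned int hstatus_to_bit(unsigned int hstatus)
{
	if (hstatus == 0 || hstatus >= HSTATUS_END)
		return 0;
	return 1U << hstatus;
}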
/** ioctl parameter for LNet fault simulation */
struct lnet_fault_attr {
}
/* initialize md content */
- md.start = &(*ppbuf)->pb_info;
- md.length = (*ppbuf)->pb_nbytes;
- md.threshold = LNET_MD_THRESH_INF;
- md.max_size = 0;
- md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
- LNET_MD_MANAGE_REMOTE;
- md.handler = the_lnet.ln_ping_target_handler;
- md.user_ptr = *ppbuf;
+ md.umd_start = &(*ppbuf)->pb_info;
+ md.umd_length = (*ppbuf)->pb_nbytes;
+ md.umd_threshold = LNET_MD_THRESH_INF;
+ md.umd_max_size = 0;
+ md.umd_options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
+ LNET_MD_MANAGE_REMOTE;
+ md.umd_handler = the_lnet.ln_ping_target_handler;
+ md.umd_user_ptr = *ppbuf;
rc = LNetMDAttach(me, &md, LNET_RETAIN, ping_mdh);
if (rc != 0) {
kref_get(&pbuf->pb_refcnt);
/* initialize md content */
- md.start = &pbuf->pb_info;
- md.length = pbuf->pb_nbytes;
- md.threshold = 1;
- md.max_size = 0;
- md.options = LNET_MD_OP_PUT | LNET_MD_TRUNCATE;
- md.user_ptr = pbuf;
- md.handler = the_lnet.ln_push_target_handler;
+ md.umd_start = &pbuf->pb_info;
+ md.umd_length = pbuf->pb_nbytes;
+ md.umd_threshold = 1;
+ md.umd_max_size = 0;
+ md.umd_options = LNET_MD_OP_PUT | LNET_MD_TRUNCATE;
+ md.umd_user_ptr = pbuf;
+ md.umd_handler = the_lnet.ln_push_target_handler;
rc = LNetMDAttach(me, &md, LNET_UNLINK, mdhp);
if (rc) {
return -ENOMEM;
/* initialize md content */
- md.start = &pbuf->pb_info;
- md.length = id_bytes;
- md.threshold = 2; /* GET/REPLY */
- md.max_size = 0;
- md.options = LNET_MD_TRUNCATE;
- md.user_ptr = &pd;
- md.handler = lnet_ping_event_handler;
+ md.umd_start = &pbuf->pb_info;
+ md.umd_length = id_bytes;
+ md.umd_threshold = 2; /* GET/REPLY */
+ md.umd_max_size = 0;
+ md.umd_options = LNET_MD_TRUNCATE;
+ md.umd_user_ptr = &pd;
+ md.umd_handler = lnet_ping_event_handler;
init_completion(&pd.completion);
if (lnet_md_validate(umd) != 0)
return ERR_PTR(-EINVAL);
- if (umd->options & LNET_MD_KIOV)
- niov = umd->length;
+ if (umd->umd_options & LNET_MD_KIOV)
+ niov = umd->umd_length;
else
- niov = DIV_ROUND_UP(offset_in_page(umd->start) + umd->length,
- PAGE_SIZE);
+ niov = DIV_ROUND_UP(
+ offset_in_page(umd->umd_start) + umd->umd_length,
+ PAGE_SIZE);
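+ /* e.g. a buffer starting 100 bytes into a page with length 8192
+ * spans DIV_ROUND_UP(100 + 8192, PAGE_SIZE = 4096) = 3 pages, so
+ * three kiov slots are reserved (illustrative numbers) */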
size = offsetof(struct lnet_libmd, md_kiov[niov]);
if (size <= LNET_SMALL_MD_SIZE) {
lmd->md_niov = niov;
INIT_LIST_HEAD(&lmd->md_list);
lmd->md_me = NULL;
- lmd->md_start = umd->start;
+ lmd->md_start = umd->umd_start;
lmd->md_offset = 0;
- lmd->md_max_size = umd->max_size;
- lmd->md_options = umd->options;
- lmd->md_user_ptr = umd->user_ptr;
+ lmd->md_max_size = umd->umd_max_size;
+ lmd->md_options = umd->umd_options;
+ lmd->md_user_ptr = umd->umd_user_ptr;
lmd->md_handler = NULL;
- lmd->md_threshold = umd->threshold;
+ lmd->md_threshold = umd->umd_threshold;
lmd->md_refcount = 0;
lmd->md_flags = (unlink == LNET_UNLINK) ? LNET_MD_FLAG_AUTO_UNLINK : 0;
- lmd->md_bulk_handle = umd->bulk_handle;
+ lmd->md_bulk_handle = umd->umd_bulk_handle;
- if (umd->options & LNET_MD_GPU_ADDR)
+ if (umd->umd_options & LNET_MD_GPU_ADDR)
lmd->md_flags |= LNET_MD_FLAG_GPU;
- if (umd->options & LNET_MD_KIOV) {
- memcpy(lmd->md_kiov, umd->start,
+ if (umd->umd_options & LNET_MD_KIOV) {
+ memcpy(lmd->md_kiov, umd->umd_start,
niov * sizeof(lmd->md_kiov[0]));
for (i = 0; i < (int)niov; i++) {
lmd->md_length = total_length;
- if ((umd->options & LNET_MD_MAX_SIZE) && /* max size used */
- (umd->max_size < 0 ||
- umd->max_size > total_length)) { /* illegal max_size */
+ if ((umd->umd_options & LNET_MD_MAX_SIZE) && /* max size used */
+ (umd->umd_max_size < 0 ||
+ umd->umd_max_size > total_length)) { /* illegal max_size */
lnet_md_free(lmd);
return ERR_PTR(-EINVAL);
}
} else { /* contiguous - split into pages */
- void *pa = umd->start;
- int len = umd->length;
+ void *pa = umd->umd_start;
+ int len = umd->umd_length;
lmd->md_length = len;
i = 0;
WARN(!(lmd->md_options & LNET_MD_GNILND) && i > LNET_MAX_IOV,
"Max IOV exceeded: %d should be < %d\n",
i, LNET_MAX_IOV);
- if ((umd->options & LNET_MD_MAX_SIZE) && /* max size used */
- (umd->max_size < 0 ||
- umd->max_size > (int)umd->length)) { /* illegal max_size */
+ if ((umd->umd_options & LNET_MD_MAX_SIZE) && /* max size used */
+ (umd->umd_max_size < 0 ||
+ umd->umd_max_size > (int)umd->umd_length)) {
lnet_md_free(lmd);
return ERR_PTR(-EINVAL);
}
static int
lnet_md_validate(const struct lnet_md *umd)
{
- if (umd->start == NULL && umd->length != 0) {
+ if (umd->umd_start == NULL && umd->umd_length != 0) {
CERROR("MD start pointer can not be NULL with length %u\n",
- umd->length);
+ umd->umd_length);
return -EINVAL;
}
- if ((umd->options & LNET_MD_KIOV) &&
- umd->length > LNET_MAX_IOV) {
+ if ((umd->umd_options & LNET_MD_KIOV) &&
+ umd->umd_length > LNET_MAX_IOV) {
CERROR("Invalid option: too many fragments %u, %d max\n",
- umd->length, LNET_MAX_IOV);
+ umd->umd_length, LNET_MAX_IOV);
return -EINVAL;
}
* structure and the MD maintained by the LNet.
* \param unlink A flag to indicate whether the MD is automatically unlinked
* when it becomes inactive, either because the operation threshold drops to
- * zero or because the available memory becomes less than \a umd.max_size.
+ * zero or because the available memory becomes less than \a umd.umd_max_size.
* (Note that the check for unlinking a MD only occurs after the completion
* of a successful operation on the MD.) The value LNET_UNLINK enables auto
* unlinking; the value LNET_RETAIN disables it.
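The patch's own call sites illustrate both policies: the ping target attaches
with LNET_RETAIN and an infinite threshold so its buffer keeps serving GETs,
while the push target attaches with LNET_UNLINK and umd_threshold = 1 so the
MD retires after a single PUT. Condensed, with the handles from the hunks
above:

/* retained: stays active until LNetMDUnlink() is called explicitly */
md.umd_threshold = LNET_MD_THRESH_INF;
rc = LNetMDAttach(me, &md, LNET_RETAIN, ping_mdh);

/* auto-unlink: one PUT drops the threshold to zero and the MD goes away */
md.umd_threshold = 1;
rc = LNetMDAttach(me, &md, LNET_UNLINK, mdhp);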
LASSERT(the_lnet.ln_refcount > 0);
LASSERT(!me->me_md);
- if ((umd->options & (LNET_MD_OP_GET | LNET_MD_OP_PUT)) == 0) {
+ if ((umd->umd_options & (LNET_MD_OP_GET | LNET_MD_OP_PUT)) == 0) {
CERROR("Invalid option: no MD_OP set\n");
md = ERR_PTR(-EINVAL);
} else
return PTR_ERR(md);
}
- lnet_md_link(md, umd->handler, cpt);
+ lnet_md_link(md, umd->umd_handler, cpt);
/* attach this MD to portal of ME and check if it matches any
* blocked msgs on this portal */
* Create a "free floating" memory descriptor - a MD that is not associated
* with a ME. Such MDs are usually used in LNetPut() and LNetGet() operations.
*
* \param umd,unlink See the discussion for LNetMDAttach().
* \param handle On successful returns, a handle to the newly created MD is
* saved here. This handle can be used later in LNetMDUnlink(), LNetPut(),
* and LNetGet() operations.
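The discovery-push hunk below is a representative caller: it binds a
free-floating source MD with umd_threshold = 2 so the MD survives both the
SEND and the ACK event of the subsequent LNetPut(). In outline:

md.umd_threshold = 2;			/* Put event + Ack event */
md.umd_options	 = LNET_MD_TRACK_RESPONSE;
rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
/* lp_push_mdh is then handed to LNetPut(); the MD auto-unlinks
 * once both events have been delivered */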
LASSERT(the_lnet.ln_refcount > 0);
- if ((umd->options & (LNET_MD_OP_GET | LNET_MD_OP_PUT)) != 0) {
+ if ((umd->umd_options & (LNET_MD_OP_GET | LNET_MD_OP_PUT)) != 0) {
CERROR("Invalid option: GET|PUT illegal on active MDs\n");
return -EINVAL;
}
cpt = lnet_res_lock_current();
- lnet_md_link(md, umd->handler, cpt);
+ lnet_md_link(md, umd->umd_handler, cpt);
lnet_md2handle(handle, md);
}
/* initialize md content */
- md.start = &pbuf->pb_info;
- md.length = bytes;
- md.threshold = 2; /* GET/REPLY */
- md.max_size = 0;
- md.options = LNET_MD_TRUNCATE | LNET_MD_TRACK_RESPONSE;
- md.user_ptr = user_data;
- md.handler = handler;
+ md.umd_start = &pbuf->pb_info;
+ md.umd_length = bytes;
+ md.umd_threshold = 2; /* GET/REPLY */
+ md.umd_max_size = 0;
+ md.umd_options = LNET_MD_TRUNCATE | LNET_MD_TRACK_RESPONSE;
+ md.umd_user_ptr = user_data;
+ md.umd_handler = handler;
rc = LNetMDBind(&md, LNET_UNLINK, mdh);
if (rc) {
{
struct lnet_ping_buffer *pbuf;
struct lnet_processid id;
- struct lnet_md md;
+ struct lnet_md md = { NULL };
int cpt;
int rc;
lnet_net_unlock(cpt);
/* Push source MD */
- md.start = &pbuf->pb_info;
- md.length = pbuf->pb_nbytes;
- md.threshold = 2; /* Put/Ack */
- md.max_size = 0;
- md.options = LNET_MD_TRACK_RESPONSE;
- md.handler = the_lnet.ln_dc_handler;
- md.user_ptr = lp;
+ md.umd_start = &pbuf->pb_info;
+ md.umd_length = pbuf->pb_nbytes;
+ md.umd_threshold = 2; /* Put/Ack */
+ md.umd_max_size = 0;
+ md.umd_options = LNET_MD_TRACK_RESPONSE;
+ md.umd_handler = the_lnet.ln_dc_handler;
+ md.umd_user_ptr = lp;
rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
if (rc) {
int len, int options, struct lnet_process_id peer4,
struct lnet_handle_md *mdh, struct srpc_event *ev)
{
+ struct lnet_md md = {
+ .umd_user_ptr = ev,
+ .umd_start = buf,
+ .umd_length = len,
+ .umd_handler = srpc_data.rpc_lnet_handler,
+ .umd_threshold = 1,
+ .umd_options = options,
+ };
int rc;
- struct lnet_md md;
struct lnet_me *me;
struct lnet_processid peer;
return -ENOMEM;
}
- md.threshold = 1;
- md.user_ptr = ev;
- md.start = buf;
- md.length = len;
- md.options = options;
- md.handler = srpc_data.rpc_lnet_handler;
-
rc = LNetMDAttach(me, &md, LNET_UNLINK, mdh);
if (rc != 0) {
CERROR("LNetMDAttach failed: %d\n", rc);
lnet_nid_t self4, struct lnet_handle_md *mdh,
struct srpc_event *ev)
{
+ struct lnet_md md = {
+ .umd_user_ptr = ev,
+ .umd_start = buf,
+ .umd_length = len,
+ .umd_handler = srpc_data.rpc_lnet_handler,
+ .umd_threshold = ((options & LNET_MD_OP_GET) != 0) ? 2 : 1,
+ .umd_options = options & ~(LNET_MD_OP_PUT | LNET_MD_OP_GET),
+ };
int rc;
- struct lnet_md md;
struct lnet_nid self;
struct lnet_processid peer;
lnet_nid4_to_nid(self4, &self);
lnet_pid4_to_pid(peer4, &peer);
- md.user_ptr = ev;
- md.start = buf;
- md.length = len;
- md.handler = srpc_data.rpc_lnet_handler;
- md.threshold = ((options & LNET_MD_OP_GET) != 0) ? 2 : 1;
- md.options = options & ~(LNET_MD_OP_PUT | LNET_MD_OP_GET);
-
rc = LNetMDBind(&md, LNET_UNLINK, mdh);
if (rc != 0) {
CERROR("LNetMDBind failed: %d\n", rc);
struct lnet_handle_md *bulk_cookie)
{
int rc;
- struct lnet_md md;
+ struct lnet_md md = {
+ .umd_start = base,
+ .umd_length = len,
+ .umd_threshold = (ack == LNET_ACK_REQ) ? 2 : 1,
+ .umd_options = PTLRPC_MD_OPTIONS,
+ .umd_user_ptr = cbid,
+ .umd_handler = ptlrpc_handler,
+ };
ENTRY;
LASSERT(portal != 0);
CDEBUG(D_INFO, "peer_id %s\n", libcfs_idstr(peer_id));
- md.start = base;
- md.length = len;
- md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1;
- md.options = PTLRPC_MD_OPTIONS;
- md.user_ptr = cbid;
- md.handler = ptlrpc_handler;
- LNetInvalidateMDHandle(&md.bulk_handle);
+ LNetInvalidateMDHandle(&md.umd_bulk_handle);
if (bulk_cookie) {
- md.bulk_handle = *bulk_cookie;
- md.options |= LNET_MD_BULK_HANDLE;
+ md.umd_bulk_handle = *bulk_cookie;
+ md.umd_options |= LNET_MD_BULK_HANDLE;
}
if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_ACK, CFS_FAIL_ONCE) &&
*/
int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc)
{
- struct obd_export *exp = desc->bd_export;
- struct lnet_nid self_nid;
- struct lnet_processid peer_id;
- int rc = 0;
- __u64 mbits;
- int posted_md;
- int total_md;
- struct lnet_md md;
+ struct obd_export *exp = desc->bd_export;
+ struct lnet_nid self_nid;
+ struct lnet_processid peer_id;
+ int rc = 0;
+ __u64 mbits;
+ int posted_md;
+ int total_md;
+ struct lnet_md md = { NULL };
ENTRY;
desc->bd_refs = total_md;
desc->bd_failure = 0;
- md.user_ptr = &desc->bd_cbid;
- md.handler = ptlrpc_handler;
- md.threshold = 2; /* SENT and ACK/REPLY */
+ md.umd_user_ptr = &desc->bd_cbid;
+ md.umd_handler = ptlrpc_handler;
+ md.umd_threshold = 2; /* SENT and ACK/REPLY */
for (posted_md = 0; posted_md < total_md; mbits++) {
- md.options = PTLRPC_MD_OPTIONS;
+ md.umd_options = PTLRPC_MD_OPTIONS;
/* Note. source and sink buf frags are page-aligned. Else send
* client bulk sizes over and split server buffer accordingly
int total_md;
__u64 mbits;
struct lnet_me *me;
- struct lnet_md md;
+ struct lnet_md md = { NULL };
ENTRY;
desc->bd_registered = 1;
desc->bd_last_mbits = mbits;
desc->bd_refs = total_md;
- md.user_ptr = &desc->bd_cbid;
- md.handler = ptlrpc_handler;
- md.threshold = 1; /* PUT or GET */
+ md.umd_user_ptr = &desc->bd_cbid;
+ md.umd_handler = ptlrpc_handler;
+ md.umd_threshold = 1; /* PUT or GET */
for (posted_md = 0; posted_md < desc->bd_md_count;
posted_md++, mbits++) {
- md.options = PTLRPC_MD_OPTIONS |
+ md.umd_options = PTLRPC_MD_OPTIONS |
(ptlrpc_is_bulk_op_get(desc->bd_type) ?
LNET_MD_OP_GET : LNET_MD_OP_PUT);
ptlrpc_fill_bulk_md(&md, desc, posted_md);
spin_unlock(&request->rq_lock);
if (!noreply) {
- reply_md.start = request->rq_repbuf;
- reply_md.length = request->rq_repbuf_len;
+ reply_md.umd_start = request->rq_repbuf;
+ reply_md.umd_length = request->rq_repbuf_len;
/* Allow multiple early replies */
- reply_md.threshold = LNET_MD_THRESH_INF;
+ reply_md.umd_threshold = LNET_MD_THRESH_INF;
/* Manage remote for early replies */
- reply_md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT |
+ reply_md.umd_options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT |
LNET_MD_MANAGE_REMOTE |
LNET_MD_TRUNCATE; /* allow EOVERFLOW to be returned */
- reply_md.user_ptr = &request->rq_reply_cbid;
- reply_md.handler = ptlrpc_handler;
+ reply_md.umd_user_ptr = &request->rq_reply_cbid;
+ reply_md.umd_handler = ptlrpc_handler;
/* We must see the unlink callback to set rq_reply_unlinked,
* so we can't auto-unlink
.nid = LNET_ANY_NID,
.pid = LNET_PID_ANY
};
+ struct lnet_md md = {
+ .umd_start = rqbd->rqbd_buffer,
+ .umd_length = service->srv_buf_size,
+ .umd_max_size = service->srv_max_req_size,
+ .umd_threshold = LNET_MD_THRESH_INF,
+ .umd_options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT |
+ LNET_MD_MAX_SIZE,
+ .umd_user_ptr = &rqbd->rqbd_cbid,
+ .umd_handler = ptlrpc_handler,
+ };
int rc;
- struct lnet_md md;
struct lnet_me *me;
CDEBUG(D_NET, "%s: registering portal %d\n", service->srv_name,
LASSERT(rqbd->rqbd_refcount == 0);
rqbd->rqbd_refcount = 1;
- md.start = rqbd->rqbd_buffer;
- md.length = service->srv_buf_size;
- md.max_size = service->srv_max_req_size;
- md.threshold = LNET_MD_THRESH_INF;
- md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT | LNET_MD_MAX_SIZE;
- md.user_ptr = &rqbd->rqbd_cbid;
- md.handler = ptlrpc_handler;
-
rc = LNetMDAttach(me, &md, LNET_UNLINK, &rqbd->rqbd_md_h);
if (rc == 0) {
percpu_ref_get(&ptlrpc_pending);
/* just send a lnet header */
if (mdidx >= desc->bd_md_count) {
- md->options |= LNET_MD_KIOV;
- md->length = 0;
- md->start = NULL;
+ md->umd_options |= LNET_MD_KIOV;
+ md->umd_length = 0;
+ md->umd_start = NULL;
return;
}
if (desc->bd_is_rdma)
- md->options |= LNET_MD_GPU_ADDR;
+ md->umd_options |= LNET_MD_GPU_ADDR;
start = desc->bd_mds_off[mdidx];
if (mdidx == (desc->bd_md_count - 1))
- md->length = desc->bd_iov_count - start;
+ md->umd_length = desc->bd_iov_count - start;
else
- md->length = desc->bd_mds_off[mdidx + 1] - start;
+ md->umd_length = desc->bd_mds_off[mdidx + 1] - start;
- md->options |= LNET_MD_KIOV;
+ md->umd_options |= LNET_MD_KIOV;
if (desc->bd_enc_vec)
- md->start = &desc->bd_enc_vec[start];
+ md->umd_start = &desc->bd_enc_vec[start];
else
- md->start = &desc->bd_vec[start];
+ md->umd_start = &desc->bd_vec[start];
}
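To make the fragment arithmetic above concrete: bd_mds_off[] holds the first
kiov index of each MD, so each MD's length is the distance to the next
boundary, with the last MD taking the remainder. A worked example under
assumed values:

/* Assume bd_iov_count = 10 fragments and bd_mds_off[] = {0, 4, 8}:
 *   mdidx 0: umd_length = off[1] - off[0] = 4 fragments
 *   mdidx 1: umd_length = off[2] - off[1] = 4 fragments
 *   mdidx 2: umd_length = bd_iov_count - off[2] = 2 fragments (last MD)
 * umd_start points at &bd_vec[off[mdidx]] (or bd_enc_vec when present).
 */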