int i;
for (i = 0; i < desc->bd_iov_count; i++)
- put_page(BD_GET_KIOV(desc, i).kiov_page);
+ put_page(desc->bd_vec[i].kiov_page);
}
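For readers who have not seen the old accessors: BD_GET_KIOV() and GET_KIOV() hid a union inside struct ptlrpc_bulk_desc that carried either a page (kiov) array or a kvec array. With the kvec path removed, the descriptor keeps the kiov array as a plain member, which is what the direct desc->bd_vec[i] indexing above relies on. A rough sketch of the before/after shape; the union and member names other than bd_vec, bd_iov_count and kiov_page are written from memory and should be treated as assumptions:

	/* Before this change (sketch; names assumed): the fragment array
	 * lived behind a union and was reached only through macros. */
	union {
		struct {
			lnet_kiov_t	*bd_vec;	/* page fragments (KIOV) */
		} bd_kiov;
		struct {
			struct kvec	*bd_kvec;	/* kernel buffers (KVEC) */
		} bd_kvec;
	} bd_u;

	#define GET_KIOV(desc)		((desc)->bd_u.bd_kiov.bd_vec)
	#define BD_GET_KIOV(desc, i)	((desc)->bd_u.bd_kiov.bd_vec[i])

	/* After: struct ptlrpc_bulk_desc simply carries the page array. */
	lnet_kiov_t	*bd_vec;	/* indexed as desc->bd_vec[i] above */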
static int ptlrpc_prep_bulk_frag_pages(struct ptlrpc_bulk_desc *desc,
};
EXPORT_SYMBOL(ptlrpc_bulk_kiov_nopin_ops);
-const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kvec_ops = {
- .add_iov_frag = ptlrpc_prep_bulk_frag,
-};
-EXPORT_SYMBOL(ptlrpc_bulk_kvec_ops);
-
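With ptlrpc_bulk_kvec_ops gone, every bulk descriptor is built through page (kiov) callbacks, so only the kiov ops tables remain exported. A minimal sketch of what those tables look like, assuming the usual pin/nopin callback names; they are not taken from this hunk:

	/* Sketch of the surviving frag-ops tables (callback names assumed).
	 * Only add_kiov_frag and release_frags are needed, which is why the
	 * allocation path below can LASSERT on ops->add_kiov_frag alone. */
	const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pages_ops = {
		.add_kiov_frag	= ptlrpc_prep_bulk_page_pin,
		.release_frags	= ptlrpc_release_bulk_page_pin,
	};

	const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops = {
		.add_kiov_frag	= ptlrpc_prep_bulk_page_nopin,
		.release_frags	= ptlrpc_release_bulk_noop,
	};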
static int ptlrpc_send_new_req(struct ptlrpc_request *req);
static int ptlrpcd_check_work(struct ptlrpc_request *req);
static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async);
struct ptlrpc_bulk_desc *desc;
int i;
- /* ensure that only one of KIOV or IOVEC is set but not both */
- LASSERT((ptlrpc_is_bulk_desc_kiov(type) &&
- ops->add_kiov_frag != NULL) ||
- (ptlrpc_is_bulk_desc_kvec(type) &&
- ops->add_iov_frag != NULL));
+ LASSERT(ops->add_kiov_frag != NULL);
OBD_ALLOC_PTR(desc);
if (!desc)
return NULL;
- if (type & PTLRPC_BULK_BUF_KIOV) {
- OBD_ALLOC_LARGE(GET_KIOV(desc),
- nfrags * sizeof(*GET_KIOV(desc)));
- if (!GET_KIOV(desc))
- goto out;
- } else {
- OBD_ALLOC_LARGE(GET_KVEC(desc),
- nfrags * sizeof(*GET_KVEC(desc)));
- if (!GET_KVEC(desc))
- goto out;
- }
+
+ OBD_ALLOC_LARGE(desc->bd_vec,
+ nfrags * sizeof(*desc->bd_vec));
+ if (!desc->bd_vec)
+ goto out;
spin_lock_init(&desc->bd_lock);
init_waitqueue_head(&desc->bd_waitq);
LASSERT(pageoffset >= 0);
LASSERT(len > 0);
LASSERT(pageoffset + len <= PAGE_SIZE);
- LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
- kiov = &BD_GET_KIOV(desc, desc->bd_iov_count);
+ kiov = &desc->bd_vec[desc->bd_iov_count];
desc->bd_nob += len;
}
EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
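To show what the kiov pointer fetched from bd_vec is used for, here is a hedged sketch of how __ptlrpc_prep_bulk_page() fills in the fragment; the kiov_* member names follow the kiov_page access in the first hunk, and the optional page pinning is an assumption:

	/* Sketch: fill the fragment taken from desc->bd_vec.  The pin
	 * handling is assumed and not shown in this excerpt. */
	if (pin)
		get_page(page);

	kiov->kiov_page   = page;
	kiov->kiov_offset = pageoffset;
	kiov->kiov_len    = len;

	desc->bd_iov_count++;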
-int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc,
- void *frag, int len)
-{
- struct kvec *iovec;
-
- ENTRY;
-
- LASSERT(desc->bd_iov_count < desc->bd_max_iov);
- LASSERT(frag != NULL);
- LASSERT(len > 0);
- LASSERT(ptlrpc_is_bulk_desc_kvec(desc->bd_type));
-
- iovec = &BD_GET_KVEC(desc, desc->bd_iov_count);
-
- desc->bd_nob += len;
-
- iovec->iov_base = frag;
- iovec->iov_len = len;
-
- desc->bd_iov_count++;
-
- RETURN(desc->bd_nob);
-}
-EXPORT_SYMBOL(ptlrpc_prep_bulk_frag);
-
void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
{
ENTRY;
LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
LASSERT(desc->bd_frag_ops != NULL);
- if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
- sptlrpc_enc_pool_put_pages(desc);
+ sptlrpc_enc_pool_put_pages(desc);
if (desc->bd_export)
class_export_put(desc->bd_export);
if (desc->bd_frag_ops->release_frags != NULL)
desc->bd_frag_ops->release_frags(desc);
- if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
- OBD_FREE_LARGE(GET_KIOV(desc),
- desc->bd_max_iov * sizeof(*GET_KIOV(desc)));
- else
- OBD_FREE_LARGE(GET_KVEC(desc),
- desc->bd_max_iov * sizeof(*GET_KVEC(desc)));
+ OBD_FREE_LARGE(desc->bd_vec,
+ desc->bd_max_iov * sizeof(*desc->bd_vec));
OBD_FREE_PTR(desc);
EXIT;
}
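For context, a hedged sketch of a caller-side lifecycle after this change; the ptlrpc_prep_bulk_imp() signature, flag and portal values are written from memory and are assumptions, not part of this patch:

	/* Illustrative only: allocate a page-based descriptor, attach pages,
	 * and rely on the single free path shown above.  Names assumed. */
	struct ptlrpc_bulk_desc *desc;
	int i;

	desc = ptlrpc_prep_bulk_imp(req, npages, 1 /* max_brw */,
				    PTLRPC_BULK_PUT_SINK,
				    OST_BULK_PORTAL,
				    &ptlrpc_bulk_kiov_pages_ops);
	if (!desc)
		return -ENOMEM;

	/* every fragment lands in desc->bd_vec[]; no KVEC alternative exists */
	for (i = 0; i < npages; i++)
		desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, PAGE_SIZE);

	/* when the request is finalized, ptlrpc_free_bulk() releases bd_vec
	 * and the descriptor through the one unconditional path above */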
void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
struct ptlrpc_request *req)
{
+ if (set == PTLRPCD_SET) {
+ ptlrpcd_add_req(req);
+ return;
+ }
+
LASSERT(req->rq_import->imp_state != LUSTRE_IMP_IDLE);
LASSERT(list_empty(&req->rq_set_chain));
}
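The PTLRPCD_SET branch above lets callers hand a request to the ptlrpcd daemons through the common entry point instead of calling ptlrpcd_add_req() themselves. A minimal caller-side sketch; the request preparation helper is hypothetical:

	/* Sketch: PTLRPCD_SET is a sentinel set pointer, so existing call
	 * sites can use the one helper for both real sets and ptlrpcd. */
	struct ptlrpc_request *req;

	req = prepare_some_request();		/* hypothetical helper */
	ptlrpc_set_add_req(PTLRPCD_SET, req);	/* routed to ptlrpcd_add_req() */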
/**
- * Sets rq_intr flag in \a req under spinlock.
- */
-void ptlrpc_mark_interrupted(struct ptlrpc_request *req)
-{
- spin_lock(&req->rq_lock);
- req->rq_intr = 1;
- spin_unlock(&req->rq_lock);
-}
-EXPORT_SYMBOL(ptlrpc_mark_interrupted);
-
-/**
* Interrupts (sets interrupted flag) all uncompleted requests in
* a set \a data. Callback for l_wait_event for interruptible waits.
*/
!req->rq_allow_intr)
continue;
- ptlrpc_mark_interrupted(req);
+ spin_lock(&req->rq_lock);
+ req->rq_intr = 1;
+ spin_unlock(&req->rq_lock);
}
}
RETURN(0);
/*
- * We have to l_wait_event() whatever the result, to give liblustre
+ * We have to wait_event_idle_timeout() whatever the result, to get
* a chance to run reply_in_callback(), and to make sure we've
* unlinked before returning a req to the pool.
*/
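Since the comment now names wait_event_idle_timeout(), here is a hedged sketch of the wait loop it describes; the waitqueue choice, timeout budget and retry message are assumptions modelled on the usual ptlrpc_unregister_reply() pattern:

	/* Sketch: wait in one-second slices until the reply buffer is
	 * received or unlinked, warning if it takes unexpectedly long.
	 * Variable names and the LONG_UNLINK budget are assumed. */
	for (;;) {
		wait_queue_head_t *wq = &request->rq_reply_waitq;
		int seconds = LONG_UNLINK;

		while (seconds > 0 &&
		       wait_event_idle_timeout(
				*wq, !ptlrpc_client_recv_or_unlink(request),
				cfs_time_seconds(1)) == 0)
			seconds -= 1;
		if (seconds > 0)
			RETURN(1);	/* unlinked; the req can go back to the pool */

		DEBUG_REQ(D_WARNING, request, "reply unlink stuck, waiting again");
	}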