X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;ds=sidebyside;f=lustre%2Fptlrpc%2Fclient.c;h=ba4c079452f046d07c20503fc849450db01fda06;hb=816c9f01d5b44e05437b890aab4ef50edc02230f;hp=cc93dd447e9b7de626af3f16ae1e56e5b01f9137;hpb=cc3643908d6c902db3d6c95647fff007bad0ff53;p=fs%2Flustre-release.git

diff --git a/lustre/ptlrpc/client.c b/lustre/ptlrpc/client.c
index cc93dd4..ba4c079 100644
--- a/lustre/ptlrpc/client.c
+++ b/lustre/ptlrpc/client.c
@@ -66,19 +66,19 @@ static void ptlrpc_release_bulk_page_pin(struct ptlrpc_bulk_desc *desc)
 	int i;
 
 	for (i = 0; i < desc->bd_iov_count ; i++)
-		put_page(BD_GET_KIOV(desc, i).kiov_page);
+		put_page(desc->bd_vec[i].kiov_page);
 }
 
 static int ptlrpc_prep_bulk_frag_pages(struct ptlrpc_bulk_desc *desc,
 				       void *frag, int len)
 {
-	unsigned int offset = (uintptr_t)frag & ~PAGE_MASK;
+	unsigned int offset = (unsigned long)frag & ~PAGE_MASK;
 
 	ENTRY;
 	while (len > 0) {
 		int page_len = min_t(unsigned int,
 				     PAGE_SIZE - offset, len);
-		uintptr_t vaddr = (uintptr_t) frag;
+		unsigned long vaddr = (unsigned long)frag;
 
 		ptlrpc_prep_bulk_page_nopin(desc,
 					    lnet_kvaddr_to_page(vaddr),
@@ -104,11 +104,6 @@ const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops = {
 };
 EXPORT_SYMBOL(ptlrpc_bulk_kiov_nopin_ops);
 
-const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kvec_ops = {
-	.add_iov_frag = ptlrpc_prep_bulk_frag,
-};
-EXPORT_SYMBOL(ptlrpc_bulk_kvec_ops);
-
 static int ptlrpc_send_new_req(struct ptlrpc_request *req);
 static int ptlrpcd_check_work(struct ptlrpc_request *req);
 static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async);
@@ -172,26 +167,16 @@ struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned int nfrags,
 	struct ptlrpc_bulk_desc *desc;
 	int i;
 
-	/* ensure that only one of KIOV or IOVEC is set but not both */
-	LASSERT((ptlrpc_is_bulk_desc_kiov(type) &&
-		 ops->add_kiov_frag != NULL) ||
-		(ptlrpc_is_bulk_desc_kvec(type) &&
-		 ops->add_iov_frag != NULL));
+	LASSERT(ops->add_kiov_frag != NULL);
 
 	OBD_ALLOC_PTR(desc);
 	if (!desc)
 		return NULL;
-	if (type & PTLRPC_BULK_BUF_KIOV) {
-		OBD_ALLOC_LARGE(GET_KIOV(desc),
-				nfrags * sizeof(*GET_KIOV(desc)));
-		if (!GET_KIOV(desc))
-			goto out;
-	} else {
-		OBD_ALLOC_LARGE(GET_KVEC(desc),
-				nfrags * sizeof(*GET_KVEC(desc)));
-		if (!GET_KVEC(desc))
-			goto out;
-	}
+
+	OBD_ALLOC_LARGE(desc->bd_vec,
+			nfrags * sizeof(*desc->bd_vec));
+	if (!desc->bd_vec)
+		goto out;
 
 	spin_lock_init(&desc->bd_lock);
 	init_waitqueue_head(&desc->bd_waitq);
@@ -265,9 +250,8 @@ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
 	LASSERT(pageoffset >= 0);
 	LASSERT(len > 0);
 	LASSERT(pageoffset + len <= PAGE_SIZE);
-	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
 
-	kiov = &BD_GET_KIOV(desc, desc->bd_iov_count);
+	kiov = &desc->bd_vec[desc->bd_iov_count];
 
 	desc->bd_nob += len;
 
@@ -282,31 +266,6 @@ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
 }
 EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
 
-int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc,
-			  void *frag, int len)
-{
-	struct kvec *iovec;
-
-	ENTRY;
-
-	LASSERT(desc->bd_iov_count < desc->bd_max_iov);
-	LASSERT(frag != NULL);
-	LASSERT(len > 0);
-	LASSERT(ptlrpc_is_bulk_desc_kvec(desc->bd_type));
-
-	iovec = &BD_GET_KVEC(desc, desc->bd_iov_count);
-
-	desc->bd_nob += len;
-
-	iovec->iov_base = frag;
-	iovec->iov_len = len;
-
-	desc->bd_iov_count++;
-
-	RETURN(desc->bd_nob);
-}
-EXPORT_SYMBOL(ptlrpc_prep_bulk_frag);
-
 void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
 {
 	ENTRY;
@@ -317,8 +276,7 @@ void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
 	LASSERT((desc->bd_export != NULL) ^
 		(desc->bd_import != NULL));
 	LASSERT(desc->bd_frag_ops != NULL);
-	if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
-		sptlrpc_enc_pool_put_pages(desc);
+	sptlrpc_enc_pool_put_pages(desc);
 
 	if (desc->bd_export)
 		class_export_put(desc->bd_export);
@@ -328,12 +286,8 @@ void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
 	if (desc->bd_frag_ops->release_frags != NULL)
 		desc->bd_frag_ops->release_frags(desc);
 
-	if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
-		OBD_FREE_LARGE(GET_KIOV(desc),
-			       desc->bd_max_iov * sizeof(*GET_KIOV(desc)));
-	else
-		OBD_FREE_LARGE(GET_KVEC(desc),
-			       desc->bd_max_iov * sizeof(*GET_KVEC(desc)));
+	OBD_FREE_LARGE(desc->bd_vec,
+		       desc->bd_max_iov * sizeof(*desc->bd_vec));
 	OBD_FREE_PTR(desc);
 	EXIT;
 }
@@ -1177,6 +1131,11 @@ EXPORT_SYMBOL(ptlrpc_set_destroy);
 void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
 			struct ptlrpc_request *req)
 {
+	if (set == PTLRPCD_SET) {
+		ptlrpcd_add_req(req);
+		return;
+	}
+
 	LASSERT(req->rq_import->imp_state != LUSTRE_IMP_IDLE);
 	LASSERT(list_empty(&req->rq_set_chain));
 
@@ -2363,17 +2322,6 @@ void ptlrpc_expired_set(struct ptlrpc_request_set *set)
 }
 
 /**
- * Sets rq_intr flag in \a req under spinlock.
- */
-void ptlrpc_mark_interrupted(struct ptlrpc_request *req)
-{
-	spin_lock(&req->rq_lock);
-	req->rq_intr = 1;
-	spin_unlock(&req->rq_lock);
-}
-EXPORT_SYMBOL(ptlrpc_mark_interrupted);
-
-/**
  * Interrupts (sets interrupted flag) all uncompleted requests in
  * a set \a data. Callback for l_wait_event for interruptible waits.
  */
@@ -2396,7 +2344,9 @@ static void ptlrpc_interrupted_set(struct ptlrpc_request_set *set)
 		    !req->rq_allow_intr)
 			continue;
 
-		ptlrpc_mark_interrupted(req);
+		spin_lock(&req->rq_lock);
+		req->rq_intr = 1;
+		spin_unlock(&req->rq_lock);
 	}
 }
 
@@ -2778,7 +2728,7 @@ static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
 		RETURN(0);
 
 	/*
-	 * We have to l_wait_event() whatever the result, to give liblustre
+	 * We have to wait_event_idle_timeout() whatever the result, to get
 	 * a chance to run reply_in_callback(), and to make sure we've
 	 * unlinked before returning a req to the pool.
 	 */