+	lnet_net_unlock(cpt);
+
+	list_for_each_entry_safe(msg, tmp, list, msg_list) {
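+		/* a NULL lnet_msg and zero mlen ask the LND to receive and
+		 * discard the payload */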
+		lnet_ni_recv(msg->msg_rxni, msg->msg_private, NULL,
+			     0, 0, 0, msg->msg_hdr.payload_length);
+		list_del_init(&msg->msg_list);
+		lnet_finalize(msg, -ECANCELED);
+	}
+
+	lnet_net_lock(cpt);
+}
+
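+/*
+ * Return the receive-side credits held by a routed message: the global
+ * router buffer credit and the peer router credit.  Also drops the
+ * message's references on its rx NI and peer NI.  Called with
+ * lnet_net_lock held on msg->msg_rx_cpt, per the _locked convention.
+ */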
+void
+lnet_return_rx_credits_locked(struct lnet_msg *msg)
+{
+	struct lnet_peer_ni *rxpeer = msg->msg_rxpeer;
+	struct lnet_ni *rxni = msg->msg_rxni;
+	struct lnet_msg *msg2;
+
+	if (msg->msg_rtrcredit) {
+		/* give back global router credits */
+		struct lnet_rtrbuf *rb;
+		struct lnet_rtrbufpool *rbp;
+
+		/* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
+		 * there until it gets one allocated, or aborts the wait
+		 * itself */
+		LASSERT(msg->msg_kiov != NULL);
+
+		rb = list_entry(msg->msg_kiov, struct lnet_rtrbuf, rb_kiov[0]);
+		rbp = rb->rb_pool;
+
+		msg->msg_kiov = NULL;
+		msg->msg_rtrcredit = 0;
+
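+		/* the buffer must map back to this message's pool, and a
+		 * positive credit count must coincide with a non-empty
+		 * free-buffer list */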
+		LASSERT(rbp == lnet_msg2bufpool(msg));
+
+		LASSERT((rbp->rbp_credits > 0) ==
+			!list_empty(&rbp->rbp_bufs));
+
+		/* If routing is now turned off, we just drop this buffer and
+		 * don't bother trying to return credits. */
+		if (!the_lnet.ln_routing) {
+			lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
+			goto routing_off;
+		}
+
+		/* It is possible that a user has lowered the desired number
+		 * of buffers in this pool.  Make sure we never put back more
+		 * buffers than the stated number. */
+		if (unlikely(rbp->rbp_credits >= rbp->rbp_req_nbuffers)) {
+			/* Discard this buffer so we don't have too many. */
+			lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
+			rbp->rbp_nbuffers--;
+		} else {
+			list_add(&rb->rb_list, &rbp->rbp_bufs);
+			rbp->rbp_credits++;
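+			/* a non-positive count after this return means
+			 * messages are still blocked waiting for a buffer;
+			 * wake the first of them */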
+			if (rbp->rbp_credits <= 0)
+				lnet_schedule_blocked_locked(rbp);
+		}
+	}
+
+routing_off:
+	if (msg->msg_peerrtrcredit) {
+		/* give back peer router credits */
+		msg->msg_peerrtrcredit = 0;
+
+		spin_lock(&rxpeer->lpni_lock);
+		LASSERT((rxpeer->lpni_rtrcredits < 0) ==
+			!list_empty(&rxpeer->lpni_rtrq));
+
+		rxpeer->lpni_rtrcredits++;
+
+		/* drop all messages which are queued to be routed on that
+		 * peer. */
+		if (!the_lnet.ln_routing) {
+			LIST_HEAD(drop);
+
+			list_splice_init(&rxpeer->lpni_rtrq, &drop);
+			spin_unlock(&rxpeer->lpni_lock);
+			lnet_drop_routed_msgs_locked(&drop, msg->msg_rx_cpt);
+		} else if (rxpeer->lpni_rtrcredits <= 0) {
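+			/* give the returned credit to the first queued
+			 * message and post its receive */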
+			msg2 = list_entry(rxpeer->lpni_rtrq.next,
+					  struct lnet_msg, msg_list);
+			list_del(&msg2->msg_list);
+			spin_unlock(&rxpeer->lpni_lock);
+			(void) lnet_post_routed_recv_locked(msg2, 1);
+		} else {
+			spin_unlock(&rxpeer->lpni_lock);
+		}
+	}
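+	/* drop the refs this message holds on its rx NI and peer NI */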
+	if (rxni != NULL) {
+		msg->msg_rxni = NULL;
+		lnet_ni_decref_locked(rxni, msg->msg_rx_cpt);
+	}
+	if (rxpeer != NULL) {
+		msg->msg_rxpeer = NULL;
+		lnet_peer_ni_decref_locked(rxpeer);
+	}