X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fptlrpc%2Fevents.c;h=69716b3049bd0129dfd8bd93c2ec003c9fd783b7;hp=6e612364fbaf2439b4d34ca1b343e99b00f481f4;hb=0a59f0c68978fc2dcab11ec5a4b41cead47bc71c;hpb=090c677210ee2946d99c71412e4ff762bb300f4f

diff --git a/lustre/ptlrpc/events.c b/lustre/ptlrpc/events.c
index 6e61236..69716b3 100644
--- a/lustre/ptlrpc/events.c
+++ b/lustre/ptlrpc/events.c
@@ -1,175 +1,241 @@
 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
  * vim:expandtab:shiftwidth=8:tabstop=8:
  *
- * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
+ * GPL HEADER START
  *
- * This file is part of Lustre, http://www.lustre.org.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
  *
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
  *
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
  *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
  */
 
 #define DEBUG_SUBSYSTEM S_RPC
 
-#ifdef __KERNEL__
-#include <linux/module.h>
+#ifndef __KERNEL__
+# include <liblustre.h>
 #else
-#include <liblustre.h>
+# include <libcfs/libcfs.h>
+# ifdef __mips64__
+#  include <linux/kernel.h>
+# endif
 #endif
-#include <linux/obd_class.h>
-#include <linux/lustre_net.h>
+
+#include <obd_class.h>
+#include <lustre_net.h>
+#include <lustre_sec.h>
 #include "ptlrpc_internal.h"
 
-struct ptlrpc_ni  ptlrpc_interfaces[NAL_MAX_NR];
-int               ptlrpc_ninterfaces;
+lnet_handle_eq_t   ptlrpc_eq_h;
 
-/*
+/* 
  * Client's outgoing request callback
  */
-void request_out_callback(ptl_event_t *ev)
+void request_out_callback(lnet_event_t *ev)
 {
-        struct ptlrpc_cb_id   *cbid = ev->mem_desc.user_ptr;
+        struct ptlrpc_cb_id   *cbid = ev->md.user_ptr;
         struct ptlrpc_request *req = cbid->cbid_arg;
-        unsigned long          flags;
         ENTRY;
 
-        LASSERT (ev->type == PTL_EVENT_SEND_END ||
-                 ev->type == PTL_EVENT_UNLINK);
+        LASSERT (ev->type == LNET_EVENT_SEND ||
+                 ev->type == LNET_EVENT_UNLINK);
         LASSERT (ev->unlinked);
 
-        DEBUG_REQ((ev->ni_fail_type == PTL_NI_OK) ? D_NET : D_ERROR, req,
-                  "type %d, status %d", ev->type, ev->ni_fail_type);
+        DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);
+
+        sptlrpc_request_out_callback(req);
+        req->rq_real_sent = cfs_time_current_sec();
 
-        if (ev->type == PTL_EVENT_UNLINK ||
-            ev->ni_fail_type != PTL_NI_OK) {
+        if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {
 
                 /* Failed send: make it seem like the reply timed out, just
                  * like failing sends in client.c does currently... */
 
-                spin_lock_irqsave(&req->rq_lock, flags);
-                req->rq_timeout = 0;
-                spin_unlock_irqrestore(&req->rq_lock, flags);
-
-                ptlrpc_wake_client_req(req);
+                cfs_spin_lock(&req->rq_lock);
+                req->rq_net_err = 1;
+                cfs_spin_unlock(&req->rq_lock);
+
+                ptlrpc_client_wake_req(req);
         }
 
-        /* this balances the atomic_inc in ptl_send_rpc() */
         ptlrpc_req_finished(req);
+
+        EXIT;
 }
 
 /*
  * Client's incoming reply callback
  */
-void reply_in_callback(ptl_event_t *ev)
+void reply_in_callback(lnet_event_t *ev)
 {
-        struct ptlrpc_cb_id   *cbid = ev->mem_desc.user_ptr;
+        struct ptlrpc_cb_id   *cbid = ev->md.user_ptr;
         struct ptlrpc_request *req = cbid->cbid_arg;
-        unsigned long flags;
         ENTRY;
 
-        LASSERT (ev->type == PTL_EVENT_PUT_END ||
-                 ev->type == PTL_EVENT_UNLINK);
-        LASSERT (ev->unlinked);
-        LASSERT (ev->mem_desc.start == req->rq_repmsg);
-        LASSERT (ev->offset == 0);
-        LASSERT (ev->mlength <= req->rq_replen);
-
-        DEBUG_REQ((ev->ni_fail_type == PTL_NI_OK) ? D_NET : D_ERROR, req,
-                  "type %d, status %d", ev->type, ev->ni_fail_type);
+        DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);
 
-        spin_lock_irqsave (&req->rq_lock, flags);
+        LASSERT (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
+        LASSERT (ev->md.start == req->rq_repbuf);
+        LASSERT (ev->offset + ev->mlength <= req->rq_repbuf_len);
+        /* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
+           for adaptive timeouts' early reply. */
+        LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);
+
+        cfs_spin_lock(&req->rq_lock);
 
-        LASSERT (req->rq_receiving_reply);
         req->rq_receiving_reply = 0;
+        req->rq_early = 0;
+        if (ev->unlinked)
+                req->rq_must_unlink = 0;
+
+        if (ev->status)
+                goto out_wake;
+
+        if (ev->type == LNET_EVENT_UNLINK) {
+                LASSERT(ev->unlinked);
+                DEBUG_REQ(D_RPCTRACE, req, "unlink");
+                goto out_wake;
+        }
 
-        if (ev->type == PTL_EVENT_PUT_END &&
-            ev->ni_fail_type == PTL_NI_OK) {
+        if (ev->mlength < ev->rlength ) {
+                CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req,
+                       req->rq_replen, ev->rlength, ev->offset);
+                req->rq_reply_truncate = 1;
                 req->rq_replied = 1;
+                req->rq_status = -EOVERFLOW;
+                req->rq_nob_received = ev->rlength + ev->offset;
+                goto out_wake;
+        }
+
+        if ((ev->offset == 0) &&
+            ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))) {
+                /* Early reply */
+                DEBUG_REQ(D_ADAPTTO, req,
+                          "Early reply received: mlen=%u offset=%d replen=%d "
+                          "replied=%d unlinked=%d", ev->mlength, ev->offset,
+                          req->rq_replen, req->rq_replied, ev->unlinked);
+
+                req->rq_early_count++; /* number received, client side */
+
+                if (req->rq_replied)   /* already got the real reply */
+                        goto out_wake;
+
+                req->rq_early = 1;
+                req->rq_reply_off = ev->offset;
+                req->rq_nob_received = ev->mlength;
+                /* And we're still receiving */
+                req->rq_receiving_reply = 1;
+        } else {
+                /* Real reply */
+                req->rq_rep_swab_mask = 0;
+                req->rq_replied = 1;
+                req->rq_reply_off = ev->offset;
                 req->rq_nob_received = ev->mlength;
+                /* LNetMDUnlink can't be called under the LNET_LOCK,
+                   so we must unlink in ptlrpc_unregister_reply */
+                DEBUG_REQ(D_INFO, req,
+                          "reply in flags=%x mlen=%u offset=%d replen=%d",
+                          lustre_msg_get_flags(req->rq_reqmsg),
+                          ev->mlength, ev->offset, req->rq_replen);
         }
 
+        req->rq_import->imp_last_reply_time = cfs_time_current_sec();
+
+out_wake:
         /* NB don't unlock till after wakeup; req can disappear under us
          * since we don't have our own ref */
-        ptlrpc_wake_client_req(req);
-
-        spin_unlock_irqrestore (&req->rq_lock, flags);
+        ptlrpc_client_wake_req(req);
+        cfs_spin_unlock(&req->rq_lock);
         EXIT;
 }
 
-/*
+/* 
  * Client's bulk has been written/read
  */
-void client_bulk_callback (ptl_event_t *ev)
+void client_bulk_callback (lnet_event_t *ev)
 {
-        struct ptlrpc_cb_id     *cbid = ev->mem_desc.user_ptr;
+        struct ptlrpc_cb_id     *cbid = ev->md.user_ptr;
         struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
-        unsigned long            flags;
         ENTRY;
 
-        LASSERT ((desc->bd_type == BULK_PUT_SINK &&
-                  ev->type == PTL_EVENT_PUT_END) ||
+        LASSERT ((desc->bd_type == BULK_PUT_SINK &&
+                  ev->type == LNET_EVENT_PUT) ||
                  (desc->bd_type == BULK_GET_SOURCE &&
-                  ev->type == PTL_EVENT_GET_END) ||
-                 ev->type == PTL_EVENT_UNLINK);
+                  ev->type == LNET_EVENT_GET) ||
+                 ev->type == LNET_EVENT_UNLINK);
         LASSERT (ev->unlinked);
 
-        CDEBUG((ev->ni_fail_type == PTL_NI_OK) ? D_NET : D_ERROR,
-               "event type %d, status %d, desc %p\n",
-               ev->type, ev->ni_fail_type, desc);
+        CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
+               "event type %d, status %d, desc %p\n",
+               ev->type, ev->status, desc);
 
-        spin_lock_irqsave (&desc->bd_lock, flags);
+        cfs_spin_lock(&desc->bd_lock);
 
         LASSERT(desc->bd_network_rw);
         desc->bd_network_rw = 0;
 
-        if (ev->type != PTL_EVENT_UNLINK &&
-            ev->ni_fail_type == PTL_NI_OK) {
+        if (ev->type != LNET_EVENT_UNLINK && ev->status == 0) {
                 desc->bd_success = 1;
                 desc->bd_nob_transferred = ev->mlength;
+                desc->bd_sender = ev->sender;
         }
 
+        /* release the encrypted pages for write */
+        if (desc->bd_req->rq_bulk_write)
+                sptlrpc_enc_pool_put_pages(desc);
+
         /* NB don't unlock till after wakeup; desc can disappear under us
          * otherwise */
-        ptlrpc_wake_client_req(desc->bd_req);
+        ptlrpc_client_wake_req(desc->bd_req);
 
-        spin_unlock_irqrestore (&desc->bd_lock, flags);
+        cfs_spin_unlock(&desc->bd_lock);
         EXIT;
 }
 
-/*
+/* 
  * Server's incoming request callback
  */
-void request_in_callback(ptl_event_t *ev)
+void request_in_callback(lnet_event_t *ev)
 {
-        struct ptlrpc_cb_id               *cbid = ev->mem_desc.user_ptr;
+        struct ptlrpc_cb_id               *cbid = ev->md.user_ptr;
         struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg;
-        struct ptlrpc_srv_ni              *srv_ni = rqbd->rqbd_srv_ni;
-        struct ptlrpc_service             *service = srv_ni->sni_service;
+        struct ptlrpc_service             *service = rqbd->rqbd_service;
         struct ptlrpc_request             *req;
-        long                               flags;
         ENTRY;
 
-        LASSERT (ev->type == PTL_EVENT_PUT_END ||
-                 ev->type == PTL_EVENT_UNLINK);
-        LASSERT ((char *)ev->mem_desc.start >= rqbd->rqbd_buffer);
-        LASSERT ((char *)ev->mem_desc.start + ev->offset + ev->mlength <=
+        LASSERT (ev->type == LNET_EVENT_PUT ||
+                 ev->type == LNET_EVENT_UNLINK);
+        LASSERT ((char *)ev->md.start >= rqbd->rqbd_buffer);
+        LASSERT ((char *)ev->md.start + ev->offset + ev->mlength <=
                  rqbd->rqbd_buffer + service->srv_buf_size);
 
-        CDEBUG((ev->ni_fail_type == PTL_OK) ? D_NET : D_ERROR,
-               "event type %d, status %d, service %s\n",
-               ev->type, ev->ni_fail_type, service->srv_name);
+        CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
+               "event type %d, status %d, service %s\n",
+               ev->type, ev->status, service->srv_name);
 
         if (ev->unlinked) {
                 /* If this is the last request message to fit in the
@@ -180,16 +246,17 @@ void request_in_callback(ptl_event_t *ev)
                 req = &rqbd->rqbd_req;
                 memset(req, 0, sizeof (*req));
         } else {
-                LASSERT (ev->type == PTL_EVENT_PUT_END);
-                if (ev->ni_fail_type != PTL_NI_OK) {
+                LASSERT (ev->type == LNET_EVENT_PUT);
+                if (ev->status != 0) {
                         /* We moaned above already... */
                         return;
                 }
 
-                OBD_ALLOC_GFP(req, sizeof(*req), GFP_ATOMIC);
+                OBD_ALLOC_GFP(req, sizeof(*req), CFS_ALLOC_ATOMIC_TRY);
                 if (req == NULL) {
                         CERROR("Can't allocate incoming request descriptor: "
-                               "Dropping %s RPC from "LPX64"\n",
-                               service->srv_name, ev->initiator.nid);
+                               "Dropping %s RPC from %s\n",
+                               service->srv_name,
+                               libcfs_id2str(ev->initiator));
                         return;
                 }
         }
@@ -198,67 +265,81 @@ void request_in_callback(ptl_event_t *ev)
          * flags are reset and scalars are zero. We only set the message
          * size to non-zero if this was a successful receive. */
         req->rq_xid = ev->match_bits;
-        req->rq_reqmsg = ev->mem_desc.start + ev->offset;
-        if (ev->type == PTL_EVENT_PUT_END &&
-            ev->ni_fail_type == PTL_NI_OK)
-                req->rq_reqlen = ev->mlength;
-        do_gettimeofday(&req->rq_arrival_time);
-        req->rq_peer.peer_nid = ev->initiator.nid;
-        req->rq_peer.peer_ni = rqbd->rqbd_srv_ni->sni_ni;
+        req->rq_reqbuf = ev->md.start + ev->offset;
+        if (ev->type == LNET_EVENT_PUT && ev->status == 0)
+                req->rq_reqdata_len = ev->mlength;
+        cfs_gettimeofday(&req->rq_arrival_time);
+        req->rq_peer = ev->initiator;
+        req->rq_self = ev->target.nid;
         req->rq_rqbd = rqbd;
+        req->rq_phase = RQ_PHASE_NEW;
+#ifdef CRAY_XT3
+        req->rq_uid = ev->uid;
+#endif
+        cfs_spin_lock_init(&req->rq_lock);
+        CFS_INIT_LIST_HEAD(&req->rq_timed_list);
+        cfs_atomic_set(&req->rq_refcount, 1);
+        if (ev->type == LNET_EVENT_PUT)
+                CDEBUG(D_RPCTRACE, "incoming req@%p x"LPU64" msgsize %u\n",
+                       req, req->rq_xid, ev->mlength);
+
+        CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer));
+
+        cfs_spin_lock(&service->srv_lock);
 
-        spin_lock_irqsave (&service->srv_lock, flags);
+        req->rq_history_seq = service->srv_request_seq++;
+        cfs_list_add_tail(&req->rq_history_list, &service->srv_request_history);
 
         if (ev->unlinked) {
-                srv_ni->sni_nrqbd_receiving--;
-                if (ev->type != PTL_EVENT_UNLINK &&
-                    srv_ni->sni_nrqbd_receiving == 0) {
-                        /* This service is off-air on this interface because
-                         * all its request buffers are busy. Portals will
-                         * start dropping incoming requests until more buffers
-                         * get posted. NB don't moan if it's because we're
-                         * tearing down the service. */
-                        CWARN("All %s %s request buffers busy\n",
-                              service->srv_name, srv_ni->sni_ni->pni_name);
-                }
+                service->srv_nrqbd_receiving--;
+                CDEBUG(D_INFO, "Buffer complete: %d buffers still posted\n",
+                       service->srv_nrqbd_receiving);
+
+                /* Normally, don't complain about 0 buffers posted; LNET won't
+                 * drop incoming reqs since we set the portal lazy */
+                if (test_req_buffer_pressure &&
+                    ev->type != LNET_EVENT_UNLINK &&
+                    service->srv_nrqbd_receiving == 0)
+                        CWARN("All %s request buffers busy\n",
+                              service->srv_name);
+                /* req takes over the network's ref on rqbd */
         } else {
                 /* req takes a ref on rqbd */
                 rqbd->rqbd_refcount++;
         }
 
-        list_add_tail(&req->rq_list, &service->srv_request_queue);
+        cfs_list_add_tail(&req->rq_list, &service->srv_req_in_queue);
         service->srv_n_queued_reqs++;
 
         /* NB everything can disappear under us once the request
          * has been queued and we unlock, so do the wake now... */
-        wake_up(&service->srv_waitq);
+        cfs_waitq_signal(&service->srv_waitq);
 
-        spin_unlock_irqrestore(&service->srv_lock, flags);
+        cfs_spin_unlock(&service->srv_lock);
 
         EXIT;
 }
 
-/*
+/* 
  * Server's outgoing reply callback
  */
-void reply_out_callback(ptl_event_t *ev)
+void reply_out_callback(lnet_event_t *ev)
 {
-        struct ptlrpc_cb_id       *cbid = ev->mem_desc.user_ptr;
+        struct ptlrpc_cb_id       *cbid = ev->md.user_ptr;
         struct ptlrpc_reply_state *rs = cbid->cbid_arg;
-        struct ptlrpc_srv_ni      *sni = rs->rs_srv_ni;
-        struct ptlrpc_service     *svc = sni->sni_service;
-        unsigned long              flags;
+        struct ptlrpc_service     *svc = rs->rs_service;
         ENTRY;
 
-        LASSERT (ev->type == PTL_EVENT_SEND_END ||
-                 ev->type == PTL_EVENT_ACK ||
-                 ev->type == PTL_EVENT_UNLINK);
+        LASSERT (ev->type == LNET_EVENT_SEND ||
+                 ev->type == LNET_EVENT_ACK ||
+                 ev->type == LNET_EVENT_UNLINK);
 
         if (!rs->rs_difficult) {
-                /* I'm totally responsible for freeing "easy" replies */
+                /* 'Easy' replies have no further processing so I drop the
+                 * net's ref on 'rs' */
                 LASSERT (ev->unlinked);
-                lustre_free_reply_state (rs);
-                atomic_dec (&svc->srv_outstanding_replies);
+                ptlrpc_rs_decref(rs);
+                cfs_atomic_dec (&svc->srv_outstanding_replies);
                 EXIT;
                 return;
         }
@@ -266,11 +347,16 @@ void reply_out_callback(ptl_event_t *ev)
         LASSERT (rs->rs_on_net);
 
         if (ev->unlinked) {
-                /* Last network callback */
-                spin_lock_irqsave (&svc->srv_lock, flags);
+                /* Last network callback. The net's ref on 'rs' stays put
+                 * until ptlrpc_handle_rs() is done with it */
+                cfs_spin_lock(&svc->srv_lock);
+                cfs_spin_lock(&rs->rs_lock);
                 rs->rs_on_net = 0;
-                ptlrpc_schedule_difficult_reply (rs);
-                spin_unlock_irqrestore (&svc->srv_lock, flags);
+                if (!rs->rs_no_ack ||
+                    rs->rs_transno <= rs->rs_export->exp_obd->obd_last_committed)
+                        ptlrpc_schedule_difficult_reply (rs);
+                cfs_spin_unlock(&rs->rs_lock);
+                cfs_spin_unlock(&svc->srv_lock);
         }
 
         EXIT;
@@ -279,123 +365,145 @@ void reply_out_callback(ptl_event_t *ev)
 /*
  * Server's bulk completion callback
  */
-void server_bulk_callback (ptl_event_t *ev)
+void server_bulk_callback (lnet_event_t *ev)
 {
-        struct ptlrpc_cb_id     *cbid = ev->mem_desc.user_ptr;
+        struct ptlrpc_cb_id     *cbid = ev->md.user_ptr;
         struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
-        unsigned long            flags;
         ENTRY;
 
-        LASSERT (ev->type == PTL_EVENT_SEND_END ||
-                 ev->type == PTL_EVENT_UNLINK ||
+        LASSERT (ev->type == LNET_EVENT_SEND ||
+                 ev->type == LNET_EVENT_UNLINK ||
                  (desc->bd_type == BULK_PUT_SOURCE &&
-                  ev->type == PTL_EVENT_ACK) ||
+                  ev->type == LNET_EVENT_ACK) ||
                  (desc->bd_type == BULK_GET_SINK &&
-                  ev->type == PTL_EVENT_REPLY_END));
+                  ev->type == LNET_EVENT_REPLY));
 
-        CDEBUG((ev->ni_fail_type == PTL_NI_OK) ? D_NET : D_ERROR,
-               "event type %d, status %d, desc %p\n",
-               ev->type, ev->ni_fail_type, desc);
+        CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
+               "event type %d, status %d, desc %p\n",
+               ev->type, ev->status, desc);
 
-        spin_lock_irqsave (&desc->bd_lock, flags);
-
-        if ((ev->type == PTL_EVENT_ACK ||
-             ev->type == PTL_EVENT_REPLY_END) &&
-            ev->ni_fail_type == PTL_NI_OK) {
+        cfs_spin_lock(&desc->bd_lock);
+
+        if ((ev->type == LNET_EVENT_ACK ||
+             ev->type == LNET_EVENT_REPLY) &&
+            ev->status == 0) {
                 /* We heard back from the peer, so even if we get this
                  * before the SENT event (oh yes we can), we know we
                  * read/wrote the peer buffer and how much... */
                 desc->bd_success = 1;
                 desc->bd_nob_transferred = ev->mlength;
+                desc->bd_sender = ev->sender;
         }
 
         if (ev->unlinked) {
                 /* This is the last callback no matter what... */
                 desc->bd_network_rw = 0;
-                wake_up(&desc->bd_waitq);
+                cfs_waitq_signal(&desc->bd_waitq);
         }
 
-        spin_unlock_irqrestore (&desc->bd_lock, flags);
+        cfs_spin_unlock(&desc->bd_lock);
         EXIT;
 }
 
-static int ptlrpc_master_callback(ptl_event_t *ev)
+static void ptlrpc_master_callback(lnet_event_t *ev)
 {
-        struct ptlrpc_cb_id *cbid = ev->mem_desc.user_ptr;
-        void (*callback)(ptl_event_t *ev) = cbid->cbid_fn;
+        struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
+        void (*callback)(lnet_event_t *ev) = cbid->cbid_fn;
 
         /* Honestly, it's best to find out early. */
-        LASSERT (cbid->cbid_arg != (void *)0x5a5a5a5a5a5a5a5a);
+        LASSERT (cbid->cbid_arg != LP_POISON);
         LASSERT (callback == request_out_callback ||
                  callback == reply_in_callback ||
                  callback == client_bulk_callback ||
                  callback == request_in_callback ||
                  callback == reply_out_callback ||
                  callback == server_bulk_callback);
-
+
         callback (ev);
-        return (0);
 }
 
-int ptlrpc_uuid_to_peer (struct obd_uuid *uuid, struct ptlrpc_peer *peer)
+int ptlrpc_uuid_to_peer (struct obd_uuid *uuid,
+                         lnet_process_id_t *peer, lnet_nid_t *self)
 {
-        struct ptlrpc_ni   *pni;
-        ptl_nid_t           peer_nid;
-        ptl_handle_ni_t     peer_ni;
-        int                 i;
-        char                str[20];
-        int                 rc = lustre_uuid_to_peer(uuid->uuid,
-                                                     &peer_ni, &peer_nid);
-        if (rc != 0)
-                RETURN (rc);
-
-        for (i = 0; i < ptlrpc_ninterfaces; i++) {
-                pni = &ptlrpc_interfaces[i];
-
-                if (!memcmp(&peer_ni, &pni->pni_ni_h,
-                            sizeof (peer_ni))) {
-                        peer->peer_nid = peer_nid;
-                        peer->peer_ni = pni;
-                        return (0);
+        int               best_dist = 0;
+        __u32             best_order = 0;
+        int               count = 0;
+        int               rc = -ENOENT;
+        int               portals_compatibility;
+        int               dist;
+        __u32             order;
+        lnet_nid_t        dst_nid;
+        lnet_nid_t        src_nid;
+
+        portals_compatibility = LNetCtl(IOC_LIBCFS_PORTALS_COMPATIBILITY, NULL);
+
+        peer->pid = LUSTRE_SRV_LNET_PID;
+
+        /* Choose the matching UUID that's closest */
+        while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) {
+                dist = LNetDist(dst_nid, &src_nid, &order);
+                if (dist < 0)
+                        continue;
+
+                if (dist == 0) {                /* local! use loopback LND */
+                        peer->nid = *self = LNET_MKNID(LNET_MKNET(LOLND, 0), 0);
+                        rc = 0;
+                        break;
+                }
+
+                if (rc < 0 ||
+                    dist < best_dist ||
+                    (dist == best_dist && order < best_order)) {
+                        best_dist = dist;
+                        best_order = order;
+
+                        if (portals_compatibility > 1) {
+                                /* Strong portals compatibility: Zero the nid's
+                                 * NET, so if I'm reading new config logs, or
+                                 * getting configured by (new) lconf I can
+                                 * still talk to old servers. */
+                                dst_nid = LNET_MKNID(0, LNET_NIDADDR(dst_nid));
+                                src_nid = LNET_MKNID(0, LNET_NIDADDR(src_nid));
+                        }
+                        peer->nid = dst_nid;
+                        *self = src_nid;
+                        rc = 0;
                 }
         }
 
-        PtlSnprintHandle(str, sizeof(str), peer_ni);
-        CERROR("Can't find ptlrpc interface for "LPX64" ni %s\n",
-               peer_nid, str);
-        return (-ENOENT);
+        CDEBUG(D_NET,"%s->%s\n", uuid->uuid, libcfs_id2str(*peer));
+        return rc;
 }
 
-void ptlrpc_ni_fini(struct ptlrpc_ni *pni)
+void ptlrpc_ni_fini(void)
 {
-        wait_queue_head_t   waitq;
+        cfs_waitq_t         waitq;
        struct l_wait_info  lwi;
        int                 rc;
        int                 retries;
-
+
        /* Wait for the event queue to become idle since there may still be
         * messages in flight with pending events (i.e. the fire-and-forget
         * messages == client requests and "non-difficult" server
         * replies */
        for (retries = 0;; retries++) {
-                rc = PtlEQFree(pni->pni_eq_h);
+                rc = LNetEQFree(ptlrpc_eq_h);
                switch (rc) {
                default:
                        LBUG();
 
-                case PTL_OK:
-                        kportal_put_ni (pni->pni_number);
+                case 0:
+                        LNetNIFini();
                        return;
-
-                case PTL_EQ_IN_USE:
+
+                case -EBUSY:
                        if (retries != 0)
-                                CWARN("Event queue for %s still busy\n",
-                                      pni->pni_name);
-
+                                CWARN("Event queue still busy\n");
+
                        /* Wait for a bit */
-                        init_waitqueue_head(&waitq);
-                        lwi = LWI_TIMEOUT(2*HZ, NULL, NULL);
+                        cfs_waitq_init(&waitq);
+                        lwi = LWI_TIMEOUT(cfs_time_seconds(2), NULL, NULL);
                        l_wait_event(waitq, 0, &lwi);
                        break;
                }
        }
@@ -403,92 +511,136 @@ void ptlrpc_ni_fini(struct ptlrpc_ni *pni)
        /* notreached */
 }
 
-int ptlrpc_ni_init(int number, char *name, struct ptlrpc_ni *pni)
+lnet_pid_t ptl_get_pid(void)
 {
-        int              rc;
-        char             str[20];
-        ptl_handle_ni_t *nip = kportal_get_ni (number);
+        lnet_pid_t        pid;
 
-        if (nip == NULL) {
-                CDEBUG (D_NET, "Network interface %s not loaded\n", name);
-                return (-ENOENT);
-        }
-
-        PtlSnprintHandle(str, sizeof(str), *nip);
-        CDEBUG (D_NET, "init %d %s: %s\n", number, name, str);
+#ifndef __KERNEL__
+        pid = getpid();
+#else
+        pid = LUSTRE_SRV_LNET_PID;
+#endif
+        return pid;
+}
 
-        pni->pni_name = name;
-        pni->pni_number = number;
-        pni->pni_ni_h = *nip;
+int ptlrpc_ni_init(void)
+{
+        int              rc;
+        lnet_pid_t       pid;
 
-        pni->pni_eq_h = PTL_INVALID_HANDLE;
+        pid = ptl_get_pid();
+        CDEBUG(D_NET, "My pid is: %x\n", pid);
 
-        rc = PtlEQAlloc(pni->pni_ni_h, PTLRPC_NUM_EQ, PTLRPC_EQ_CALLBACK,
-                        &pni->pni_eq_h);
+        /* We're not passing any limits yet... */
+        rc = LNetNIInit(pid);
+        if (rc < 0) {
+                CDEBUG (D_NET, "Can't init network interface: %d\n", rc);
+                return (-ENOENT);
+        }
 
-        if (rc != PTL_OK)
-                GOTO (fail, rc = -ENOMEM);
+        /* CAVEAT EMPTOR: how we process portals events is _radically_
+         * different depending on... */
+#ifdef __KERNEL__
+        /* kernel portals calls our master callback when events are added to
+         * the event queue. In fact lustre never pulls events off this queue,
+         * so it's only sized for some debug history. */
+        rc = LNetEQAlloc(1024, ptlrpc_master_callback, &ptlrpc_eq_h);
+#else
+        /* liblustre calls the master callback when it removes events from the
+         * event queue. The event queue has to be big enough not to drop
+         * anything */
+        rc = LNetEQAlloc(10240, LNET_EQ_HANDLER_NONE, &ptlrpc_eq_h);
+#endif
+        if (rc == 0)
+                return 0;
 
-        return (0);
- fail:
-        CERROR ("Failed to initialise network interface %s: %d\n",
-                name, rc);
+        CERROR ("Failed to allocate event queue: %d\n", rc);
+        LNetNIFini();
 
-        /* OK to do complete teardown since we invalidated the handles above */
-        ptlrpc_ni_fini (pni);
-        return (rc);
+        return (-ENOMEM);
 }
 
 #ifndef __KERNEL__
-LIST_HEAD(liblustre_wait_callbacks);
+CFS_LIST_HEAD(liblustre_wait_callbacks);
+CFS_LIST_HEAD(liblustre_idle_callbacks);
 void *liblustre_services_callback;
 
 void *
-liblustre_register_wait_callback (int (*fn)(void *arg), void *arg)
+liblustre_register_waitidle_callback (cfs_list_t *callback_list,
+                                      const char *name,
+                                      int (*fn)(void *arg), void *arg)
 {
         struct liblustre_wait_callback *llwc;
-
+
         OBD_ALLOC(llwc, sizeof(*llwc));
         LASSERT (llwc != NULL);
-
+
+        llwc->llwc_name = name;
         llwc->llwc_fn = fn;
         llwc->llwc_arg = arg;
-        list_add_tail(&llwc->llwc_list, &liblustre_wait_callbacks);
-
+        cfs_list_add_tail(&llwc->llwc_list, callback_list);
+
         return (llwc);
 }
 
 void
-liblustre_deregister_wait_callback (void *opaque)
+liblustre_deregister_waitidle_callback (void *opaque)
 {
         struct liblustre_wait_callback *llwc = opaque;
-
-        list_del(&llwc->llwc_list);
+
+        cfs_list_del(&llwc->llwc_list);
         OBD_FREE(llwc, sizeof(*llwc));
 }
 
+void *
+liblustre_register_wait_callback (const char *name,
+                                  int (*fn)(void *arg), void *arg)
+{
+        return liblustre_register_waitidle_callback(&liblustre_wait_callbacks,
+                                                    name, fn, arg);
+}
+
+void
+liblustre_deregister_wait_callback (void *opaque)
+{
+        liblustre_deregister_waitidle_callback(opaque);
+}
+
+void *
+liblustre_register_idle_callback (const char *name,
+                                  int (*fn)(void *arg), void *arg)
+{
+        return liblustre_register_waitidle_callback(&liblustre_idle_callbacks,
+                                                    name, fn, arg);
+}
+
+void
+liblustre_deregister_idle_callback (void *opaque)
+{
+        liblustre_deregister_waitidle_callback(opaque);
+}
+
 int
 liblustre_check_events (int timeout)
 {
-        ptl_event_t ev;
+        lnet_event_t ev;
         int         rc;
         int         i;
         ENTRY;
 
-        rc = PtlEQPoll(&ptlrpc_interfaces[0].pni_eq_h, 1, timeout * 1000,
-                       &ev, &i);
-        if (rc == PTL_EQ_EMPTY)
+        rc = LNetEQPoll(&ptlrpc_eq_h, 1, timeout * 1000, &ev, &i);
+        if (rc == 0)
                 RETURN(0);
-
-        LASSERT (rc == PTL_EQ_DROPPED || rc == PTL_OK);
-
-        /* liblustre: no asynch callback so we can't affort to miss any
+
+        LASSERT (rc == -EOVERFLOW || rc == 1);
+
+        /* liblustre: no asynch callback so we can't afford to miss any
         * events... */
-        if (rc == PTL_EQ_DROPPED) {
+        if (rc == -EOVERFLOW) {
                 CERROR ("Dropped an event!!!\n");
                 abort();
         }
-
+
         ptlrpc_master_callback (&ev);
         RETURN(1);
 }
@@ -498,7 +650,7 @@ int liblustre_waiting = 0;
 int
 liblustre_wait_event (int timeout)
 {
-        struct list_head                *tmp;
+        cfs_list_t                      *tmp;
         struct liblustre_wait_callback  *llwc;
         int                              found_something = 0;
 
@@ -511,10 +663,11 @@ liblustre_wait_event (int timeout)
                         found_something = 1;
 
                 /* Give all registered callbacks a bite at the cherry */
-                list_for_each(tmp, &liblustre_wait_callbacks) {
-                        llwc = list_entry(tmp, struct liblustre_wait_callback,
-                                          llwc_list);
-
+                cfs_list_for_each(tmp, &liblustre_wait_callbacks) {
+                        llwc = cfs_list_entry(tmp,
+                                              struct liblustre_wait_callback,
+                                              llwc_list);
+
                         if (llwc->llwc_fn(llwc->llwc_arg))
                                 found_something = 1;
                 }
@@ -533,64 +686,66 @@ liblustre_wait_event (int timeout)
         return found_something;
 }
 
-static int cray_portals_callback(ptl_event_t *ev)
+void
+liblustre_wait_idle(void)
 {
-        /* We get a callback from the client Cray portals implementation
-         * whenever anyone calls PtlEQPoll(), and an event queue with a
-         * callback handler has outstanding events.
-         *
-         * If it's not liblustre calling PtlEQPoll(), this lets us know we
-         * have outstanding events which we handle with
-         * liblustre_wait_event().
-         *
-         * Otherwise, we're already eagerly consuming events and we'd
-         * handle events out of order if we recursed. */
-        if (liblustre_waiting)
-                return;
-
-        liblustre_wait_event(0);
+        static int recursed = 0;
+
+        cfs_list_t                      *tmp;
+        struct liblustre_wait_callback  *llwc;
+        int                              idle = 0;
+
+        LASSERT(!recursed);
+        recursed = 1;
+
+        do {
+                liblustre_wait_event(0);
+
+                idle = 1;
+
+                cfs_list_for_each(tmp, &liblustre_idle_callbacks) {
+                        llwc = cfs_list_entry(tmp,
+                                              struct liblustre_wait_callback,
+                                              llwc_list);
+
+                        if (!llwc->llwc_fn(llwc->llwc_arg)) {
+                                idle = 0;
+                                break;
+                        }
+                }
+
+        } while (!idle);
+
+        recursed = 0;
 }
+
 #endif /* __KERNEL__ */
 
 int ptlrpc_init_portals(void)
 {
-        /* Add new portals network interfaces here.
-         * Order is irrelevent! */
-        static struct {
-                int   number;
-                char *name;
-        } ptl_nis[] = {
-                {QSWNAL,    "qswnal"},
-                {SOCKNAL,   "socknal"},
-                {GMNAL,     "gmnal"},
-                {IBNAL,     "ibnal"},
-                {TCPNAL,    "tcpnal"},
-                {SCIMACNAL, "scimacnal"}};
-        int   rc;
-        int   i;
-
-        LASSERT(ptlrpc_ninterfaces == 0);
-
-        for (i = 0; i < sizeof (ptl_nis) / sizeof (ptl_nis[0]); i++) {
-                LASSERT(ptlrpc_ninterfaces < (sizeof(ptlrpc_interfaces) /
-                                              sizeof(ptlrpc_interfaces[0])));
-
-                rc = ptlrpc_ni_init(ptl_nis[i].number, ptl_nis[i].name,
-                                    &ptlrpc_interfaces[ptlrpc_ninterfaces]);
-                if (rc == 0)
-                        ptlrpc_ninterfaces++;
-        }
+        int rc = ptlrpc_ni_init();
 
-        if (ptlrpc_ninterfaces == 0) {
-                CERROR("network initialisation failed: is a NAL module "
-                       "loaded?\n");
+        if (rc != 0) {
+                CERROR("network initialisation failed\n");
                 return -EIO;
         }
 #ifndef __KERNEL__
-        liblustre_services_callback =
-                liblustre_register_wait_callback(&liblustre_check_services, NULL);
+        liblustre_services_callback =
+                liblustre_register_wait_callback("liblustre_check_services",
+                                                 &liblustre_check_services,
+                                                 NULL);
+        cfs_init_completion_module(liblustre_wait_event);
+#endif
+        rc = ptlrpcd_addref();
+        if (rc == 0)
+                return 0;
+
+        CERROR("rpcd initialisation failed\n");
+#ifndef __KERNEL__
+        liblustre_deregister_wait_callback(liblustre_services_callback);
 #endif
-        return 0;
+        ptlrpc_ni_fini();
+        return rc;
 }
 
 void ptlrpc_exit_portals(void)
@@ -598,6 +753,6 @@
 #ifndef __KERNEL__
         liblustre_deregister_wait_callback(liblustre_services_callback);
 #endif
-        while (ptlrpc_ninterfaces > 0)
-                ptlrpc_ni_fini (&ptlrpc_interfaces[--ptlrpc_ninterfaces]);
+        ptlrpcd_decref();
+        ptlrpc_ni_fini();
 }
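
Editor's note (not part of the patch): the change above replaces the old per-NAL Portals event queues with a single LNET event queue, ptlrpc_eq_h, whose one registered handler, ptlrpc_master_callback(), dispatches each event through the struct ptlrpc_cb_id stored in the MD's user_ptr. The standalone sketch below illustrates only that dispatch pattern. The cb_id shape mirrors the code above, but the event struct, the toy callback, and the driver are simplified stand-ins invented for illustration, not LNET's real types or API.

#include <stdio.h>

/* Simplified stand-in for lnet_event_t: just the fields the dispatch
 * pattern needs (assumption; the real struct carries much more). */
struct toy_event {
        void *user_ptr;         /* plays the role of ev->md.user_ptr */
        int   type;
        int   status;
};

/* Same shape as the ptlrpc_cb_id used throughout events.c: a callback
 * paired with the object (request, bulk desc, reply state) it handles. */
struct toy_cb_id {
        void (*cbid_fn)(struct toy_event *ev);
        void  *cbid_arg;
};

/* One handler for the whole queue, as with
 * LNetEQAlloc(1024, ptlrpc_master_callback, &ptlrpc_eq_h) above: it
 * only recovers the per-message cb_id and fans the event out to it. */
static void toy_master_callback(struct toy_event *ev)
{
        struct toy_cb_id *cbid = ev->user_ptr;

        cbid->cbid_fn(ev);
}

/* Hypothetical per-message callback, standing in for e.g.
 * reply_in_callback(); it recovers its object from cbid_arg. */
static void toy_reply_in(struct toy_event *ev)
{
        struct toy_cb_id *cbid = ev->user_ptr;
        int *xid = cbid->cbid_arg;

        printf("event type %d status %d for request x%d\n",
               ev->type, ev->status, *xid);
}

int main(void)
{
        int req_xid = 7;
        struct toy_cb_id cbid = { toy_reply_in, &req_xid };
        struct toy_event ev = { &cbid, 2 /* pretend LNET_EVENT_PUT */, 0 };

        toy_master_callback(&ev);       /* dispatch as events.c does */
        return 0;
}

This is why each MD posted by ptlrpc is created with its user_ptr pointing at the cb_id embedded in the object it describes: the single event handler needs no global lookup table, and the LASSERT list in ptlrpc_master_callback() doubles as a cheap sanity check that the cb_id is still intact.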