-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * Copyright (C) 2002 Cluster File Systems, Inc.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
*
- * This file is part of Lustre, http://www.lustre.org.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
*
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
*
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
*
+ * Copyright (c) 2012, 2013, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
*/
#define DEBUG_SUBSYSTEM S_RPC
-#include <linux/module.h>
-#include <linux/obd_support.h>
-#include <linux/lustre_net.h>
+#ifndef __KERNEL__
+# include <liblustre.h>
+#else
+# include <libcfs/libcfs.h>
+# ifdef __mips64__
+# include <linux/kernel.h>
+# endif
+#endif
-ptl_handle_eq_t request_out_eq, reply_in_eq, reply_out_eq, bulk_source_eq,
- bulk_sink_eq;
-static const ptl_handle_ni_t *socknal_nip = NULL, *toenal_nip = NULL,
- *qswnal_nip = NULL, *gmnal_nip = NULL;
+#include <obd_class.h>
+#include <lustre_net.h>
+#include <lustre_sec.h>
+#include "ptlrpc_internal.h"
+
+lnet_handle_eq_t ptlrpc_eq_h;
/*
- * Free the packet when it has gone out
+ * Client's outgoing request callback
*/
-static int request_out_callback(ptl_event_t *ev)
+void request_out_callback(lnet_event_t *ev)
{
- struct ptlrpc_request *req = ev->mem_desc.user_ptr;
+ struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
+ struct ptlrpc_request *req = cbid->cbid_arg;
ENTRY;
- LASSERT ((ev->mem_desc.options & PTL_MD_IOV) == 0); /* requests always contiguous */
+ LASSERT (ev->type == LNET_EVENT_SEND ||
+ ev->type == LNET_EVENT_UNLINK);
+ LASSERT (ev->unlinked);
+
+ DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);
- if (ev->type != PTL_EVENT_SENT) {
- // XXX make sure we understand all events, including ACK's
- CERROR("Unknown event %d\n", ev->type);
- LBUG();
+ sptlrpc_request_out_callback(req);
+ spin_lock(&req->rq_lock);
+ req->rq_real_sent = cfs_time_current_sec();
+ if (ev->unlinked)
+ req->rq_req_unlink = 0;
+
+ if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {
+
+ /* Failed send: make it seem like the reply timed out, just
+ * like failing sends in client.c do currently... */
+
+ req->rq_net_err = 1;
+ ptlrpc_client_wake_req(req);
}
+ spin_unlock(&req->rq_lock);
- /* this balances the atomic_inc in ptl_send_rpc */
ptlrpc_req_finished(req);
- RETURN(1);
-}
+ EXIT;
+}
/*
- * Free the packet when it has gone out
+ * Client's incoming reply callback
*/
-static int reply_out_callback(ptl_event_t *ev)
+void reply_in_callback(lnet_event_t *ev)
{
+ struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
+ struct ptlrpc_request *req = cbid->cbid_arg;
ENTRY;
- LASSERT ((ev->mem_desc.options & PTL_MD_IOV) == 0); /* replies always contiguous */
+ DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);
+
+ LASSERT (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
+ LASSERT (ev->md.start == req->rq_repbuf);
+ LASSERT (ev->offset + ev->mlength <= req->rq_repbuf_len);
+ /* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
+ * for adaptive timeouts' early reply. */
+ LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);
- if (ev->type == PTL_EVENT_SENT) {
- OBD_FREE(ev->mem_desc.start, ev->mem_desc.length);
+ spin_lock(&req->rq_lock);
+
+ req->rq_receiving_reply = 0;
+ req->rq_early = 0;
+ if (ev->unlinked)
+ req->rq_reply_unlink = 0;
+
+ if (ev->status)
+ goto out_wake;
+
+ if (ev->type == LNET_EVENT_UNLINK) {
+ LASSERT(ev->unlinked);
+ DEBUG_REQ(D_NET, req, "unlink");
+ goto out_wake;
+ }
+
+ if (ev->mlength < ev->rlength) {
+ CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req,
+ req->rq_replen, ev->rlength, ev->offset);
+ req->rq_reply_truncate = 1;
+ req->rq_replied = 1;
+ req->rq_status = -EOVERFLOW;
+ req->rq_nob_received = ev->rlength + ev->offset;
+ goto out_wake;
+ }
+
+ if ((ev->offset == 0) &&
+ ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))) {
+ /* Early reply */
+ DEBUG_REQ(D_ADAPTTO, req,
+ "Early reply received: mlen=%u offset=%d replen=%d "
+ "replied=%d unlinked=%d", ev->mlength, ev->offset,
+ req->rq_replen, req->rq_replied, ev->unlinked);
+
+ req->rq_early_count++; /* number received, client side */
+
+ if (req->rq_replied) /* already got the real reply */
+ goto out_wake;
+
+ req->rq_early = 1;
+ req->rq_reply_off = ev->offset;
+ req->rq_nob_received = ev->mlength;
+ /* And we're still receiving */
+ req->rq_receiving_reply = 1;
} else {
- // XXX make sure we understand all events, including ACK's
- CERROR("Unknown event %d\n", ev->type);
- LBUG();
+ /* Real reply */
+ req->rq_rep_swab_mask = 0;
+ req->rq_replied = 1;
+ /* Got reply, no resend required */
+ req->rq_resend = 0;
+ req->rq_reply_off = ev->offset;
+ req->rq_nob_received = ev->mlength;
+ /* LNetMDUnlink can't be called under the LNET_LOCK,
+ * so we must unlink in ptlrpc_unregister_reply */
+ DEBUG_REQ(D_INFO, req,
+ "reply in flags=%x mlen=%u offset=%d replen=%d",
+ lustre_msg_get_flags(req->rq_reqmsg),
+ ev->mlength, ev->offset, req->rq_replen);
}
- RETURN(1);
+ req->rq_import->imp_last_reply_time = cfs_time_current_sec();
+
+out_wake:
+ /* NB don't unlock till after wakeup; req can disappear under us
+ * since we don't have our own ref */
+ ptlrpc_client_wake_req(req);
+ spin_unlock(&req->rq_lock);
+ EXIT;
}
/*
- * Wake up the thread waiting for the reply once it comes in.
+ * Client's bulk has been written/read
*/
-static int reply_in_callback(ptl_event_t *ev)
+void client_bulk_callback (lnet_event_t *ev)
{
- struct ptlrpc_request *req = ev->mem_desc.user_ptr;
+ struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
+ struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
+ struct ptlrpc_request *req;
ENTRY;
- LASSERT ((ev->mem_desc.options & PTL_MD_IOV) == 0); /* replies always contiguous */
+ LASSERT ((desc->bd_type == BULK_PUT_SINK &&
+ ev->type == LNET_EVENT_PUT) ||
+ (desc->bd_type == BULK_GET_SOURCE &&
+ ev->type == LNET_EVENT_GET) ||
+ ev->type == LNET_EVENT_UNLINK);
+ LASSERT (ev->unlinked);
+
+ if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB, CFS_FAIL_ONCE))
+ ev->status = -EIO;
+
+ if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2, CFS_FAIL_ONCE))
+ ev->status = -EIO;
+
+ CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
+ "event type %d, status %d, desc %p\n",
+ ev->type, ev->status, desc);
+
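+ /* A bulk descriptor may be split over several MDs, each of which
+ * delivers exactly one (unlinked) event here; the request is only
+ * woken below once the last MD is gone, i.e. once bd_md_count
+ * drops to 0 */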
+ spin_lock(&desc->bd_lock);
+ req = desc->bd_req;
+ LASSERT(desc->bd_md_count > 0);
+ desc->bd_md_count--;
+
+ if (ev->type != LNET_EVENT_UNLINK && ev->status == 0) {
+ desc->bd_nob_transferred += ev->mlength;
+ desc->bd_sender = ev->sender;
+ } else {
+ /* start reconnect and resend if network error hit */
+ spin_lock(&req->rq_lock);
+ req->rq_net_err = 1;
+ spin_unlock(&req->rq_lock);
+ }
+
+ if (ev->status != 0)
+ desc->bd_failure = 1;
+
+ /* NB don't unlock till after wakeup; desc can disappear under us
+ * otherwise */
+ if (desc->bd_md_count == 0)
+ ptlrpc_client_wake_req(desc->bd_req);
+
+ spin_unlock(&desc->bd_lock);
+ EXIT;
+}
- if (req->rq_xid == 0x5a5a5a5a5a5a5a5a) {
- CERROR("Reply received for freed request! Probably a missing "
- "ptlrpc_abort()\n");
- LBUG();
- }
+/*
+ * We will have percpt request history list for ptlrpc service in upcoming
+ * patches because we don't want to be serialized by current per-service
+ * history operations. So we require history ID can (somehow) show arriving
+ * order w/o grabbing global lock, and user can sort them in userspace.
+ *
+ * This is how we generate history ID for ptlrpc_request:
+ * ----------------------------------------------------
+ * | 32 bits | 16 bits | (16 - X)bits | X bits |
+ * ----------------------------------------------------
+ * | seconds | usec / 16 | sequence | CPT id |
+ * ----------------------------------------------------
+ *
+ * it might not be precise but should be good enough.
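+ *
+ * For example (illustrative numbers only): with X = 2 CPT bits, a
+ * request arriving at tv_sec = 1000, tv_usec = 48 (so usec / 16 = 3)
+ * on CPT 1 gets ID (1000 << 32) | (3 << 16) | 1; a second request in
+ * the same usec bucket on the same CPT bumps scp_hist_seq by
+ * (1 << 2), i.e. by one in the sequence field.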
+ */
- if (req->rq_xid != ev->match_bits) {
- CERROR("Reply packet for wrong request\n");
- LBUG();
- }
+#define REQS_CPT_BITS(svcpt) ((svcpt)->scp_service->srv_cpt_bits)
- if (ev->type == PTL_EVENT_PUT) {
- req->rq_repmsg = ev->mem_desc.start + ev->offset;
- barrier();
- wake_up(&req->rq_wait_for_rep);
- } else {
- // XXX make sure we understand all events, including ACK's
- CERROR("Unknown event %d\n", ev->type);
- LBUG();
- }
+#define REQS_SEC_SHIFT 32
+#define REQS_USEC_SHIFT 16
+#define REQS_SEQ_SHIFT(svcpt) REQS_CPT_BITS(svcpt)
- RETURN(1);
+static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt,
+ struct ptlrpc_request *req)
+{
+ __u64 sec = req->rq_arrival_time.tv_sec;
+ __u32 usec = req->rq_arrival_time.tv_usec >> 4; /* usec / 16 */
+ __u64 new_seq;
+
+ /* set sequence ID for request and add it to history list;
+ * the caller must hold svcpt::scp_lock */
+
+ new_seq = (sec << REQS_SEC_SHIFT) |
+ (usec << REQS_USEC_SHIFT) |
+ (svcpt->scp_cpt < 0 ? 0 : svcpt->scp_cpt);
+
+ if (new_seq > svcpt->scp_hist_seq) {
+ /* This handles the initial case of scp_hist_seq == 0 and
+ * the case where we just jumped into a new time window */
+ svcpt->scp_hist_seq = new_seq;
+ } else {
+ LASSERT(REQS_SEQ_SHIFT(svcpt) < REQS_USEC_SHIFT);
+ /* NB: increase the sequence number in the current usec
+ * bucket; however, it's possible that we used up all the
+ * bits for the sequence and jumped into the next usec
+ * bucket (future time), in which case we hope there will be
+ * fewer RPCs per bucket at some point and the sequence will
+ * catch up again */
+ svcpt->scp_hist_seq += (1U << REQS_SEQ_SHIFT(svcpt));
+ new_seq = svcpt->scp_hist_seq;
+ }
+
+ req->rq_history_seq = new_seq;
+
+ list_add_tail(&req->rq_history_list, &svcpt->scp_hist_reqs);
}
-int request_in_callback(ptl_event_t *ev)
+/*
+ * Server's incoming request callback
+ */
+void request_in_callback(lnet_event_t *ev)
{
- struct ptlrpc_request_buffer_desc *rqbd = ev->mem_desc.user_ptr;
- struct ptlrpc_service *service = rqbd->rqbd_service;
-
- LASSERT ((ev->mem_desc.options & PTL_MD_IOV) == 0); /* requests always contiguous */
- LASSERT (ev->type == PTL_EVENT_PUT); /* we only enable puts */
- LASSERT (atomic_read (&service->srv_nrqbds_receiving) > 0);
- LASSERT (atomic_read (&rqbd->rqbd_refcount) > 0);
-
- if (ev->rlength != ev->mlength)
- CERROR("Warning: Possibly truncated rpc (%d/%d)\n",
- ev->mlength, ev->rlength);
-
- if (ptl_is_valid_handle (&ev->unlinked_me))
- {
- /* This is the last request to be received into this
- * request buffer. We don't bump the refcount, since the
- * thread servicing this event is effectively taking over
- * portals' reference.
- */
-#warning ev->unlinked_me.nal_idx is not set properly in a callback
- LASSERT (ev->unlinked_me.handle_idx == rqbd->rqbd_me_h.handle_idx);
-
- if (atomic_dec_and_test (&service->srv_nrqbds_receiving)) /* we're off-air */
- {
- CERROR ("All request buffers busy\n");
- /* we'll probably start dropping packets in portals soon */
+ struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
+ struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg;
+ struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
+ struct ptlrpc_service *service = svcpt->scp_service;
+ struct ptlrpc_request *req;
+ ENTRY;
+
+ LASSERT (ev->type == LNET_EVENT_PUT ||
+ ev->type == LNET_EVENT_UNLINK);
+ LASSERT ((char *)ev->md.start >= rqbd->rqbd_buffer);
+ LASSERT ((char *)ev->md.start + ev->offset + ev->mlength <=
+ rqbd->rqbd_buffer + service->srv_buf_size);
+
+ CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
+ "event type %d, status %d, service %s\n",
+ ev->type, ev->status, service->srv_name);
+
+ if (ev->unlinked) {
+ /* If this is the last request message to fit in the
+ * request buffer we can use the request object embedded in
+ * rqbd. Note that if we failed to allocate a request,
+ * we'd have to re-post the rqbd, which we can't do in this
+ * context. */
+ req = &rqbd->rqbd_req;
+ memset(req, 0, sizeof (*req));
+ } else {
+ LASSERT (ev->type == LNET_EVENT_PUT);
+ if (ev->status != 0) {
+ /* We moaned above already... */
+ return;
+ }
+ req = ptlrpc_request_cache_alloc(ALLOC_ATOMIC_TRY);
+ if (req == NULL) {
+ CERROR("Can't allocate incoming request descriptor: "
+ "Dropping %s RPC from %s\n",
+ service->srv_name,
+ libcfs_id2str(ev->initiator));
+ return;
}
}
- else
- atomic_inc (&rqbd->rqbd_refcount); /* +1 ref for service thread */
- wake_up(&service->srv_waitq);
+ /* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL,
+ * flags are reset and scalars are zero. We only set the message
+ * size to non-zero if this was a successful receive. */
+ req->rq_xid = ev->match_bits;
+ req->rq_reqbuf = ev->md.start + ev->offset;
+ if (ev->type == LNET_EVENT_PUT && ev->status == 0)
+ req->rq_reqdata_len = ev->mlength;
+ do_gettimeofday(&req->rq_arrival_time);
+ req->rq_peer = ev->initiator;
+ req->rq_self = ev->target.nid;
+ req->rq_rqbd = rqbd;
+ req->rq_phase = RQ_PHASE_NEW;
+ spin_lock_init(&req->rq_lock);
+ INIT_LIST_HEAD(&req->rq_timed_list);
+ INIT_LIST_HEAD(&req->rq_exp_list);
+ atomic_set(&req->rq_refcount, 1);
+ if (ev->type == LNET_EVENT_PUT)
+ CDEBUG(D_INFO, "incoming req@%p x"LPU64" msgsize %u\n",
+ req, req->rq_xid, ev->mlength);
+
+ CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer));
+
+ spin_lock(&svcpt->scp_lock);
+
+ ptlrpc_req_add_history(svcpt, req);
+
+ if (ev->unlinked) {
+ svcpt->scp_nrqbds_posted--;
+ CDEBUG(D_INFO, "Buffer complete: %d buffers still posted\n",
+ svcpt->scp_nrqbds_posted);
+
+ /* Normally, don't complain about 0 buffers posted; LNET won't
+ * drop incoming reqs since we set the portal lazy */
+ if (test_req_buffer_pressure &&
+ ev->type != LNET_EVENT_UNLINK &&
+ svcpt->scp_nrqbds_posted == 0)
+ CWARN("All %s request buffers busy\n",
+ service->srv_name);
+
+ /* req takes over the network's ref on rqbd */
+ } else {
+ /* req takes a ref on rqbd */
+ rqbd->rqbd_refcount++;
+ }
+
+ list_add_tail(&req->rq_list, &svcpt->scp_req_incoming);
+ svcpt->scp_nreqs_incoming++;
+
+ /* NB everything can disappear under us once the request
+ * has been queued and we unlock, so do the wake now... */
+ wake_up(&svcpt->scp_waitq);
- return 0;
+ spin_unlock(&svcpt->scp_lock);
+ EXIT;
}
-static int bulk_source_callback(ptl_event_t *ev)
+/*
+ * Server's outgoing reply callback
+ */
+void reply_out_callback(lnet_event_t *ev)
{
- struct ptlrpc_bulk_desc *desc = ev->mem_desc.user_ptr;
- struct ptlrpc_bulk_page *bulk;
- struct list_head *tmp;
- struct list_head *next;
+ struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
+ struct ptlrpc_reply_state *rs = cbid->cbid_arg;
+ struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
ENTRY;
- CDEBUG(D_NET, "got %s event %d\n",
- (ev->type == PTL_EVENT_SENT) ? "SENT" :
- (ev->type == PTL_EVENT_ACK) ? "ACK" : "UNEXPECTED", ev->type);
+ LASSERT (ev->type == LNET_EVENT_SEND ||
+ ev->type == LNET_EVENT_ACK ||
+ ev->type == LNET_EVENT_UNLINK);
+
+ if (!rs->rs_difficult) {
+ /* 'Easy' replies have no further processing so I drop the
+ * net's ref on 'rs' */
+ LASSERT (ev->unlinked);
+ ptlrpc_rs_decref(rs);
+ EXIT;
+ return;
+ }
+
+ LASSERT (rs->rs_on_net);
- LASSERT (ev->type == PTL_EVENT_SENT || ev->type == PTL_EVENT_ACK);
+ if (ev->unlinked) {
+ /* Last network callback. The net's ref on 'rs' stays put
+ * until ptlrpc_handle_rs() is done with it */
+ spin_lock(&svcpt->scp_rep_lock);
+ spin_lock(&rs->rs_lock);
- LASSERT (atomic_read (&desc->bd_source_callback_count) > 0 &&
- atomic_read (&desc->bd_source_callback_count) <= 2);
+ rs->rs_on_net = 0;
+ if (!rs->rs_no_ack ||
+ rs->rs_transno <=
+ rs->rs_export->exp_obd->obd_last_committed)
+ ptlrpc_schedule_difficult_reply(rs);
- /* 1 fragment for each page always */
- LASSERT (ev->mem_desc.niov == desc->bd_page_count);
+ spin_unlock(&rs->rs_lock);
+ spin_unlock(&svcpt->scp_rep_lock);
+ }
+ EXIT;
+}
- if (atomic_dec_and_test (&desc->bd_source_callback_count)) {
- list_for_each_safe(tmp, next, &desc->bd_page_list) {
- bulk = list_entry(tmp, struct ptlrpc_bulk_page,
- bp_link);
+#ifdef HAVE_SERVER_SUPPORT
+/*
+ * Server's bulk completion callback
+ */
+void server_bulk_callback (lnet_event_t *ev)
+{
+ struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
+ struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
+ ENTRY;
+
+ LASSERT(ev->type == LNET_EVENT_SEND ||
+ ev->type == LNET_EVENT_UNLINK ||
+ (desc->bd_type == BULK_PUT_SOURCE &&
+ ev->type == LNET_EVENT_ACK) ||
+ (desc->bd_type == BULK_GET_SINK &&
+ ev->type == LNET_EVENT_REPLY));
+
+ CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
+ "event type %d, status %d, desc %p\n",
+ ev->type, ev->status, desc);
+
+ spin_lock(&desc->bd_lock);
+
+ LASSERT(desc->bd_md_count > 0);
+
+ if ((ev->type == LNET_EVENT_ACK ||
+ ev->type == LNET_EVENT_REPLY) &&
+ ev->status == 0) {
+ /* We heard back from the peer, so even if we get this
+ * before the SENT event (oh yes we can), we know we
+ * read/wrote the peer buffer and how much... */
+ desc->bd_nob_transferred += ev->mlength;
+ desc->bd_sender = ev->sender;
+ }
+
+ if (ev->status != 0)
+ desc->bd_failure = 1;
+
+ if (ev->unlinked) {
+ desc->bd_md_count--;
+ /* This is the last callback no matter what... */
+ if (desc->bd_md_count == 0)
+ wake_up(&desc->bd_waitq);
+ }
+
+ spin_unlock(&desc->bd_lock);
+ EXIT;
+}
+#endif
- if (bulk->bp_cb != NULL)
- bulk->bp_cb(bulk);
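+/*
+ * Every LNet event for ptlrpc arrives here and is dispatched via the
+ * ptlrpc_cb_id stashed in the MD's user_ptr. As a sketch of the
+ * convention (the actual wiring lives with the MD setup code, e.g. in
+ * client.c), a request hooks up its send callback roughly like:
+ *
+ * req->rq_req_cbid.cbid_fn = request_out_callback;
+ * req->rq_req_cbid.cbid_arg = req;
+ *
+ * and &req->rq_req_cbid is then passed as the MD's user_ptr.
+ */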
+static void ptlrpc_master_callback(lnet_event_t *ev)
+{
+ struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
+ void (*callback)(lnet_event_t *ev) = cbid->cbid_fn;
+
+ /* Honestly, it's best to find out early. */
+ LASSERT (cbid->cbid_arg != LP_POISON);
+ LASSERT (callback == request_out_callback ||
+ callback == reply_in_callback ||
+ callback == client_bulk_callback ||
+ callback == request_in_callback ||
+ callback == reply_out_callback
+#ifdef HAVE_SERVER_SUPPORT
+ || callback == server_bulk_callback
+#endif
+ );
+
+ callback (ev);
+}
+
+int ptlrpc_uuid_to_peer (struct obd_uuid *uuid,
+ lnet_process_id_t *peer, lnet_nid_t *self)
+{
+ int best_dist = 0;
+ __u32 best_order = 0;
+ int count = 0;
+ int rc = -ENOENT;
+ int portals_compatibility;
+ int dist;
+ __u32 order;
+ lnet_nid_t dst_nid;
+ lnet_nid_t src_nid;
+
+ portals_compatibility = LNetCtl(IOC_LIBCFS_PORTALS_COMPATIBILITY, NULL);
+
+ peer->pid = LUSTRE_SRV_LNET_PID;
+
+ /* Choose the matching UUID that's closest */
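+ /* "Closest" = smallest LNetDist() hop count, with ties broken by
+ * the lower interface order; distance 0 means the peer is local,
+ * in which case we talk to ourselves over the loopback LND */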
+ while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) {
+ dist = LNetDist(dst_nid, &src_nid, &order);
+ if (dist < 0)
+ continue;
+
+ if (dist == 0) { /* local! use loopback LND */
+ peer->nid = *self = LNET_MKNID(LNET_MKNET(LOLND, 0), 0);
+ rc = 0;
+ break;
+ }
+
+ if (rc < 0 ||
+ dist < best_dist ||
+ (dist == best_dist && order < best_order)) {
+ best_dist = dist;
+ best_order = order;
+
+ if (portals_compatibility > 1) {
+ /* Strong portals compatibility: Zero the nid's
+ * NET, so if I'm reading new config logs, or
+ * getting configured by (new) lconf I can
+ * still talk to old servers. */
+ dst_nid = LNET_MKNID(0, LNET_NIDADDR(dst_nid));
+ src_nid = LNET_MKNID(0, LNET_NIDADDR(src_nid));
+ }
+ peer->nid = dst_nid;
+ *self = src_nid;
+ rc = 0;
}
- desc->bd_flags |= PTL_BULK_FL_SENT;
- wake_up(&desc->bd_waitq);
- if (desc->bd_cb != NULL)
- desc->bd_cb(desc, desc->bd_cb_data);
}
- RETURN(0);
+ CDEBUG(D_NET,"%s->%s\n", uuid->uuid, libcfs_id2str(*peer));
+ return rc;
}
-static int bulk_sink_callback(ptl_event_t *ev)
+void ptlrpc_ni_fini(void)
{
- struct ptlrpc_bulk_desc *desc = ev->mem_desc.user_ptr;
- struct ptlrpc_bulk_page *bulk;
- struct list_head *tmp;
- struct list_head *next;
- ptl_size_t total = 0;
- ENTRY;
+ wait_queue_head_t waitq;
+ struct l_wait_info lwi;
+ int rc;
+ int retries;
+
+ /* Wait for the event queue to become idle since there may still be
+ * messages in flight with pending events (i.e. the fire-and-forget
+ * messages == client requests and "non-difficult" server
+ * replies) */
+
+ for (retries = 0;; retries++) {
+ rc = LNetEQFree(ptlrpc_eq_h);
+ switch (rc) {
+ default:
+ LBUG();
+
+ case 0:
+ LNetNIFini();
+ return;
+
+ case -EBUSY:
+ if (retries != 0)
+ CWARN("Event queue still busy\n");
+
+ /* Wait for a bit */
+ init_waitqueue_head(&waitq);
+ lwi = LWI_TIMEOUT(cfs_time_seconds(2), NULL, NULL);
+ l_wait_event(waitq, 0, &lwi);
+ break;
+ }
+ }
+ /* notreached */
+}
- if (ev->type == PTL_EVENT_PUT) {
- /* put with zero offset */
- LASSERT (ev->offset == 0);
- /* used iovs */
- LASSERT ((ev->mem_desc.options & PTL_MD_IOV) != 0);
- /* 1 fragment for each page always */
- LASSERT (ev->mem_desc.niov == desc->bd_page_count);
+lnet_pid_t ptl_get_pid(void)
+{
+ lnet_pid_t pid;
+
+#ifndef __KERNEL__
+ pid = getpid();
+#else
+ pid = LUSTRE_SRV_LNET_PID;
+#endif
+ return pid;
+}
- list_for_each_safe (tmp, next, &desc->bd_page_list) {
- bulk = list_entry(tmp, struct ptlrpc_bulk_page,
- bp_link);
+int ptlrpc_ni_init(void)
+{
+ int rc;
+ lnet_pid_t pid;
- total += bulk->bp_buflen;
+ pid = ptl_get_pid();
+ CDEBUG(D_NET, "My pid is: %x\n", pid);
- if (bulk->bp_cb != NULL)
- bulk->bp_cb(bulk);
- }
+ /* We're not passing any limits yet... */
+ rc = LNetNIInit(pid);
+ if (rc < 0) {
+ CDEBUG (D_NET, "Can't init network interface: %d\n", rc);
+ return rc;
+ }
- LASSERT (ev->mem_desc.length == total);
+ /* CAVEAT EMPTOR: how we process portals events is _radically_
+ * different depending on... */
+#ifdef __KERNEL__
+ /* kernel LNet calls our master callback when there are new events;
+ * because we are guaranteed to get every event via callback,
+ * we just set the EQ size to 0 to avoid the overhead of serializing
+ * enqueue/dequeue operations in LNet. */
+ rc = LNetEQAlloc(0, ptlrpc_master_callback, &ptlrpc_eq_h);
+#else
+ /* liblustre calls the master callback when it removes events from the
+ * event queue. The event queue has to be big enough not to drop
+ * anything */
+ rc = LNetEQAlloc(10240, LNET_EQ_HANDLER_NONE, &ptlrpc_eq_h);
+#endif
+ if (rc == 0)
+ return 0;
+
+ CERROR ("Failed to allocate event queue: %d\n", rc);
+ LNetNIFini();
+
+ return rc;
+}
- desc->bd_flags |= PTL_BULK_FL_RCVD;
- wake_up(&desc->bd_waitq);
- if (desc->bd_cb != NULL)
- desc->bd_cb(desc, desc->bd_cb_data);
- } else {
- CERROR("Unexpected event type!\n");
- LBUG();
+#ifndef __KERNEL__
+struct list_head liblustre_wait_callbacks;
+struct list_head liblustre_idle_callbacks;
+void *liblustre_services_callback;
+
+void *
+liblustre_register_waitidle_callback(struct list_head *callback_list,
+ const char *name,
+ int (*fn)(void *arg), void *arg)
+{
+ struct liblustre_wait_callback *llwc;
+
+ OBD_ALLOC(llwc, sizeof(*llwc));
+ LASSERT (llwc != NULL);
+
+ llwc->llwc_name = name;
+ llwc->llwc_fn = fn;
+ llwc->llwc_arg = arg;
+ list_add_tail(&llwc->llwc_list, callback_list);
+
+ return (llwc);
+}
+
+void
+liblustre_deregister_waitidle_callback (void *opaque)
+{
+ struct liblustre_wait_callback *llwc = opaque;
+
+ list_del(&llwc->llwc_list);
+ OBD_FREE(llwc, sizeof(*llwc));
+}
+
+void *
+liblustre_register_wait_callback (const char *name,
+ int (*fn)(void *arg), void *arg)
+{
+ return liblustre_register_waitidle_callback(&liblustre_wait_callbacks,
+ name, fn, arg);
+}
+
+void
+liblustre_deregister_wait_callback (void *opaque)
+{
+ liblustre_deregister_waitidle_callback(opaque);
+}
+
+void *
+liblustre_register_idle_callback (const char *name,
+ int (*fn)(void *arg), void *arg)
+{
+ return liblustre_register_waitidle_callback(&liblustre_idle_callbacks,
+ name, fn, arg);
+}
+
+void
+liblustre_deregister_idle_callback (void *opaque)
+{
+ liblustre_deregister_waitidle_callback(opaque);
+}
+
+int
+liblustre_check_events (int timeout)
+{
+ lnet_event_t ev;
+ int rc;
+ int i;
+ ENTRY;
+
+ rc = LNetEQPoll(&ptlrpc_eq_h, 1, timeout * 1000, &ev, &i);
+ if (rc == 0)
+ RETURN(0);
+
+ LASSERT (rc == -EOVERFLOW || rc == 1);
+
+ /* liblustre: no asynch callback so we can't afford to miss any
+ * events... */
+ if (rc == -EOVERFLOW) {
+ CERROR ("Dropped an event!!!\n");
+ abort();
}
+ ptlrpc_master_callback (&ev);
RETURN(1);
}
-int ptlrpc_init_portals(void)
+int liblustre_waiting = 0;
+
+int
+liblustre_wait_event (int timeout)
{
- int rc;
- ptl_handle_ni_t ni;
-
- /* Use the qswnal if it's there */
- if ((qswnal_nip = inter_module_get("kqswnal_ni")) != NULL)
- ni = *qswnal_nip;
- else if ((gmnal_nip = inter_module_get("kgmnal_ni")) != NULL)
- ni = *gmnal_nip;
- else if ((socknal_nip = inter_module_get("ksocknal_ni")) != NULL)
- ni = *socknal_nip;
- else if ((toenal_nip = inter_module_get("ktoenal_ni")) != NULL)
- ni = *toenal_nip;
- else {
- CERROR("get_ni failed: is a NAL module loaded?\n");
- return -EIO;
+ struct list_head *tmp;
+ struct liblustre_wait_callback *llwc;
+ int found_something = 0;
+
+ /* single threaded recursion check... */
+ liblustre_waiting = 1;
+
+ for (;;) {
+ /* Deal with all pending events */
+ while (liblustre_check_events(0))
+ found_something = 1;
+
+ /* Give all registered callbacks a bite at the cherry */
+ list_for_each(tmp, &liblustre_wait_callbacks) {
+ llwc = list_entry(tmp,
+ struct liblustre_wait_callback,
+ llwc_list);
+
+ if (llwc->llwc_fn(llwc->llwc_arg))
+ found_something = 1;
+ }
+
+ if (found_something || timeout == 0)
+ break;
+
+ /* Nothing so far, but I'm allowed to block... */
+ found_something = liblustre_check_events(timeout);
+ if (!found_something) /* still nothing */
+ break; /* I timed out */
}
- rc = PtlEQAlloc(ni, 1024, request_out_callback, &request_out_eq);
- if (rc != PTL_OK)
- CERROR("PtlEQAlloc failed: %d\n", rc);
+ liblustre_waiting = 0;
+
+ return found_something;
+}
+
+void
+liblustre_wait_idle(void)
+{
+ static int recursed = 0;
+ struct list_head *tmp;
+ struct liblustre_wait_callback *llwc;
+ int idle = 0;
+
+ LASSERT(!recursed);
+ recursed = 1;
+
+ do {
+ liblustre_wait_event(0);
+
+ idle = 1;
+
+ list_for_each(tmp, &liblustre_idle_callbacks) {
+ llwc = list_entry(tmp, struct liblustre_wait_callback,
+ llwc_list);
+ if (!llwc->llwc_fn(llwc->llwc_arg)) {
+ idle = 0;
+ break;
+ }
+ }
- rc = PtlEQAlloc(ni, 1024, reply_out_callback, &reply_out_eq);
- if (rc != PTL_OK)
- CERROR("PtlEQAlloc failed: %d\n", rc);
+ } while (!idle);
- rc = PtlEQAlloc(ni, 1024, reply_in_callback, &reply_in_eq);
- if (rc != PTL_OK)
- CERROR("PtlEQAlloc failed: %d\n", rc);
+ recursed = 0;
+}
- rc = PtlEQAlloc(ni, 1024, bulk_source_callback, &bulk_source_eq);
- if (rc != PTL_OK)
- CERROR("PtlEQAlloc failed: %d\n", rc);
+#endif /* __KERNEL__ */
- rc = PtlEQAlloc(ni, 1024, bulk_sink_callback, &bulk_sink_eq);
- if (rc != PTL_OK)
- CERROR("PtlEQAlloc failed: %d\n", rc);
+int ptlrpc_init_portals(void)
+{
+ int rc = ptlrpc_ni_init();
+ if (rc != 0) {
+ CERROR("network initialisation failed\n");
+ return rc;
+ }
+#ifndef __KERNEL__
+ INIT_LIST_HEAD(&liblustre_wait_callbacks);
+ INIT_LIST_HEAD(&liblustre_idle_callbacks);
+
+ liblustre_services_callback =
+ liblustre_register_wait_callback("liblustre_check_services",
+ &liblustre_check_services,
+ NULL);
+ init_completion_module(liblustre_wait_event);
+#endif
+ rc = ptlrpcd_addref();
+ if (rc == 0)
+ return 0;
+
+ CERROR("rpcd initialisation failed\n");
+#ifndef __KERNEL__
+ liblustre_deregister_wait_callback(liblustre_services_callback);
+#endif
+ ptlrpc_ni_fini();
return rc;
}
void ptlrpc_exit_portals(void)
{
- PtlEQFree(request_out_eq);
- PtlEQFree(reply_out_eq);
- PtlEQFree(reply_in_eq);
- PtlEQFree(bulk_source_eq);
- PtlEQFree(bulk_sink_eq);
-
- if (qswnal_nip != NULL)
- inter_module_put("kqswnal_ni");
- if (socknal_nip != NULL)
- inter_module_put("ksocknal_ni");
- if (gmnal_nip != NULL)
- inter_module_put("kgmnal_ni");
+#ifndef __KERNEL__
+ liblustre_deregister_wait_callback(liblustre_services_callback);
+#endif
+ ptlrpcd_decref();
+ ptlrpc_ni_fini();
}