land 0.5.20.3 b_devel onto HEAD (b_devel will remain)
[fs/lustre-release.git] lustre/ptlrpc/events.c
index 38bcd9c..4a6eb67 100644
@@ -1,7 +1,7 @@
 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
  * vim:expandtab:shiftwidth=8:tabstop=8:
  *
- *  Copyright (C) 2002 Cluster File Systems, Inc.
+ *  Copyright (c) 2002, 2003 Cluster File Systems, Inc.
  *
  *   This file is part of Lustre, http://www.lustre.org.
  *
 
 #define DEBUG_SUBSYSTEM S_RPC
 
+#ifdef __KERNEL__
 #include <linux/module.h>
-#include <linux/obd_support.h>
+#else
+#include <liblustre.h>
+#endif
+#include <linux/obd_class.h>
 #include <linux/lustre_net.h>
 
-ptl_handle_eq_t request_out_eq, reply_in_eq, reply_out_eq, bulk_source_eq,
-        bulk_sink_eq;
-static const ptl_handle_ni_t *socknal_nip = NULL, *toenal_nip = NULL, 
-        *qswnal_nip = NULL, *gmnal_nip = NULL;
+struct ptlrpc_ni  ptlrpc_interfaces[NAL_MAX_NR];
+int               ptlrpc_ninterfaces;
 
 /*
  *  Free the packet when it has gone out
@@ -39,7 +41,8 @@ static int request_out_callback(ptl_event_t *ev)
         struct ptlrpc_request *req = ev->mem_desc.user_ptr;
         ENTRY;
 
-        LASSERT ((ev->mem_desc.options & PTL_MD_IOV) == 0); /* requests always contiguous */
+        /* requests always contiguous */
+        LASSERT((ev->mem_desc.options & PTL_MD_IOV) == 0);
 
         if (ev->type != PTL_EVENT_SENT) {
                 // XXX make sure we understand all events, including ACK's
@@ -60,12 +63,22 @@ static int reply_out_callback(ptl_event_t *ev)
 {
         ENTRY;
 
-        LASSERT ((ev->mem_desc.options & PTL_MD_IOV) == 0); /* replies always contiguous */
+        /* replies always contiguous */
+        LASSERT((ev->mem_desc.options & PTL_MD_IOV) == 0);
 
         if (ev->type == PTL_EVENT_SENT) {
                 OBD_FREE(ev->mem_desc.start, ev->mem_desc.length);
+        } else if (ev->type == PTL_EVENT_ACK) {
+                struct ptlrpc_request *req = ev->mem_desc.user_ptr;
+                if (req->rq_flags & PTL_RPC_FL_WANT_ACK) {
+                        req->rq_flags &= ~PTL_RPC_FL_WANT_ACK;
+                        wake_up(&req->rq_wait_for_rep);
+                } else {
+                        DEBUG_REQ(D_ERROR, req,
+                                  "ack received for reply, not wanted");
+                }
         } else {
-                // XXX make sure we understand all events, including ACK's
+                // XXX make sure we understand all events
                 CERROR("Unknown event %d\n", ev->type);
                 LBUG();
         }
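
The new PTL_EVENT_ACK branch is a flag-and-wake handshake: the sender sets PTL_RPC_FL_WANT_ACK before posting its reply, and this callback clears the flag and wakes whoever sleeps on rq_wait_for_rep. A minimal userspace sketch of that handshake, with pthreads standing in for the kernel waitqueue (the mutex and the cut-down struct are stand-ins, not the real ptlrpc_request):

#include <pthread.h>

#define PTL_RPC_FL_WANT_ACK 0x1         /* illustrative value only */

struct fake_req {
        unsigned long   rq_flags;
        pthread_mutex_t lock;           /* kernel side uses wait_event() */
        pthread_cond_t  rq_wait_for_rep;
};

/* Callback side, mirroring the PTL_EVENT_ACK branch above. */
static void ack_arrived(struct fake_req *req)
{
        pthread_mutex_lock(&req->lock);
        if (req->rq_flags & PTL_RPC_FL_WANT_ACK) {
                req->rq_flags &= ~PTL_RPC_FL_WANT_ACK;
                pthread_cond_signal(&req->rq_wait_for_rep);
        }
        pthread_mutex_unlock(&req->lock);
}

/* Sender side: set the flag before posting, sleep until it clears. */
static void wait_for_ack(struct fake_req *req)
{
        pthread_mutex_lock(&req->lock);
        while (req->rq_flags & PTL_RPC_FL_WANT_ACK)
                pthread_cond_wait(&req->rq_wait_for_rep, &req->lock);
        pthread_mutex_unlock(&req->lock);
}
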
@@ -76,12 +89,13 @@ static int reply_out_callback(ptl_event_t *ev)
 /*
  * Wake up the thread waiting for the reply once it comes in.
  */
-static int reply_in_callback(ptl_event_t *ev)
+int reply_in_callback(ptl_event_t *ev)
 {
         struct ptlrpc_request *req = ev->mem_desc.user_ptr;
         ENTRY;
 
-        LASSERT ((ev->mem_desc.options & PTL_MD_IOV) == 0); /* replies always contiguous */
+        /* replies always contiguous */
+        LASSERT((ev->mem_desc.options & PTL_MD_IOV) == 0);
 
         if (req->rq_xid == 0x5a5a5a5a5a5a5a5a) {
                 CERROR("Reply received for freed request!  Probably a missing "
@@ -110,42 +124,44 @@ static int reply_in_callback(ptl_event_t *ev)
 int request_in_callback(ptl_event_t *ev)
 {
         struct ptlrpc_request_buffer_desc *rqbd = ev->mem_desc.user_ptr;
-        struct ptlrpc_service *service = rqbd->rqbd_service;
+        struct ptlrpc_srv_ni  *srv_ni = rqbd->rqbd_srv_ni;
+        struct ptlrpc_service *service = srv_ni->sni_service;
+
+        /* requests always contiguous */
+        LASSERT((ev->mem_desc.options & PTL_MD_IOV) == 0);
+        /* we only enable puts */
+        LASSERT(ev->type == PTL_EVENT_PUT);
+        LASSERT(atomic_read(&srv_ni->sni_nrqbds_receiving) > 0);
+        LASSERT(atomic_read(&rqbd->rqbd_refcount) > 0);
 
-        LASSERT ((ev->mem_desc.options & PTL_MD_IOV) == 0); /* requests always contiguous */
-        LASSERT (ev->type == PTL_EVENT_PUT);    /* we only enable puts */
-        LASSERT (atomic_read (&service->srv_nrqbds_receiving) > 0);
-        LASSERT (atomic_read (&rqbd->rqbd_refcount) > 0);
-        
         if (ev->rlength != ev->mlength)
                 CERROR("Warning: Possibly truncated rpc (%d/%d)\n",
                        ev->mlength, ev->rlength);
 
-        if (ptl_is_valid_handle (&ev->unlinked_me))
-        {
+        if (ptl_is_valid_handle(&ev->unlinked_me)) {
                 /* This is the last request to be received into this
                  * request buffer.  We don't bump the refcount, since the
                  * thread servicing this event is effectively taking over
                  * portals' reference.
                  */
 #warning ev->unlinked_me.nal_idx is not set properly in a callback
-                LASSERT (ev->unlinked_me.handle_idx == rqbd->rqbd_me_h.handle_idx);
+                LASSERT(ev->unlinked_me.handle_idx==rqbd->rqbd_me_h.handle_idx);
 
-                if (atomic_dec_and_test (&service->srv_nrqbds_receiving)) /* we're off-air */
-                {
-                        CERROR ("All request buffers busy\n");
-                        /* we'll probably start dropping packets in portals soon */
-                }
+                /* we're off the air */
+                /* we'll probably start dropping packets in portals soon */
+                if (atomic_dec_and_test(&srv_ni->sni_nrqbds_receiving))
+                        CERROR("All request buffers busy\n");
+        } else {
+                /* +1 ref for service thread */
+                atomic_inc(&rqbd->rqbd_refcount);
         }
-        else
-                atomic_inc (&rqbd->rqbd_refcount); /* +1 ref for service thread */
 
         wake_up(&service->srv_waitq);
 
         return 0;
 }
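
The refcounting here passes ownership around instead of always incrementing: when the ME unlinks, the servicing thread inherits portals' reference to the buffer; otherwise it takes its own. A compressed sketch of that handover in C11 atomics (the fake_* names are stand-ins for the real rqbd/srv_ni):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_rqbd   { atomic_int refcount; };         /* ~rqbd_refcount */
struct fake_srv_ni { atomic_int nrqbds_receiving; }; /* posted buffers */

/* 'unlinked' is true when the event consumed the buffer's last slot. */
static void request_event(struct fake_srv_ni *sni, struct fake_rqbd *rqbd,
                          bool unlinked)
{
        if (unlinked) {
                /* No increment: the service thread takes over portals'
                 * reference, as the comment in the patch explains. */
                if (atomic_fetch_sub(&sni->nrqbds_receiving, 1) == 1)
                        fprintf(stderr, "All request buffers busy\n");
        } else {
                /* Buffer still posted: +1 ref for the service thread. */
                atomic_fetch_add(&rqbd->refcount, 1);
        }
}
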
 
-static int bulk_source_callback(ptl_event_t *ev)
+static int bulk_put_source_callback(ptl_event_t *ev)
 {
         struct ptlrpc_bulk_desc *desc = ev->mem_desc.user_ptr;
         struct ptlrpc_bulk_page *bulk;
@@ -157,15 +173,17 @@ static int bulk_source_callback(ptl_event_t *ev)
                (ev->type == PTL_EVENT_SENT) ? "SENT" :
                (ev->type == PTL_EVENT_ACK)  ? "ACK"  : "UNEXPECTED", ev->type);
 
-        LASSERT (ev->type == PTL_EVENT_SENT || ev->type == PTL_EVENT_ACK);
+        LASSERT(ev->type == PTL_EVENT_SENT || ev->type == PTL_EVENT_ACK);
 
-        LASSERT (atomic_read (&desc->bd_source_callback_count) > 0 &&
-                 atomic_read (&desc->bd_source_callback_count) <= 2);
+        LASSERT(atomic_read(&desc->bd_source_callback_count) > 0 &&
+                atomic_read(&desc->bd_source_callback_count) <= 2);
 
         /* 1 fragment for each page always */
-        LASSERT (ev->mem_desc.niov == desc->bd_page_count);
+        LASSERT(ev->mem_desc.niov == desc->bd_page_count);
+
+        if (atomic_dec_and_test(&desc->bd_source_callback_count)) {
+                void (*event_handler)(struct ptlrpc_bulk_desc *);
 
-        if (atomic_dec_and_test (&desc->bd_source_callback_count)) {
                 list_for_each_safe(tmp, next, &desc->bd_page_list) {
                         bulk = list_entry(tmp, struct ptlrpc_bulk_page,
                                           bp_link);
@@ -173,110 +191,309 @@ static int bulk_source_callback(ptl_event_t *ev)
                         if (bulk->bp_cb != NULL)
                                 bulk->bp_cb(bulk);
                 }
+
+                /* We need to make a note of whether there's an event handler
+                 * before we call wake_up, because if there is no event handler,
+                 * 'desc' might be freed before we're scheduled again. */
+                event_handler = desc->bd_ptl_ev_hdlr;
+
                 desc->bd_flags |= PTL_BULK_FL_SENT;
                 wake_up(&desc->bd_waitq);
-                if (desc->bd_cb != NULL)
-                        desc->bd_cb(desc, desc->bd_cb_data);
+                if (event_handler) {
+                        LASSERT(desc->bd_ptl_ev_hdlr == event_handler);
+                        event_handler(desc);
+                }
         }
 
         RETURN(0);
 }
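
The event_handler local is the load-bearing detail of this rewrite: once wake_up() runs, a waiter that registered no handler may free 'desc' immediately, so the callback must not dereference desc afterwards. The idiom boiled down (struct trimmed to the two relevant fields; the flag value is illustrative):

struct fake_desc {
        int    bd_flags;
        void (*bd_ptl_ev_hdlr)(struct fake_desc *);
        /* waitqueue, page list, ... elided */
};

#define PTL_BULK_FL_SENT 0x1            /* illustrative value only */

static void bulk_done(struct fake_desc *desc)
{
        /* Read the handler BEFORE waking anyone: if it is NULL, the
         * waiter owns desc and may free it as soon as the wakeup lands. */
        void (*handler)(struct fake_desc *) = desc->bd_ptl_ev_hdlr;

        desc->bd_flags |= PTL_BULK_FL_SENT;
        /* wake_up(&desc->bd_waitq) goes here; desc must not be touched
         * past this point unless handler != NULL. */
        if (handler != NULL)
                handler(desc);          /* handler takes over desc */
}
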
 
-static int bulk_sink_callback(ptl_event_t *ev)
+static int bulk_put_sink_callback(ptl_event_t *ev)
 {
         struct ptlrpc_bulk_desc *desc = ev->mem_desc.user_ptr;
         struct ptlrpc_bulk_page *bulk;
         struct list_head        *tmp;
         struct list_head        *next;
         ptl_size_t               total = 0;
+        void                   (*event_handler)(struct ptlrpc_bulk_desc *);
         ENTRY;
 
-        if (ev->type == PTL_EVENT_PUT) {
-                /* put with zero offset */
-                LASSERT (ev->offset == 0);
-                /* used iovs */
-                LASSERT ((ev->mem_desc.options & PTL_MD_IOV) != 0);
-                /* 1 fragment for each page always */
-                LASSERT (ev->mem_desc.niov == desc->bd_page_count);
-
-                list_for_each_safe (tmp, next, &desc->bd_page_list) {
+        LASSERT(ev->type == PTL_EVENT_PUT);
+
+        /* put with zero offset */
+        LASSERT(ev->offset == 0);
+        /* used iovs */
+        LASSERT((ev->mem_desc.options & PTL_MD_IOV) != 0);
+        /* 1 fragment for each page always */
+        LASSERT(ev->mem_desc.niov == desc->bd_page_count);
+
+        list_for_each_safe (tmp, next, &desc->bd_page_list) {
+                bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);
+
+                total += bulk->bp_buflen;
+
+                if (bulk->bp_cb != NULL)
+                        bulk->bp_cb(bulk);
+        }
+
+        LASSERT(ev->mem_desc.length == total);
+
+        /* We need to make a note of whether there's an event handler
+         * before we call wake_up, because if there is no event
+         * handler, 'desc' might be freed before we're scheduled again. */
+        event_handler = desc->bd_ptl_ev_hdlr;
+
+        desc->bd_flags |= PTL_BULK_FL_RCVD;
+        wake_up(&desc->bd_waitq);
+        if (event_handler) {
+                LASSERT(desc->bd_ptl_ev_hdlr == event_handler);
+                event_handler(desc);
+        }
+
+        RETURN(1);
+}
+
+static int bulk_get_source_callback(ptl_event_t *ev)
+{
+        struct ptlrpc_bulk_desc *desc = ev->mem_desc.user_ptr;
+        struct ptlrpc_bulk_page *bulk;
+        struct list_head        *tmp;
+        struct list_head        *next;
+        ptl_size_t               total = 0;
+        void                   (*event_handler)(struct ptlrpc_bulk_desc *);
+        ENTRY;
+
+        LASSERT(ev->type == PTL_EVENT_GET);
+
+        /* get with zero offset */
+        LASSERT(ev->offset == 0);
+        /* used iovs */
+        LASSERT((ev->mem_desc.options & PTL_MD_IOV) != 0);
+        /* 1 fragment for each page always */
+        LASSERT(ev->mem_desc.niov == desc->bd_page_count);
+
+        list_for_each_safe (tmp, next, &desc->bd_page_list) {
+                bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);
+
+                total += bulk->bp_buflen;
+
+                if (bulk->bp_cb != NULL)
+                        bulk->bp_cb(bulk);
+        }
+
+        LASSERT(ev->mem_desc.length == total);
+
+        /* We need to make a note of whether there's an event handler
+         * before we call wake_up, because if there is no event
+         * handler, 'desc' might be freed before we're scheduled again. */
+        event_handler = desc->bd_ptl_ev_hdlr;
+
+        desc->bd_flags |= PTL_BULK_FL_SENT;
+        wake_up(&desc->bd_waitq);
+        if (event_handler) {
+                LASSERT(desc->bd_ptl_ev_hdlr == event_handler);
+                event_handler(desc);
+        }
+
+        RETURN(1);
+}
+
+
+static int bulk_get_sink_callback(ptl_event_t *ev)
+{
+        struct ptlrpc_bulk_desc *desc = ev->mem_desc.user_ptr;
+        struct ptlrpc_bulk_page *bulk;
+        struct list_head        *tmp;
+        struct list_head        *next;
+        ENTRY;
+
+        CDEBUG(D_NET, "got %s event %d\n",
+               (ev->type == PTL_EVENT_SENT) ? "SENT" :
+               (ev->type == PTL_EVENT_REPLY)  ? "REPLY"  : "UNEXPECTED", 
+               ev->type);
+
+        LASSERT(ev->type == PTL_EVENT_SENT || ev->type == PTL_EVENT_REPLY);
+
+        LASSERT(atomic_read(&desc->bd_source_callback_count) > 0 &&
+                atomic_read(&desc->bd_source_callback_count) <= 2);
+
+        /* 1 fragment for each page always */
+        LASSERT(ev->mem_desc.niov == desc->bd_page_count);
+
+        if (atomic_dec_and_test(&desc->bd_source_callback_count)) {
+                void (*event_handler)(struct ptlrpc_bulk_desc *);
+
+                list_for_each_safe(tmp, next, &desc->bd_page_list) {
                         bulk = list_entry(tmp, struct ptlrpc_bulk_page,
                                           bp_link);
 
-                        total += bulk->bp_buflen;
-
                         if (bulk->bp_cb != NULL)
                                 bulk->bp_cb(bulk);
                 }
 
-                LASSERT (ev->mem_desc.length == total);
+                /* We need to make a note of whether there's an event handler
+                 * before we call wake_up, because if there is no event handler,
+                 * 'desc' might be freed before we're scheduled again. */
+                event_handler = desc->bd_ptl_ev_hdlr;
 
                 desc->bd_flags |= PTL_BULK_FL_RCVD;
                 wake_up(&desc->bd_waitq);
-                if (desc->bd_cb != NULL)
-                        desc->bd_cb(desc, desc->bd_cb_data);
-        } else {
-                CERROR("Unexpected event type!\n");
-                LBUG();
+                if (event_handler) {
+                        LASSERT(desc->bd_ptl_ev_hdlr == event_handler);
+                        event_handler(desc);
+                }
         }
 
-        RETURN(1);
+        RETURN(0);
 }
 
-int ptlrpc_init_portals(void)
+int ptlrpc_uuid_to_peer (struct obd_uuid *uuid, struct ptlrpc_peer *peer) 
 {
-        int rc;
-        ptl_handle_ni_t ni;
-
-        /* Use the qswnal if it's there */
-        if ((qswnal_nip = inter_module_get("kqswnal_ni")) != NULL)
-                ni = *qswnal_nip;
-        else if ((gmnal_nip = inter_module_get("kgmnal_ni")) != NULL)
-                ni = *gmnal_nip;
-        else if ((socknal_nip = inter_module_get("ksocknal_ni")) != NULL)
-                ni = *socknal_nip;
-        else if ((toenal_nip = inter_module_get("ktoenal_ni")) != NULL)
-                ni = *toenal_nip;
-        else {
-                CERROR("get_ni failed: is a NAL module loaded?\n");
-                return -EIO;
+        struct ptlrpc_ni   *pni;
+        struct lustre_peer  lpeer;
+        int                 i;
+        int                 rc = lustre_uuid_to_peer (uuid->uuid, &lpeer);
+        
+        if (rc != 0)
+                RETURN (rc);
+        
+        for (i = 0; i < ptlrpc_ninterfaces; i++) {
+                pni = &ptlrpc_interfaces[i];
+
+                if (!memcmp (&lpeer.peer_ni, &pni->pni_ni_h,
+                             sizeof (lpeer.peer_ni))) {
+                        peer->peer_nid = lpeer.peer_nid;
+                        peer->peer_ni = pni;
+                        return (0);
+                }
         }
+        
+        CERROR ("Can't find ptlrpc interface for "LPX64" ni handle %08lx %08lx\n",
+                lpeer.peer_nid, lpeer.peer_ni.nal_idx, lpeer.peer_ni.handle_idx);
+        return (-ENOENT);
+}
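
Resolution is two-stage: lustre_uuid_to_peer() maps the UUID to a (nid, NI handle) pair, then the loop matches that handle against ptlrpc_interfaces[] by memcmp. A caller-side sketch of the result (assuming, as the uuid->uuid usage above suggests, that obd_uuid carries a NUL-terminated name; setup elided):

struct ptlrpc_peer peer;
struct obd_uuid    uuid;    /* filled in by the caller */

if (ptlrpc_uuid_to_peer(&uuid, &peer) == 0)
        CDEBUG(D_NET, "resolved nid "LPX64" on interface %s\n",
               peer.peer_nid, peer.peer_ni->pni_name);
else
        CERROR("no configured interface knows this peer\n");
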
 
-        rc = PtlEQAlloc(ni, 1024, request_out_callback, &request_out_eq);
-        if (rc != PTL_OK)
-                CERROR("PtlEQAlloc failed: %d\n", rc);
+void ptlrpc_ni_fini (struct ptlrpc_ni *pni) 
+{
+        PtlEQFree(pni->pni_request_out_eq_h);
+        PtlEQFree(pni->pni_reply_out_eq_h);
+        PtlEQFree(pni->pni_reply_in_eq_h);
+        PtlEQFree(pni->pni_bulk_put_source_eq_h);
+        PtlEQFree(pni->pni_bulk_put_sink_eq_h);
+        PtlEQFree(pni->pni_bulk_get_source_eq_h);
+        PtlEQFree(pni->pni_bulk_get_sink_eq_h);
+        
+        inter_module_put(pni->pni_name);
+}
 
-        rc = PtlEQAlloc(ni, 1024, reply_out_callback, &reply_out_eq);
-        if (rc != PTL_OK)
-                CERROR("PtlEQAlloc failed: %d\n", rc);
+int ptlrpc_ni_init (char *name, struct ptlrpc_ni *pni) 
+{
+        int              rc;
+        ptl_handle_ni_t *nip;
 
-        rc = PtlEQAlloc(ni, 1024, reply_in_callback, &reply_in_eq);
+        nip = (ptl_handle_ni_t *)inter_module_get (name);
+        if (nip == NULL) {
+                CDEBUG (D_NET, "Network interface %s not loaded\n", name);
+                return (-ENOENT);
+        }
+        
+        CDEBUG (D_NET, "init %s: nal_idx %ld\n", name, nip->nal_idx);
+                
+        pni->pni_name = name;
+        pni->pni_ni_h = *nip;
+
+        ptl_set_inv_handle (&pni->pni_request_out_eq_h);
+        ptl_set_inv_handle (&pni->pni_reply_out_eq_h);
+        ptl_set_inv_handle (&pni->pni_reply_in_eq_h);
+        ptl_set_inv_handle (&pni->pni_bulk_put_source_eq_h);
+        ptl_set_inv_handle (&pni->pni_bulk_put_sink_eq_h);
+        ptl_set_inv_handle (&pni->pni_bulk_get_source_eq_h);
+        ptl_set_inv_handle (&pni->pni_bulk_get_sink_eq_h);
+        
+        /* NB We never actually PtlEQGet() out of these event queues since
+         * we're only interested in the event callback, so we can just let
+         * them wrap.  Their sizes aren't a big deal, apart from providing
+         * a little history for debugging... */
+        
+        rc = PtlEQAlloc(pni->pni_ni_h, 1024, request_out_callback, 
+                        &pni->pni_request_out_eq_h);
         if (rc != PTL_OK)
-                CERROR("PtlEQAlloc failed: %d\n", rc);
-
-        rc = PtlEQAlloc(ni, 1024, bulk_source_callback, &bulk_source_eq);
+                GOTO (fail, rc = -ENOMEM);
+                
+        rc = PtlEQAlloc(pni->pni_ni_h, 1024, reply_out_callback, 
+                        &pni->pni_reply_out_eq_h);
         if (rc != PTL_OK)
-                CERROR("PtlEQAlloc failed: %d\n", rc);
-
-        rc = PtlEQAlloc(ni, 1024, bulk_sink_callback, &bulk_sink_eq);
+                GOTO (fail, rc = -ENOMEM);
+        
+        rc = PtlEQAlloc(pni->pni_ni_h, 1024, reply_in_callback,
+                        &pni->pni_reply_in_eq_h);
+        if (rc != PTL_OK)
+                GOTO (fail, rc = -ENOMEM);
+                
+        rc = PtlEQAlloc(pni->pni_ni_h, 1024, bulk_put_source_callback,
+                        &pni->pni_bulk_put_source_eq_h);
         if (rc != PTL_OK)
-                CERROR("PtlEQAlloc failed: %d\n", rc);
+                GOTO (fail, rc = -ENOMEM);
+                
+        rc = PtlEQAlloc(pni->pni_ni_h, 1024, bulk_put_sink_callback,
+                        &pni->pni_bulk_put_sink_eq_h);
+        if (rc != PTL_OK)
+                GOTO (fail, rc = -ENOMEM);
+                
+        rc = PtlEQAlloc(pni->pni_ni_h, 1024, bulk_get_source_callback,
+                        &pni->pni_bulk_get_source_eq_h);
+        if (rc != PTL_OK)
+                GOTO (fail, rc = -ENOMEM);
+                
+        rc = PtlEQAlloc(pni->pni_ni_h, 1024, bulk_get_sink_callback,
+                        &pni->pni_bulk_get_sink_eq_h);
+        if (rc != PTL_OK)
+                GOTO (fail, rc = -ENOMEM);
+        
+        return (0);
+ fail: 
+        CERROR ("Failed to initialise network interface %s: %d\n",
+                name, rc);
+
+        /* OK to do complete teardown since we invalidated the handles above... */
+        ptlrpc_ni_fini (pni);
+        return (rc);
+}
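
ptlrpc_ni_init() pre-invalidates every EQ handle so that a failure at any PtlEQAlloc() can fall through to the complete ptlrpc_ni_fini() teardown without tracking which allocations succeeded. The idiom in miniature (acquire/release and the handle table are hypothetical, not Portals calls):

#define NRES    3
#define INVALID (-1)

extern int  acquire(int i);     /* hypothetical: INVALID on failure */
extern void release(int h);     /* hypothetical */

struct ctx { int h[NRES]; };

static void ctx_fini(struct ctx *c)
{
        int i;

        for (i = 0; i < NRES; i++)
                if (c->h[i] != INVALID)
                        release(c->h[i]);
}

static int ctx_init(struct ctx *c)
{
        int i;

        /* Invalidate everything up front, like the ptl_set_inv_handle()
         * calls above, so ctx_fini() is safe at any failure point. */
        for (i = 0; i < NRES; i++)
                c->h[i] = INVALID;

        for (i = 0; i < NRES; i++) {
                c->h[i] = acquire(i);
                if (c->h[i] == INVALID) {
                        ctx_fini(c);    /* full teardown, no bookkeeping */
                        return -1;
                }
        }
        return 0;
}
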
 
-        return rc;
+int ptlrpc_init_portals(void)
+{
+        /* Add new portals network interface names here.
+         * Order is irrelevant! */
+        char *ni_names[] = { "kqswnal_ni",
+                             "kgmnal_ni",
+                             "ksocknal_ni",
+                             "ktoenal_ni",
+                             "tcpnal_ni",
+                             NULL };
+        int   rc;
+        int   i;
+        
+        LASSERT (ptlrpc_ninterfaces == 0);
+
+        for (i = 0; ni_names[i] != NULL; i++) {
+                LASSERT (ptlrpc_ninterfaces < 
+                         sizeof (ptlrpc_interfaces)/sizeof (ptlrpc_interfaces[0]));
+                
+                rc = ptlrpc_ni_init (ni_names[i],
+                                     &ptlrpc_interfaces[ptlrpc_ninterfaces]);
+                if (rc == 0)
+                        ptlrpc_ninterfaces++;
+        }
+        
+        if (ptlrpc_ninterfaces == 0) {
+                CERROR("network initialisation failed: is a NAL module loaded?\n");
+                return -EIO;
+        }
+        return 0;
 }
 
 void ptlrpc_exit_portals(void)
 {
-        PtlEQFree(request_out_eq);
-        PtlEQFree(reply_out_eq);
-        PtlEQFree(reply_in_eq);
-        PtlEQFree(bulk_source_eq);
-        PtlEQFree(bulk_sink_eq);
-
-        if (qswnal_nip != NULL)
-                inter_module_put("kqswnal_ni");
-        if (socknal_nip != NULL)
-                inter_module_put("ksocknal_ni");
-        if (gmnal_nip != NULL)
-                inter_module_put("kgmnal_ni");
+        while (ptlrpc_ninterfaces > 0)
+                ptlrpc_ni_fini (&ptlrpc_interfaces[--ptlrpc_ninterfaces]);
 }