From 5c89556f842cb2a51749a075d18fdffc61ea56d3 Mon Sep 17 00:00:00 2001 From: ericm Date: Fri, 7 Nov 2003 15:50:56 +0000 Subject: [PATCH] merge b_devel to b_eq: 20031107 kernel passed sanity, but liblustre broken since lconf changes. need fix by robert. --- lnet/archdep.m4 | 24 +- lnet/include/linux/kp30.h | 12 + lnet/include/linux/portals_lib.h | 5 + lnet/include/lnet/lib-types.h | 84 +- lnet/klnds/Makefile.am | 4 +- lnet/klnds/iblnd/.cvsignore | 3 + lnet/klnds/iblnd/Makefile.am | 10 + lnet/klnds/iblnd/ibnal.c | 2146 ++++++++ lnet/klnds/iblnd/ibnal.h | 564 ++ lnet/klnds/iblnd/ibnal_cb.c | 1288 +++++ lnet/klnds/iblnd/ibnal_send_recv_self_testing.c | 116 + lnet/klnds/iblnd/uagent.c | 391 ++ lnet/klnds/qswlnd/qswlnd.c | 2 +- lnet/klnds/qswlnd/qswlnd_cb.c | 13 +- lnet/klnds/scimaclnd/scimacnal.c | 2 +- lnet/klnds/scimaclnd/scimacnal_cb.c | 2 +- lnet/klnds/socklnd/socklnd.c | 277 +- lnet/klnds/socklnd/socklnd.h | 39 +- lnet/klnds/socklnd/socklnd_cb.c | 345 +- lnet/klnds/toelnd/toenal_cb.c | 21 +- lnet/libcfs/debug.c | 109 +- lnet/libcfs/module.c | 139 +- lnet/lnet/api-init.c | 2 +- lnet/lnet/lib-init.c | 30 +- lnet/lnet/lib-move.c | 201 +- lnet/lnet/lib-msg.c | 2 +- lnet/router/router.c | 12 +- lnet/tests/ping_cli.c | 8 +- lnet/tests/sping_cli.c | 7 +- lnet/utils/acceptor.c | 172 +- lnet/utils/portals.c | 224 +- lnet/utils/ptlctl.c | 4 +- .../patches/iod-stock-exports-2.4.22.patch | 52 + .../patches/kernel_text_address-2.4.20-rh.patch | 68 + .../kernel_text_address-2.4.20-vanilla.patch | 56 + .../patches/linux-2.4.22-xattr-0.8.54.patch | 5460 ++++++++++++++++++++ .../patches/nfs_export_kernel-2.4.22.patch | 746 +++ lustre/kernel_patches/series/vanilla-2.4.22 | 30 + lustre/portals/archdep.m4 | 24 +- lustre/portals/include/linux/kp30.h | 12 + lustre/portals/include/linux/portals_lib.h | 5 + lustre/portals/include/portals/lib-types.h | 84 +- lustre/portals/knals/Makefile.am | 4 +- lustre/portals/knals/ibnal/.cvsignore | 3 + lustre/portals/knals/ibnal/Makefile.am | 10 + lustre/portals/knals/ibnal/ibnal.c | 2146 ++++++++ lustre/portals/knals/ibnal/ibnal.h | 564 ++ lustre/portals/knals/ibnal/ibnal_cb.c | 1288 +++++ .../knals/ibnal/ibnal_send_recv_self_testing.c | 116 + lustre/portals/knals/ibnal/uagent.c | 391 ++ lustre/portals/knals/qswnal/qswnal.c | 2 +- lustre/portals/knals/qswnal/qswnal_cb.c | 13 +- lustre/portals/knals/scimacnal/scimacnal.c | 2 +- lustre/portals/knals/scimacnal/scimacnal_cb.c | 2 +- lustre/portals/knals/socknal/socknal.c | 277 +- lustre/portals/knals/socknal/socknal.h | 39 +- lustre/portals/knals/socknal/socknal_cb.c | 345 +- lustre/portals/knals/toenal/toenal_cb.c | 21 +- lustre/portals/libcfs/debug.c | 109 +- lustre/portals/libcfs/module.c | 139 +- lustre/portals/portals/api-init.c | 2 +- lustre/portals/portals/lib-init.c | 30 +- lustre/portals/portals/lib-move.c | 201 +- lustre/portals/portals/lib-msg.c | 2 +- lustre/portals/router/router.c | 12 +- lustre/portals/tests/ping_cli.c | 8 +- lustre/portals/tests/sping_cli.c | 7 +- lustre/portals/utils/acceptor.c | 172 +- lustre/portals/utils/portals.c | 224 +- lustre/portals/utils/ptlctl.c | 4 +- 70 files changed, 17366 insertions(+), 1562 deletions(-) create mode 100644 lnet/klnds/iblnd/.cvsignore create mode 100644 lnet/klnds/iblnd/Makefile.am create mode 100644 lnet/klnds/iblnd/ibnal.c create mode 100644 lnet/klnds/iblnd/ibnal.h create mode 100644 lnet/klnds/iblnd/ibnal_cb.c create mode 100644 lnet/klnds/iblnd/ibnal_send_recv_self_testing.c create mode 100644 lnet/klnds/iblnd/uagent.c create mode 100644 
lustre/kernel_patches/patches/iod-stock-exports-2.4.22.patch create mode 100644 lustre/kernel_patches/patches/kernel_text_address-2.4.20-rh.patch create mode 100644 lustre/kernel_patches/patches/kernel_text_address-2.4.20-vanilla.patch create mode 100644 lustre/kernel_patches/patches/linux-2.4.22-xattr-0.8.54.patch create mode 100644 lustre/kernel_patches/patches/nfs_export_kernel-2.4.22.patch create mode 100644 lustre/kernel_patches/series/vanilla-2.4.22 create mode 100644 lustre/portals/knals/ibnal/.cvsignore create mode 100644 lustre/portals/knals/ibnal/Makefile.am create mode 100644 lustre/portals/knals/ibnal/ibnal.c create mode 100644 lustre/portals/knals/ibnal/ibnal.h create mode 100644 lustre/portals/knals/ibnal/ibnal_cb.c create mode 100644 lustre/portals/knals/ibnal/ibnal_send_recv_self_testing.c create mode 100644 lustre/portals/knals/ibnal/uagent.c diff --git a/lnet/archdep.m4 b/lnet/archdep.m4 index 0aa83b7..a9c4ba8 100644 --- a/lnet/archdep.m4 +++ b/lnet/archdep.m4 @@ -297,6 +297,28 @@ AC_SUBST(with_gm) AC_SUBST(GMNAL) +#fixme: where are the default IB includes? +default_ib_include_dir=/usr/local/ib/include +an_ib_include_file=vapi.h + +AC_ARG_WITH(ib, [ --with-ib=[yes/no/path] Path to IB includes], with_ib=$withval, with_ib=$default_ib) +AC_MSG_CHECKING(if IB headers are present) +if test "$with_ib" = yes; then + with_ib=$default_ib_include_dir +fi +if test "$with_ib" != no -a -f ${with_ib}/${an_ib_include_file}; then + AC_MSG_RESULT(yes) + IBNAL="ibnal" + with_ib="-I${with_ib}" +else + AC_MSG_RESULT(no) + IBNAL="" + with_ib="" +fi +AC_SUBST(IBNAL) +AC_SUBST(with_ib) + + def_scamac=/opt/scali/include AC_ARG_WITH(scamac, [ --with-scamac=[yes/no/path] Path to ScaMAC includes (default=/opt/scali/include)], with_scamac=$withval, with_scamac=$def_scamac) AC_MSG_CHECKING(if ScaMAC headers are present) @@ -317,7 +339,7 @@ AC_SUBST(with_scamac) AC_SUBST(SCIMACNAL) CFLAGS="$KCFLAGS" -CPPFLAGS="$KINCFLAGS $KCPPFLAGS $MFLAGS $enable_zerocopy $enable_affinity $with_quadrics $with_gm $with_scamac " +CPPFLAGS="$KINCFLAGS $KCPPFLAGS $MFLAGS $enable_zerocopy $enable_affinity $with_quadrics $with_gm $with_scamac $with_ib" AM_CONDITIONAL(LIBLUSTRE, test x$host_cpu = xlib) AC_SUBST(MOD_LINK) diff --git a/lnet/include/linux/kp30.h b/lnet/include/linux/kp30.h index 37bf8ce..f676c35 100644 --- a/lnet/include/linux/kp30.h +++ b/lnet/include/linux/kp30.h @@ -43,6 +43,7 @@ extern unsigned int portal_cerror; #define S_GMNAL (1 << 19) #define S_PTLROUTER (1 << 20) #define S_COBD (1 << 21) +#define S_IBNAL (1 << 22) /* If you change these values, please keep portals/utils/debug.c * up to date! 
*/ @@ -77,6 +78,8 @@ extern unsigned int portal_cerror; # define THREAD_SIZE 8192 #endif +#define LUSTRE_TRACE_SIZE (THREAD_SIZE >> 5) + #ifdef __KERNEL__ # ifdef __ia64__ # define CDEBUG_STACK (THREAD_SIZE - \ @@ -595,6 +598,10 @@ extern struct prof_ent prof_ents[MAX_PROFS]; #endif /* PORTALS_PROFILING */ /* debug.c */ +extern spinlock_t stack_backtrace_lock; + +char *portals_debug_dumpstack(void); +char *portals_nid2str(int nal, ptl_nid_t nid, char *str); void portals_run_upcall(char **argv); void portals_run_lbug_upcall(char * file, const char *fn, const int line); void portals_debug_dumplog(void); @@ -1034,6 +1041,8 @@ enum { TOENAL, TCPNAL, SCIMACNAL, + ROUTER, + IBNAL, NAL_ENUM_END_MARKER }; @@ -1042,9 +1051,12 @@ extern ptl_handle_ni_t kqswnal_ni; extern ptl_handle_ni_t ksocknal_ni; extern ptl_handle_ni_t ktoenal_ni; extern ptl_handle_ni_t kgmnal_ni; +extern ptl_handle_ni_t kibnal_ni; extern ptl_handle_ni_t kscimacnal_ni; #endif +#define PTL_NALFMT_SIZE 16 + #define NAL_MAX_NR (NAL_ENUM_END_MARKER - 1) #define NAL_CMD_REGISTER_PEER_FD 100 diff --git a/lnet/include/linux/portals_lib.h b/lnet/include/linux/portals_lib.h index 14d60c6..609290d 100644 --- a/lnet/include/linux/portals_lib.h +++ b/lnet/include/linux/portals_lib.h @@ -47,6 +47,11 @@ static inline int size_round16(int val) return (val + 0xf) & (~0xf); } +static inline int size_round32(int val) +{ + return (val + 0x1f) & (~0x1f); +} + static inline int size_round0(int val) { if (!val) diff --git a/lnet/include/lnet/lib-types.h b/lnet/include/lnet/lib-types.h index e5447d7..30e56af 100644 --- a/lnet/include/lnet/lib-types.h +++ b/lnet/include/lnet/lib-types.h @@ -54,72 +54,68 @@ typedef enum { PTL_MSG_HELLO, } ptl_msg_type_t; -/* Each of these structs should start with an odd number of - * __u32, or the compiler could add its own padding and confuse - * everyone. - * - * Also, "length" needs to be at offset 28 of each struct. - */ +/* The variant fields of the portals message header are aligned on an 8 + * byte boundary in the message header. Note that all types used in these + * wire structs MUST be fixed size and the smaller types are placed at the + * end. 
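 * (As an aside, not part of this change: one way to catch accidental
 * padding at compile time is a negative-array-size check along the
 * lines of
 *      typedef char hdr_size_check[(sizeof (ptl_hdr_t) == EXPECTED) ? 1 : -1];
 * where EXPECTED is a placeholder for the intended wire size, which is
 * not spelled out here.)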
*/ typedef struct ptl_ack { - ptl_size_t mlength; - ptl_handle_wire_t dst_wmd; - ptl_match_bits_t match_bits; - ptl_size_t length; /* common length (0 for acks) moving out RSN */ + ptl_handle_wire_t dst_wmd; + ptl_match_bits_t match_bits; + ptl_size_t mlength; } WIRE_ATTR ptl_ack_t; typedef struct ptl_put { - ptl_pt_index_t ptl_index; - ptl_handle_wire_t ack_wmd; - ptl_match_bits_t match_bits; - ptl_size_t length; /* common length moving out RSN */ - ptl_size_t offset; - ptl_hdr_data_t hdr_data; + ptl_handle_wire_t ack_wmd; + ptl_match_bits_t match_bits; + ptl_hdr_data_t hdr_data; + ptl_pt_index_t ptl_index; + ptl_size_t offset; } WIRE_ATTR ptl_put_t; typedef struct ptl_get { - ptl_pt_index_t ptl_index; - ptl_handle_wire_t return_wmd; - ptl_match_bits_t match_bits; - ptl_size_t length; /* common length (0 for gets) moving out RSN */ - ptl_size_t src_offset; - ptl_size_t return_offset; /* unused: going RSN */ - ptl_size_t sink_length; + ptl_handle_wire_t return_wmd; + ptl_match_bits_t match_bits; + ptl_pt_index_t ptl_index; + ptl_size_t src_offset; + ptl_size_t sink_length; } WIRE_ATTR ptl_get_t; typedef struct ptl_reply { - __u32 unused1; /* unused fields going RSN */ - ptl_handle_wire_t dst_wmd; - ptl_size_t dst_offset; /* unused: going RSN */ - __u32 unused2; - ptl_size_t length; /* common length moving out RSN */ + ptl_handle_wire_t dst_wmd; } WIRE_ATTR ptl_reply_t; +typedef struct ptl_hello { + __u64 incarnation; + __u32 type; +} WIRE_ATTR ptl_hello_t; + typedef struct { - ptl_nid_t dest_nid; - ptl_nid_t src_nid; - ptl_pid_t dest_pid; - ptl_pid_t src_pid; - __u32 type; /* ptl_msg_type_t */ + ptl_nid_t dest_nid; + ptl_nid_t src_nid; + ptl_pid_t dest_pid; + ptl_pid_t src_pid; + __u32 type; /* ptl_msg_type_t */ + __u32 payload_length; /* payload data to follow */ + /*<------__u64 aligned------->*/ union { - ptl_ack_t ack; - ptl_put_t put; - ptl_get_t get; + ptl_ack_t ack; + ptl_put_t put; + ptl_get_t get; ptl_reply_t reply; + ptl_hello_t hello; } msg; } WIRE_ATTR ptl_hdr_t; -/* All length fields in individual unions at same offset */ -/* LASSERT for same in lib-move.c */ -#define PTL_HDR_LENGTH(h) ((h)->msg.ack.length) - /* A HELLO message contains the portals magic number and protocol version * code in the header's dest_nid, the peer's NID in the src_nid, and - * PTL_MSG_HELLO in the type field. All other fields are zero (including - * PTL_HDR_LENGTH; i.e. no payload). + * PTL_MSG_HELLO in the type field. All other common fields are zero + * (including payload_size; i.e. no payload). * This is for use by byte-stream NALs (e.g. TCP/IP) to check the peer is * running the same protocol and to find out its NID, so that hosts with * multiple IP interfaces can have a single NID. These NALs should exchange - * HELLO messages when a connection is first established. */ + * HELLO messages when a connection is first established. + * Individual NALs can put whatever else they fancy in ptl_hdr_t::msg. 
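 * As an illustration only (not part of this change), a byte-stream NAL
 * might fill in its HELLO header along these lines, where my_nid stands
 * in for the sender's NID and MAGIC_AND_VERSION for however the magic
 * number and version code are packed into dest_nid:
 *
 *      memset (&hdr, 0, sizeof (hdr));
 *      hdr.dest_nid = MAGIC_AND_VERSION;
 *      hdr.src_nid  = my_nid;
 *      hdr.type     = PTL_MSG_HELLO;
 *
 * leaving payload_length zero; msg.hello can then carry NAL-specific
 * data such as an incarnation number.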
+ */ typedef struct { __u32 magic; /* PORTALS_PROTO_MAGIC */ __u16 version_major; /* increment on incompatible change */ @@ -129,7 +125,7 @@ typedef struct { #define PORTALS_PROTO_MAGIC 0xeebc0ded #define PORTALS_PROTO_VERSION_MAJOR 0 -#define PORTALS_PROTO_VERSION_MINOR 1 +#define PORTALS_PROTO_VERSION_MINOR 3 typedef struct { long recv_count, recv_length, send_count, send_length, drop_count, diff --git a/lnet/klnds/Makefile.am b/lnet/klnds/Makefile.am index fed2785..25aab9d 100644 --- a/lnet/klnds/Makefile.am +++ b/lnet/klnds/Makefile.am @@ -3,5 +3,5 @@ # This code is issued under the GNU General Public License. # See the file COPYING in this distribution -DIST_SUBDIRS= socknal toenal qswnal gmnal scimacnal -SUBDIRS= socknal toenal @QSWNAL@ @GMNAL@ @SCIMACNAL@ +DIST_SUBDIRS= socknal toenal qswnal gmnal scimacnal ibnal +SUBDIRS= socknal toenal @QSWNAL@ @GMNAL@ @SCIMACNAL@ @IBNAL@ diff --git a/lnet/klnds/iblnd/.cvsignore b/lnet/klnds/iblnd/.cvsignore new file mode 100644 index 0000000..e995588 --- /dev/null +++ b/lnet/klnds/iblnd/.cvsignore @@ -0,0 +1,3 @@ +.deps +Makefile +Makefile.in diff --git a/lnet/klnds/iblnd/Makefile.am b/lnet/klnds/iblnd/Makefile.am new file mode 100644 index 0000000..84818dc --- /dev/null +++ b/lnet/klnds/iblnd/Makefile.am @@ -0,0 +1,10 @@ +include ../../Rules.linux + +MODULE = kibnal +modulenet_DATA = kibnal.o +EXTRA_PROGRAMS = kibnal + + +DEFS = +CPPFLAGS=@CPPFLAGS@ @with_ib@ +kibnal_SOURCES = ibnal.h ibnal.c ibnal_cb.c diff --git a/lnet/klnds/iblnd/ibnal.c b/lnet/klnds/iblnd/ibnal.c new file mode 100644 index 0000000..948badf --- /dev/null +++ b/lnet/klnds/iblnd/ibnal.c @@ -0,0 +1,2146 @@ +/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- + * vim:expandtab:shiftwidth=8:tabstop=8: + * + * Based on ksocknal, qswnal, and gmnal + * + * Copyright (C) 2003 LANL + * Author: HB Chen + * Los Alamos National Lab + * + * Portals is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * Portals is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Portals; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + */ + +#include "ibnal.h" + +// portal handle ID for this IB-NAL +ptl_handle_ni_t kibnal_ni; + +// message send buffer mutex +spinlock_t MSBuf_mutex[NUM_MBUF]; + +// message recv buffer mutex +spinlock_t MRBuf_mutex[NUM_MBUF]; + +// IB-NAL API information +nal_t kibnal_api; + +// nal's private data +kibnal_data_t kibnal_data; + +int ibnal_debug = 0; +VAPI_pd_hndl_t Pd_hndl; +unsigned int Num_posted_recv_buf; + +// registered send buffer list +Memory_buffer_info MSbuf_list[NUM_MBUF]; + +// registered recv buffer list +Memory_buffer_info MRbuf_list[NUM_MBUF]; + +// +// for router +// currently there is no need fo IBA +// +kpr_nal_interface_t kibnal_router_interface = { + kprni_nalid: IBNAL, + kprni_arg: &kibnal_data, + kprni_fwd: kibnal_fwd_packet, // forward data to router + // is router invloving the + // data transmision +}; + + +// Queue-pair list +QP_info QP_list[NUM_QPS]; + +// information associated with a HCA +HCA_info Hca_data; + +// something about HCA +VAPI_hca_hndl_t Hca_hndl; // assume we only use one HCA now +VAPI_hca_vendor_t Hca_vendor; +VAPI_hca_cap_t Hca_cap; +VAPI_hca_port_t Hca_port_1_props; +VAPI_hca_port_t Hca_port_2_props; +VAPI_hca_attr_t Hca_attr; +VAPI_hca_attr_mask_t Hca_attr_mask; +VAPI_cq_hndl_t Cq_RQ_hndl; // CQ's handle +VAPI_cq_hndl_t Cq_SQ_hndl; // CQ's handle +VAPI_cq_hndl_t Cq_hndl; // CQ's handle +Remote_QP_Info L_QP_data; +Remote_QP_Info R_QP_data; + + +// +// forward API +// +int +kibnal_forward(nal_t *nal, + int id, + void *args, + size_t args_len, + void *ret, + size_t ret_len) +{ + kibnal_data_t *knal_data = nal->nal_data; + nal_cb_t *nal_cb = knal_data->kib_cb; + + // ASSERT checking + LASSERT (nal == &kibnal_api); + LASSERT (knal_data == &kibnal_data); + LASSERT (nal_cb == &kibnal_lib); + + // dispatch forward API function + + CDEBUG(D_NET,"kibnal_forward: function id = %d\n", id); + + lib_dispatch(nal_cb, knal_data, id, args, ret); + + CDEBUG(D_TRACE,"IBNAL- Done kibnal_forward\n"); + + return PTL_OK; // always return PTL_OK +} + +// +// lock API +// +void +kibnal_lock(nal_t *nal, unsigned long *flags) +{ + kibnal_data_t *knal_data = nal->nal_data; + nal_cb_t *nal_cb = knal_data->kib_cb; + + // ASSERT checking + LASSERT (nal == &kibnal_api); + LASSERT (knal_data == &kibnal_data); + LASSERT (nal_cb == &kibnal_lib); + + // disable logical interrrupt + nal_cb->cb_cli(nal_cb,flags); + + CDEBUG(D_TRACE,"IBNAL-Done kibnal_lock\n"); + +} + +// +// unlock API +// +void +kibnal_unlock(nal_t *nal, unsigned long *flags) +{ + kibnal_data_t *k = nal->nal_data; + nal_cb_t *nal_cb = k->kib_cb; + + // ASSERT checking + LASSERT (nal == &kibnal_api); + LASSERT (k == &kibnal_data); + LASSERT (nal_cb == &kibnal_lib); + + // enable logical interrupt + nal_cb->cb_sti(nal_cb,flags); + + CDEBUG(D_TRACE,"IBNAL-Done kibnal_unlock"); + +} + +// +// shutdown API +// showdown this network interface +// +int +kibnal_shutdown(nal_t *nal, int ni) +{ + VAPI_ret_t vstat; + kibnal_data_t *k = nal->nal_data; + nal_cb_t *nal_cb = k->kib_cb; + + // assert checking + LASSERT (nal == &kibnal_api); + LASSERT (k == &kibnal_data); + LASSERT (nal_cb == &kibnal_lib); + + // take down this IB network interface + // there is not corresponding cb function to hande this + // do we actually need this one + // reference to IB network interface shutdown + // + + vstat = IB_Close_HCA(); + + if (vstat != VAPI_OK) { + CERROR("Failed to close HCA - %s\n",VAPI_strerror(vstat)); + return (~PTL_OK); + } + + CDEBUG(D_TRACE,"IBNAL- Done kibnal_shutdown\n"); + + return PTL_OK; +} + +// +// yield +// when 
do we call this yield function +// +void +kibnal_yield( nal_t *nal ) +{ + kibnal_data_t *k = nal->nal_data; + nal_cb_t *nal_cb = k->kib_cb; + + // assert checking + LASSERT (nal == &kibnal_api); + LASSERT (k == &kibnal_data); + LASSERT (nal_cb == &kibnal_lib); + + // check under what condition that we need to + // call schedule() + // who set this need_resched + if (current->need_resched) + schedule(); + + CDEBUG(D_TRACE,"IBNAL-Done kibnal_yield"); + + return; +} + +// +// ibnal init +// +nal_t * +kibnal_init(int interface, // no use here + ptl_pt_index_t ptl_size, + ptl_ac_index_t ac_size, + ptl_pid_t requested_pid // no use here + ) +{ + nal_t *nal = NULL; + nal_cb_t *nal_cb = NULL; + kibnal_data_t *nal_data = NULL; + int rc; + + unsigned int nnids = 1; // number of nids + // do we know how many nodes are in this + // system related to this kib_nid + // + + CDEBUG(D_NET, "kibnal_init:calling lib_init with nid 0x%u\n", + kibnal_data.kib_nid); + + + CDEBUG(D_NET, "kibnal_init: interface [%d], ptl_size [%d], ac_size[%d]\n", + interface, ptl_size, ac_size); + CDEBUG(D_NET, "kibnal_init: &kibnal_lib 0x%X\n", &kibnal_lib); + CDEBUG(D_NET, "kibnal_init: kibnal_data.kib_nid %d\n", kibnal_data.kib_nid); + + rc = lib_init(&kibnal_lib, + kibnal_data.kib_nid, + 0, // process id is set as 0 + nnids, + ptl_size, + ac_size); + + if(rc != PTL_OK) { + CERROR("kibnal_init: Failed lib_init with nid 0x%u, rc=%d\n", + kibnal_data.kib_nid,rc); + } + else { + CDEBUG(D_NET,"kibnal_init: DONE lib_init with nid 0x%x%x\n", + kibnal_data.kib_nid); + } + + return &kibnal_api; + +} + + +// +// called before remove ibnal kernel module +// +void __exit +kibnal_finalize(void) +{ + struct list_head *tmp; + + inter_module_unregister("kibnal_ni"); + + // release resources allocated to this Infiniband network interface + PtlNIFini(kibnal_ni); + + lib_fini(&kibnal_lib); + + IB_Close_HCA(); + + // how much do we need to do here? 
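        // Each kibnal_rx_t still queued on kib_list is unlinked and freed
        // below.  Since entries are deleted mid-walk, list_for_each_safe()
        // (which keeps a lookahead pointer) would be the usual idiom here,
        // rather than advancing tmp by hand inside list_for_each().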
+ list_for_each(tmp, &kibnal_data.kib_list) { + kibnal_rx_t *conn; + conn = list_entry(tmp, kibnal_rx_t, krx_item); + CDEBUG(D_IOCTL, "freeing conn %p\n",conn); + tmp = tmp->next; + list_del(&conn->krx_item); + PORTAL_FREE(conn, sizeof(*conn)); + } + + CDEBUG(D_MALLOC,"done kmem %d\n",atomic_read(&portal_kmemory)); + CDEBUG(D_TRACE,"IBNAL-Done kibnal_finalize\n"); + + return; +} + + +// +// * k_server_thread is a kernel thread +// use a shared memory ro exchange HCA's data with a pthread in user +// address space +// * will be replaced when CM is used to handle communication management +// + +void k_server_thread(Remote_QP_Info *hca_data) +{ + int segment_id; + const int shared_segment_size = sizeof(Remote_QP_Info); + key_t key = HCA_EXCHANGE_SHM_KEY; + unsigned long raddr; + int exchanged_done = NO; + int i; + + Remote_QP_Info *exchange_hca_data; + + long *n; + long *uaddr; + long ret = 0; + + // create a shared memory with pre-agreement key + segment_id = sys_shmget(key, + shared_segment_size, + IPC_CREAT | 0666); + + + // attached to shared memoru + // raddr is pointed to an user address space + // use this address to update shared menory content + ret = sys_shmat(segment_id, 0 , SHM_RND, &raddr); + +#ifdef IBNAL_DEBUG + if(ret >= 0) { + CDEBUG(D_NET,"k_server_thread: Shared memory attach success ret = 0X%d,&raddr" + " 0X%x (*(&raddr))=0x%x \n", ret, &raddr, (*(&raddr))); + printk("k_server_thread: Shared memory attach success ret = 0X%d, &raddr" + " 0X%x (*(&raddr))=0x%x \n", ret, &raddr, (*(&raddr))); + } + else { + CERROR("k_server_thread: Shared memory attach failed ret = 0x%d \n", ret); + printk("k_server_thread: Shared memory attach failed ret = 0x%d \n", ret); + return; + } +#endif + + n = &raddr; + uaddr = *n; // get the U-address + /* cast uaddr to exchange_hca_data */ + exchange_hca_data = (Remote_QP_Info *) uaddr; + + /* copy data from local HCA to shared memory */ + exchange_hca_data->opcode = hca_data->opcode; + exchange_hca_data->length = hca_data->length; + + for(i=0; i < NUM_QPS; i++) { + exchange_hca_data->dlid[i] = hca_data->dlid[i]; + exchange_hca_data->rqp_num[i] = hca_data->rqp_num[i]; + } + + // periodically check shared memory until get updated + // remote HCA's data from user mode pthread + while(exchanged_done == NO) { + if(exchange_hca_data->opcode == RECV_QP_INFO){ + exchanged_done = YES; + /* copy data to local buffer from shared memory */ + hca_data->opcode = exchange_hca_data->opcode; + hca_data->length = exchange_hca_data->length; + + for(i=0; i < NUM_QPS; i++) { + hca_data->dlid[i] = exchange_hca_data->dlid[i]; + hca_data->rqp_num[i] = exchange_hca_data->rqp_num[i]; + } + break; + } + else { + schedule_timeout(1000); + } + } + + // detached shared memory + sys_shmdt(uaddr); + + CDEBUG(D_NET, "Exit from kernel thread: k_server_thread \n"); + printk("Exit from kernel thread: k_server_thread \n"); + + return; + +} + +// +// create QP +// +VAPI_ret_t +create_qp(QP_info *qp, int qp_index) +{ + + VAPI_ret_t vstat; + VAPI_qp_init_attr_t qp_init_attr; + VAPI_qp_prop_t qp_prop; + + qp->hca_hndl = Hca_hndl; + qp->port = 1; // default + qp->slid = Hca_port_1_props.lid; + qp->hca_port = Hca_port_1_props; + + + /* Queue Pair Creation Attributes */ + qp_init_attr.cap.max_oust_wr_rq = NUM_WQE; + qp_init_attr.cap.max_oust_wr_sq = NUM_WQE; + qp_init_attr.cap.max_sg_size_rq = NUM_SG; + qp_init_attr.cap.max_sg_size_sq = NUM_SG; + qp_init_attr.pd_hndl = qp->pd_hndl; + qp_init_attr.rdd_hndl = 0; + qp_init_attr.rq_cq_hndl = qp->rq_cq_hndl; + /* we use here polling */ + 
//qp_init_attr.rq_sig_type = VAPI_SIGNAL_REQ_WR; + qp_init_attr.rq_sig_type = VAPI_SIGNAL_ALL_WR; + qp_init_attr.sq_cq_hndl = qp->sq_cq_hndl; + /* we use here polling */ + //qp_init_attr.sq_sig_type = VAPI_SIGNAL_REQ_WR; + qp_init_attr.sq_sig_type = VAPI_SIGNAL_ALL_WR; + // transport servce - reliable connection + + qp_init_attr.ts_type = VAPI_TS_RC; + + vstat = VAPI_create_qp(qp->hca_hndl, + &qp_init_attr, + &qp->qp_hndl, &qp_prop); + + if (vstat != VAPI_OK) { + CERROR("Failed creating QP. Return Failed - %s\n",VAPI_strerror(vstat)); + return vstat; + } + + qp->qp_num = qp_prop.qp_num; // the qp number + qp->last_posted_send_id = 0; // user defined work request ID + qp->last_posted_rcv_id = 0; // user defined work request ID + qp->cur_send_outstanding = 0; + qp->cur_posted_rcv_bufs = 0; + qp->snd_rcv_balance = 0; + + CDEBUG(D_OTHER, "create_qp: qp_num = %d, slid = %d, qp_hndl = 0X%X", + qp->qp_num, qp->slid, qp->qp_hndl); + + // initialize spin-lock mutex variables + spin_lock_init(&(qp->snd_mutex)); + spin_lock_init(&(qp->rcv_mutex)); + spin_lock_init(&(qp->bl_mutex)); + spin_lock_init(&(qp->cln_mutex)); + // number of outstanding requests on the send Q + qp->cur_send_outstanding = 0; + // number of posted receive buffers + qp->cur_posted_rcv_bufs = 0; + qp->snd_rcv_balance = 0; + + return(VAPI_OK); + +} + +// +// initialize a UD qp state to RTR and RTS +// +VAPI_ret_t +init_qp_UD(QP_info *qp, int qp_index) +{ + VAPI_qp_attr_t qp_attr; + VAPI_qp_init_attr_t qp_init_attr; + VAPI_qp_attr_mask_t qp_attr_mask; + VAPI_qp_cap_t qp_cap; + VAPI_ret_t vstat; + + /* Move from RST to INIT */ + /* Change QP to INIT */ + + CDEBUG(D_OTHER, "Changing QP state to INIT qp-index = %d\n", qp_index); + + QP_ATTR_MASK_CLR_ALL(qp_attr_mask); + + qp_attr.qp_state = VAPI_INIT; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE); + + CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask); + + qp_attr.pkey_ix = 0; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PKEY_IX); + + CDEBUG(D_OTHER, "pkey_ix qp_attr_mask = 0X%x\n", qp_attr_mask); + + qp_attr.port = qp->port; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PORT); + + CDEBUG(D_OTHER, "port qp_attr_mask = 0X%x\n", qp_attr_mask); + + qp_attr.qkey = 0; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QKEY); + + CDEBUG(D_OTHER, "qkey qp_attr_mask = 0X%x\n", qp_attr_mask); + + /* If I do not set this mask, I get an error from HH. QPM should catch it */ + + vstat = VAPI_modify_qp(qp->hca_hndl, + qp->qp_hndl, + &qp_attr, + &qp_attr_mask, + &qp_cap); + + if (vstat != VAPI_OK) { + CERROR("Failed modifying QP from RST to INIT. %s\n",VAPI_strerror(vstat)); + return(vstat); + } + + CDEBUG(D_OTHER, "Modifying QP from RST to INIT.\n"); + + vstat= VAPI_query_qp(qp->hca_hndl, + qp->qp_hndl, + &qp_attr, + &qp_attr_mask, + &qp_init_attr); + + if (vstat != VAPI_OK) { + CERROR("Failed query QP. %s\n",VAPI_strerror(vstat)); + return(vstat); + } + + /* Move from INIT to RTR */ + /* Change QP to RTR */ + CDEBUG(D_OTHER, "Changing QP state to RTR\n"); + + QP_ATTR_MASK_CLR_ALL(qp_attr_mask); + + qp_attr.qp_state = VAPI_RTR; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE); + + CDEBUG(D_OTHER, "INIT to RTR- qp_state : qp_attr_mask = 0X%x\n", qp_attr_mask); + + vstat = VAPI_modify_qp(qp->hca_hndl, + qp->qp_hndl, + &qp_attr, + &qp_attr_mask, + &qp_cap); + + if (vstat != VAPI_OK) { + CERROR("Failed modifying QP from INIT to RTR. 
%s\n",VAPI_strerror(vstat)); + return(vstat); + } + + CDEBUG(D_OTHER, "Modifying QP from INIT to RTR.\n"); + + vstat= VAPI_query_qp(qp->hca_hndl, + qp->qp_hndl, + &qp_attr, + &qp_attr_mask, + &qp_init_attr); + + if (vstat != VAPI_OK) { + CERROR("Failed query QP. %s\n",VAPI_strerror(vstat)); + return(vstat); + } + + /* RTR to RTS - Change QP to RTS */ + CDEBUG(D_OTHER, "Changing QP state to RTS\n"); + + QP_ATTR_MASK_CLR_ALL(qp_attr_mask); + + qp_attr.qp_state = VAPI_RTS; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE); + + qp_attr.sq_psn = START_SQ_PSN; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_SQ_PSN); + + vstat = VAPI_modify_qp(qp->hca_hndl, + qp->qp_hndl, + &qp_attr, + &qp_attr_mask, + &qp_cap); + + if (vstat != VAPI_OK) { + CERROR("Failed modifying QP from RTR to RTS. %s:%s\n", + VAPI_strerror_sym(vstat), + VAPI_strerror(vstat)); + return(vstat); + } + + CDEBUG(D_OTHER, "Modifying QP from RTR to RTS. \n"); + + vstat= VAPI_query_qp(qp->hca_hndl, + qp->qp_hndl, + &qp_attr, + &qp_attr_mask, + &qp_init_attr); + + if (vstat != VAPI_OK) { + CERROR("Failed query QP. %s\n",VAPI_strerror(vstat)); + return(vstat); + } + + // + // a QP is at RTS state NOW + // + + CDEBUG(D_OTHER, "IBNAL- UD qp is at RTS NOW\n"); + + return(vstat); + +} + + + +// +// initialize a RC qp state to RTR and RTS +// RC transport service +// +VAPI_ret_t +init_qp_RC(QP_info *qp, int qp_index) +{ + VAPI_qp_attr_t qp_attr; + VAPI_qp_init_attr_t qp_init_attr; + VAPI_qp_attr_mask_t qp_attr_mask; + VAPI_qp_cap_t qp_cap; + VAPI_ret_t vstat; + + /* Move from RST to INIT */ + /* Change QP to INIT */ + + CDEBUG(D_OTHER, "Changing QP state to INIT qp-index = %d\n", qp_index); + + QP_ATTR_MASK_CLR_ALL(qp_attr_mask); + + qp_attr.qp_state = VAPI_INIT; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE); + + CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask); + + qp_attr.pkey_ix = 0; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PKEY_IX); + + CDEBUG(D_OTHER, "pkey_ix qp_attr_mask = 0X%x\n", qp_attr_mask); + + qp_attr.port = qp->port; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PORT); + + CDEBUG(D_OTHER, "port qp_attr_mask = 0X%x\n", qp_attr_mask); + + qp_attr.remote_atomic_flags = VAPI_EN_REM_WRITE | VAPI_EN_REM_READ; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_REMOTE_ATOMIC_FLAGS); + + CDEBUG(D_OTHER, "remote_atomic_flags qp_attr_mask = 0X%x\n", qp_attr_mask); + + /* If I do not set this mask, I get an error from HH. QPM should catch it */ + + vstat = VAPI_modify_qp(qp->hca_hndl, + qp->qp_hndl, + &qp_attr, + &qp_attr_mask, + &qp_cap); + + if (vstat != VAPI_OK) { + CERROR("Failed modifying QP from RST to INIT. %s\n",VAPI_strerror(vstat)); + return(vstat); + } + + vstat= VAPI_query_qp(qp->hca_hndl, + qp->qp_hndl, + &qp_attr, + &qp_attr_mask, + &qp_init_attr); + + if (vstat != VAPI_OK) { + CERROR("Failed query QP. 
%s\n",VAPI_strerror(vstat)); + return(vstat); + } + + /* Move from INIT to RTR */ + /* Change QP to RTR */ + CDEBUG(D_OTHER, "Changing QP state to RTR qp_indexi %d\n", qp_index); + + QP_ATTR_MASK_CLR_ALL(qp_attr_mask); + qp_attr.qp_state = VAPI_RTR; + + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE); + + CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask); + + qp_attr.av.sl = 0;/* RESPONDER_SL */ + qp_attr.av.grh_flag = FALSE; + qp_attr.av.dlid = qp->dlid;/*RESPONDER_LID;*/ + qp_attr.av.static_rate = 0; + qp_attr.av.src_path_bits = 0; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_AV); + + CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask); + + qp_attr.path_mtu = MTU_2048;// default is MTU_2048 + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PATH_MTU); + + CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask); + + qp_attr.rq_psn = START_RQ_PSN; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_RQ_PSN); + + CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask); + + qp_attr.qp_ous_rd_atom = NUM_WQE; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_OUS_RD_ATOM); + + CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask); + + qp_attr.pkey_ix = 0; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PKEY_IX); + + CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask); + + qp_attr.min_rnr_timer = 10; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_MIN_RNR_TIMER); + + CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask); + + qp_attr.dest_qp_num = qp->rqp_num; + + CDEBUG(D_OTHER, "remore qp num %d\n", qp->rqp_num); + + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_DEST_QP_NUM); + + CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask); + + vstat = VAPI_modify_qp(qp->hca_hndl, + qp->qp_hndl, + &qp_attr, + &qp_attr_mask, + &qp_cap); + + + if (vstat != VAPI_OK) { + CERROR("Failed modifying QP from INIT to RTR. qp_index %d - %s\n", + qp_index, VAPI_strerror(vstat)); + return(vstat); + } + + vstat= VAPI_query_qp(qp->hca_hndl, + qp->qp_hndl, + &qp_attr, + &qp_attr_mask, + &qp_init_attr); + + if (vstat != VAPI_OK) { + CERROR("Failed query QP. %s\n",VAPI_strerror(vstat)); + return(vstat); + } + + /* RTR to RTS - Change QP to RTS */ + CDEBUG(D_OTHER, "Changing QP state to RTS\n"); + + QP_ATTR_MASK_CLR_ALL(qp_attr_mask); + + qp_attr.qp_state = VAPI_RTS; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE); + + qp_attr.sq_psn = START_SQ_PSN; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_SQ_PSN); + + qp_attr.timeout = 0x18; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_TIMEOUT); + + qp_attr.retry_count = 10; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_RETRY_COUNT); + + qp_attr.rnr_retry = 14; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_RNR_RETRY); + + qp_attr.ous_dst_rd_atom = 100; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_OUS_DST_RD_ATOM); + + qp_attr.min_rnr_timer = 5; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_MIN_RNR_TIMER); + + vstat = VAPI_modify_qp(qp->hca_hndl, + qp->qp_hndl, + &qp_attr, + &qp_attr_mask, + &qp_cap); + + if (vstat != VAPI_OK) { + CERROR("Failed modifying QP from RTR to RTS. %s:%s\n", + VAPI_strerror_sym(vstat), VAPI_strerror(vstat)); + return(vstat); + } + + vstat= VAPI_query_qp(qp->hca_hndl, + qp->qp_hndl, + &qp_attr, + &qp_attr_mask, + &qp_init_attr); + + if (vstat != VAPI_OK) { + CERROR("Failed query QP. 
%s\n",VAPI_strerror(vstat)); + return(vstat); + } + + // + // a QP is at RTS state NOW + // + + CDEBUG(D_OTHER, "IBNAL- RC qp is at RTS NOW\n"); + + return(vstat); +} + + + +VAPI_ret_t +IB_Open_HCA(kibnal_data_t *kib_data) +{ + + VAPI_ret_t vstat; + VAPI_cqe_num_t cqe_active_num; + QP_info *qp; + int i; + int Num_posted_recv_buf; + + /* Open HCA */ + CDEBUG(D_PORTALS, "Opening an HCA\n"); + + vstat = VAPI_open_hca(HCA_ID, &Hca_hndl); + vstat = EVAPI_get_hca_hndl(HCA_ID, &Hca_hndl); + if (vstat != VAPI_OK) { + CERROR("Failed opening the HCA: %s. %s...\n",HCA_ID,VAPI_strerror(vstat)); + return(vstat); + } + + /* Get HCA CAP */ + vstat = VAPI_query_hca_cap(Hca_hndl, &Hca_vendor, &Hca_cap); + if (vstat != VAPI_OK) { + CERROR("Failed query hca cap %s\n",VAPI_strerror(vstat)); + return(vstat); + } + + /* Get port 1 info */ + vstat = VAPI_query_hca_port_prop(Hca_hndl, HCA_PORT_1 , &Hca_port_1_props); + if (vstat != VAPI_OK) { + CERROR("Failed query port cap %s\n",VAPI_strerror(vstat)); + return(vstat); + } + + /* Get port 2 info */ + vstat = VAPI_query_hca_port_prop(Hca_hndl, HCA_PORT_2, &Hca_port_2_props); + if (vstat != VAPI_OK) { + CERROR("Failed query port cap %s\n",VAPI_strerror(vstat)); + return(vstat); + } + + // Get a PD + CDEBUG(D_PORTALS, "Allocating PD \n"); + vstat = VAPI_alloc_pd(Hca_hndl,&Pd_hndl); + if (vstat != VAPI_OK) { + CERROR("Failed allocating a PD. %s\n",VAPI_strerror(vstat)); + return(vstat); + } + + vstat = createMemRegion(Hca_hndl, Pd_hndl); + if (vstat != VAPI_OK) { + CERROR("Failed registering a memory region.%s\n",VAPI_strerror(vstat)); + return(vstat); + } + + /* Create CQ for RQ*/ + CDEBUG(D_PORTALS, "Creating a send completion queue\n"); + + vstat = VAPI_create_cq(Hca_hndl, + NUM_CQE, + &Cq_hndl, + &cqe_active_num); + + if (vstat != VAPI_OK) { + CERROR("Failed creating a CQ. 
%s\n",VAPI_strerror(vstat)); + return(vstat); + } + + if(NUM_CQE == cqe_active_num) { + CERROR("VAPI_create_cq: NUM_CQE EQ cqe_active_num \n"); + } + else { + CDEBUG(D_NET, "VAPI_create_cq: NUM_CQE %d , actual cqe_active_num %d \n", + NUM_CQE, cqe_active_num); + } + + Cq_SQ_hndl = Cq_hndl; + Cq_RQ_hndl = Cq_hndl; + + // + // create QPs + // + for(i=0; i < NUM_QPS; i++) { + QP_list[i].pd_hndl = Pd_hndl; + QP_list[i].hca_hndl = Hca_hndl; + // sq rq use the same Cq_hndl + QP_list[i].sq_cq_hndl = Cq_hndl; + QP_list[i].rq_cq_hndl = Cq_hndl; + vstat = create_qp(&QP_list[i], i); + if (vstat != VAPI_OK) { + CERROR("Failed creating a QP %d %s\n",i, VAPI_strerror(vstat)); + return(vstat); + } + } + + // + // record HCA data + // + + Hca_data.hca_hndl = Hca_hndl; // HCA handle + Hca_data.pd_hndl = Pd_hndl; // protection domain + Hca_data.port = 1; // port number + Hca_data.num_qp = NUM_QPS; // number of qp used + + for(i=0; i < NUM_QPS; i++) { + Hca_data.qp_ptr[i] = &QP_list[i]; // point to QP_list + } + + Hca_data.num_cq = NUM_CQ; // number of cq used + Hca_data.cq_hndl = Cq_hndl; // + Hca_data.sq_cq_hndl = Cq_SQ_hndl; // + Hca_data.rq_cq_hndl = Cq_RQ_hndl; // + Hca_data.kib_data = kib_data; // + Hca_data.slid = QP_list[0].slid;// + + // prepare L_QP_data + +#ifdef USE_SHARED_MEMORY_AND_SOCKET + + /* + * + use a shared-memory between a user thread and a kernel thread + * for HCA's data exchange on the same node + * + use socket in user mode to exhange HCA's data with a remote node + */ + + + R_QP_data.opcode = SEND_QP_INFO; + R_QP_data.length = sizeof(L_QP_data); + + for(i=0; i < NUM_QPS; i++) { + // my slid will be used in a remote node as dlid + R_QP_data.dlid[i] = QP_list[i].slid; + // my qp_num will be used in remode node as remote_qp_number + // RC is used here so we need dlid and rqp_num + R_QP_data.rqp_num[i] = QP_list[i].qp_num ; + } + + // create a kernel thread for exchanging HCA's data + // R_QP_data will be exchanged with a remoe node + + kernel_thread(k_server_thread, &R_QP_data, 0); // + // check if the HCA'data have been updated by kernel_thread + // loop until the HCA's data is updated + // make sure that uagent is running + + // QP info is exchanged with a remote node + while (1) { + schedule_timeout(1000); + if(R_QP_data.opcode == RECV_QP_INFO) { + CDEBUG(D_NET, "HCA's data is being updated\n"); + break; + } + } + +#endif + +#ifdef USE_SHARED_MEMORY_AND_MULTICAST + + /* + * + use a shared-memory between a user thread and a kernel thread + * for HCA's data exchange on the same node + * + use Infinoband UR/multicast in user mode to exhange HCA's data with i + * a remote node + */ + + // use CM, opemSM + +#endif + + // + for(i=0; i < NUM_QPS; i++) { + qp = (QP_info *) &QP_list[i]; + QP_list[i].rqp_num = R_QP_data.rqp_num[i]; // remoter qp number + QP_list[i].dlid = R_QP_data.dlid[i]; // remote dlid + } + + // already have remote_qp_num adn dlid information + // initialize QP to RTR/RTS state + // + for(i=0; i < NUM_QPS; i++) { + vstat = init_qp_RC(&QP_list[i], i); + if (vstat != VAPI_OK) { + CERROR("Failed change a QP %d to RTS state%s\n", + i,VAPI_strerror(vstat)); + return(vstat); + } + } + + // post receiving buffer before any send happened + + Num_posted_recv_buf = post_recv_bufs( (VAPI_wr_id_t ) START_RECV_WRQ_ID); + + // for irregular completion event or some unexpected failure event + vstat = IB_Set_Async_Event_Handler(Hca_data, &kibnal_data); + if (vstat != VAPI_OK) { + CERROR("IB_Set_Async_Event_Handler failed: %d\n", vstat); + return vstat; + } + + + CDEBUG(D_PORTALS, 
"IBNAL- done with IB_Open_HCA\n"); + + for(i=0; i < NUM_MBUF; i++) { + spin_lock_init(&MSB_mutex[i]); + } + + return(VAPI_OK); + +} + + +/* + Function: IB_Set_Event_Handler() + + IN Hca_info hca_data + IN kibnal_data_t *kib_data -- private data + OUT NONE + + return: VAPI_OK - success + else - fail + +*/ + +VAPI_ret_t +IB_Set_Event_Handler(HCA_info hca_data, kibnal_data_t *kib_data) +{ + VAPI_ret_t vstat; + EVAPI_compl_handler_hndl_t comp_handler_hndl; + + // register CQE_Event_Hnadler + // VAPI function + vstat = VAPI_set_comp_event_handler(hca_data.hca_hndl, + CQE_event_handler, + &hca_data); + + /* + or use extended VAPI function + vstat = EVAPI_set_comp_eventh(hca_data.hca_hndl, + hca_data.cq_hndl, + CQE_event_handler, + &hca_data, + &comp_handler_hndl + ); + */ + + if (vstat != VAPI_OK) { + CERROR("IB_Set_Event_Handler: failed EVAPI_set_comp_eventh for" + " HCA ID = %s (%s).\n", HCA_ID, VAPI_strerror(vstat)); + return vstat; + } + + // issue a request for completion ievent notification + vstat = VAPI_req_comp_notif(hca_data.hca_hndl, + hca_data.cq_hndl, + VAPI_NEXT_COMP); + + if (vstat != VAPI_OK) { + CERROR("IB_Set_Event_Handler: failed VAPI_req_comp_notif for HCA ID" + " = %s (%s).\n", HCA_ID, VAPI_strerror(vstat)); + } + + return vstat; +} + + + +/* + Function: IB_Set_Async_Event_Handler() + + IN HCA_info hca_data + IN kibnal_data_t *kib_data -- private data + OUT NONE + + return: VAPI_OK - success + else - fail + +*/ + + +VAPI_ret_t +IB_Set_Async_Event_Handler(HCA_info hca_data, kibnal_data_t *kib_data) +{ + VAPI_ret_t vstat; + + // + // register an asynchronous event handler for this HCA + // + + vstat= VAPI_set_async_event_handler(hca_data.hca_hndl, + async_event_handler, + kib_data); + + if (vstat != VAPI_OK) { + CERROR("IB_Set_Async_Event_Handler: failed VAPI_set_async_comp_event_handler" + " for HCA ID = %s (%s).\n", HCA_ID, VAPI_strerror(vstat)); + } + + return vstat; +} + +// +// IB_Close_HCA +// close this Infiniband HCA interface +// release allocated resources to system +// +VAPI_ret_t +IB_Close_HCA(void ) +{ + + VAPI_ret_t vstat; + int ok = 1; + int i; + + /* Destroy QP */ + CDEBUG(D_PORTALS, "Destroying QP\n"); + + for(i=0; i < NUM_QPS; i++) { + vstat = VAPI_destroy_qp(QP_list[i].hca_hndl, QP_list[i].qp_hndl); + if (vstat != VAPI_OK) { + CERROR("Failed destroying QP %d. %s\n", i, VAPI_strerror(vstat)); + ok = 0; + } + } + + if (ok) { + /* Destroy CQ */ + CDEBUG(D_PORTALS, "Destroying CQ\n"); + for(i=0; i < NUM_QPS; i++) { + // send_cq adn receive_cq are shared the same CQ + // so only destroy one of them + vstat = VAPI_destroy_cq(QP_list[i].hca_hndl, QP_list[i].sq_cq_hndl); + if (vstat != VAPI_OK) { + CERROR("Failed destroying CQ %d. %s\n", i, VAPI_strerror(vstat)); + ok = 0; + } + } + } + + if (ok) { + /* Destroy Memory Region */ + CDEBUG(D_PORTALS, "Deregistering MR\n"); + for(i=0; i < NUM_QPS; i++) { + vstat = deleteMemRegion(&QP_list[i], i); + if (vstat != VAPI_OK) { + CERROR("Failed deregister mem reg %d. %s\n",i, VAPI_strerror(vstat)); + ok = 0; + break; + } + } + } + + if (ok) { + // finally + /* Close HCA */ + CDEBUG(D_PORTALS, "Closing HCA\n"); + vstat = VAPI_close_hca(Hca_hndl); + if (vstat != VAPI_OK) { + CERROR("Failed to close HCA. 
%s\n", VAPI_strerror(vstat)); + ok = 0; + } + } + + CDEBUG(D_PORTALS, "IBNAL- Done with closing HCA \n"); + + return vstat; +} + + +VAPI_ret_t +createMemRegion(VAPI_hca_hndl_t hca_hndl, + VAPI_pd_hndl_t pd_hndl) +{ + VAPI_ret_t vstat; + VAPI_mrw_t mrw; + VAPI_mrw_t rep_mr; + VAPI_mr_hndl_t rep_mr_hndl; + int buf_size; + char *bufptr; + int i; + + // send registered memory region + for(i=0; i < NUM_ENTRY; i++) { + MSbuf_list[i].buf_size = KB_32; + PORTAL_ALLOC(bufptr, MSbuf_list[i].buf_size); + if(bufptr == NULL) { + CDEBUG(D_MALLOC,"Failed to malloc a block of send memory, qix %d size %d\n", + i, MSbuf_list[i].buf_size); + CERROR("Failed to malloc a block of send memory, qix %d size %d\n", + i, MSbuf_list[i].buf_size); + return(VAPI_ENOMEM); + } + + mrw.type = VAPI_MR; + mrw.pd_hndl= pd_hndl; + mrw.start = MSbuf_list[i].buf_addr = (VAPI_virt_addr_t)(MT_virt_addr_t) bufptr; + mrw.size = MSbuf_list[i].buf_size; + mrw.acl = VAPI_EN_LOCAL_WRITE | + VAPI_EN_REMOTE_WRITE | + VAPI_EN_REMOTE_READ; + + // register send memory region + vstat = VAPI_register_mr(hca_hndl, + &mrw, + &rep_mr_hndl, + &rep_mr); + + // this memory region is going to be reused until deregister is called + if(vstat != VAPI_OK) { + CERROR("Failed registering a mem region qix %d Addr=%p, Len=%d. %s\n", + i, mrw.start, mrw.size, VAPI_strerror(vstat)); + return(vstat); + } + + MSbuf_list[i].mr = rep_mr; + MSbuf_list[i].mr_hndl = rep_mr_hndl; + MSbuf_list[i].bufptr = bufptr; + MSbuf_list[i].buf_addr = rep_mr.start; + MSbuf_list[i].status = BUF_REGISTERED; + MSbuf_list[i].ref_count = 0; + MSbuf_list[i].buf_type = REG_BUF; + MSbuf_list[i].raddr = 0x0; + MSbuf_list[i].rkey = 0x0; + } + + // RDAM buffer is not reserved for RDAM WRITE/READ + + for(i=NUM_ENTRY; i< NUM_MBUF; i++) { + MSbuf_list[i].status = BUF_UNREGISTERED; + MSbuf_list[i].buf_type = RDMA_BUF; + } + + + // recv registered memory region + for(i=0; i < NUM_ENTRY; i++) { + MRbuf_list[i].buf_size = KB_32; + PORTAL_ALLOC(bufptr, MRbuf_list[i].buf_size); + + if(bufptr == NULL) { + CDEBUG(D_MALLOC, "Failed to malloc a block of send memory, qix %d size %d\n", + i, MRbuf_list[i].buf_size); + return(VAPI_ENOMEM); + } + + mrw.type = VAPI_MR; + mrw.pd_hndl= pd_hndl; + mrw.start = (VAPI_virt_addr_t)(MT_virt_addr_t) bufptr; + mrw.size = MRbuf_list[i].buf_size; + mrw.acl = VAPI_EN_LOCAL_WRITE | + VAPI_EN_REMOTE_WRITE | + VAPI_EN_REMOTE_READ; + + // register send memory region + vstat = VAPI_register_mr(hca_hndl, + &mrw, + &rep_mr_hndl, + &rep_mr); + + // this memory region is going to be reused until deregister is called + if(vstat != VAPI_OK) { + CERROR("Failed registering a mem region qix %d Addr=%p, Len=%d. 
%s\n", + i, mrw.start, mrw.size, VAPI_strerror(vstat)); + return(vstat); + } + + MRbuf_list[i].mr = rep_mr; + MRbuf_list[i].mr_hndl = rep_mr_hndl; + MRbuf_list[i].bufptr = bufptr; + MRbuf_list[i].buf_addr = rep_mr.start; + MRbuf_list[i].status = BUF_REGISTERED; + MRbuf_list[i].ref_count = 0; + MRbuf_list[i].buf_type = REG_BUF; + MRbuf_list[i].raddr = 0x0; + MRbuf_list[i].rkey = rep_mr.r_key; + MRbuf_list[i].lkey = rep_mr.l_key; + + } + + // keep extra information for a qp + for(i=0; i < NUM_QPS; i++) { + QP_list[i].mr_hndl = MSbuf_list[i].mr_hndl; + QP_list[i].mr = MSbuf_list[i].mr; + QP_list[i].bufptr = MSbuf_list[i].bufptr; + QP_list[i].buf_addr = MSbuf_list[i].buf_addr; + QP_list[i].buf_size = MSbuf_list[i].buf_size; + QP_list[i].raddr = MSbuf_list[i].raddr; + QP_list[i].rkey = MSbuf_list[i].rkey; + QP_list[i].lkey = MSbuf_list[i].lkey; + } + + CDEBUG(D_PORTALS, "IBNAL- done VAPI_ret_t createMemRegion \n"); + + return vstat; + +} /* createMemRegion */ + + + +VAPI_ret_t +deleteMemRegion(QP_info *qp, int qix) +{ + VAPI_ret_t vstat; + + // + // free send memory assocaited with this memory region + // + PORTAL_FREE(MSbuf_list[qix].bufptr, MSbuf_list[qix].buf_size); + + // de-register it + vstat = VAPI_deregister_mr(qp->hca_hndl, MSbuf_list[qix].mr_hndl); + + if(vstat != VAPI_OK) { + CERROR("Failed deregistering a send mem region qix %d %s\n", + qix, VAPI_strerror(vstat)); + return vstat; + } + + // + // free recv memory assocaited with this memory region + // + PORTAL_FREE(MRbuf_list[qix].bufptr, MRbuf_list[qix].buf_size); + + // de-register it + vstat = VAPI_deregister_mr(qp->hca_hndl, MRbuf_list[qix].mr_hndl); + + if(vstat != VAPI_OK) { + CERROR("Failed deregistering a recv mem region qix %d %s\n", + qix, VAPI_strerror(vstat)); + return vstat; + } + + return vstat; +} + + +// +// polling based event handling +// + a daemon process +// + poll the CQ and check what is in the CQ +// + process incoming CQ event +// + +// + + +RDMA_Info_Exchange Rdma_info; +int Cts_Message_arrived = NO; + +void k_recv_thread(HCA_info *hca_data) +{ + VAPI_ret_t vstat; + VAPI_wc_desc_t comp_desc; + unsigned long polling_count = 0; + u_int32_t timeout_usec; + unsigned int priority = 100; + unsigned int length; + VAPI_wr_id_t wrq_id; + u_int32_t transferred_data_length; /* Num. 
of bytes transferred */ + void *bufdata; + VAPI_virt_addr_t bufaddr; + unsigned long buf_size = 0; + QP_info *qp; // point to QP_list + + kportal_daemonize("k_recv_thread"); // make it as a daemon process + + // tuning variable + timeout_usec = 100; // how is the impact on the performance + + // send Q and receive Q are using the same CQ + // so only poll one CQ for both operations + + CDEBUG(D_NET, "IBNAL- enter kibnal_recv_thread\n"); + CDEBUG(D_NET, "hca_hndl = 0X%x, cq_hndl=0X%x\n", + hca_data->hca_hndl,hca_data->cq_hndl); + + qp = hca_data->qp_ptr; + if(qp == NULL) { + CDEBUG(D_NET, "in recv_thread qp is NULL\n"); + CDEBUG(D_NET, "Exit from recv_thread qp is NULL\n"); + return; + } + else { + CDEBUG(D_NET, "in recv_thread qp is 0X%X\n", qp); + } + + CDEBUG(D_NET, "kibnal_recv_thread - enter event driver polling loop\n"); + + // + // use event driver + // + + + + while(1) { + polling_count++; + + // + // send Q and receive Q are using the same CQ + // so only poll one CQ for both operations + // + + vstat = VAPI_poll_cq(hca_data->hca_hndl,hca_data->cq_hndl, &comp_desc); + + if (vstat == VAPI_CQ_EMPTY) { + // there is no event in CQE + continue; + } + else { + if (vstat != (VAPI_OK)) { + CERROR("error while polling completion queuei vstat %d \n", vstat); + return; + } + } + + // process the complete event + switch(comp_desc.opcode) { + case VAPI_CQE_SQ_SEND_DATA: + // about the Send Q ,POST SEND completion + // who needs this information + // get wrq_id + // mark MSbuf_list[wr_id].status = BUF_REGISTERED + + wrq_id = comp_desc.id; + + if(RDMA_OP_ID < wrq_id) { + // this RDMA message id, adjust it to the right entry + wrq_id = wrq_id - RDMA_OP_ID; + vstat = VAPI_deregister_mr(qp->hca_hndl, Local_rdma_info.send_rdma_mr_hndl); + } + + if(vstat != VAPI_OK) { + CERROR("VAPI_CQE_SQ_SEND_DATA: Failed deregistering a RDMAi recv" " mem region %s\n", VAPI_strerror(vstat)); + } + + if((RDMA_CTS_ID <= wrq_id) && (RDMA_OP_ID < wrq_id)) { + // RTS or CTS send complete, release send buffer + if(wrq_id >= RDMA_RTS_ID) + wrq_id = wrq_id - RDMA_RTS_ID; + else + wrq_id = wrq_id - RDMA_CTS_ID; + } + + spin_lock(&MSB_mutex[(int) wrq_id]); + MRbuf_list[wrq_id].status = BUF_REGISTERED; + spin_unlock(&MSB_mutex[(int) wrq_id]); + + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_SEND_DATA\n"); + break; + + case VAPI_CQE_SQ_RDMA_WRITE: + // about the Send Q, RDMA write completion + // who needs this information + // data is successfully write from pource to destionation + + // get wr_id + // mark MSbuf_list[wr_id].status = BUF_REGISTERED + // de-register rdma buffer + // + + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_RDMA_WRITE\n"); + break; + + case VAPI_CQE_SQ_RDMA_READ: + // about the Send Q + // RDMA read completion + // who needs this information + // data is successfully read from destionation to source + CDEBUG(D_NET, "CQE opcode- VAPI_CQE_SQ_RDMA_READ\n"); + break; + + case VAPI_CQE_SQ_COMP_SWAP: + // about the Send Q + // RDMA write completion + // who needs this information + + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_COMP_SWAP\n"); + break; + + case VAPI_CQE_SQ_FETCH_ADD: + // about the Send Q + // RDMA write completion + // who needs this information + + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_FETCH_ADD\n"); + break; + + case VAPI_CQE_SQ_BIND_MRW: + // about the Send Q + // RDMA write completion + // who needs this information + + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_BIND_MRW\n"); + break; + + case VAPI_CQE_RQ_SEND_DATA: + // about the Receive Q + // process the incoming data and + // forward it to ..... 
+ // a completion recevie event is arriving at CQ + // issue a recevie to get this arriving data out from CQ + // pass the receiving data for further processing + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_RQ_SEND_DATA\n"); + wrq_id = comp_desc.id ; + transferred_data_length = comp_desc.byte_len; + + if((wrq_id >= RDMA_CTS_ID) && (wrq_id < RDMA_OP_ID)) { + // this is RTS/CTS message + // process it locally and don't pass it to portals layer + // adjust wrq_id to get the right entry in MRbfu_list + + if(wrq_id >= RDMA_RTS_ID) + wrq_id = wrq_id - RDMA_RTS_ID; + else + wrq_id = wrq_id - RDMA_CTS_ID; + + bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t) MRbuf_list[wrq_id].buf_addr; + MRbuf_list[wrq_id].status = BUF_INUSE; + memcpy(&Rdma_info, &bufaddr, sizeof(RDMA_Info_Exchange)); + + if(Ready_To_send == Rdma_info.opcode) + // an RTS request message from remote node + // prepare local RDMA buffer and send local rdma info to + // remote node + CTS_handshaking_protocol(&Rdma_info); + else + if((Clear_To_send == Rdma_info.opcode) && + (RDMA_BUFFER_RESERVED == Rdma_info.flag)) + Cts_Message_arrived = YES; + else + if(RDMA_BUFFER_UNAVAILABLE == Rdma_info.flag) + CERROR("RDMA operation abort-RDMA_BUFFER_UNAVAILABLE\n"); + } + else { + // + // this is an incoming mesage for portals layer + // move to PORTALS layer for further processing + // + + bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t) + MRbuf_list[wrq_id].buf_addr; + + MRbuf_list[wrq_id].status = BUF_INUSE; + transferred_data_length = comp_desc.byte_len; + + kibnal_rx(hca_data->kib_data, + bufaddr, + transferred_data_length, + MRbuf_list[wrq_id].buf_size, + priority); + } + + // repost this receiving buffer and makr it at BUF_REGISTERED + + vstat = repost_recv_buf(qp, wrq_id); + if(vstat != (VAPI_OK)) { + CERROR("error while polling completion queue\n"); + } + else { + MRbuf_list[wrq_id].status = BUF_REGISTERED; + } + + break; + + case VAPI_CQE_RQ_RDMA_WITH_IMM: + // about the Receive Q + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_RQ_RDMA_WITH_IMM\n"); + + wrq_id = comp_desc.id ; + transferred_data_length = comp_desc.byte_len; + + if(wrq_id == RDMA_OP_ID) { + // this is RDAM op , locate the RDAM memory buffer address + + bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t) Local_rdma_info.raddr; + + transferred_data_length = comp_desc.byte_len; + + kibnal_rx(hca_data->kib_data, + bufaddr, + transferred_data_length, + Local_rdma_info.buf_length, + priority); + + // de-regiser this RDAM receiving memory buffer + // too early ?? test & check + vstat = VAPI_deregister_mr(qp->hca_hndl, Local_rdma_info.recv_rdma_mr_hndl); + if(vstat != VAPI_OK) { + CERROR("VAPI_CQE_RQ_RDMA_WITH_IMM: Failed deregistering a RDMA" + " recv mem region %s\n", VAPI_strerror(vstat)); + } + } + + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_RQ_RDMA_WITH_IMM\n"); + break; + + case VAPI_CQE_INVAL_OPCODE: + // + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_INVAL_OPCODE\n"); + break; + + default : + CDEBUG(D_NET, "CQE opcode-unknown opcode\n"); + break; + } // switch + + schedule_timeout(RECEIVING_THREAD_TIMEOUT);//how often do we need to poll CQ + + }// receiving while loop + + +} + + +void CQE_event_handler(VAPI_hca_hndl_t hca_hndl, + VAPI_cq_hndl_t cq_hndl, + void *private) +{ + VAPI_ret_t vstat; + VAPI_wc_desc_t comp_desc; + unsigned long polling_count = 0; + u_int32_t timeout_usec; + unsigned int priority = 100; + unsigned int length; + VAPI_wr_id_t wrq_id; + u_int32_t transferred_data_length; /* Num. 
of bytes transferred */ + void *bufdata; + VAPI_virt_addr_t bufaddr; + unsigned long buf_size = 0; + QP_info *qp; // point to QP_list + HCA_info *hca_data; + + // send Q and receive Q are using the same CQ + // so only poll one CQ for both operations + + CDEBUG(D_NET, "IBNAL- enter CQE_event_handler\n"); + printk("IBNAL- enter CQE_event_handler\n"); + + hca_data = (HCA_info *) private; + + // + // use event driven + // + + + vstat = VAPI_poll_cq(hca_data->hca_hndl,hca_data->cq_hndl, &comp_desc); + + if (vstat == VAPI_CQ_EMPTY) { + CDEBUG(D_NET, "CQE_event_handler: there is no event in CQE, how could" + " this " "happened \n"); + printk("CQE_event_handler: there is no event in CQE, how could" + " this " "happened \n"); + + } + else { + if (vstat != (VAPI_OK)) { + CDEBUG(D_NET, "error while polling completion queue vstat %d - %s\n", + vstat, VAPI_strerror(vstat)); + printk("error while polling completion queue vstat %d - %s\n", + vstat, VAPI_strerror(vstat)); + return; + } + } + + // process the complete event + switch(comp_desc.opcode) { + case VAPI_CQE_SQ_SEND_DATA: + // about the Send Q ,POST SEND completion + // who needs this information + // get wrq_id + // mark MSbuf_list[wr_id].status = BUF_REGISTERED + + wrq_id = comp_desc.id; + +#ifdef IBNAL_SELF_TESTING + if(wrq_id == SEND_RECV_TEST_ID) { + printk("IBNAL_SELF_TESTING - VAPI_CQE_SQ_SEND_DATA \n"); + } +#else + if(RDMA_OP_ID < wrq_id) { + // this RDMA message id, adjust it to the right entry + wrq_id = wrq_id - RDMA_OP_ID; + vstat = VAPI_deregister_mr(qp->hca_hndl, + Local_rdma_info.send_rdma_mr_hndl); + } + + if(vstat != VAPI_OK) { + CERROR(" VAPI_CQE_SQ_SEND_DATA: Failed deregistering a RDMA" + " recv mem region %s\n", VAPI_strerror(vstat)); + } + + if((RDMA_CTS_ID <= wrq_id) && (RDMA_OP_ID < wrq_id)) { + // RTS or CTS send complete, release send buffer + if(wrq_id >= RDMA_RTS_ID) + wrq_id = wrq_id - RDMA_RTS_ID; + else + wrq_id = wrq_id - RDMA_CTS_ID; + } + + spin_lock(&MSB_mutex[(int) wrq_id]); + MRbuf_list[wrq_id].status = BUF_REGISTERED; + spin_unlock(&MSB_mutex[(int) wrq_id]); +#endif + + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_SEND_DATA\n"); + + break; + + case VAPI_CQE_SQ_RDMA_WRITE: + // about the Send Q, RDMA write completion + // who needs this information + // data is successfully write from pource to destionation + + // get wr_id + // mark MSbuf_list[wr_id].status = BUF_REGISTERED + // de-register rdma buffer + // + + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_RDMA_WRITE\n"); + break; + + case VAPI_CQE_SQ_RDMA_READ: + // about the Send Q + // RDMA read completion + // who needs this information + // data is successfully read from destionation to source + CDEBUG(D_NET, "CQE opcode- VAPI_CQE_SQ_RDMA_READ\n"); + break; + + case VAPI_CQE_SQ_COMP_SWAP: + // about the Send Q + // RDMA write completion + // who needs this information + + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_COMP_SWAP\n"); + break; + + case VAPI_CQE_SQ_FETCH_ADD: + // about the Send Q + // RDMA write completion + // who needs this information + + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_FETCH_ADD\n"); + break; + + case VAPI_CQE_SQ_BIND_MRW: + // about the Send Q + // RDMA write completion + // who needs this information + + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_BIND_MRW\n"); + break; + + case VAPI_CQE_RQ_SEND_DATA: + // about the Receive Q + // process the incoming data and + // forward it to ..... 
+ // a completion recevie event is arriving at CQ + // issue a recevie to get this arriving data out from CQ + // pass the receiving data for further processing + + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_RQ_SEND_DATA\n"); + + wrq_id = comp_desc.id ; + +#ifdef IBNAL_SELF_TESTING + + char rbuf[KB_32]; + int i; + + if(wrq_id == SEND_RECV_TEST_ID) { + printk("IBNAL_SELF_TESTING - VAPI_CQE_RQ_SEND_DATA\n"); + } + + bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t) + MRbuf_list[ SEND_RECV_TEST_BUF_ID].buf_addr; + MRbuf_list[SEND_RECV_TEST_BUF_ID].status = BUF_INUSE; + memcpy(&rbuf, &bufaddr, KB_32); + + + for(i=0; i < 16; i++) + printk("rbuf[%d]=%c, ", rbuf[i]); + printk("\n"); + + // repost this receiving buffer and makr it at BUF_REGISTERED + vstat = repost_recv_buf(qp,SEND_RECV_TEST_BUF_ID); + if(vstat != (VAPI_OK)) { + printk("error while polling completion queue\n"); + } + else { + MRbuf_list[SEND_RECV_TEST_BUF_ID].status = BUF_REGISTERED; + } +#else + transferred_data_length = comp_desc.byte_len; + + if((wrq_id >= RDMA_CTS_ID) && (wrq_id < RDMA_OP_ID)) { + // this is RTS/CTS message + // process it locally and don't pass it to portals layer + // adjust wrq_id to get the right entry in MRbfu_list + + if(wrq_id >= RDMA_RTS_ID) + wrq_id = wrq_id - RDMA_RTS_ID; + else + wrq_id = wrq_id - RDMA_CTS_ID; + + bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t) + MRbuf_list[wrq_id].buf_addr; + MRbuf_list[wrq_id].status = BUF_INUSE; + memcpy(&Rdma_info, &bufaddr, sizeof(RDMA_Info_Exchange)); + + if(Ready_To_send == Rdma_info.opcode) + // an RTS request message from remote node + // prepare local RDMA buffer and send local rdma info to + // remote node + CTS_handshaking_protocol(&Rdma_info); + else + if((Clear_To_send == Rdma_info.opcode) && + (RDMA_BUFFER_RESERVED == Rdma_info.flag)) + Cts_Message_arrived = YES; + else + if(RDMA_BUFFER_UNAVAILABLE == Rdma_info.flag) + CERROR("RDMA operation abort-RDMA_BUFFER_UNAVAILABLE\n"); + } + else { + // + // this is an incoming mesage for portals layer + // move to PORTALS layer for further processing + // + + bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t) + MRbuf_list[wrq_id].buf_addr; + + MRbuf_list[wrq_id].status = BUF_INUSE; + transferred_data_length = comp_desc.byte_len; + + kibnal_rx(hca_data->kib_data, + bufaddr, + transferred_data_length, + MRbuf_list[wrq_id].buf_size, + priority); + } + + // repost this receiving buffer and makr it at BUF_REGISTERED + vstat = repost_recv_buf(qp, wrq_id); + if(vstat != (VAPI_OK)) { + CERROR("error while polling completion queue\n"); + } + else { + MRbuf_list[wrq_id].status = BUF_REGISTERED; + } +#endif + + break; + + case VAPI_CQE_RQ_RDMA_WITH_IMM: + // about the Receive Q + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_RQ_RDMA_WITH_IMM\n"); + + wrq_id = comp_desc.id ; + transferred_data_length = comp_desc.byte_len; + + if(wrq_id == RDMA_OP_ID) { + // this is RDAM op , locate the RDAM memory buffer address + + bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t) Local_rdma_info.raddr; + + transferred_data_length = comp_desc.byte_len; + + kibnal_rx(hca_data->kib_data, + bufaddr, + transferred_data_length, + Local_rdma_info.buf_length, + priority); + + // de-regiser this RDAM receiving memory buffer + // too early ?? 
+        case VAPI_CQE_RQ_RDMA_WITH_IMM:
+                // receive Q: an RDMA write with immediate data has landed
+                CDEBUG(D_NET, "CQE opcode-VAPI_CQE_RQ_RDMA_WITH_IMM\n");
+
+                wrq_id = comp_desc.id;
+                transferred_data_length = comp_desc.byte_len;
+
+                if(wrq_id == RDMA_OP_ID) {
+                        // this is an RDMA op: locate the RDMA memory buffer address
+                        bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t) Local_rdma_info.raddr;
+
+                        kibnal_rx(hca_data->kib_data,
+                                  bufaddr,
+                                  transferred_data_length,
+                                  Local_rdma_info.buf_length,
+                                  priority);
+
+                        // deregister this RDMA receiving memory buffer
+                        // (possibly too early - test and check)
+                        vstat = VAPI_deregister_mr(hca_data->hca_hndl,
+                                                   Local_rdma_info.recv_rdma_mr_hndl);
+                        if(vstat != VAPI_OK) {
+                                CERROR("VAPI_CQE_RQ_RDMA_WITH_IMM: Failed deregistering a RDMA"
+                                       " recv mem region %s\n", VAPI_strerror(vstat));
+                        }
+                }
+
+                break;
+
+        case VAPI_CQE_INVAL_OPCODE:
+                CDEBUG(D_NET, "CQE opcode-VAPI_CQE_INVAL_OPCODE\n");
+                break;
+
+        default:
+                CDEBUG(D_NET, "CQE opcode-unknown opcode\n");
+                break;
+        } // switch
+
+        // request notification for the next completion event
+        vstat = VAPI_req_comp_notif(hca_data->hca_hndl,
+                                    hca_data->cq_hndl,
+                                    VAPI_NEXT_COMP);
+
+        if(vstat != VAPI_OK) {
+                CERROR("VAPI_req_comp_notif: Failed %s\n", VAPI_strerror(vstat));
+        }
+
+        return; // end of event handler
+}
+
+
+int
+kibnal_cmd(struct portal_ioctl_data * data, void * private)
+{
+        // no ioctl commands are handled by this NAL yet
+        CDEBUG(D_NET, "kibnal_cmd \n");
+
+        return YES;
+}
+
+
+void ibnal_send_recv_self_testing(int *my_role)
+{
+        VAPI_ret_t           vstat;
+        VAPI_sr_desc_t       sr_desc;
+        VAPI_sg_lst_entry_t  sr_sg;
+        QP_info             *qp;
+        VAPI_wr_id_t         send_id;
+        int                  buf_id;
+        char                 sbuf[KB_32];
+        int                  buf_length = KB_32;
+
+        // could be made a daemon thread:
+        // kportal_daemonize("ibnal_send_recv_self_testing");
+
+        printk("My role is 0x%X\n", *my_role);
+
+        if(*my_role == TEST_SEND_MESSAGE) {
+                printk("Enter ibnal_send_recv_self_testing\n");
+
+                memset(sbuf, 'a', KB_32);
+
+                send_id = SEND_RECV_TEST_ID;
+                buf_id  = SEND_RECV_TEST_BUF_ID;
+
+                qp = &QP_list[buf_id];
+
+                sr_desc.opcode    = VAPI_SEND;
+                sr_desc.comp_type = VAPI_SIGNALED;
+                sr_desc.id        = send_id;
+
+                // scatter/gather info
+                sr_sg.len  = KB_32;
+                sr_sg.lkey = MSbuf_list[buf_id].mr.l_key;   // use send MR
+                sr_sg.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MSbuf_list[buf_id].buf_addr;
+
+                // copy data into the registered send buffer itself,
+                // not into the addr field of the sg entry
+                memcpy(MSbuf_list[buf_id].bufptr, sbuf, buf_length);
+
+                sr_desc.sg_lst_p   = &sr_sg;
+                sr_desc.sg_lst_len = 1;     // only 1 entry is used
+                sr_desc.fence      = TRUE;
+                sr_desc.set_se     = FALSE;
+
+                /*
+                // call VAPI_post_sr to send out this data
+                vstat = VAPI_post_sr(qp->hca_hndl, qp->qp_hndl, &sr_desc);
+
+                if (vstat != VAPI_OK) {
+                        printk("VAPI_post_sr failed (%s).\n", VAPI_strerror(vstat));
+                }
+                */
+        }
+        else {
+                printk("I am a receiver and doing nothing here\n");
+        }
+
+        printk("ibnal_send_recv_self_testing thread exit \n");
+
+        return;
+}
+
+
+//
+// ibnal initialization process:
+//
+// 1. Bring up the Infiniband network interface
+// 2. 
Initialize a PORTALS nal interface +// +// +int __init +kibnal_initialize(void) +{ + int rc; + int ntok; + unsigned long sizemask; + unsigned int nid; + VAPI_ret_t vstat; + + + portals_debug_set_level(IBNAL_DEBUG_LEVEL_1); + + CDEBUG(D_MALLOC, "start kmem %d\n", atomic_read (&portal_kmemory)); + + CDEBUG(D_PORTALS, "kibnal_initialize: Enter kibnal_initialize\n"); + + // set api functional pointers + kibnal_api.forward = kibnal_forward; + kibnal_api.shutdown = kibnal_shutdown; + kibnal_api.yield = kibnal_yield; + kibnal_api.validate = NULL; /* our api validate is a NOOP */ + kibnal_api.lock = kibnal_lock; + kibnal_api.unlock = kibnal_unlock; + kibnal_api.nal_data = &kibnal_data; // this is so called private data + kibnal_api.refct = 1; + kibnal_api.timeout = NULL; + kibnal_lib.nal_data = &kibnal_data; + + memset(&kibnal_data, 0, sizeof(kibnal_data)); + + // initialize kib_list list data structure + INIT_LIST_HEAD(&kibnal_data.kib_list); + + kibnal_data.kib_cb = &kibnal_lib; + + spin_lock_init(&kibnal_data.kib_dispatch_lock); + + + // + // bring up the IB inter-connect network interface + // setup QP, CQ + // + vstat = IB_Open_HCA(&kibnal_data); + + if(vstat != VAPI_OK) { + CERROR("kibnal_initialize: IB_Open_HCA failed: %d- %s\n", + vstat, VAPI_strerror(vstat)); + + printk("kibnal_initialize: IB_Open_HCA failed: %d- %s\n", + vstat, VAPI_strerror(vstat)); + return NO; + } + + kibnal_data.kib_nid = (__u64 )Hca_hndl;//convert Hca_hndl to 64-bit format + kibnal_data.kib_init = 1; + + CDEBUG(D_NET, " kibnal_data.kib_nid 0x%x%x\n", kibnal_data.kib_nid); + printk(" kibnal_data.kib_nid 0x%x%x\n", kibnal_data.kib_nid); + + /* Network interface ready to initialise */ + // get an entery in the PORTALS table for this IB protocol + + CDEBUG(D_PORTALS,"Call PtlNIInit to register this Infiniband Interface\n"); + printk("Call PtlNIInit to register this Infiniband Interface\n"); + + rc = PtlNIInit(kibnal_init, 32, 4, 0, &kibnal_ni); + + if(rc != PTL_OK) { + CERROR("kibnal_initialize: PtlNIInit failed %d\n", rc); + printk("kibnal_initialize: PtlNIInit failed %d\n", rc); + kibnal_finalize(); + return (-ENOMEM); + } + + CDEBUG(D_PORTALS,"kibnal_initialize: PtlNIInit DONE\n"); + printk("kibnal_initialize: PtlNIInit DONE\n"); + + + +#ifdef POLL_BASED_CQE_HANDLING + // create a receiving thread: main loopa + // this is polling based mail loop + kernel_thread(k_recv_thread, &Hca_data, 0); +#endif + +#ifdef EVENT_BASED_CQE_HANDLING + // for completion event handling, this is event based CQE handling + vstat = IB_Set_Event_Handler(Hca_data, &kibnal_data); + + if (vstat != VAPI_OK) { + CERROR("IB_Set_Event_Handler failed: %d - %s \n", + vstat, VAPI_strerror(vstat)); + return vstat; + } + + CDEBUG(D_PORTALS,"IB_Set_Event_Handler Done \n"); + printk("IB_Set_Event_Handler Done \n"); + +#endif + + PORTAL_SYMBOL_REGISTER(kibnal_ni); + +#ifdef IBNAL_SELF_TESTING + // + // test HCA send recv before normal event handling + // + int my_role; + my_role = TEST_SEND_MESSAGE; + + printk("my role is TEST_RECV_MESSAGE\n"); + + // kernel_thread(ibnal_send_recv_self_testing, &my_role, 0); + + ibnal_send_recv_self_testing(&my_role); + +#endif + + return 0; + +} + + + +MODULE_AUTHOR("Hsingbung(HB) Chen "); +MODULE_DESCRIPTION("Kernel Infiniband NAL v0.1"); +MODULE_LICENSE("GPL"); + +module_init (kibnal_initialize); +module_exit (kibnal_finalize); + +EXPORT_SYMBOL(kibnal_ni); + diff --git a/lnet/klnds/iblnd/ibnal.h b/lnet/klnds/iblnd/ibnal.h new file mode 100644 index 0000000..ff5aeb3 --- /dev/null +++ b/lnet/klnds/iblnd/ibnal.h @@ -0,0 
+1,564 @@ +#ifndef _IBNAL_H +#define _IBNAL_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DEBUG_SUBSYSTEM S_IBNAL + +#include +#include +#include + +// Infiniband VAPI/EVAPI header files +// Mellanox MT23108 VAPI +#include +#include +#include +#include + +// pick a port for this RDMA information exhange between two hosts +#define HOST_PORT 11211 +#define QUEUE_SIZE 1024 +#define HCA_PORT_1 1 +#define HCA_PORT_2 2 +#define DEBUG_SUBSYSTEM S_IBNAL + +#define START_SEND_WRQ_ID 0 +#define START_RECV_WRQ_ID 0 +#define START_RDMA_WRQ_ID 0 + +#define DEFAULT_PRIORITY 100 + +#define WAIT_FOT_R_RDMA_TIMEOUT 10000 +#define MAX_NUM_TRY 3000 + +#define MAX_NUM_POLL 300 +#define MAX_LOOP_COUNT 500 + +#define MAX_GID 32 +#define MCG_BUF_LENGTH 128 + +#define SHARED_SEGMENT_SIZE 0x10000 +#define HCA_EXCHANGE_SHM_KEY 999 // shared memory key for HCA data exchange + +// some internals opcodes for IB operations used in IBNAL +#define SEND_QP_INFO 0X00000001 +#define RECV_QP_INFO 0X00000010 + +// Mellanox InfiniHost MT23108 +// QP/CQ related information +// + +#define MTU_256 1 /* 1-256,2-512,3-1024,4-2048 */ +#define MTU_512 2 /* 1-256,2-512,3-1024,4-2048 */ +#define MTU_1024 3 /* 1-256,2-512,3-1024,4-2048 */ +#define MTU_2048 4 /* 1-256,2-512,3-1024,4-2048 */ + +// number of entries for each CQ and WQ +// how much do we need ? +#define NUM_CQE 1024 +#define NUM_WQE 1024 +#define MAX_OUT_SQ 64 +#define MAX_OUT_RQ 64 + +#define NUM_MBUF 256 +#define NUM_RDMA_RESERVED_ENTRY 128 +#define NUM_QPS 256 + +#define INVALID_WR_ID ((VAPI_wr_id_t) -1) + + +// for Vector IO +// scatter and gather +// Portals can support upto 64 IO-Vectors +// how much do we need ? 
+#define NUM_SGE 1 +#define NUM_SG 1 +#define NUM_CQ 1 + +#define ONE_KB 1024 +#define ONE_MB 1024 * ONE_KB +#define ONE_GB 1024 * ONE_MB + + +#define KB_4 1024 * 4 +#define KB_8 1024 * 8 +#define KB_16 1024 * 16 +#define KB_32 1024 * 32 +#define KB_64 1024 * 64 +#define KB_128 1024 * 128 +#define KB_256 1024 * 256 + +// 256 entry in registered buffer list +// small size message +#define Num_4_KB 64 +#define Num_8_KB 64 +#define Num_16_KB 40 +#define Num_32_KB 40 +#define Num_64_KB 40 +#define Num_128_KB 4 +#define Num_256_KB 4 + +#define SMALL_MSG_SIZE KB_32 + +#define MAX_MSG_SIZE ONE_MB * 512 + +// 128's 64KB bufer for send +// 128's 64KB bufer for recv +// used in RDAM operation only + +#define NUM_ENTRY 128 + +#define End_4_kb Num_4_KB +#define End_8_kb End_4_kb + Num_8_KB +#define End_16_kb End_8_kb + Num_16_KB +#define End_32_kb End_16_kb + Num_32_KB +#define End_64_kb End_32_kb + Num_64_KB +#define End_128_kb End_64_kb + Num_128_KB +#define End_256_kb End_128_kb+ Num_256_KB + + +#define SEND_BUF_SIZE KB_32 +#define RECV_BUF_SIZE SEND_BUF_SIZE + +// #define POLL_BASED_CQE_HANDLING 1 +#define EVENT_BASED_CQE_HANDLING 1 +#define IBNAL_SELF_TESTING 1 + +#ifdef IBNAL_SELF_TESTING +#undef IBNAL_SELF_TESTING +#endif + + +#define MSG_SIZE_SMALL 1 +#define MSG_SIZE_LARGE 2 + + + +// some defauly configuration values for early testing +#define DEFAULT_DLID 1 // default destination link ID +#define DEFAULT_QP_NUM 4 // default QP number +#define P_KEY 0xFFFF // do we need default value +#define PKEY_IX 0x0 // do we need default value +#define Q_KEY 0x012 // do we need default value +#define L_KEY 0x12345678 // do we need default value +#define R_KEY 0x87654321 // do we need default value +#define HCA_ID "InfiniHost0" // default +#define START_PSN 0 +#define START_SQ_PSN 0 +#define START_RQ_PSN 0 + + +#define __u_long_long unsigned long long + +#define IBNAL_DEBUG 1 + +#define USE_SHARED_MEMORY_AND_SOCKET 1 + +// operation type +#define TRY_SEND_ONLY 1 + +#define YES 1 +#define NO 0 + +// +// a common data structure for IB QP's operation +// each QP is associated with an QP_info structure +// +typedef struct QP_info +{ + VAPI_hca_hndl_t hca_hndl; // HCA handle + IB_port_t port; // port number + VAPI_qp_hndl_t qp_hndl; // QP's handle list + VAPI_qp_state_t qp_state; // QP's current state + VAPI_pd_hndl_t pd_hndl; // protection domain + VAPI_cq_hndl_t cq_hndl; // send-queue CQ's handle + VAPI_cq_hndl_t sq_cq_hndl; // send-queue CQ's handle + VAPI_cq_hndl_t rq_cq_hndl; // receive-queue CQ's handle + VAPI_ud_av_hndl_t av_hndl; // receive-queue CQ's handle + VAPI_qp_init_attr_t qp_init_attr; // QP's init attribute + VAPI_qp_attr_t qp_attr; // QP's attribute - dlid + VAPI_qp_prop_t qp_prop; // QP's propertities + VAPI_hca_port_t hca_port; + VAPI_qp_num_t qp_num; // QP's number + VAPI_qp_num_t rqp_num; // remote QP's number + IB_lid_t slid; + IB_lid_t dlid; + VAPI_gid_t src_gid; + + u_int32_t buf_size; + VAPI_virt_addr_t buf_addr; + char *bufptr; + VAPI_mrw_t mr; + VAPI_mr_hndl_t mr_hndl; + VAPI_virt_addr_t raddr; + VAPI_rkey_t rkey; + VAPI_lkey_t lkey; + + VAPI_wr_id_t last_posted_send_id; // user defined work request ID + VAPI_wr_id_t last_posted_rcv_id; // user defined work request ID + VAPI_mw_hndl_t mw_hndl; // memory window handle + VAPI_rkey_t mw_rkey; // memory window rkey + VAPI_sg_lst_entry_t sg_lst[256]; // scatter and gather list + int sg_list_sz; // set as NUM_SGE + VAPI_wr_id_t wr_id; // + spinlock_t snd_mutex; + spinlock_t rcv_mutex; + spinlock_t bl_mutex; + spinlock_t cln_mutex; + int 
cur_RDMA_outstanding; + int cur_send_outstanding; + int cur_posted_rcv_bufs; + int snd_rcv_balance; +} QP_info; + + +// buffer status +#define BUF_REGISTERED 0x10000000 +#define BUF_INUSE 0x01000000 +#define BUF_UNREGISTERED 0x00100000 + +// buffer type +#define REG_BUF 0x10000000 +#define RDMA_BUF 0x01000000 + +// +// IMM data +// +#define IMM_000 (0 << 32); +#define IMM_001 (1 << 32); +#define IMM_002 (2 << 32); +#define IMM_003 (3 << 32); +#define IMM_004 (4 << 32); +#define IMM_005 (5 << 32); +#define IMM_006 (6 << 32); +#define IMM_007 (7 << 32); +#define IMM_008 (8 << 32); +#define IMM_009 (9 << 32); +#define IMM_010 (10 << 32); +#define IMM_011 (11 << 32); +#define IMM_012 (12 << 32); +#define IMM_013 (13 << 32); +#define IMM_014 (14 << 32); +#define IMM_015 (15 << 32); +#define IMM_016 (16 << 32); +#define IMM_017 (17 << 32); +#define IMM_018 (18 << 32); +#define IMM_019 (19 << 32); +#define IMM_020 (20 << 32); +#define IMM_021 (21 << 32); +#define IMM_022 (22 << 32); +#define IMM_023 (23 << 32); +#define IMM_024 (24 << 32); +#define IMM_025 (25 << 32); +#define IMM_026 (26 << 32); +#define IMM_027 (27 << 32); +#define IMM_028 (28 << 32); +#define IMM_029 (29 << 32); +#define IMM_030 (30 << 32); +#define IMM_031 (31 << 32); + + + +typedef struct Memory_buffer_info{ + u_int32_t buf_size; + VAPI_virt_addr_t buf_addr; + char *bufptr; + VAPI_mrw_t mr; + VAPI_mr_hndl_t mr_hndl; + int status; + int ref_count; + int buf_type; + VAPI_virt_addr_t raddr; + VAPI_rkey_t rkey; + VAPI_lkey_t lkey; +} Memory_buffer_info; + +typedef struct RDMA_Info_Exchange { + int opcode; + int buf_length; + VAPI_mrw_t recv_rdma_mr; + VAPI_mr_hndl_t recv_rdma_mr_hndl; + VAPI_mrw_t send_rdma_mr; + VAPI_mr_hndl_t send_rdma_mr_hndl; + VAPI_virt_addr_t raddr; + VAPI_rkey_t rkey; + int flag; +} RDMA_Info_Exchange; + +// opcode for Rdma info exchange RTS/CTS +#define Ready_To_send 0x10000000 +#define Clear_To_send 0x01000000 + +#define RDMA_RTS_ID 5555 +#define RDMA_CTS_ID 7777 +#define RDMA_OP_ID 9999 +#define SEND_RECV_TEST_ID 2222 +#define SEND_RECV_TEST_BUF_ID 0 + +#define TEST_SEND_MESSAGE 0x00000001 +#define TEST_RECV_MESSAGE 0x00000002 + + +#define RTS_CTS_TIMEOUT 50 +#define RECEIVING_THREAD_TIMEOUT 50 +#define WAIT_FOR_SEND_BUF_TIMEOUT 50 + +#define IBNAL_DEBUG_LEVEL_1 0XFFFFFFFF +#define IBNAL_DEBUG_LEVEL_2 D_PORTALS | D_NET | D_WARNING | D_MALLOC | \ + D_ERROR | D_OTHER | D_TRACE | D_INFO + + +// flag for Rdma info exhange +#define RDMA_BUFFER_RESERVED 0x10000000 +#define RDMA_BUFFER_UNAVAILABLE 0x01000000 + + +// receiving data structure +typedef struct { + ptl_hdr_t *krx_buffer; // pointer to receiving buffer + unsigned long krx_len; // length of buffer + unsigned int krx_size; // + unsigned int krx_priority; // do we need this + struct list_head krx_item; +} kibnal_rx_t; + +// transmitting data structure +typedef struct { + nal_cb_t *ktx_nal; + void *ktx_private; + lib_msg_t *ktx_cookie; + char *ktx_buffer; + size_t ktx_len; + unsigned long ktx_size; + int ktx_ndx; + unsigned int ktx_priority; + unsigned int ktx_tgt_node; + unsigned int ktx_tgt_port_id; +} kibnal_tx_t; + + +typedef struct { + char kib_init; + char kib_shuttingdown; + IB_port_t port_num; // IB port information + struct list_head kib_list; + ptl_nid_t kib_nid; + nal_t *kib_nal; + nal_cb_t *kib_cb; + struct kib_trans *kib_trans; // do I need this + struct tq_struct kib_ready_tq; + spinlock_t kib_dispatch_lock; +} kibnal_data_t; + + +// +// A data structure for keeping the HCA information in system +// information related to HCA and 
hca_handle will be kept here +// +typedef struct HCA_Info +{ + VAPI_hca_hndl_t hca_hndl; // HCA handle + VAPI_pd_hndl_t pd_hndl; // protection domain + IB_port_t port; // port number + int num_qp; // number of qp used + QP_info *qp_ptr[NUM_QPS]; // point to QP_list + int num_cq; // number of cq used + VAPI_cq_hndl_t cq_hndl; + VAPI_cq_hndl_t sq_cq_hndl; + VAPI_cq_hndl_t rq_cq_hndl; + IB_lid_t dlid; + IB_lid_t slid; + kibnal_data_t *kib_data; // for PORTALS operations +} HCA_info; + + + + +// Remote HCA Info information +typedef struct Remote_HCA_Info { + unsigned long opcode; + unsigned long length; + IB_lid_t dlid[NUM_QPS]; + VAPI_qp_num_t rqp_num[NUM_QPS]; +} Remote_QP_Info; + +typedef struct Bucket_index{ + int start; + int end; +} Bucket_index; + +// functional prototypes +// infiniband initialization +int kib_init(kibnal_data_t *); + +// receiving thread +void kibnal_recv_thread(HCA_info *); +void recv_thread(HCA_info *); + +// forward data packet +void kibnal_fwd_packet (void *, kpr_fwd_desc_t *); + +// global data structures +extern kibnal_data_t kibnal_data; +extern ptl_handle_ni_t kibnal_ni; +extern nal_t kibnal_api; +extern nal_cb_t kibnal_lib; +extern QP_info QP_list[]; +extern QP_info CQ_list[]; +extern HCA_info Hca_data; +extern VAPI_hca_hndl_t Hca_hndl; +extern VAPI_pd_hndl_t Pd_hndl; +extern VAPI_hca_vendor_t Hca_vendor; +extern VAPI_hca_cap_t Hca_cap; +extern VAPI_hca_port_t Hca_port_1_props; +extern VAPI_hca_port_t Hca_port_2_props; +extern VAPI_hca_attr_t Hca_attr; +extern VAPI_hca_attr_mask_t Hca_attr_mask; +extern VAPI_cq_hndl_t Cq_SQ_hndl; +extern VAPI_cq_hndl_t Cq_RQ_hndl; +extern VAPI_cq_hndl_t Cq_hndl; +extern unsigned long User_Defined_Small_Msg_Size; +extern Remote_QP_Info L_HCA_RDMA_Info; +extern Remote_QP_Info R_HCA_RDMA_Info; +extern unsigned int Num_posted_recv_buf; +extern int R_RDMA_DATA_ARRIVED; +extern Memory_buffer_info MRbuf_list[]; +extern Memory_buffer_info MSbuf_list[]; +extern Bucket_index Bucket[]; +extern RDMA_Info_Exchange Rdma_info; +extern int Cts_Message_arrived; +extern RDMA_Info_Exchange Local_rdma_info; +extern spinlock_t MSB_mutex[]; + + + +// kernel NAL API function prototype +int kibnal_forward(nal_t *,int ,void *,size_t ,void *,size_t ); +void kibnal_lock(nal_t *, unsigned long *); +void kibnal_unlock(nal_t *, unsigned long *); +int kibnal_shutdown(nal_t *, int ); +void kibnal_yield( nal_t * ); +void kibnal_invalidate(nal_cb_t *,void *,size_t ,void *); +int kibnal_validate(nal_cb_t *,void *,size_t ,void **); + + + +nal_t *kibnal_init(int , ptl_pt_index_t , ptl_ac_index_t , ptl_pid_t ); +void __exit kibnal_finalize(void ); +VAPI_ret_t create_qp(QP_info *, int ); +VAPI_ret_t init_qp(QP_info *, int ); +VAPI_ret_t IB_Open_HCA(kibnal_data_t *); +VAPI_ret_t IB_Close_HCA(void ); +VAPI_ret_t createMemRegion(VAPI_hca_hndl_t, VAPI_pd_hndl_t); +VAPI_ret_t deleteMemRegion(QP_info *, int ); + +void ibnal_send_recv_self_testing(int *); + +int __init kibnal_initialize(void); + + + +/* CB NAL functions */ +int kibnal_send(nal_cb_t *, + void *, + lib_msg_t *, + ptl_hdr_t *, + int, + ptl_nid_t, + ptl_pid_t, + unsigned int, + ptl_kiov_t *, + size_t); + +int kibnal_send_pages(nal_cb_t *, + void *, + lib_msg_t *, + ptl_hdr_t *, + int, + ptl_nid_t, + ptl_pid_t, + unsigned int, + ptl_kiov_t *, + size_t); +int kibnal_recv(nal_cb_t *, void *, lib_msg_t *, + unsigned int, struct iovec *, size_t, size_t); +int kibnal_recv_pages(nal_cb_t *, void *, lib_msg_t *, + unsigned int, ptl_kiov_t *, size_t, size_t); +int kibnal_read(nal_cb_t *,void *,void *,user_ptr ,size_t 
); +int kibnal_write(nal_cb_t *,void *,user_ptr ,void *,size_t ); +int kibnal_callback(nal_cb_t * , void *, lib_eq_t *, ptl_event_t *); +void *kibnal_malloc(nal_cb_t *,size_t ); +void kibnal_free(nal_cb_t *,void *,size_t ); +int kibnal_map(nal_cb_t *, unsigned int , struct iovec *, void **); +void kibnal_unmap(nal_cb_t *, unsigned int , struct iovec *, void **); +int kibnal_map_pages(nal_cb_t *, unsigned int , ptl_kiov_t *, void **); +void kibnal_unmap_pages(nal_cb_t * , unsigned int , ptl_kiov_t *, void **); +void kibnal_printf(nal_cb_t *, const char *, ...); +void kibnal_cli(nal_cb_t *,unsigned long *); +void kibnal_sti(nal_cb_t *,unsigned long *); +int kibnal_dist(nal_cb_t *,ptl_nid_t ,unsigned long *); + +void kibnal_fwd_packet (void *, kpr_fwd_desc_t *); +void kibnal_rx(kibnal_data_t *, + VAPI_virt_addr_t , + u_int32_t, + u_int32_t, + unsigned int); + +int kibnal_end(kibnal_data_t *); + +void async_event_handler(VAPI_hca_hndl_t , VAPI_event_record_t *,void *); + +void CQE_event_handler(VAPI_hca_hndl_t ,VAPI_cq_hndl_t , void *); + + +VAPI_ret_t Send_Small_Msg(char *, int ); +VAPI_ret_t Send_Large_Msg(char *, int ); + +VAPI_ret_t repost_recv_buf(QP_info *, VAPI_wr_id_t ); +int post_recv_bufs(VAPI_wr_id_t ); +int server_listen_thread(void *); +VAPI_wr_id_t RTS_handshaking_protocol(int ); +VAPI_wr_id_t CTS_handshaking_protocol(RDMA_Info_Exchange *); + +VAPI_ret_t createMemRegion_RDMA(VAPI_hca_hndl_t , + VAPI_pd_hndl_t , + char *, + int , + VAPI_mr_hndl_t *, + VAPI_mrw_t *); + + +VAPI_ret_t IB_Set_Event_Handler(HCA_info , kibnal_data_t *); + +VAPI_ret_t IB_Set_Async_Event_Handler(HCA_info ,kibnal_data_t *); + +VAPI_wr_id_t find_available_buf(int ); +VAPI_wr_id_t search_send_buf(int ); +VAPI_wr_id_t find_filler_list(int ,int ); +int insert_MRbuf_list(int ); + + +#endif /* _IBNAL_H */ diff --git a/lnet/klnds/iblnd/ibnal_cb.c b/lnet/klnds/iblnd/ibnal_cb.c new file mode 100644 index 0000000..2c07cc4 --- /dev/null +++ b/lnet/klnds/iblnd/ibnal_cb.c @@ -0,0 +1,1288 @@ +/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- + * vim:expandtab:shiftwidth=8:tabstop=8: + * + * Based on ksocknal and qswnal + * + * Author: Hsing-bung Chen + * + * This file is part of Portals, http://www.sf.net/projects/sandiaportals/ + * + * Portals is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * Portals is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Portals; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + + +#include "ibnal.h" + + + + +RDMA_Info_Exchange Rdma_nfo; +int Cts_Msg_Arrived = NO; + + +/* + * LIB functions follow + */ + +// +// read +// copy a block of data from scr_addr to dst_addr +// it all happens in kernel space - dst_addr and src_addr +// +// original definition is to read a block od data from a +// specified user address +// +// cb_read + +int kibnal_read (nal_cb_t *nal, + void *private, + void *dst_addr, + user_ptr src_addr, + size_t len) +{ + CDEBUG(D_NET, "kibnal_read: 0x%Lx: reading %ld bytes from %p -> %p\n", + nal->ni.nid, (long)len, src_addr, dst_addr ); + + memcpy( dst_addr, src_addr, len ); + + return 0; +} + +// +// it seems that read and write are doing the same thing +// because they all happen in kernel space +// why do we need two functions like read and write +// to make PORTALS API compatable +// + +// +// write +// copy a block of data from scr_addr to dst_addr +// it all happens in kernel space - dst_addr and src_addr +// +// original definition is to write a block od data to a +// specified user address +// +// cb_write + +int kibnal_write(nal_cb_t *nal, + void *private, + user_ptr dst_addr, + void *src_addr, + size_t len) +{ + CDEBUG(D_NET, "kibnal_write: 0x%Lx: writing %ld bytes from %p -> %p\n", + nal->ni.nid, (long)len, src_addr, dst_addr ); + + + memcpy( dst_addr, src_addr, len ); + + return 0; +} + +// +// malloc +// +// either vmalloc or kmalloc is used +// dynamically allocate a block of memory based on the size of buffer +// +// cb_malloc + +void * kibnal_malloc(nal_cb_t *nal, size_t length) +{ + void *buffer; + + // PORTAL_ALLOC will do the job + // allocate a buffer with size "length" + PORTAL_ALLOC(buffer, length); + + return buffer; +} + +// +// free +// release a dynamically allocated memory pointed by buffer pointer +// +// cb_free + +void kibnal_free(nal_cb_t *nal, void *buffer, size_t length) +{ + // + // release allocated buffer to system + // + PORTAL_FREE(buffer, length); +} + +// +// invalidate +// because evernthing is in kernel space (LUSTRE) +// there is no need to mark a piece of user memory as no longer in use by +// the system +// +// cb_invalidate + +void kibnal_invalidate(nal_cb_t *nal, + void *base, + size_t extent, + void *addrkey) +{ + // do nothing + CDEBUG(D_NET, "kibnal_invalidate: 0x%Lx: invalidating %p : %d\n", + nal->ni.nid, base, extent); + return; +} + + +// +// validate +// because everything is in kernel space (LUSTRE) +// there is no need to mark a piece of user memory in use by +// the system +// +// cb_validate + +int kibnal_validate(nal_cb_t *nal, + void *base, + size_t extent, + void **addrkey) +{ + // do nothing + CDEBUG(D_NET, "kibnal_validate: 0x%Lx: validating %p : %d\n", + nal->ni.nid, base, extent); + + return 0; +} + + +// +// log messages from kernel space +// printk() is used +// +// cb_printf + +void kibnal_printf(nal_cb_t *nal, const char *fmt, ...) +{ + va_list ap; + char msg[256]; + + if (portal_debug & D_NET) { + va_start( ap, fmt ); + vsnprintf( msg, sizeof(msg), fmt, ap ); + va_end( ap ); + + printk("CPUId: %d %s",smp_processor_id(), msg); + } +} + +// +// clear interrupt +// use spin_lock to lock protected area such as MD, ME... 
+// so a process can enter a protected area and do some works +// this won't physicall disable interrup but use a software +// spin-lock to control some protected areas +// +// cb_cli + +void kibnal_cli(nal_cb_t *nal, unsigned long *flags) +{ + kibnal_data_t *data= nal->nal_data; + + CDEBUG(D_NET, "kibnal_cli \n"); + + spin_lock_irqsave(&data->kib_dispatch_lock,*flags); + +} + +// +// set interrupt +// use spin_lock to unlock protected area such as MD, ME... +// this won't physicall enable interrup but use a software +// spin-lock to control some protected areas +// +// cb_sti + +void kibnal_sti(nal_cb_t *nal, unsigned long *flags) +{ + kibnal_data_t *data= nal->nal_data; + + CDEBUG(D_NET, "kibnal_sti \n"); + + spin_unlock_irqrestore(&data->kib_dispatch_lock,*flags); +} + + + +// +// nic distance +// +// network distance doesn't mean much for this nal +// here we only indicate +// 0 - operation is happened on the same node +// 1 - operation is happened on different nodes +// router will handle the data routing +// +// cb_dist + +int kibnal_dist(nal_cb_t *nal, ptl_nid_t nid, unsigned long *dist) +{ + CDEBUG(D_NET, "kibnal_dist \n"); + + if ( nal->ni.nid == nid ) { + *dist = 0; + } + else { + *dist = 1; + } + + return 0; // always retrun 0 +} + + +// +// This is the cb_send() on IB based interconnect system +// prepare a data package and use VAPI_post_sr() to send it +// down-link out-going message +// + + +int +kibnal_send(nal_cb_t *nal, + void *private, + lib_msg_t *cookie, + ptl_hdr_t *hdr, + int type, + ptl_nid_t nid, + ptl_pid_t pid, + unsigned int niov, + ptl_kiov_t *iov, + size_t len) +{ + + int rc=0; + void *buf = NULL; + unsigned long buf_length = sizeof(ptl_hdr_t) + len; + int expected_buf_size = 0; + VAPI_ret_t vstat; + + PROF_START(kibnal_send); // time stamp send start + + CDEBUG(D_NET,"kibnal_send: sending %d bytes from %p to nid: 0x%Lx pid %d\n", + buf_length, iov, nid, HCA_PORT_1); + + + // do I need to check the gateway information + // do I have problem to send direct + // do I have to forward a data packet to gateway + // + // The current connection is back-to-back + // I always know that data will be send from one-side to + // the other side + // + + // + // check data buffer size + // + // MSG_SIZE_SMALL + // regular post send + // + // MSG_SIZE_LARGE + // rdma write + + if(buf_length <= SMALL_MSG_SIZE) { + expected_buf_size = MSG_SIZE_SMALL; + } + else { + if(buf_length > MAX_MSG_SIZE) { + CERROR("kibnal_send:request exceeds Transmit data size (%d).\n", + MAX_MSG_SIZE); + rc = -1; + return rc; + } + else { + expected_buf_size = MSG_SIZE_LARGE; // this is a large data package + } + } + + // prepare data packet for send operation + // + // allocate a data buffer "buf" with size of buf_len(header + payload) + // --------------- + // buf | hdr | size = sizeof(ptl_hdr_t) + // -------------- + // |payload data | size = len + // --------------- + + // copy header to buf + memcpy(buf, hdr, sizeof(ptl_hdr_t)); + + // copy payload data from iov to buf + // use portals library function lib_copy_iov2buf() + + if (len != 0) + lib_copy_iov2buf(((char *)buf) + sizeof (ptl_hdr_t), + niov, + iov, + len); + + // buf is ready to do a post send + // the send method is base on the buf_size + + CDEBUG(D_NET,"ib_send %d bytes (size %d) from %p to nid: 0x%Lx " + " port %d\n", buf_length, expected_buf_size, iov, nid, HCA_PORT_1); + + switch(expected_buf_size) { + case MSG_SIZE_SMALL: + // send small message + if((vstat = Send_Small_Msg(buf, buf_length)) != VAPI_OK){ + CERROR("Send_Small_Msg() 
is failed\n"); + } + break; + + case MSG_SIZE_LARGE: + // send small message + if((vstat = Send_Large_Msg(buf, buf_length)) != VAPI_OK){ + CERROR("Send_Large_Msg() is failed\n"); + } + break; + + default: + CERROR("Unknown message size %d\n", expected_buf_size); + break; + } + + PROF_FINISH(kibnal_send); // time stapm of send operation + + rc = 1; + + return rc; +} + +// +// kibnal_send_pages +// +// no support +// +// do you need this +// +int kibnal_send_pages(nal_cb_t * nal, + void *private, + lib_msg_t * cookie, + ptl_hdr_t * hdr, + int type, + ptl_nid_t nid, + ptl_pid_t pid, + unsigned int niov, + ptl_kiov_t *iov, + size_t mlen) +{ + int rc = 1; + + CDEBUG(D_NET, "kibnal_send_pages\n"); + + // do nothing now for Infiniband + + return rc; +} + + + + + +// +// kibnal_fwd_packet +// +// no support +// +// do you need this +// +void kibnal_fwd_packet (void *arg, kpr_fwd_desc_t *fwd) +{ + CDEBUG(D_NET, "forwarding not implemented\n"); + return; + +} + +// +// kibnal_callback +// +// no support +// +// do you need this +// +int kibnal_callback(nal_cb_t * nal, + void *private, + lib_eq_t *eq, + ptl_event_t *ev) +{ + CDEBUG(D_NET, "callback not implemented\n"); + return PTL_OK; +} + + +/* Process a received portals packet */ +// +// conver receiving data in to PORTALS header +// + +void kibnal_rx(kibnal_data_t *kib, + VAPI_virt_addr_t buffer_addr, + u_int32_t buffer_len, + u_int32_t buffer_size, + unsigned int priority) +{ + ptl_hdr_t *hdr = (ptl_hdr_t *) buffer_addr; // case to ptl header format + kibnal_rx_t krx; + + CDEBUG(D_NET,"kibnal_rx: buf %p, len %ld\n", buffer_addr, buffer_len); + + if ( buffer_len < sizeof( ptl_hdr_t ) ) { + /* XXX what's this for? */ + if (kib->kib_shuttingdown) + return; + CERROR("kibnal_rx: did not receive complete portal header, " + "len= %ld", buffer_len); + + return; + } + + // typedef struct { + // char *krx_buffer; // pointer to receiving buffer + // unsigned long krx_len; // length of buffer + // unsigned int krx_size; // + // unsigned int krx_priority; // do we need this + // struct list_head krx_item; + // } kibnal_rx_t; + // + krx.krx_buffer = hdr; + krx.krx_len = buffer_len; + krx.krx_size = buffer_size; + krx.krx_priority = priority; + + if ( hdr->dest_nid == kibnal_lib.ni.nid ) { + // this is my data + PROF_START(lib_parse); + + lib_parse(&kibnal_lib, (ptl_hdr_t *)krx.krx_buffer, &krx); + + PROF_FINISH(lib_parse); + } else { + /* forward to gateway */ + // Do we expect this happened ? + // + CERROR("kibnal_rx: forwarding not implemented yet"); + } + + return; +} + + + + +// +// kibnal_recv_pages +// +// no support +// +// do you need this +// +int +kibnal_recv_pages(nal_cb_t * nal, + void *private, + lib_msg_t * cookie, + unsigned int niov, + ptl_kiov_t *iov, + size_t mlen, + size_t rlen) +{ + + CDEBUG(D_NET, "recv_pages not implemented\n"); + return PTL_OK; + +} + + +int +kibnal_recv(nal_cb_t *nal, + void *private, + lib_msg_t *cookie, + unsigned int niov, + struct iovec *iov, + size_t mlen, + size_t rlen) +{ + kibnal_rx_t *krx = private; + + CDEBUG(D_NET,"kibnal_recv: mlen=%d, rlen=%d\n", mlen, rlen); + + /* What was actually received must be >= what sender claims to + * have sent. This is an LASSERT, since lib-move doesn't + * check cb return code yet. 
*/ + LASSERT (krx->krx_len >= sizeof (ptl_hdr_t) + rlen); + LASSERT (mlen <= rlen); + + PROF_START(kibnal_recv); + + if(mlen != 0) { + PROF_START(memcpy); + lib_copy_buf2iov (niov, iov, krx->krx_buffer + + sizeof (ptl_hdr_t), mlen); + PROF_FINISH(memcpy); + } + + PROF_START(lib_finalize); + + lib_finalize(nal, private, cookie); + + PROF_FINISH(lib_finalize); + PROF_FINISH(kibnal_recv); + + return rlen; +} + +// +// kibnal_map +// no support +// do you need this +// +int kibnal_map(nal_cb_t * nal, + unsigned int niov, + struct iovec *iov, + void **addrkey) +{ + CDEBUG(D_NET, "map not implemented\n"); + return PTL_OK; +} + + + +// +// kibnal_unmap +// +// no support +// +// do you need this +// +void kibnal_unmap(nal_cb_t * nal, + unsigned int niov, + struct iovec *iov, + void **addrkey) +{ + CDEBUG(D_NET, "unmap not implemented\n"); + return; +} + + + +// +// kibnal_map_pages +// no support +// do you need this +/* as (un)map, but with a set of page fragments */ +int kibnal_map_pages(nal_cb_t * nal, + unsigned int niov, + ptl_kiov_t *iov, + void **addrkey) +{ + CDEBUG(D_NET, "map_pages not implemented\n"); + return PTL_OK; +} + + + +// +// kibnal_unmap_pages +// +// no support +// +// do you need this +// +void kibnal_unmap_pages(nal_cb_t * nal, + unsigned int niov, + ptl_kiov_t *iov, + void **addrkey) +{ + CDEBUG(D_NET, "unmap_pages not implemented\n"); + return ; +} + + +int kibnal_end(kibnal_data_t *kib) +{ + + /* wait for sends to finish ? */ + /* remove receive buffers */ + /* shutdown receive thread */ + + CDEBUG(D_NET, "kibnal_end\n"); + IB_Close_HCA(); + + return 0; +} + + +// +// +// asynchronous event handler: response to some unexpetced operation errors +// +// void async_event_handler(VAPI_hca_hndl_t hca_hndl, +// VAPI_event_record_t *event_record_p, +// void* private_data) +// the HCA drive will prepare evetn_record_p +// +// this handler is registered with VAPI_set_async_event_handler() +// VAPI_set_async_event_handler() is issued when an HCA is created +// +// +void async_event_handler(VAPI_hca_hndl_t hca_hndl, + VAPI_event_record_t *event_record_p, + void* private_data) +{ + // + // * event_record_p is prepared by the system when an async + // event happened + // * what to do with private_data + // * do we expect more async events happened if so what are they + // + // only log ERROR message now + + switch (event_record_p->type) { + case VAPI_PORT_ERROR: + printk("Got PORT_ERROR event. port number=%d\n", + event_record_p->modifier.port_num); + break; + case VAPI_PORT_ACTIVE: + printk("Got PORT_ACTIVE event. port number=%d\n", + event_record_p->modifier.port_num); + break; + case VAPI_QP_PATH_MIGRATED: /*QP*/ + printk("Got P_PATH_MIGRATED event. qp_hndl=%lu\n", + event_record_p->modifier.qp_hndl); + break; + case VAPI_EEC_PATH_MIGRATED: /*EEC*/ + printk("Got EEC_PATH_MIGRATED event. eec_hndl=%d\n", + event_record_p->modifier.eec_hndl); + break; + case VAPI_QP_COMM_ESTABLISHED: /*QP*/ + printk("Got QP_COMM_ESTABLISHED event. qp_hndl=%lu\n", + event_record_p->modifier.qp_hndl); + break; + case VAPI_EEC_COMM_ESTABLISHED: /*EEC*/ + printk("Got EEC_COMM_ESTABLISHED event. eec_hndl=%d\n", + event_record_p->modifier.eec_hndl); + break; + case VAPI_SEND_QUEUE_DRAINED: /*QP*/ + printk("Got SEND_QUEUE_DRAINED event. qp_hndl=%lu\n", + event_record_p->modifier.qp_hndl); + break; + case VAPI_CQ_ERROR: /*CQ*/ + printk("Got CQ_ERROR event. 
cq_hndl=%lu\n", + event_record_p->modifier.cq_hndl); + break; + case VAPI_LOCAL_WQ_INV_REQUEST_ERROR: /*QP*/ + printk("Got LOCAL_WQ_INV_REQUEST_ERROR event. qp_hndl=%lu\n", + event_record_p->modifier.qp_hndl); + break; + case VAPI_LOCAL_WQ_ACCESS_VIOL_ERROR: /*QP*/ + printk("Got LOCAL_WQ_ACCESS_VIOL_ERROR event. qp_hndl=%lu\n", + event_record_p->modifier.qp_hndl); + break; + case VAPI_LOCAL_WQ_CATASTROPHIC_ERROR: /*QP*/ + printk("Got LOCAL_WQ_CATASTROPHIC_ERROR event. qp_hndl=%lu\n", + event_record_p->modifier.qp_hndl); + break; + case VAPI_PATH_MIG_REQ_ERROR: /*QP*/ + printk("Got PATH_MIG_REQ_ERROR event. qp_hndl=%lu\n", + event_record_p->modifier.qp_hndl); + break; + case VAPI_LOCAL_CATASTROPHIC_ERROR: /*none*/ + printk("Got LOCAL_CATASTROPHIC_ERROR event. \n"); + break; + default: + printk(":got non-valid event type=%d. IGNORING\n", + event_record_p->type); + } + +} + + + + +VAPI_wr_id_t +search_send_buf(int buf_length) +{ + VAPI_wr_id_t send_id = -1; + u_int32_t i; + int flag = NO; + int loop_count = 0; + + CDEBUG(D_NET, "search_send_buf \n"); + + while((flag == NO) && (loop_count < MAX_LOOP_COUNT)) { + for(i=0; i < NUM_ENTRY; i++) { + // problem about using spinlock + spin_lock(&MSB_mutex[i]); + if(MSbuf_list[i].status == BUF_REGISTERED) { + MSbuf_list[i].status = BUF_INUSE;// make send buf as inuse + flag = YES; + spin_unlock(&MSB_mutex[i]); + break; + } + else + spin_unlock(&MSB_mutex[i]); + } + + loop_count++; + schedule_timeout(200); // wait for a while + } + + if(flag == NO) { + CDEBUG(D_NET, "search_send_buf: could not locate an entry in MSbuf_list\n"); + } + + send_id = (VAPI_wr_id_t ) i; + + return send_id; +} + + + +VAPI_wr_id_t +search_RDMA_recv_buf(int buf_length) +{ + VAPI_wr_id_t recv_id = -1; + u_int32_t i; + int flag = NO; + int loop_count = 0; + + CDEBUG(D_NET, "search_RDMA_recv_buf\n"); + + while((flag == NO) && (loop_count < MAX_LOOP_COUNT)) { + + for(i=NUM_ENTRY; i < NUM_MBUF; i++) { + + spin_lock(&MSB_mutex[i]); + + if((MRbuf_list[i].status == BUF_REGISTERED) && + (MRbuf_list[i].buf_size >= buf_length)) { + MSbuf_list[i].status = BUF_INUSE;// make send buf as inuse + flag = YES; + spin_unlock(&MSB_mutex[i]); + break; + } + else + spin_unlock(&MSB_mutex[i]); + } + + loop_count++; + + schedule_timeout(200); // wait for a while + } + + if(flag == NO) { + CERROR("search_RDMA_recv_buf: could not locate an entry in MBbuf_list\n"); + } + + recv_id = (VAPI_wr_id_t ) i; + + return recv_id; + +} + + + + + + + +VAPI_ret_t Send_Small_Msg(char *buf, int buf_length) +{ + VAPI_ret_t vstat; + VAPI_sr_desc_t sr_desc; + VAPI_sg_lst_entry_t sr_sg; + QP_info *qp; + VAPI_wr_id_t send_id; + + CDEBUG(D_NET, "Send_Small_Msg\n"); + + send_id = search_send_buf(buf_length); + + if(send_id < 0){ + CERROR("Send_Small_Msg: Can not find a QP \n"); + return(~VAPI_OK); + } + + qp = &QP_list[(int) send_id]; + + // find a suitable/registered send_buf from MSbuf_list + CDEBUG(D_NET, "Send_Small_Msg: current send id %d \n", send_id); + + sr_desc.opcode = VAPI_SEND; + sr_desc.comp_type = VAPI_SIGNALED; + sr_desc.id = send_id; + + + // scatter and gather info + sr_sg.len = buf_length; + sr_sg.lkey = MSbuf_list[send_id].mr.l_key; // use send MR + + sr_sg.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MSbuf_list[send_id].buf_addr; + + // copy data to register send buffer + memcpy(&sr_sg.addr, buf, buf_length); + + sr_desc.sg_lst_p = &sr_sg; + sr_desc.sg_lst_len = 1; // only 1 entry is used + sr_desc.fence = TRUE; + sr_desc.set_se = FALSE; + + // call VAPI_post_sr to send out this data + vstat = 
VAPI_post_sr(qp->hca_hndl, qp->qp_hndl, &sr_desc); + + if (vstat != VAPI_OK) { + CERROR("VAPI_post_sr failed (%s).\n",VAPI_strerror(vstat)); + } + + CDEBUG(D_NET, "VAPI_post_sr success.\n"); + + return (vstat); + +} + + + + +VAPI_wr_id_t +RTS_handshaking_protocol(int buf_length) +{ + + VAPI_ret_t vstat; + VAPI_sr_desc_t sr_desc; + VAPI_sg_lst_entry_t sr_sg; + VAPI_wr_id_t send_id; + + RDMA_Info_Exchange rdma_info; + + rdma_info.opcode = Ready_To_send; + rdma_info.buf_length = buf_length; + rdma_info.raddr = (VAPI_virt_addr_t) 0; + rdma_info.rkey = (VAPI_rkey_t) 0 ; + + QP_info *qp; + + CDEBUG(D_NET, "RTS_handshaking_protocol\n"); + + // find a suitable/registered send_buf from MSbuf_list + send_id = search_send_buf(sizeof(RDMA_Info_Exchange)); + + qp = &QP_list[(int) send_id]; + + CDEBUG(D_NET, "RTS_CTS: current send id %d \n", send_id); + sr_desc.opcode = VAPI_SEND; + sr_desc.comp_type = VAPI_SIGNALED; + sr_desc.id = send_id + RDMA_RTS_ID;// this RTS mesage ID + + // scatter and gather info + sr_sg.len = sizeof(RDMA_Info_Exchange); + sr_sg.lkey = MSbuf_list[send_id].mr.l_key; // use send MR + sr_sg.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MSbuf_list[send_id].buf_addr; + + // copy data to register send buffer + memcpy(&sr_sg.addr, &rdma_info, sizeof(RDMA_Info_Exchange)); + + sr_desc.sg_lst_p = &sr_sg; + sr_desc.sg_lst_len = 1; // only 1 entry is used + sr_desc.fence = TRUE; + sr_desc.set_se = FALSE; + + // call VAPI_post_sr to send out this RTS message data + vstat = VAPI_post_sr(qp->hca_hndl, qp->qp_hndl, &sr_desc); + + if (vstat != VAPI_OK) { + CERROR("RTS: VAPI_post_sr failed (%s).\n",VAPI_strerror_sym(vstat)); + } + + return send_id; + +} + + + +// create local receiving Memory Region for a HCA +VAPI_ret_t +createMemRegion_RDMA(VAPI_hca_hndl_t hca_hndl, + VAPI_pd_hndl_t pd_hndl, + char *bufptr, + int buf_length, + VAPI_mr_hndl_t *rep_mr_hndl, + VAPI_mrw_t *rep_mr) +{ + VAPI_ret_t vstat; + VAPI_mrw_t mrw; + + CDEBUG(D_NET, "createMemRegion_RDMA\n"); + + // memory region address and size of memory region + // allocate a block of memory for this HCA + // RDMA data buffer + + + if(bufptr == NULL) { + // need to allcate a local buffer to receive data from a + // remore VAPI_RDMA_WRITE_IMM + PORTAL_ALLOC(bufptr, buf_length); + } + + if(bufptr == NULL) { + CDEBUG(D_MALLOC, "Failed to malloc a block of RDMA receiving memory, size %d\n", + buf_length); + return(VAPI_ENOMEM); + } + + /* Register RDAM data Memory region */ + CDEBUG(D_NET, "Register a RDMA data memory region\n"); + + mrw.type = VAPI_MR; + mrw.pd_hndl= pd_hndl; + mrw.start = (VAPI_virt_addr_t )(MT_virt_addr_t )bufptr; + mrw.size = buf_length; + mrw.acl = VAPI_EN_LOCAL_WRITE | + VAPI_EN_REMOTE_WRITE | + VAPI_EN_REMOTE_READ; + + // register send memory region + vstat = VAPI_register_mr(hca_hndl, + &mrw, + rep_mr_hndl, + rep_mr); + + // this memory region is going to be reused until deregister is called + if (vstat != VAPI_OK) { + CERROR("Failed registering a mem region Addr=%p, Len=%d. 
%s\n", + bufptr, buf_length, VAPI_strerror(vstat)); + } + + return(vstat); + +} + + + +RDMA_Info_Exchange Local_rdma_info; + +int insert_MRbuf_list(int buf_lenght) +{ + int recv_id = NUM_ENTRY; + + CDEBUG(D_NET, "insert_MRbuf_list\n"); + + for(recv_id= NUM_ENTRY; recv_id < NUM_MBUF; recv_id++){ + if(BUF_UNREGISTERED == MRbuf_list[recv_id].status) { + MRbuf_list[recv_id].status = BUF_UNREGISTERED; + MRbuf_list[recv_id].buf_size = buf_lenght; + break; + } + } + + return recv_id; + +} + +VAPI_wr_id_t +CTS_handshaking_protocol(RDMA_Info_Exchange *rdma_info) +{ + + VAPI_ret_t vstat; + VAPI_sr_desc_t sr_desc; + VAPI_sg_lst_entry_t sr_sg; + QP_info *qp; + VAPI_wr_id_t send_id; + VAPI_mr_hndl_t rep_mr_hndl; + VAPI_mrw_t rep_mr; + int recv_id; + char *bufptr = NULL; + + // search MRbuf_list for an available entry that + // has registered data buffer with size equal to rdma_info->buf_lenght + + CDEBUG(D_NET, "CTS_handshaking_protocol\n"); + + // register memory buffer for RDAM operation + + vstat = createMemRegion_RDMA(Hca_hndl, + Pd_hndl, + bufptr, + rdma_info->buf_length, + &rep_mr_hndl, + &rep_mr); + + + Local_rdma_info.opcode = Clear_To_send; + Local_rdma_info.recv_rdma_mr = rep_mr; + Local_rdma_info.recv_rdma_mr_hndl = rep_mr_hndl; + + if (vstat != VAPI_OK) { + CERROR("CST_handshaking_protocol: Failed registering a mem region" + "Len=%d. %s\n", rdma_info->buf_length, VAPI_strerror(vstat)); + Local_rdma_info.flag = RDMA_BUFFER_UNAVAILABLE; + } + else { + // successfully allcate reserved RDAM data buffer + recv_id = insert_MRbuf_list(rdma_info->buf_length); + + if(recv_id >= NUM_ENTRY) { + MRbuf_list[recv_id].buf_addr = rep_mr.start; + MRbuf_list[recv_id].mr = rep_mr; + MRbuf_list[recv_id].mr_hndl = rep_mr_hndl; + MRbuf_list[recv_id].ref_count = 0; + Local_rdma_info.flag = RDMA_BUFFER_RESERVED; + Local_rdma_info.buf_length = rdma_info->buf_length; + Local_rdma_info.raddr = rep_mr.start; + Local_rdma_info.rkey = rep_mr.r_key; + } + else { + CERROR("Can not find an entry in MRbuf_list - how could this happen\n"); + } + } + + // find a suitable/registered send_buf from MSbuf_list + send_id = search_send_buf(sizeof(RDMA_Info_Exchange)); + CDEBUG(D_NET, "CTS: current send id %d \n", send_id); + sr_desc.opcode = VAPI_SEND; + sr_desc.comp_type = VAPI_SIGNALED; + sr_desc.id = send_id + RDMA_CTS_ID; // this CST message ID + + // scatter and gather info + sr_sg.len = sizeof(RDMA_Info_Exchange); + sr_sg.lkey = MSbuf_list[send_id].mr.l_key; // use send MR + sr_sg.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MSbuf_list[send_id].buf_addr; + + // copy data to register send buffer + memcpy(&sr_sg.addr, &Local_rdma_info, sizeof(RDMA_Info_Exchange)); + + sr_desc.sg_lst_p = &sr_sg; + sr_desc.sg_lst_len = 1; // only 1 entry is used + sr_desc.fence = TRUE; + sr_desc.set_se = FALSE; + + // call VAPI_post_sr to send out this RTS message data + vstat = VAPI_post_sr(qp->hca_hndl, qp->qp_hndl, &sr_desc); + + if (vstat != VAPI_OK) { + CERROR("CTS: VAPI_post_sr failed (%s).\n",VAPI_strerror(vstat)); + } + + +} + + + +VAPI_ret_t Send_Large_Msg(char *buf, int buf_length) +{ + VAPI_ret_t vstat; + VAPI_sr_desc_t sr_desc; + VAPI_sg_lst_entry_t sr_sg; + QP_info *qp; + VAPI_mrw_t rep_mr; + VAPI_mr_hndl_t rep_mr_hndl; + int send_id; + VAPI_imm_data_t imm_data = 0XAAAA5555; + + + CDEBUG(D_NET, "Send_Large_Msg: Enter\n"); + + // register this large buf + // don't need to copy this buf to send buffer + vstat = createMemRegion_RDMA(Hca_hndl, + Pd_hndl, + buf, + buf_length, + &rep_mr_hndl, + &rep_mr); + + if (vstat != VAPI_OK) { + 
CERROR("Send_Large_M\sg: createMemRegion_RDMAi() failed (%s).\n", + VAPI_strerror(vstat)); + } + + + Local_rdma_info.send_rdma_mr = rep_mr; + Local_rdma_info.send_rdma_mr_hndl = rep_mr_hndl; + + // + // Prepare descriptor for send queue + // + + // ask for a remote rdma buffer with size buf_lenght + send_id = RTS_handshaking_protocol(buf_length); + + qp = &QP_list[send_id]; + + // wait for CTS message receiving from remote node + while(1){ + if(YES == Cts_Message_arrived) { + // receive CST message from remote node + // Rdma_info is available for use + break; + } + schedule_timeout(RTS_CTS_TIMEOUT); + } + + sr_desc.id = send_id + RDMA_OP_ID; + sr_desc.opcode = VAPI_RDMA_WRITE_WITH_IMM; + sr_desc.comp_type = VAPI_SIGNALED; + + // scatter and gather info + sr_sg.len = buf_length; + + // rdma mr + sr_sg.lkey = rep_mr.l_key; + sr_sg.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) rep_mr.start; + sr_desc.sg_lst_p = &sr_sg; + sr_desc.sg_lst_len = 1; // only 1 entry is used + + // immediate data - not used here + sr_desc.imm_data = imm_data; + sr_desc.fence = TRUE; + sr_desc.set_se = FALSE; + + // RDAM operation only + // raddr and rkey is receiving from remote node + sr_desc.remote_addr = Rdma_info.raddr; + sr_desc.r_key = Rdma_info.rkey; + + // call VAPI_post_sr to send out this data + vstat = VAPI_post_sr(qp->hca_hndl, qp->qp_hndl, &sr_desc); + + if (vstat != VAPI_OK) { + CERROR("VAPI_post_sr failed (%s).\n",VAPI_strerror_sym(vstat)); + } + +} + + + + + + +// +// repost_recv_buf +// post a used recv buffer back to recv WQE list +// wrq_id is used to indicate the starting position of recv-buffer +// +VAPI_ret_t +repost_recv_buf(QP_info *qp, + VAPI_wr_id_t wrq_id) +{ + VAPI_rr_desc_t rr; + VAPI_sg_lst_entry_t sg_entry; + VAPI_ret_t ret; + + CDEBUG(D_NET, "repost_recv_buf\n"); + + sg_entry.lkey = MRbuf_list[wrq_id].mr.l_key; + sg_entry.len = MRbuf_list[wrq_id].buf_size; + sg_entry.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MRbuf_list[wrq_id].buf_addr; + rr.opcode = VAPI_RECEIVE; + rr.comp_type = VAPI_SIGNALED; /* All with CQE (IB compliant) */ + rr.sg_lst_len = 1; /* single buffers */ + rr.sg_lst_p = &sg_entry; + rr.id = wrq_id; /* WQE id used is the index to buffers ptr array */ + + ret= VAPI_post_rr(qp->hca_hndl,qp->qp_hndl,&rr); + + if (ret != VAPI_OK){ + CERROR("failed reposting RQ WQE (%s) buffer \n",VAPI_strerror_sym(ret)); + return ret; + } + + CDEBUG(D_NET, "Successfully reposting an RQ WQE %d recv bufer \n", wrq_id); + + return ret ; +} + +// +// post_recv_bufs +// post "num_o_bufs" for receiving data +// each receiving buf (buffer starting address, size of buffer) +// each buffer is associated with an id +// +int +post_recv_bufs(VAPI_wr_id_t start_id) +{ + int i; + VAPI_rr_desc_t rr; + VAPI_sg_lst_entry_t sg_entry; + VAPI_ret_t ret; + + CDEBUG(D_NET, "post_recv_bufs\n"); + + for(i=0; i< NUM_ENTRY; i++) { + sg_entry.lkey = MRbuf_list[i].mr.l_key; + sg_entry.len = MRbuf_list[i].buf_size; + sg_entry.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MRbuf_list[i].buf_addr; + rr.opcode = VAPI_RECEIVE; + rr.comp_type = VAPI_SIGNALED; /* All with CQE (IB compliant) */ + rr.sg_lst_len = 1; /* single buffers */ + rr.sg_lst_p = &sg_entry; + rr.id = start_id+i; /* WQE id used is the index to buffers ptr array */ + + ret= VAPI_post_rr(QP_list[i].hca_hndl,QP_list[i].qp_hndl, &rr); + if (ret != VAPI_OK) { + CERROR("failed posting RQ WQE (%s)\n",VAPI_strerror_sym(ret)); + return i; + } + } + + return i; /* num of buffers posted */ +} + +int +post_RDMA_bufs(QP_info *qp, + void *buf_array, + unsigned int num_bufs, + 
unsigned int buf_size, + VAPI_wr_id_t start_id) +{ + + CDEBUG(D_NET, "post_RDMA_bufs \n"); + return YES; +} + + + +// +// LIB NAL +// assign function pointers to theirs corresponding entries +// + +nal_cb_t kibnal_lib = { + nal_data: &kibnal_data, /* NAL private data */ + cb_send: kibnal_send, + cb_send_pages: NULL, // not implemented + cb_recv: kibnal_recv, + cb_recv_pages: NULL, // not implemented + cb_read: kibnal_read, + cb_write: kibnal_write, + cb_callback: NULL, // not implemented + cb_malloc: kibnal_malloc, + cb_free: kibnal_free, + cb_map: NULL, // not implemented + cb_unmap: NULL, // not implemented + cb_map_pages: NULL, // not implemented + cb_unmap_pages: NULL, // not implemented + cb_printf: kibnal_printf, + cb_cli: kibnal_cli, + cb_sti: kibnal_sti, + cb_dist: kibnal_dist // no used at this moment +}; diff --git a/lnet/klnds/iblnd/ibnal_send_recv_self_testing.c b/lnet/klnds/iblnd/ibnal_send_recv_self_testing.c new file mode 100644 index 0000000..82defdb --- /dev/null +++ b/lnet/klnds/iblnd/ibnal_send_recv_self_testing.c @@ -0,0 +1,116 @@ +/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- + * vim:expandtab:shiftwidth=8:tabstop=8: + * * + * * Based on ksocknal, qswnal, and gmnal + * * + * * Copyright (C) 2003 LANL + * * Author: HB Chen + * * Los Alamos National Lab + * * + * * Portals is free software; you can redistribute it and/or + * * modify it under the terms of version 2 of the GNU General Public + * * License as published by the Free Software Foundation. + * * + * * Portals is distributed in the hope that it will be useful, + * * but WITHOUT ANY WARRANTY; without even the implied warranty of + * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * * GNU General Public License for more details. + * * + * * You should have received a copy of the GNU General Public License + * * along with Portals; if not, write to the Free Software + * * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * *
+ * */
+
+#include "ibnal.h"
+
+
+
+// standalone variant of the self test in ibnal.c
+VAPI_ret_t ibnal_send_recv_self_testing(void)
+{
+        VAPI_ret_t           vstat;
+        VAPI_sr_desc_t       sr_desc;
+        VAPI_sg_lst_entry_t  sr_sg;
+        QP_info             *qp;
+        VAPI_wr_id_t         send_id;
+        int                  buf_id;
+        char                 sbuf[KB_32];
+        char                 rbuf[KB_32];
+        int                  n;
+        int                  buf_length = KB_32;
+        VAPI_wc_desc_t       comp_desc;
+        int                  loop_count = 0;
+
+
+        printk("ibnal_send_recv_self_testing\n");
+
+        memset(sbuf, 'a', KB_32);
+        memset(rbuf, ' ', KB_32);
+
+        send_id = SEND_RECV_TEST_ID;
+        buf_id  = SEND_RECV_TEST_BUF_ID;
+
+        qp = &QP_list[buf_id];
+
+        sr_desc.id        = send_id;
+        sr_desc.opcode    = VAPI_SEND;
+        sr_desc.comp_type = VAPI_SIGNALED;
+
+        // scatter/gather info
+        sr_sg.len  = KB_32;
+        sr_sg.lkey = MSbuf_list[buf_id].mr.l_key;   // use send MR
+        sr_sg.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MSbuf_list[buf_id].buf_addr;
+
+        // copy data into the registered send buffer itself,
+        // not into the addr field of the sg entry
+        memcpy(MSbuf_list[buf_id].bufptr, sbuf, buf_length);
+
+        sr_desc.sg_lst_p   = &sr_sg;
+        sr_desc.sg_lst_len = 1;     // only 1 entry is used
+        sr_desc.fence      = TRUE;
+        sr_desc.set_se     = FALSE;
+
+
+        // call VAPI_post_sr to send out this data
+        vstat = VAPI_post_sr(qp->hca_hndl, qp->qp_hndl, &sr_desc);
+
+        if (vstat != VAPI_OK) {
+                printk("VAPI_post_sr failed (%s).\n", VAPI_strerror(vstat));
+        }
+        else {
+                printk("VAPI_post_sr success.\n");
+        }
+
+        // poll for completion
+
+        while(loop_count < 100) {
+                vstat = VAPI_poll_cq(qp->hca_hndl, qp->cq_hndl, &comp_desc);
+                if(vstat == VAPI_OK) {
+                        if(comp_desc.opcode == VAPI_CQE_SQ_SEND_DATA) {
+                                /* SEND completion */
+                                printk("received SQ completion\n");
+                        }
+                        else if(comp_desc.opcode == VAPI_CQE_RQ_SEND_DATA) {
+                                /* RECEIVE completion */
+                                printk("received RQ completion\n");
+                                memcpy(rbuf, MRbuf_list[buf_id].bufptr, KB_32);
+
+                                n = memcmp(sbuf, rbuf, KB_32);
+                                printk("compare sbuf and rbuf n = %d\n", n);
+                        }
+                        else {
+                                printk("unexpected completion opcode %d \n",
+                                       comp_desc.opcode);
+                        }
+                }
+
+                loop_count++;
+                schedule_timeout(500);
+        }
+
+        printk("end of ibnal_send_recv_self_testing\n");
+
+        return vstat;
+}
diff --git a/lnet/klnds/iblnd/uagent.c b/lnet/klnds/iblnd/uagent.c
new file mode 100644
index 0000000..d7e939a
--- /dev/null
+++ b/lnet/klnds/iblnd/uagent.c
@@ -0,0 +1,391 @@
+#include
+#include
+#include
+
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+// Infiniband VAPI/EVAPI header files Mellanox MT23108 VAPI
+#include
+#include
+#include
+#include
+
+// Remote HCA Info information
+typedef struct Remote_HCA_Info {
+        unsigned long   opcode;
+        unsigned long   length;
+        IB_lid_t        dlid[256];
+        VAPI_qp_num_t   rqp_num[256];
+        VAPI_rkey_t     rkey;         // for the remote RDMA request
+        unsigned long   vaddr1;       // virtual address, first 4 bytes
+        unsigned long   vaddr2;       // virtual address, second 4 bytes
+        u_int32_t       size;         // size of the RDMA memory buffer
+        char            dest_ip[256]; // destination server IP address
+} Remote_HCA_Info;
+
+#define SHARED_SEGMENT_SIZE 0x10000   // 64KB shared memory between U and K
+
+// some internal opcodes for IB operations used in IBNAL
+#define SEND_QP_INFO        0X00000001
+#define RECV_QP_INFO        0X00000010
+#define DEFAULT_SOCKET_PORT 11211
+#define LISTEN_QUEUE_SIZE   2048
+#define DEST_IP             "10.128.105.26"
+
+// server_thread
+//   + wait for an incoming connection from the remote node
+//   + receive the remote HCA's data
+//
+void *server_thread(void *vargp)
+{
+        Remote_HCA_Info *hca_data;
+        Remote_HCA_Info  hca_data_buffer;
+
+        int serverfd;
+        int infd;
+        struct hostent *hp;
+        struct sockaddr_in serveraddr;
+        struct sockaddr_in clientaddr;
+        int sin_size = sizeof(struct sockaddr_in);
+        int bytes_recv;
+        int i;
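+
+        // Wire-format note (see the receive/convert code below): the peer
+        // sends one Remote_HCA_Info struct in network byte order, so every
+        // field is converted on arrival - 16-bit LIDs with ntohs(); 32-bit
+        // QP numbers, rkey, the two vaddr words, and size with ntohl().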
+ + + hca_data = (Remote_HCA_Info *) vargp; + + if((serverfd = socket(AF_INET, SOCK_STREAM, 0)) < 0) { + printf("server_thread couldnot create a socket \n"); + pthread_exit((void *) 0); + } + + printf("server_thread create a socket \n"); + + bzero((char *) &serveraddr, sizeof(serveraddr)); + + serveraddr.sin_family = AF_INET; + serveraddr.sin_addr.s_addr = htons(INADDR_ANY); + serveraddr.sin_port = htons((unsigned short) DEFAULT_SOCKET_PORT); + + if(bind(serverfd,(struct sockaddr *)&serveraddr,sizeof(struct sockaddr)) < 0) { + printf("server_thread couldnot bind to a socket \n"); + pthread_exit((void *) 0); + } + + printf("server_thread bind to a socket \n"); + + if(listen(serverfd, LISTEN_QUEUE_SIZE) < 0) { + printf("server_thread couldnot listen to a socket \n"); + pthread_exit((void *) 0); + } + + printf("server_thread listen to a socket \n"); + + // + // I only expect to receive one HCA data from a remote HCA + // + printf("server_thread: Waiting for a connection\n"); + infd= accept(serverfd,(struct sockaddr*)&clientaddr,&sin_size); + printf("server_thread: Got an incoming connection"); + + /* receive data from socket into buffer */ + bytes_recv = recv(infd, + &hca_data_buffer, + sizeof(Remote_HCA_Info), + 0); + + if(bytes_recv > 0) { +/* + printf("server_thread receive data\n"); + printf("opcode is 0x%X\n", hca_data_buffer.opcode); + printf("length is 0x%X\n", hca_data_buffer.length); + + for(i=0; i < 256; i++) { + printf("dlid %d is 0x%X\n", i, hca_data_buffer.dlid[i]); + printf("rqp_num %d is 0x%X\n", hca_data_buffer.rqp_num[i]); + } + + printf("rkey is 0x%X\n", hca_data_buffer.rkey); + printf("vaddr1 is 0x%X\n", hca_data_buffer.vaddr1); + printf("vaddr2 is 0x%X\n", hca_data_buffer.vaddr2); + printf("size is 0x%X\n", hca_data_buffer.size); + printf("After conversion hton \n"); + printf("opcode is 0x%X\n", htonl(hca_data_buffer.opcode)); + printf("length is 0x%X\n", htonl(hca_data_buffer.length)); + + for(i=0; i < 256; i++) { + printf("dlid %d is 0x%X\n", htons(hca_data_buffer.dlid[i])); + printf("rqp_num %d is 0x%X\n", htonl(hca_data_buffer.rqp_num[i])); + } + + printf("rkey is 0x%X\n", htonl(hca_data_buffer.rkey)); + printf("vaddr1 is 0x%X\n", htonl(hca_data_buffer.vaddr1)); + printf("vaddr2 is 0x%X\n", htonl(hca_data_buffer.vaddr2)); + printf("size is 0x%X\n", htonl(hca_data_buffer.size)); +*/ + + hca_data->opcode = ntohl(hca_data_buffer.opcode); // long + hca_data->length = ntohl(hca_data_buffer.length); // long + + for(i=0; i < 256; i++) { + hca_data->dlid[i] = ntohs(hca_data_buffer.dlid[i]); // u_int16 + hca_data->rqp_num[i] = ntohl(hca_data_buffer.rqp_num[i]);// u_int32 + } + + hca_data->rkey = ntohl(hca_data_buffer.rkey); // u_int32 + hca_data->vaddr1 = ntohl(hca_data_buffer.vaddr1); // first word u_int32 + hca_data->vaddr2 = ntohl(hca_data_buffer.vaddr2); // second word u_int32 + hca_data->size = ntohl(hca_data_buffer.size); // u_int32 + } + else { + printf("server_thread receive ERROR bytes_recv = %d\n", bytes_recv); + } + + close(infd); + close(serverfd); + + printf("server_thread EXIT \n"); + + pthread_exit((void *) 0); + +} + +// +// client_thread +// + connect to a remote server_thread +// + send local HCA's data to remote server_thread +// +void *client_thread(void *vargp) +{ + + Remote_HCA_Info *hca_data; + Remote_HCA_Info hca_data_buffer; + + int clientfd; + struct hostent *hp; + struct sockaddr_in clientaddr; + int bytes_send; + int i; + + hca_data = (Remote_HCA_Info *) vargp; + + if((clientfd = socket(AF_INET, SOCK_STREAM, 0)) < 0) { + printf("client_thread 
+
+// client_thread
+//   + connect to a remote server_thread
+//   + send local HCA's data to remote server_thread
+//
+void *client_thread(void *vargp)
+{
+        Remote_HCA_Info   *hca_data;
+        Remote_HCA_Info   hca_data_buffer;
+
+        int    clientfd;
+        struct hostent *hp;
+        struct sockaddr_in clientaddr;
+        int    bytes_send;
+        int    i;
+
+        hca_data = (Remote_HCA_Info *) vargp;
+
+        if((clientfd = socket(AF_INET, SOCK_STREAM, 0)) < 0) {
+                printf("client_thread could not create a socket \n");
+                pthread_exit((void *) 0);
+        }
+
+        printf("client_thread create a socket \n");
+
+        bzero((char *) &clientaddr, sizeof(clientaddr));
+
+        clientaddr.sin_family      = AF_INET;
+        clientaddr.sin_addr.s_addr = inet_addr(hca_data->dest_ip);
+        printf("client_thread get server IP address = %s\n", hca_data->dest_ip);
+        clientaddr.sin_port        = htons((unsigned short) DEFAULT_SOCKET_PORT);
+        memset(&(clientaddr.sin_zero), '\0', 8);
+
+        if(connect(clientfd, (struct sockaddr *) &clientaddr, sizeof(struct sockaddr)) < 0) {
+                printf("client_thread could not connect to server IP address = %s\n",
+                       hca_data->dest_ip);
+                close(clientfd);
+                pthread_exit((void *) 0);
+        }
+
+        printf("client_thread connect to server IP address = %s\n", hca_data->dest_ip);
+
+        hca_data_buffer.opcode = htonl(hca_data->opcode); // long
+        hca_data_buffer.length = htonl(hca_data->length); // long
+
+        for(i=0; i < 256; i++) {
+                hca_data_buffer.dlid[i]    = htons(hca_data->dlid[i]);   // u_int16
+                hca_data_buffer.rqp_num[i] = htonl(hca_data->rqp_num[i]);// u_int32
+        }
+
+        hca_data_buffer.rkey   = htonl(hca_data->rkey);   // u_int32
+        hca_data_buffer.vaddr1 = htonl(hca_data->vaddr1); // first word u_int32
+        hca_data_buffer.vaddr2 = htonl(hca_data->vaddr2); // second word u_int32
+        hca_data_buffer.size   = htonl(hca_data->size);   // u_int32
+
+        bytes_send = send(clientfd, &hca_data_buffer, sizeof(Remote_HCA_Info), 0);
+
+        if(bytes_send == sizeof(Remote_HCA_Info)) {
+                printf("client_thread: send successfully \n");
+        }
+        else {
+                printf("client_thread: send failed \n");
+        }
+
+        close(clientfd);
+
+        printf("client_thread EXIT \n");
+
+        pthread_exit((void *) 0);
+}
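Note: main() below rendezvouses with the ibnal kernel module through a System V shared-memory segment (key 999) and hands ownership back and forth through the opcode field: the kernel side posts its local QP data under SEND_QP_INFO, and uagent writes the peer's data back under RECV_QP_INFO. The calls involved are the standard SysV shared-memory API; a condensed sketch of the sequence, with error handling trimmed:

        int id = shmget((key_t) 999, sizeof(Remote_HCA_Info), IPC_CREAT | 0666);
        Remote_HCA_Info *shm = (Remote_HCA_Info *) shmat(id, NULL, 0);

        if (id < 0 || shm == (void *) -1)
                exit(1);                        /* shmget/shmat failed */

        while (shm->opcode != SEND_QP_INFO)     /* wait for the kernel side */
                usleep(1000);
        /* ... exchange with the peer over TCP, then publish the result ... */
        shm->opcode = RECV_QP_INFO;
        shmdt(shm);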
+
+// main
+//   + create a shared-memory between this main()/user address and
+//     a kernel thread/kernel address space associated with the ibnal
+//     kernel module
+//   + access local HCA's data through this shared memory
+//
+//   + create a server_thread for receiving remote HCA's data
+//   + create a client_thread for sending out local HCA's data
+//   + after receiving remote HCA's data update this shared memory
+//
+int main(int argc , char *argv[])
+{
+        int        segment_id;
+        struct     shmid_ds shmbuffer;
+        int        segment_size;
+        const int  shared_segment_size = sizeof(Remote_HCA_Info);
+        key_t      key = 999;
+        unsigned long raddr;
+        Remote_HCA_Info  *shared_memory;
+        Remote_HCA_Info  exchange_hca_data;
+        Remote_HCA_Info  remote_hca_data;
+        int        i;
+
+        /* pthread */
+        pthread_t  sid;
+        pthread_t  cid;
+        pthread_attr_t attr;
+        int        rc, status;
+
+        char dest_ip[256];
+
+        if(argc != 2) {
+                printf("USAGE: uagent server_ip_address\n");
+                printf("argc = %d \n", argc);
+                exit(1);
+        }
+
+        strcpy(&exchange_hca_data.dest_ip[0], argv[1]);
+        printf("the destination server IP address = %s\n",
+               exchange_hca_data.dest_ip);
+
+        segment_id = shmget(key, shared_segment_size, IPC_CREAT | 0666);
+
+        printf("sys_shmget is done segment_id = %d\n", segment_id);
+
+        shared_memory = (Remote_HCA_Info *) shmat(segment_id, 0, 0);
+
+        if(shared_memory == (void *) -1) {
+                printf("Shared memory attach failed shared_memory=%p\n",shared_memory);
+                exit(0);
+        }
+
+        printf("shared memory attached at address %p\n", shared_memory);
+
+        while (1) {
+                if(shared_memory->opcode == SEND_QP_INFO) {
+                        printf("Local HCA data received from kernel thread\n");
+                        break;
+                }
+                usleep(1000);
+        }
+
+        // save local HCA's data in exchange_hca_data
+        //
+        exchange_hca_data.opcode = shared_memory->opcode;
+        exchange_hca_data.length = shared_memory->length;
+
+        for(i=0; i < 256; i++) {
+                exchange_hca_data.dlid[i]    = shared_memory->dlid[i];
+                exchange_hca_data.rqp_num[i] = shared_memory->rqp_num[i];
+        }
+
+        exchange_hca_data.rkey   = shared_memory->rkey;
+        exchange_hca_data.vaddr1 = shared_memory->vaddr1;
+        exchange_hca_data.vaddr2 = shared_memory->vaddr2;
+        exchange_hca_data.size   = shared_memory->size;
+
+        /* Initialize and set thread detached attribute */
+        pthread_attr_init(&attr);
+        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+
+        /* create a server thread for processing incoming remote node socket data */
+        pthread_create(&sid,
+                       &attr,
+                       server_thread,
+                       (Remote_HCA_Info *) &remote_hca_data);
+
+        printf("Main: created a server thread \n");
+
+        sleep(10);
+
+        /* create a client thread to send out local HCA data to remote node */
+        pthread_create(&cid,
+                       &attr,
+                       client_thread,
+                       (Remote_HCA_Info *) &exchange_hca_data);
+
+        printf("Main: created a client thread \n");
+
+        /* synchronization between server_thread and client_thread */
+        pthread_attr_destroy(&attr);
+
+        rc = pthread_join(sid, (void **) &status);
+        if(rc) {
+                printf("Error: return code from pthread_join() is %d\n", rc);
+                exit(-1);
+        }
+
+        printf("completed join with server thread, status = %d\n", status);
+
+        rc = pthread_join(cid, (void **) &status);
+        if(rc) {
+                printf("Error: return code from pthread_join() is %d\n", rc);
+                exit(-1);
+        }
+        printf("completed join with client thread, status = %d\n", status);
+
+        // update shared memory with remote HCA's data
+        shared_memory->opcode = RECV_QP_INFO;
+        shared_memory->length = remote_hca_data.length;
+        for(i=0; i < 256; i++) {
+                shared_memory->dlid[i]    = remote_hca_data.dlid[i];
+                shared_memory->rqp_num[i] = remote_hca_data.rqp_num[i];
+        }
+        shared_memory->rkey   = remote_hca_data.rkey;
+        shared_memory->vaddr1 = remote_hca_data.vaddr1;
+        shared_memory->vaddr2 = remote_hca_data.vaddr2;
+        shared_memory->size   = remote_hca_data.size;
+
+        sleep(5);
+
+        // post the same data again after a short delay
+        shared_memory->opcode = RECV_QP_INFO;
+        shared_memory->length = remote_hca_data.length;
+        for(i=0; i < 256; i++) {
+                shared_memory->dlid[i]    = remote_hca_data.dlid[i];
+                shared_memory->rqp_num[i] = remote_hca_data.rqp_num[i];
+        }
+
+        shared_memory->rkey   = remote_hca_data.rkey;
+        shared_memory->vaddr1 = remote_hca_data.vaddr1;
+        shared_memory->vaddr2 = remote_hca_data.vaddr2;
+        shared_memory->size   = remote_hca_data.size;
+
+        sleep(10);
+
+//      shmdt(shared_memory);
+
+        printf("uagent is DONE \n");
+
+        exit(0);
+}
+
diff --git a/lnet/klnds/qswlnd/qswlnd.c b/lnet/klnds/qswlnd/qswlnd.c
index 4472e30..9caf381 100644
--- a/lnet/klnds/qswlnd/qswlnd.c
+++ b/lnet/klnds/qswlnd/qswlnd.c
@@ -131,7 +131,7 @@ kqswnal_get_tx_desc (struct portals_cfg *pcfg)
                 pcfg->pcfg_pbuf1 = (char *)ktx;
                 pcfg->pcfg_count = NTOH__u32(ktx->ktx_wire_hdr->type);
-                pcfg->pcfg_size  = NTOH__u32(PTL_HDR_LENGTH(ktx->ktx_wire_hdr));
+                pcfg->pcfg_size  = NTOH__u32(ktx->ktx_wire_hdr->payload_length);
                 pcfg->pcfg_nid   = NTOH__u64(ktx->ktx_wire_hdr->dest_nid);
                 pcfg->pcfg_nid2  = ktx->ktx_nid;
                 pcfg->pcfg_misc  = ktx->ktx_launcher;
diff --git a/lnet/klnds/qswlnd/qswlnd_cb.c b/lnet/klnds/qswlnd/qswlnd_cb.c
index 006ea49..43926c9 100644
--- a/lnet/klnds/qswlnd/qswlnd_cb.c
+++ b/lnet/klnds/qswlnd/qswlnd_cb.c
@@ -542,8 +542,9 @@ kqswnal_cerror_hdr(ptl_hdr_t * hdr)
 {
         char *type_str = hdr_type_string (hdr);
 
-        CERROR("P3 Header at %p of type %s\n", hdr, type_str);
-        CERROR("    From nid/pid "LPU64"/%u", NTOH__u64(hdr->src_nid),
+        CERROR("P3 Header at %p of type %s length %d\n", hdr, type_str,
+               NTOH__u32(hdr->payload_length));
+        CERROR("    From nid/pid "LPU64"/%u\n", NTOH__u64(hdr->src_nid),
                NTOH__u32(hdr->src_pid));
         CERROR("    To nid/pid "LPU64"/%u\n", NTOH__u64(hdr->dest_nid),
                NTOH__u32(hdr->dest_pid));
@@
-556,8 +557,7 @@ kqswnal_cerror_hdr(ptl_hdr_t * hdr) hdr->msg.put.ack_wmd.wh_interface_cookie, hdr->msg.put.ack_wmd.wh_object_cookie, NTOH__u64 (hdr->msg.put.match_bits)); - CERROR(" Length %d, offset %d, hdr data "LPX64"\n", - NTOH__u32(PTL_HDR_LENGTH(hdr)), + CERROR(" offset %d, hdr data "LPX64"\n", NTOH__u32(hdr->msg.put.offset), hdr->msg.put.hdr_data); break; @@ -582,10 +582,9 @@ kqswnal_cerror_hdr(ptl_hdr_t * hdr) break; case PTL_MSG_REPLY: - CERROR(" dst md "LPX64"."LPX64", length %d\n", + CERROR(" dst md "LPX64"."LPX64"\n", hdr->msg.reply.dst_wmd.wh_interface_cookie, - hdr->msg.reply.dst_wmd.wh_object_cookie, - NTOH__u32 (PTL_HDR_LENGTH(hdr))); + hdr->msg.reply.dst_wmd.wh_object_cookie); } } /* end of print_hdr() */ diff --git a/lnet/klnds/scimaclnd/scimacnal.c b/lnet/klnds/scimaclnd/scimacnal.c index f3fe617..5ffba31 100644 --- a/lnet/klnds/scimaclnd/scimacnal.c +++ b/lnet/klnds/scimaclnd/scimacnal.c @@ -123,7 +123,7 @@ static nal_t *kscimacnal_init(int interface, ptl_pt_index_t ptl_size, { int nnids = 512; /* FIXME: Need ScaMac funktion to get #nodes */ - CDEBUG(D_NET, "calling lib_init with nid 0x%Lx nnids %d\n", kscimacnal_data.ksci_nid, nnids); + CDEBUG(D_NET, "calling lib_init with nid "LPX64" nnids %d\n", kscimacnal_data.ksci_nid, nnids); lib_init(&kscimacnal_lib, kscimacnal_data.ksci_nid, 0, nnids,ptl_size, ac_size); return &kscimacnal_api; } diff --git a/lnet/klnds/scimaclnd/scimacnal_cb.c b/lnet/klnds/scimaclnd/scimacnal_cb.c index 7d5796e..b31c2ea 100644 --- a/lnet/klnds/scimaclnd/scimacnal_cb.c +++ b/lnet/klnds/scimaclnd/scimacnal_cb.c @@ -208,7 +208,7 @@ kscimacnal_sendmsg(nal_cb_t *nal, unsigned long physaddr; - CDEBUG(D_NET, "sending %d bytes from %p/%p to nid 0x%Lx niov: %d\n", + CDEBUG(D_NET, "sending %d bytes from %p/%p to nid "LPX64" niov: %d\n", payload_len, payload_iov, payload_kiov, nid, payload_niov); /* Basic sanity checks */ diff --git a/lnet/klnds/socklnd/socklnd.c b/lnet/klnds/socklnd/socklnd.c index 3d0c758..da47785 100644 --- a/lnet/klnds/socklnd/socklnd.c +++ b/lnet/klnds/socklnd/socklnd.c @@ -45,6 +45,8 @@ kpr_nal_interface_t ksocknal_router_interface = { #define SOCKNAL_SYSCTL_TIMEOUT 1 #define SOCKNAL_SYSCTL_EAGER_ACK 2 #define SOCKNAL_SYSCTL_ZERO_COPY 3 +#define SOCKNAL_SYSCTL_TYPED 4 +#define SOCKNAL_SYSCTL_MIN_BULK 5 static ctl_table ksocknal_ctl_table[] = { {SOCKNAL_SYSCTL_TIMEOUT, "timeout", @@ -58,6 +60,12 @@ static ctl_table ksocknal_ctl_table[] = { &ksocknal_data.ksnd_zc_min_frag, sizeof (int), 0644, NULL, &proc_dointvec}, #endif + {SOCKNAL_SYSCTL_TYPED, "typed", + &ksocknal_data.ksnd_typed_conns, sizeof (int), + 0644, NULL, &proc_dointvec}, + {SOCKNAL_SYSCTL_MIN_BULK, "min_bulk", + &ksocknal_data.ksnd_min_bulk, sizeof (int), + 0644, NULL, &proc_dointvec}, { 0 } }; @@ -86,7 +94,7 @@ ksocknal_api_shutdown(nal_t *nal, int ni) CDEBUG (D_NET, "closing all connections\n"); ksocknal_del_route (PTL_NID_ANY, 0, 0, 0); - ksocknal_close_conn (PTL_NID_ANY, 0); + ksocknal_close_matching_conns (PTL_NID_ANY, 0); return PTL_OK; } @@ -198,7 +206,7 @@ ksocknal_bind_irq (unsigned int irq) ksock_route_t * ksocknal_create_route (__u32 ipaddr, int port, int buffer_size, - int nonagel, int xchange_nids, int irq_affinity, int eager) + int nonagel, int irq_affinity, int eager) { ksock_route_t *route; @@ -215,13 +223,12 @@ ksocknal_create_route (__u32 ipaddr, int port, int buffer_size, route->ksnr_port = port; route->ksnr_buffer_size = buffer_size; route->ksnr_irq_affinity = irq_affinity; - route->ksnr_xchange_nids = xchange_nids; route->ksnr_nonagel = nonagel; 
route->ksnr_eager = eager; route->ksnr_connecting = 0; + route->ksnr_connected = 0; route->ksnr_deleted = 0; - route->ksnr_generation = 0; - route->ksnr_conn = NULL; + route->ksnr_conn_count = 0; return (route); } @@ -230,7 +237,6 @@ void ksocknal_destroy_route (ksock_route_t *route) { LASSERT (route->ksnr_sharecount == 0); - LASSERT (route->ksnr_conn == NULL); if (route->ksnr_peer != NULL) ksocknal_put_peer (route->ksnr_peer); @@ -397,8 +403,7 @@ ksocknal_get_route_by_idx (int index) int ksocknal_add_route (ptl_nid_t nid, __u32 ipaddr, int port, int bufnob, - int nonagle, int xchange_nids, int bind_irq, - int share, int eager) + int nonagle, int bind_irq, int share, int eager) { unsigned long flags; ksock_peer_t *peer; @@ -415,8 +420,8 @@ ksocknal_add_route (ptl_nid_t nid, __u32 ipaddr, int port, int bufnob, if (peer == NULL) return (-ENOMEM); - route = ksocknal_create_route (ipaddr, port, bufnob, nonagle, - xchange_nids, bind_irq, eager); + route = ksocknal_create_route (ipaddr, port, bufnob, + nonagle, bind_irq, eager); if (route == NULL) { ksocknal_put_peer (peer); return (-ENOMEM); @@ -455,7 +460,7 @@ ksocknal_add_route (ptl_nid_t nid, __u32 ipaddr, int port, int bufnob, route->ksnr_peer = peer; atomic_inc (&peer->ksnp_refcount); /* peer's route list takes existing ref on route */ - list_add (&route->ksnr_list, &peer->ksnp_routes); + list_add_tail (&route->ksnr_list, &peer->ksnp_routes); } route->ksnr_sharecount++; @@ -468,8 +473,10 @@ ksocknal_add_route (ptl_nid_t nid, __u32 ipaddr, int port, int bufnob, void ksocknal_del_route_locked (ksock_route_t *route, int share, int keep_conn) { - ksock_peer_t *peer = route->ksnr_peer; - ksock_conn_t *conn = route->ksnr_conn; + ksock_peer_t *peer = route->ksnr_peer; + ksock_conn_t *conn; + struct list_head *ctmp; + struct list_head *cnxt; if (!share) route->ksnr_sharecount = 0; @@ -479,18 +486,22 @@ ksocknal_del_route_locked (ksock_route_t *route, int share, int keep_conn) return; } - if (conn != NULL) { - if (!keep_conn) + list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) { + conn = list_entry(ctmp, ksock_conn_t, ksnc_list); + + if (conn->ksnc_route != route) + continue; + + if (!keep_conn) { ksocknal_close_conn_locked (conn, 0); - else { - /* keeping the conn; just dissociate it and route... */ - conn->ksnc_route = NULL; - route->ksnr_conn = NULL; - ksocknal_put_route (route); /* drop conn's ref on route */ - ksocknal_put_conn (conn); /* drop route's ref on conn */ + continue; } + + /* keeping the conn; just dissociate it and route... 
*/ + conn->ksnc_route = NULL; + ksocknal_put_route (route); /* drop conn's ref on route */ } - + route->ksnr_deleted = 1; list_del (&route->ksnr_list); ksocknal_put_route (route); /* drop peer's ref */ @@ -669,9 +680,11 @@ ksocknal_choose_scheduler_locked (unsigned int irq) } int -ksocknal_create_conn (ptl_nid_t nid, ksock_route_t *route, - struct socket *sock, int bind_irq) +ksocknal_create_conn (ksock_route_t *route, struct socket *sock, + int bind_irq, int type) { + ptl_nid_t nid; + __u64 incarnation; unsigned long flags; ksock_conn_t *conn; ksock_peer_t *peer; @@ -692,6 +705,19 @@ ksocknal_create_conn (ptl_nid_t nid, ksock_route_t *route, if (rc != 0) return (rc); + if (route == NULL) { + /* acceptor or explicit connect */ + nid = PTL_NID_ANY; + } else { + LASSERT (type != SOCKNAL_CONN_NONE); + /* autoconnect: expect this nid on exchange */ + nid = route->ksnr_peer->ksnp_nid; + } + + rc = ksocknal_hello (sock, &nid, &type, &incarnation); + if (rc != 0) + return (rc); + peer = NULL; if (route == NULL) { /* not autoconnect */ /* Assume this socket connects to a brand new peer */ @@ -711,6 +737,8 @@ ksocknal_create_conn (ptl_nid_t nid, ksock_route_t *route, conn->ksnc_peer = NULL; conn->ksnc_route = NULL; conn->ksnc_sock = sock; + conn->ksnc_type = type; + conn->ksnc_incarnation = incarnation; conn->ksnc_saved_data_ready = sock->sk->sk_data_ready; conn->ksnc_saved_write_space = sock->sk->sk_write_space; atomic_set (&conn->ksnc_refcount, 1); /* 1 ref for me */ @@ -732,7 +760,8 @@ ksocknal_create_conn (ptl_nid_t nid, ksock_route_t *route, if (route != NULL) { /* Autoconnected! */ - LASSERT (route->ksnr_conn == NULL && route->ksnr_connecting); + LASSERT ((route->ksnr_connected & (1 << type)) == 0); + LASSERT ((route->ksnr_connecting & (1 << type)) != 0); if (route->ksnr_deleted) { /* This conn was autoconnected, but the autoconnect @@ -745,14 +774,13 @@ ksocknal_create_conn (ptl_nid_t nid, ksock_route_t *route, } - /* associate conn/route for auto-reconnect */ - route->ksnr_conn = conn; - atomic_inc (&conn->ksnc_refcount); + /* associate conn/route */ conn->ksnc_route = route; atomic_inc (&route->ksnr_refcount); - route->ksnr_connecting = 0; - route->ksnr_generation++; + route->ksnr_connecting &= ~(1 << type); + route->ksnr_connected |= (1 << type); + route->ksnr_conn_count++; route->ksnr_retry_interval = SOCKNAL_MIN_RECONNECT_INTERVAL; peer = route->ksnr_peer; @@ -803,8 +831,13 @@ ksocknal_create_conn (ptl_nid_t nid, ksock_route_t *route, ksocknal_queue_tx_locked (tx, conn); } + rc = ksocknal_close_stale_conns_locked (peer, incarnation); + write_unlock_irqrestore (&ksocknal_data.ksnd_global_lock, flags); + if (rc != 0) + CERROR ("Closed %d stale conns to "LPX64"\n", rc, nid); + if (bind_irq) /* irq binding required */ ksocknal_bind_irq (irq); @@ -836,14 +869,17 @@ ksocknal_close_conn_locked (ksock_conn_t *conn, int error) route = conn->ksnc_route; if (route != NULL) { /* dissociate conn from route... 
*/ - LASSERT (!route->ksnr_connecting && - !route->ksnr_deleted); + LASSERT (!route->ksnr_deleted); + LASSERT ((route->ksnr_connecting & (1 << conn->ksnc_type)) == 0); + LASSERT ((route->ksnr_connected & (1 << conn->ksnc_type)) != 0); - route->ksnr_conn = NULL; + route->ksnr_connected &= ~(1 << conn->ksnc_type); conn->ksnc_route = NULL; + list_del (&route->ksnr_list); /* make route least favourite */ + list_add_tail (&route->ksnr_list, &peer->ksnp_routes); + ksocknal_put_route (route); /* drop conn's ref on route */ - ksocknal_put_conn (conn); /* drop route's ref on conn */ } /* ksnd_deathrow_conns takes over peer's ref */ @@ -869,24 +905,6 @@ ksocknal_close_conn_locked (ksock_conn_t *conn, int error) spin_unlock (&ksocknal_data.ksnd_reaper_lock); } -int -ksocknal_close_conn_unlocked (ksock_conn_t *conn, int why) -{ - unsigned long flags; - int did_it = 0; - - write_lock_irqsave (&ksocknal_data.ksnd_global_lock, flags); - - if (!conn->ksnc_closing) { - did_it = 1; - ksocknal_close_conn_locked (conn, why); - } - - write_unlock_irqrestore (&ksocknal_data.ksnd_global_lock, flags); - - return (did_it); -} - void ksocknal_terminate_conn (ksock_conn_t *conn) { @@ -958,9 +976,10 @@ ksocknal_destroy_conn (ksock_conn_t *conn) ksock_tx_t *tx = list_entry (conn->ksnc_tx_queue.next, ksock_tx_t, tx_list); - CERROR ("Deleting packet type %d len %d ("LPX64"->"LPX64")\n", + CERROR ("Deleting packet %p type %d len %d ("LPX64"->"LPX64")\n", + tx, NTOH__u32 (tx->tx_hdr->type), - NTOH__u32 (PTL_HDR_LENGTH(tx->tx_hdr)), + NTOH__u32 (tx->tx_hdr->payload_length), NTOH__u64 (tx->tx_hdr->src_nid), NTOH__u64 (tx->tx_hdr->dest_nid)); @@ -1012,19 +1031,75 @@ ksocknal_put_conn (ksock_conn_t *conn) } int -ksocknal_close_conn (ptl_nid_t nid, __u32 ipaddr) +ksocknal_close_peer_conns_locked (ksock_peer_t *peer, __u32 ipaddr, int why) +{ + ksock_conn_t *conn; + struct list_head *ctmp; + struct list_head *cnxt; + int count = 0; + + list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) { + conn = list_entry (ctmp, ksock_conn_t, ksnc_list); + + if (ipaddr == 0 || + conn->ksnc_ipaddr == ipaddr) { + count++; + ksocknal_close_conn_locked (conn, why); + } + } + + return (count); +} + +int +ksocknal_close_stale_conns_locked (ksock_peer_t *peer, __u64 incarnation) { - unsigned long flags; ksock_conn_t *conn; struct list_head *ctmp; struct list_head *cnxt; + int count = 0; + + list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) { + conn = list_entry (ctmp, ksock_conn_t, ksnc_list); + + if (conn->ksnc_incarnation == incarnation) + continue; + + count++; + ksocknal_close_conn_locked (conn, -ESTALE); + } + + return (count); +} + +int +ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why) +{ + ksock_peer_t *peer = conn->ksnc_peer; + __u32 ipaddr = conn->ksnc_ipaddr; + unsigned long flags; + int count; + + write_lock_irqsave (&ksocknal_data.ksnd_global_lock, flags); + + count = ksocknal_close_peer_conns_locked (peer, ipaddr, why); + + write_unlock_irqrestore (&ksocknal_data.ksnd_global_lock, flags); + + return (count); +} + +int +ksocknal_close_matching_conns (ptl_nid_t nid, __u32 ipaddr) +{ + unsigned long flags; ksock_peer_t *peer; struct list_head *ptmp; struct list_head *pnxt; int lo; int hi; int i; - int rc = -ENOENT; + int count = 0; write_lock_irqsave (&ksocknal_data.ksnd_global_lock, flags); @@ -1043,24 +1118,17 @@ ksocknal_close_conn (ptl_nid_t nid, __u32 ipaddr) if (!(nid == PTL_NID_ANY || nid == peer->ksnp_nid)) continue; - list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) { - - conn = list_entry (ctmp, ksock_conn_t, - 
ksnc_list); - - if (!(ipaddr == 0 || - conn->ksnc_ipaddr == ipaddr)) - continue; - - rc = 0; - ksocknal_close_conn_locked (conn, 0); - } + count += ksocknal_close_peer_conns_locked (peer, ipaddr, 0); } } write_unlock_irqrestore (&ksocknal_data.ksnd_global_lock, flags); - return (rc); + /* wildcards always succeed */ + if (nid == PTL_NID_ANY || ipaddr == 0) + return (0); + + return (count == 0 ? -ENOENT : 0); } void @@ -1073,7 +1141,7 @@ ksocknal_notify (void *arg, ptl_nid_t gw_nid, int alive) if (!alive) { /* If the gateway crashed, close all open connections... */ - ksocknal_close_conn (gw_nid, 0); + ksocknal_close_matching_conns (gw_nid, 0); return; } @@ -1233,13 +1301,12 @@ ksocknal_cmd(struct portals_cfg *pcfg, void * private) pcfg->pcfg_nid = route->ksnr_peer->ksnp_nid; pcfg->pcfg_id = route->ksnr_ipaddr; pcfg->pcfg_misc = route->ksnr_port; - pcfg->pcfg_count = route->ksnr_generation; + pcfg->pcfg_count = route->ksnr_conn_count; pcfg->pcfg_size = route->ksnr_buffer_size; pcfg->pcfg_wait = route->ksnr_sharecount; pcfg->pcfg_flags = (route->ksnr_nonagel ? 1 : 0) | - (route->ksnr_xchange_nids ? 2 : 0) | - (route->ksnr_irq_affinity ? 4 : 0) | - (route->ksnr_eager ? 8 : 0); + (route->ksnr_irq_affinity ? 2 : 0) | + (route->ksnr_eager ? 4 : 0); ksocknal_put_route (route); } break; @@ -1250,8 +1317,7 @@ ksocknal_cmd(struct portals_cfg *pcfg, void * private) (pcfg->pcfg_flags & 0x01) != 0, (pcfg->pcfg_flags & 0x02) != 0, (pcfg->pcfg_flags & 0x04) != 0, - (pcfg->pcfg_flags & 0x08) != 0, - (pcfg->pcfg_flags & 0x10) != 0); + (pcfg->pcfg_flags & 0x08) != 0); break; } case NAL_CMD_DEL_AUTOCONN: { @@ -1267,26 +1333,38 @@ ksocknal_cmd(struct portals_cfg *pcfg, void * private) rc = -ENOENT; else { rc = 0; - pcfg->pcfg_nid = conn->ksnc_peer->ksnp_nid; - pcfg->pcfg_id = conn->ksnc_ipaddr; - pcfg->pcfg_misc = conn->ksnc_port; + pcfg->pcfg_nid = conn->ksnc_peer->ksnp_nid; + pcfg->pcfg_id = conn->ksnc_ipaddr; + pcfg->pcfg_misc = conn->ksnc_port; + pcfg->pcfg_flags = conn->ksnc_type; ksocknal_put_conn (conn); } break; } case NAL_CMD_REGISTER_PEER_FD: { struct socket *sock = sockfd_lookup (pcfg->pcfg_fd, &rc); + int type = pcfg->pcfg_misc; - if (sock != NULL) { - rc = ksocknal_create_conn (pcfg->pcfg_nid, NULL, - sock, pcfg->pcfg_flags); - if (rc != 0) - fput (sock->file); + if (sock == NULL) + break; + + switch (type) { + case SOCKNAL_CONN_NONE: + case SOCKNAL_CONN_ANY: + case SOCKNAL_CONN_CONTROL: + case SOCKNAL_CONN_BULK_IN: + case SOCKNAL_CONN_BULK_OUT: + rc = ksocknal_create_conn(NULL, sock, pcfg->pcfg_flags, type); + default: + break; } + if (rc != 0) + fput (sock->file); break; } case NAL_CMD_CLOSE_CONNECTION: { - rc = ksocknal_close_conn (pcfg->pcfg_nid, pcfg->pcfg_id); + rc = ksocknal_close_matching_conns (pcfg->pcfg_nid, + pcfg->pcfg_id); break; } case NAL_CMD_REGISTER_MYNID: { @@ -1337,7 +1415,7 @@ ksocknal_free_buffers (void) ksocknal_data.ksnd_peer_hash_size); } -void /*__exit*/ +void ksocknal_module_fini (void) { int i; @@ -1421,6 +1499,22 @@ ksocknal_module_fini (void) } +void __init +ksocknal_init_incarnation (void) +{ + struct timeval tv; + + /* The incarnation number is the time this module loaded and it + * identifies this particular instance of the socknal. 
Hopefully
+ * we won't be able to reboot more frequently than 1MHz for the
+ * foreseeable future :) */
+
+        do_gettimeofday(&tv);
+
+        ksocknal_data.ksnd_incarnation =
+                (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
+}
+
 int __init
 ksocknal_module_init (void)
 {
@@ -1434,7 +1528,9 @@ ksocknal_module_init (void)
         /* the following must be sizeof(int) for proc_dointvec() */
         LASSERT(sizeof (ksocknal_data.ksnd_io_timeout) == sizeof (int));
         LASSERT(sizeof (ksocknal_data.ksnd_eager_ack) == sizeof (int));
-
+        /* check ksnr_connected/connecting field large enough */
+        LASSERT(SOCKNAL_CONN_NTYPES <= 4);
+
         LASSERT (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
 
         ksocknal_api.forward  = ksocknal_api_forward;
@@ -1451,10 +1547,13 @@ ksocknal_module_init (void)
 
         ksocknal_data.ksnd_io_timeout = SOCKNAL_IO_TIMEOUT;
         ksocknal_data.ksnd_eager_ack  = SOCKNAL_EAGER_ACK;
+        ksocknal_data.ksnd_typed_conns = SOCKNAL_TYPED_CONNS;
+        ksocknal_data.ksnd_min_bulk   = SOCKNAL_MIN_BULK;
 #if SOCKNAL_ZC
         ksocknal_data.ksnd_zc_min_frag = SOCKNAL_ZC_MIN_FRAG;
 #endif
-
+        ksocknal_init_incarnation();
+
         ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
         PORTAL_ALLOC (ksocknal_data.ksnd_peers,
                       sizeof (struct list_head) * ksocknal_data.ksnd_peer_hash_size);
diff --git a/lnet/klnds/socklnd/socklnd.h b/lnet/klnds/socklnd/socklnd.h
index 1c73ae8..8c906e2 100644
--- a/lnet/klnds/socklnd/socklnd.h
+++ b/lnet/klnds/socklnd/socklnd.h
@@ -25,7 +25,9 @@
  */
 
 #define DEBUG_PORTAL_ALLOC
-#define EXPORT_SYMTAB
+#ifndef EXPORT_SYMTAB
+# define EXPORT_SYMTAB
+#endif
 
 #include
 #include
@@ -58,6 +60,7 @@
 #include
 #include
 #include
+#include
 
 #if CONFIG_SMP
 # define SOCKNAL_N_SCHED num_online_cpus() /* # socknal schedulers */
@@ -71,9 +74,10 @@
 
 /* default vals for runtime tunables */
 #define SOCKNAL_IO_TIMEOUT     50              /* default comms timeout (seconds) */
-#define SOCKNAL_EAGER_ACK      1               /* default eager ack (boolean) */
+#define SOCKNAL_EAGER_ACK      0               /* default eager ack (boolean) */
+#define SOCKNAL_TYPED_CONNS    1               /* unidirectional large, bidirectional small? */
 #define SOCKNAL_ZC_MIN_FRAG    (2<<10)         /* default smallest zerocopy fragment */
-
+#define SOCKNAL_MIN_BULK       (1<<10)         /* smallest "large" message */
 #define SOCKNAL_USE_KEEPALIVES 0               /* use tcp/ip keepalive? */
 
 #define SOCKNAL_PEER_HASH_SIZE 101             /* # peer lists */
@@ -142,10 +146,13 @@ typedef struct {
         int               ksnd_init;           /* initialisation state */
         int               ksnd_io_timeout;     /* "stuck" socket timeout (seconds) */
         int               ksnd_eager_ack;      /* make TCP ack eagerly? */
+        int               ksnd_typed_conns;    /* drive sockets by type?
*/ + int ksnd_min_bulk; /* smallest "large" message */ #if SOCKNAL_ZC unsigned int ksnd_zc_min_frag; /* minimum zero copy frag size */ #endif struct ctl_table_header *ksnd_sysctl; /* sysctl interface */ + __u64 ksnd_incarnation; /* my epoch */ rwlock_t ksnd_global_lock; /* stabilize peer/conn ops */ struct list_head *ksnd_peers; /* hash table of all my known peers */ @@ -300,8 +307,10 @@ typedef struct ksock_conn __u32 ksnc_ipaddr; /* peer's IP */ int ksnc_port; /* peer's port */ int ksnc_closing; /* being shut down */ + int ksnc_type; /* type of connection */ + __u64 ksnc_incarnation; /* peer's incarnation */ - /* READER */ + /* reader */ struct list_head ksnc_rx_list; /* where I enq waiting input or a forwarding descriptor */ unsigned long ksnc_rx_deadline; /* when (in jiffies) receive times out */ int ksnc_rx_started; /* started receiving a message */ @@ -327,6 +336,10 @@ typedef struct ksock_conn int ksnc_tx_scheduled; /* being progressed */ } ksock_conn_t; +#define KSNR_TYPED_ROUTES ((1 << SOCKNAL_CONN_CONTROL) | \ + (1 << SOCKNAL_CONN_BULK_IN) | \ + (1 << SOCKNAL_CONN_BULK_OUT)) + typedef struct ksock_route { struct list_head ksnr_list; /* chain on peer route list */ @@ -340,13 +353,12 @@ typedef struct ksock_route int ksnr_port; /* port to connect to */ int ksnr_buffer_size; /* size of socket buffers */ unsigned int ksnr_irq_affinity:1; /* set affinity? */ - unsigned int ksnr_xchange_nids:1; /* do hello protocol? */ unsigned int ksnr_nonagel:1; /* disable nagle? */ unsigned int ksnr_eager:1; /* connect eagery? */ - unsigned int ksnr_connecting:1; /* autoconnect in progress? */ + unsigned int ksnr_connecting:4; /* autoconnects in progress by type */ + unsigned int ksnr_connected:4; /* connections established by type */ unsigned int ksnr_deleted:1; /* been removed from peer? 
*/ - int ksnr_generation; /* connection incarnation # */ - ksock_conn_t *ksnr_conn; /* NULL/active connection */ + int ksnr_conn_count; /* # conns established by this route */ } ksock_route_t; typedef struct ksock_peer @@ -401,14 +413,15 @@ extern ksock_peer_t *ksocknal_find_peer_locked (ptl_nid_t nid); extern ksock_peer_t *ksocknal_get_peer (ptl_nid_t nid); extern int ksocknal_del_route (ptl_nid_t nid, __u32 ipaddr, int single, int keep_conn); -extern int ksocknal_create_conn (ptl_nid_t nid, ksock_route_t *route, - struct socket *sock, int bind_irq); +extern int ksocknal_create_conn (ksock_route_t *route, + struct socket *sock, int bind_irq, int type); extern void ksocknal_close_conn_locked (ksock_conn_t *conn, int why); -extern int ksocknal_close_conn_unlocked (ksock_conn_t *conn, int why); extern void ksocknal_terminate_conn (ksock_conn_t *conn); extern void ksocknal_destroy_conn (ksock_conn_t *conn); extern void ksocknal_put_conn (ksock_conn_t *conn); -extern int ksocknal_close_conn (ptl_nid_t nid, __u32 ipaddr); +extern int ksocknal_close_stale_conns_locked (ksock_peer_t *peer, __u64 incarnation); +extern int ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why); +extern int ksocknal_close_matching_conns (ptl_nid_t nid, __u32 ipaddr); extern void ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn); extern void ksocknal_tx_done (ksock_tx_t *tx, int asynch); @@ -423,3 +436,5 @@ extern void ksocknal_write_space(struct sock *sk); extern int ksocknal_autoconnectd (void *arg); extern int ksocknal_reaper (void *arg); extern int ksocknal_setup_sock (struct socket *sock); +extern int ksocknal_hello (struct socket *sock, + ptl_nid_t *nid, int *type, __u64 *incarnation); diff --git a/lnet/klnds/socklnd/socklnd_cb.c b/lnet/klnds/socklnd/socklnd_cb.c index 8ce6777..346d60e 100644 --- a/lnet/klnds/socklnd/socklnd_cb.c +++ b/lnet/klnds/socklnd/socklnd_cb.c @@ -218,9 +218,9 @@ ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx) struct iovec *iov = tx->tx_iov; int fragsize = iov->iov_len; unsigned long vaddr = (unsigned long)iov->iov_base; - int more = (!list_empty (&conn->ksnc_tx_queue)) | - (tx->tx_niov > 1) | - (tx->tx_nkiov > 1); + int more = (tx->tx_niov > 1) || + (tx->tx_nkiov > 0) || + (!list_empty (&conn->ksnc_tx_queue)); #if SOCKNAL_ZC int offset = vaddr & (PAGE_SIZE - 1); int zcsize = MIN (fragsize, PAGE_SIZE - offset); @@ -266,7 +266,7 @@ ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx) .msg_flags = more ? (MSG_DONTWAIT | MSG_MORE) : MSG_DONTWAIT }; mm_segment_t oldmm = get_fs(); - + set_fs (KERNEL_DS); rc = sock_sendmsg(sock, &msg, fragsize); set_fs (oldmm); @@ -298,8 +298,8 @@ ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx) int fragsize = kiov->kiov_len; struct page *page = kiov->kiov_page; int offset = kiov->kiov_offset; - int more = (!list_empty (&conn->ksnc_tx_queue)) | - (tx->tx_nkiov > 1); + int more = (tx->tx_nkiov > 1) || + (!list_empty (&conn->ksnc_tx_queue)); int rc; /* NB we can't trust socket ops to either consume our iovs @@ -464,7 +464,7 @@ ksocknal_recv_iov (ksock_conn_t *conn) * or leave them alone, so we only receive 1 frag at a time. 
*/ LASSERT (conn->ksnc_rx_niov > 0); LASSERT (fragsize <= conn->ksnc_rx_nob_wanted); - + set_fs (KERNEL_DS); rc = sock_recvmsg (conn->ksnc_sock, &msg, fragsize, MSG_DONTWAIT); /* NB this is just a boolean............................^ */ @@ -521,7 +521,7 @@ ksocknal_recv_kiov (ksock_conn_t *conn) LASSERT (fragsize <= conn->ksnc_rx_nob_wanted); LASSERT (conn->ksnc_rx_nkiov > 0); LASSERT (offset + fragsize <= PAGE_SIZE); - + set_fs (KERNEL_DS); rc = sock_recvmsg (conn->ksnc_sock, &msg, fragsize, MSG_DONTWAIT); /* NB this is just a boolean............................^ */ @@ -597,7 +597,7 @@ ksocknal_recvmsg (ksock_conn_t *conn) if (conn->ksnc_rx_nob_wanted == 0) { /* Completed a message segment (header or payload) */ - if (ksocknal_data.ksnd_eager_ack && + if ((ksocknal_data.ksnd_eager_ack & conn->ksnc_type) != 0 && (conn->ksnc_rx_state == SOCKNAL_RX_BODY || conn->ksnc_rx_state == SOCKNAL_RX_BODY_FWD)) { /* Remind the socket to ack eagerly... */ @@ -721,12 +721,12 @@ ksocknal_process_transmit (ksock_sched_t *sched, unsigned long *irq_flags) CDEBUG (D_NET, "send(%d) %d\n", tx->tx_resid, rc); if (rc != 0) { - if (ksocknal_close_conn_unlocked (conn, rc)) { - /* I'm the first to close */ + if (!conn->ksnc_closing) CERROR ("[%p] Error %d on write to "LPX64" ip %08x:%d\n", conn, rc, conn->ksnc_peer->ksnp_nid, conn->ksnc_ipaddr, conn->ksnc_port); - } + ksocknal_close_conn_and_siblings (conn, rc); + ksocknal_tx_launched (tx); spin_lock_irqsave (&sched->kss_lock, *irq_flags); @@ -764,10 +764,17 @@ ksocknal_launch_autoconnect_locked (ksock_route_t *route) /* called holding write lock on ksnd_global_lock */ - LASSERT (route->ksnr_conn == NULL); - LASSERT (!route->ksnr_deleted && !route->ksnr_connecting); + LASSERT (!route->ksnr_deleted); + LASSERT ((route->ksnr_connected & (1 << SOCKNAL_CONN_ANY)) == 0); + LASSERT ((route->ksnr_connected & KSNR_TYPED_ROUTES) != KSNR_TYPED_ROUTES); + LASSERT (!route->ksnr_connecting); - route->ksnr_connecting = 1; + if (ksocknal_data.ksnd_typed_conns) + route->ksnr_connecting = + KSNR_TYPED_ROUTES & ~route->ksnr_connected; + else + route->ksnr_connecting = (1 << SOCKNAL_CONN_ANY); + atomic_inc (&route->ksnr_refcount); /* extra ref for asynchd */ spin_lock_irqsave (&ksocknal_data.ksnd_autoconnectd_lock, flags); @@ -814,21 +821,51 @@ ksock_conn_t * ksocknal_find_conn_locked (ksock_tx_t *tx, ksock_peer_t *peer) { struct list_head *tmp; - ksock_conn_t *conn = NULL; - + ksock_conn_t *typed = NULL; + int tnob = 0; + ksock_conn_t *fallback = NULL; + int fnob = 0; + /* Find the conn with the shortest tx queue */ list_for_each (tmp, &peer->ksnp_conns) { - ksock_conn_t *c = list_entry (tmp, ksock_conn_t, ksnc_list); + ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list); + int nob = atomic_read(&c->ksnc_tx_nob); LASSERT (!c->ksnc_closing); - - if (conn == NULL || - atomic_read (&conn->ksnc_tx_nob) > - atomic_read (&c->ksnc_tx_nob)) - conn = c; + + if (fallback == NULL || nob < fnob) { + fallback = c; + fnob = nob; + } + + if (!ksocknal_data.ksnd_typed_conns) + continue; + + switch (c->ksnc_type) { + default: + LBUG(); + case SOCKNAL_CONN_ANY: + break; + case SOCKNAL_CONN_BULK_IN: + continue; + case SOCKNAL_CONN_BULK_OUT: + if (tx->tx_nob < ksocknal_data.ksnd_min_bulk) + continue; + break; + case SOCKNAL_CONN_CONTROL: + if (tx->tx_nob >= ksocknal_data.ksnd_min_bulk) + continue; + break; + } + + if (typed == NULL || nob < tnob) { + typed = c; + tnob = nob; + } } - return (conn); + /* prefer the typed selection */ + return ((typed != NULL) ? 
typed : fallback); } void @@ -870,22 +907,46 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn) } ksock_route_t * -ksocknal_find_connectable_route_locked (ksock_peer_t *peer, int eager_only) +ksocknal_find_connectable_route_locked (ksock_peer_t *peer) { struct list_head *tmp; ksock_route_t *route; + ksock_route_t *candidate = NULL; + int found = 0; + int bits; list_for_each (tmp, &peer->ksnp_routes) { route = list_entry (tmp, ksock_route_t, ksnr_list); + bits = route->ksnr_connected; + + if ((bits & KSNR_TYPED_ROUTES) == KSNR_TYPED_ROUTES || + (bits & (1 << SOCKNAL_CONN_ANY)) != 0 || + route->ksnr_connecting != 0) { + /* All typed connections have been established, or + * an untyped connection has been established, or + * connections are currently being established */ + found = 1; + continue; + } + + /* too soon to retry this guy? */ + if (!time_after_eq (jiffies, route->ksnr_timeout)) + continue; - if (route->ksnr_conn == NULL && /* not connected */ - !route->ksnr_connecting && /* not connecting */ - (!eager_only || route->ksnr_eager) && /* wants to connect */ - time_after_eq (jiffies, route->ksnr_timeout)) /* OK to retry */ + /* always do eager routes */ + if (route->ksnr_eager) return (route); + + if (candidate == NULL) { + /* If we don't find any other route that is fully + * connected or connecting, the first connectable + * route is returned. If it fails to connect, it + * will get placed at the end of the list */ + candidate = route; + } } - - return (NULL); + + return (found ? NULL : candidate); } ksock_route_t * @@ -897,7 +958,7 @@ ksocknal_find_connecting_route_locked (ksock_peer_t *peer) list_for_each (tmp, &peer->ksnp_routes) { route = list_entry (tmp, ksock_route_t, ksnr_list); - if (route->ksnr_connecting) + if (route->ksnr_connecting != 0) return (route); } @@ -912,7 +973,7 @@ ksocknal_launch_packet (ksock_tx_t *tx, ptl_nid_t nid) ksock_conn_t *conn; ksock_route_t *route; rwlock_t *g_lock; - + /* Ensure the frags we've been given EXACTLY match the number of * bytes we want to send. Many TCP/IP stacks disregard any total * size parameters passed to them and just look at the frags. @@ -936,18 +997,17 @@ ksocknal_launch_packet (ksock_tx_t *tx, ptl_nid_t nid) peer = ksocknal_find_target_peer_locked (tx, nid); if (peer == NULL) { read_unlock (g_lock); - return (PTL_FAIL); + return (-EHOSTUNREACH); } - if (ksocknal_find_connectable_route_locked(peer, 1) == NULL) { + if (ksocknal_find_connectable_route_locked(peer) == NULL) { conn = ksocknal_find_conn_locked (tx, peer); if (conn != NULL) { - /* I've got no unconnected autoconnect routes that - * need to be connected, and I do have an actual - * connection... */ + /* I've got no autoconnect routes that need to be + * connecting and I do have an actual connection... */ ksocknal_queue_tx_locked (tx, conn); read_unlock (g_lock); - return (PTL_OK); + return (0); } } @@ -960,14 +1020,13 @@ ksocknal_launch_packet (ksock_tx_t *tx, ptl_nid_t nid) if (peer->ksnp_closing) { /* peer deleted as I blocked! 
*/ write_unlock_irqrestore (g_lock, flags); ksocknal_put_peer (peer); - return (PTL_FAIL); + return (-EHOSTUNREACH); } ksocknal_put_peer (peer); /* drop ref I got above */ - for (;;) { - /* launch all eager autoconnections */ - route = ksocknal_find_connectable_route_locked (peer, 1); + /* launch any/all autoconnections that need it */ + route = ksocknal_find_connectable_route_locked (peer); if (route == NULL) break; @@ -979,26 +1038,20 @@ ksocknal_launch_packet (ksock_tx_t *tx, ptl_nid_t nid) /* Connection exists; queue message on it */ ksocknal_queue_tx_locked (tx, conn); write_unlock_irqrestore (g_lock, flags); - return (PTL_OK); + return (0); } - if (ksocknal_find_connecting_route_locked (peer) == NULL) { - /* no autoconnect routes actually connecting now. Scrape - * the barrel for non-eager autoconnects */ - route = ksocknal_find_connectable_route_locked (peer, 0); - if (route != NULL) { - ksocknal_launch_autoconnect_locked (route); - } else { - write_unlock_irqrestore (g_lock, flags); - return (PTL_FAIL); - } + route = ksocknal_find_connecting_route_locked (peer); + if (route != NULL) { + /* At least 1 connection is being established; queue the + * message... */ + list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue); + write_unlock_irqrestore (g_lock, flags); + return (0); } - - /* At least 1 connection is being established; queue the message... */ - list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue); - + write_unlock_irqrestore (g_lock, flags); - return (PTL_OK); + return (-EHOSTUNREACH); } ksock_ltx_t * @@ -1069,10 +1122,11 @@ ksocknal_send (nal_cb_t *nal, void *private, lib_msg_t *cookie, ltx->ltx_tx.tx_nob = sizeof (*hdr) + payload_len; rc = ksocknal_launch_packet (<x->ltx_tx, nid); - if (rc != PTL_OK) - ksocknal_put_ltx (ltx); + if (rc == 0) + return (PTL_OK); - return (rc); + ksocknal_put_ltx (ltx); + return (PTL_FAIL); } int @@ -1104,10 +1158,11 @@ ksocknal_send_pages (nal_cb_t *nal, void *private, lib_msg_t *cookie, ltx->ltx_tx.tx_nob = sizeof (*hdr) + payload_len; rc = ksocknal_launch_packet (<x->ltx_tx, nid); - if (rc != PTL_OK) - ksocknal_put_ltx (ltx); - - return (rc); + if (rc == 0) + return (PTL_OK); + + ksocknal_put_ltx (ltx); + return (PTL_FAIL); } void @@ -1133,10 +1188,8 @@ ksocknal_fwd_packet (void *arg, kpr_fwd_desc_t *fwd) tx->tx_hdr = (ptl_hdr_t *)fwd->kprfd_iov[0].iov_base; rc = ksocknal_launch_packet (tx, nid); - if (rc != 0) { - /* FIXME, could pass a better completion error */ - kpr_fwd_done (&ksocknal_data.ksnd_router, fwd, -EHOSTUNREACH); - } + if (rc != 0) + kpr_fwd_done (&ksocknal_data.ksnd_router, fwd, rc); } int @@ -1353,7 +1406,7 @@ ksocknal_fwd_parse (ksock_conn_t *conn) { ksock_peer_t *peer; ptl_nid_t dest_nid = NTOH__u64 (conn->ksnc_hdr.dest_nid); - int body_len = NTOH__u32 (PTL_HDR_LENGTH(&conn->ksnc_hdr)); + int body_len = NTOH__u32 (conn->ksnc_hdr.payload_length); CDEBUG (D_NET, "%p "LPX64"->"LPX64" %d parsing header\n", conn, NTOH__u64 (conn->ksnc_hdr.src_nid), @@ -1368,7 +1421,6 @@ ksocknal_fwd_parse (ksock_conn_t *conn) dest_nid, body_len); ksocknal_new_packet (conn, 0); /* on to new packet */ - ksocknal_close_conn_unlocked (conn, -EINVAL); /* give up on conn */ return; } @@ -1512,17 +1564,15 @@ ksocknal_process_receive (ksock_sched_t *sched, unsigned long *irq_flags) rc = ksocknal_recvmsg(conn); if (rc <= 0) { - if (ksocknal_close_conn_unlocked (conn, rc)) { - /* I'm the first to close */ - if (rc < 0) - CERROR ("[%p] Error %d on read from "LPX64" ip %08x:%d\n", - conn, rc, conn->ksnc_peer->ksnp_nid, - conn->ksnc_ipaddr, conn->ksnc_port); - 
else - CWARN ("[%p] EOF from "LPX64" ip %08x:%d\n", - conn, conn->ksnc_peer->ksnp_nid, - conn->ksnc_ipaddr, conn->ksnc_port); - } + if (rc == 0) + CWARN ("[%p] EOF from "LPX64" ip %08x:%d\n", + conn, conn->ksnc_peer->ksnp_nid, + conn->ksnc_ipaddr, conn->ksnc_port); + else if (!conn->ksnc_closing) + CERROR ("[%p] Error %d on read from "LPX64" ip %08x:%d\n", + conn, rc, conn->ksnc_peer->ksnp_nid, + conn->ksnc_ipaddr, conn->ksnc_port); + ksocknal_close_conn_and_siblings (conn, rc); goto out; } @@ -1945,7 +1995,7 @@ ksocknal_sock_read (struct socket *sock, void *buffer, int nob) } int -ksocknal_exchange_nids (struct socket *sock, ptl_nid_t nid) +ksocknal_hello (struct socket *sock, ptl_nid_t *nid, int *type, __u64 *incarnation) { int rc; ptl_hdr_t hdr; @@ -1960,24 +2010,28 @@ ksocknal_exchange_nids (struct socket *sock, ptl_nid_t nid) hdr.src_nid = __cpu_to_le64 (ksocknal_lib.ni.nid); hdr.type = __cpu_to_le32 (PTL_MSG_HELLO); - + + hdr.msg.hello.type = __cpu_to_le32 (*type); + hdr.msg.hello.incarnation = + __cpu_to_le64 (ksocknal_data.ksnd_incarnation); + /* Assume sufficient socket buffering for this message */ rc = ksocknal_sock_write (sock, &hdr, sizeof (hdr)); if (rc != 0) { - CERROR ("Error %d sending HELLO to "LPX64"\n", rc, nid); + CERROR ("Error %d sending HELLO to "LPX64"\n", rc, *nid); return (rc); } rc = ksocknal_sock_read (sock, hmv, sizeof (*hmv)); if (rc != 0) { - CERROR ("Error %d reading HELLO from "LPX64"\n", rc, nid); + CERROR ("Error %d reading HELLO from "LPX64"\n", rc, *nid); return (rc); } if (hmv->magic != __le32_to_cpu (PORTALS_PROTO_MAGIC)) { CERROR ("Bad magic %#08x (%#08x expected) from "LPX64"\n", - __cpu_to_le32 (hmv->magic), PORTALS_PROTO_MAGIC, nid); - return (-EINVAL); + __cpu_to_le32 (hmv->magic), PORTALS_PROTO_MAGIC, *nid); + return (-EPROTO); } if (hmv->version_major != __cpu_to_le16 (PORTALS_PROTO_VERSION_MAJOR) || @@ -1988,37 +2042,71 @@ ksocknal_exchange_nids (struct socket *sock, ptl_nid_t nid) __le16_to_cpu (hmv->version_minor), PORTALS_PROTO_VERSION_MAJOR, PORTALS_PROTO_VERSION_MINOR, - nid); - return (-EINVAL); + *nid); + return (-EPROTO); } - LASSERT (PORTALS_PROTO_VERSION_MAJOR == 0); +#if (PORTALS_PROTO_VERSION_MAJOR != 0) +# error "This code only understands protocol version 0.x" +#endif /* version 0 sends magic/version as the dest_nid of a 'hello' header, * so read the rest of it in now... 
*/ rc = ksocknal_sock_read (sock, hmv + 1, sizeof (hdr) - sizeof (*hmv)); if (rc != 0) { CERROR ("Error %d reading rest of HELLO hdr from "LPX64"\n", - rc, nid); + rc, *nid); return (rc); } /* ...and check we got what we expected */ if (hdr.type != __cpu_to_le32 (PTL_MSG_HELLO) || - PTL_HDR_LENGTH (&hdr) != __cpu_to_le32 (0)) { + hdr.payload_length != __cpu_to_le32 (0)) { CERROR ("Expecting a HELLO hdr with 0 payload," " but got type %d with %d payload from "LPX64"\n", __le32_to_cpu (hdr.type), - __le32_to_cpu (PTL_HDR_LENGTH (&hdr)), nid); - return (-EINVAL); + __le32_to_cpu (hdr.payload_length), *nid); + return (-EPROTO); } - - if (__le64_to_cpu (hdr.src_nid) != nid) { + + if (__le64_to_cpu(hdr.src_nid) == PTL_NID_ANY) { + CERROR("Expecting a HELLO hdr with a NID, but got PTL_NID_ANY\n"); + return (-EPROTO); + } + + if (*nid == PTL_NID_ANY) { /* don't know peer's nid yet */ + *nid = __le64_to_cpu(hdr.src_nid); + } else if (*nid != __le64_to_cpu (hdr.src_nid)) { CERROR ("Connected to nid "LPX64", but expecting "LPX64"\n", - __le64_to_cpu (hdr.src_nid), nid); - return (-EINVAL); + __le64_to_cpu (hdr.src_nid), *nid); + return (-EPROTO); + } + + if (*type == SOCKNAL_CONN_NONE) { + /* I've accepted this connection; peer determines type */ + *type = __le32_to_cpu(hdr.msg.hello.type); + switch (*type) { + case SOCKNAL_CONN_ANY: + case SOCKNAL_CONN_CONTROL: + break; + case SOCKNAL_CONN_BULK_IN: + *type = SOCKNAL_CONN_BULK_OUT; + break; + case SOCKNAL_CONN_BULK_OUT: + *type = SOCKNAL_CONN_BULK_IN; + break; + default: + CERROR ("Unexpected type %d from "LPX64"\n", *type, *nid); + return (-EPROTO); + } + } else if (__le32_to_cpu(hdr.msg.hello.type) != SOCKNAL_CONN_NONE) { + CERROR ("Mismatched types: me %d "LPX64" %d\n", + *type, *nid, __le32_to_cpu(hdr.msg.hello.type)); + return (-EPROTO); } + *incarnation = __le64_to_cpu(hdr.msg.hello.incarnation); + return (0); } @@ -2103,7 +2191,7 @@ ksocknal_setup_sock (struct socket *sock) } int -ksocknal_connect_peer (ksock_route_t *route) +ksocknal_connect_peer (ksock_route_t *route, int type) { struct sockaddr_in peer_addr; mm_segment_t oldmm = get_fs(); @@ -2208,14 +2296,7 @@ ksocknal_connect_peer (ksock_route_t *route) goto out; } - if (route->ksnr_xchange_nids) { - rc = ksocknal_exchange_nids (sock, route->ksnr_peer->ksnp_nid); - if (rc != 0) - goto out; - } - - rc = ksocknal_create_conn (route->ksnr_peer->ksnp_nid, - route, sock, route->ksnr_irq_affinity); + rc = ksocknal_create_conn (route, sock, route->ksnr_irq_affinity, type); if (rc == 0) { /* Take an extra ref on sock->file to compensate for the * upcoming close which will lose fd's ref on it. */ @@ -2235,20 +2316,36 @@ ksocknal_autoconnect (ksock_route_t *route) ksock_peer_t *peer; unsigned long flags; int rc; + int type; - rc = ksocknal_connect_peer (route); - if (rc == 0) { + for (;;) { + for (type = 0; type < SOCKNAL_CONN_NTYPES; type++) + if ((route->ksnr_connecting & (1 << type)) != 0) + break; + LASSERT (type < SOCKNAL_CONN_NTYPES); + + rc = ksocknal_connect_peer (route, type); + + if (rc != 0) + break; + /* successfully autoconnected: create_conn did the - * route/conn binding and scheduled any blocked packets, - * so there's nothing left to do now. 
 */
-                return;
+                 * route/conn binding and scheduled any blocked packets */
+
+                if (route->ksnr_connecting == 0) {
+                        /* No more connections required */
+                        return;
+                }
+        }
+
+        /* Connection attempt failed */
+
         write_lock_irqsave (&ksocknal_data.ksnd_global_lock, flags);
 
         peer = route->ksnr_peer;
         route->ksnr_connecting = 0;
 
+        /* This is a retry rather than a new connection */
         LASSERT (route->ksnr_retry_interval != 0);
         route->ksnr_timeout = jiffies + route->ksnr_retry_interval;
         route->ksnr_retry_interval = MIN (route->ksnr_retry_interval * 2,
@@ -2268,6 +2365,12 @@ ksocknal_autoconnect (ksock_route_t *route)
                 } while (!list_empty (&peer->ksnp_tx_queue));
         }
 
+        /* make this route least-favourite for re-selection */
+        if (!route->ksnr_deleted) {
+                list_del(&route->ksnr_list);
+                list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
+        }
+
         write_unlock_irqrestore (&ksocknal_data.ksnd_global_lock, flags);
 
         while (!list_empty (&zombies)) {
@@ -2275,7 +2378,7 @@ ksocknal_autoconnect (ksock_route_t *route)
 
                 CERROR ("Deleting packet type %d len %d ("LPX64"->"LPX64")\n",
                         NTOH__u32 (tx->tx_hdr->type),
-                        NTOH__u32 (PTL_HDR_LENGTH(tx->tx_hdr)),
+                        NTOH__u32 (tx->tx_hdr->payload_length),
                         NTOH__u64 (tx->tx_hdr->src_nid),
                         NTOH__u64 (tx->tx_hdr->dest_nid));
 
@@ -2393,13 +2496,11 @@ ksocknal_check_peer_timeouts (int idx)
                 if (conn != NULL) {
                         read_unlock (&ksocknal_data.ksnd_global_lock);
 
-                        if (ksocknal_close_conn_unlocked (conn, -ETIMEDOUT)) {
-                                /* I actually closed... */
-                                CERROR ("Timeout out conn->"LPX64" ip %x:%d\n",
-                                        peer->ksnp_nid, conn->ksnc_ipaddr,
-                                        conn->ksnc_port);
-                        }
-
+                        CERROR ("Timed out conn->"LPX64" ip %x:%d\n",
+                                peer->ksnp_nid, conn->ksnc_ipaddr,
+                                conn->ksnc_port);
+                        ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
+
                         /* NB we won't find this one again, but we can't
                          * just proceed with the next peer, since we dropped
                          * ksnd_global_lock and it might be dead already! */
diff --git a/lnet/klnds/toelnd/toenal_cb.c b/lnet/klnds/toelnd/toenal_cb.c
index 983fa71..37e3f1e 100644
--- a/lnet/klnds/toelnd/toenal_cb.c
+++ b/lnet/klnds/toelnd/toenal_cb.c
@@ -708,26 +708,7 @@ ktoenal_fwd_parse (ksock_conn_t *conn)
         LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_HEADER);
         LASSERT (conn->ksnc_rx_scheduled);
 
-        switch (conn->ksnc_hdr.type)
-        {
-        case PTL_MSG_GET:
-        case PTL_MSG_ACK:
-                body_len = 0;
-                break;
-        case PTL_MSG_PUT:
-                body_len = conn->ksnc_hdr.msg.put.length;
-                break;
-        case PTL_MSG_REPLY:
-                body_len = conn->ksnc_hdr.msg.reply.length;
-                break;
-        default:
-                /* Unrecognised packet type */
-                CERROR ("Unrecognised packet type %d from "LPX64" for "LPX64"\n",
-                        conn->ksnc_hdr.type, conn->ksnc_hdr.src_nid, conn->ksnc_hdr.dest_nid);
-                /* Ignore this header and go back to reading a new packet.
*/ - ktoenal_new_packet (conn, 0); - return; - } + body_len = conn->ksnc_hdr.payload_length; if (body_len < 0) /* length corrupt */ { diff --git a/lnet/libcfs/debug.c b/lnet/libcfs/debug.c index a42d422..092baae 100644 --- a/lnet/libcfs/debug.c +++ b/lnet/libcfs/debug.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include #include @@ -920,8 +919,116 @@ void portals_run_lbug_upcall(char *file, const char *fn, const int line) portals_run_upcall (argv); } +char *portals_nid2str(int nal, ptl_nid_t nid, char *str) +{ + switch(nal){ + case TCPNAL: + /* userspace NAL */ + case SOCKNAL: + sprintf(str, "%u:%d.%d.%d.%d", (__u32)(nid >> 32), + HIPQUAD(nid)); + break; + case QSWNAL: + case GMNAL: + case IBNAL: + case TOENAL: + case SCIMACNAL: + sprintf(str, "%u:%u", (__u32)(nid >> 32), (__u32)nid); + break; + default: + return NULL; + } + return str; +} + +char stack_backtrace[LUSTRE_TRACE_SIZE]; +spinlock_t stack_backtrace_lock = SPIN_LOCK_UNLOCKED; + +#if defined(__arch_um__) +# warning in arch_um + +extern int is_kernel_text_address(unsigned long addr); + +char *portals_debug_dumpstack(void) +{ + int size; + unsigned long addr; + char *buf = stack_backtrace; + char *pbuf = buf; + unsigned long *stack = (unsigned long *)&buf; + + size = sprintf(pbuf, " Call Trace: "); + pbuf += size; + while (((long) stack & (THREAD_SIZE-1)) != 0) { + addr = *stack++; + if (is_kernel_text_address(addr)) { + size = sprintf(pbuf, "[<%08lx>] ", addr); + pbuf += size; + if (buf + LUSTRE_TRACE_SIZE <= pbuf + 12) + break; + } + } + + return buf; +} + +#elif defined(CONFIG_X86) +# warning in __i386__ + +extern int is_kernel_text_address(unsigned long addr); +extern int lookup_symbol(unsigned long address, char *buf, int buflen); + +char *portals_debug_dumpstack(void) +{ + unsigned long esp = current->thread.esp; + unsigned long *stack = (unsigned long *)&esp; + int size; + unsigned long addr; + char *buf = stack_backtrace; + char *pbuf = buf; + static char buffer[512]; + + /* User space on another CPU? 
*/ + if ((esp ^ (unsigned long)current) & (PAGE_MASK<<1)){ + memset(buf, 0x0, LUSTRE_TRACE_SIZE); + goto out; + } + + size = sprintf(pbuf, " Call Trace: "); + pbuf += size; + while (((long) stack & (THREAD_SIZE-1)) != 0) { + addr = *stack++; + if (is_kernel_text_address(addr)) { + lookup_symbol(addr, buffer, 512); + if (buf + LUSTRE_TRACE_SIZE + /* fix length + sizeof('\0') */ + <= pbuf + strlen(buffer) + 28 + 1) + break; + size = sprintf(pbuf, "([<%08lx>] %s (0x%x)) ", + addr, buffer, stack-1); + pbuf += size; + } + } +out: + return buf; +} + +#else /* !__arch_um__ && !__i386__ */ + +char *portals_debug_dumpstack(void) +{ + char *buf = stack_backtrace; + memset(buf, 0x0, LUSTRE_TRACE_SIZE); + return buf; +} + +#endif /* __arch_um__ */ + EXPORT_SYMBOL(portals_debug_dumplog); EXPORT_SYMBOL(portals_debug_msg); EXPORT_SYMBOL(portals_debug_set_level); EXPORT_SYMBOL(portals_run_upcall); EXPORT_SYMBOL(portals_run_lbug_upcall); +EXPORT_SYMBOL(portals_nid2str); +EXPORT_SYMBOL(portals_debug_dumpstack); +EXPORT_SYMBOL(stack_backtrace_lock); diff --git a/lnet/libcfs/module.c b/lnet/libcfs/module.c index c51a506..a15ce6a 100644 --- a/lnet/libcfs/module.c +++ b/lnet/libcfs/module.c @@ -210,6 +210,84 @@ kportal_get_route(int index, __u32 *gateway_nalidp, ptl_nid_t *gateway_nidp, return (rc); } +static int +kportal_router_cmd(struct portals_cfg *pcfg, void * private) +{ + int err; + ENTRY; + + switch(pcfg->pcfg_command) { + case IOC_PORTAL_ADD_ROUTE: + CDEBUG(D_IOCTL, "Adding route: [%d] "LPU64" : "LPU64" - "LPU64"\n", + pcfg->pcfg_nal, pcfg->pcfg_nid, + pcfg->pcfg_nid2, pcfg->pcfg_nid3); + err = kportal_add_route(pcfg->pcfg_nal, pcfg->pcfg_nid, + pcfg->pcfg_nid2, pcfg->pcfg_nid3); + break; + + case IOC_PORTAL_DEL_ROUTE: + CDEBUG (D_IOCTL, "Removing routes via [%d] "LPU64" : "LPU64" - "LPU64"\n", + pcfg->pcfg_nal, pcfg->pcfg_nid, + pcfg->pcfg_nid2, pcfg->pcfg_nid3); + err = kportal_del_route (pcfg->pcfg_nal, pcfg->pcfg_nid, + pcfg->pcfg_nid2, pcfg->pcfg_nid3); + break; + + case IOC_PORTAL_NOTIFY_ROUTER: { + CDEBUG (D_IOCTL, "Notifying peer [%d] "LPU64" %s @ %ld\n", + pcfg->pcfg_nal, pcfg->pcfg_nid, + pcfg->pcfg_flags ? 
"Enabling" : "Disabling", + (time_t)pcfg->pcfg_nid3); + + err = kportal_notify_router (pcfg->pcfg_nal, pcfg->pcfg_nid, + pcfg->pcfg_flags, + (time_t)pcfg->pcfg_nid3); + break; + } + + case IOC_PORTAL_GET_ROUTE: + CDEBUG (D_IOCTL, "Getting route [%d]\n", pcfg->pcfg_count); + err = kportal_get_route(pcfg->pcfg_count, &pcfg->pcfg_nal, + &pcfg->pcfg_nid, + &pcfg->pcfg_nid2, &pcfg->pcfg_nid3, + &pcfg->pcfg_flags); + break; + } + RETURN(err); +} + +static int +kportal_register_router (void) +{ + int rc; + kpr_control_interface_t *ci; + + ci = (kpr_control_interface_t *)PORTAL_SYMBOL_GET(kpr_control_interface); + if (ci == NULL) + return (0); + + rc = kportal_nal_register(ROUTER, kportal_router_cmd, NULL); + + PORTAL_SYMBOL_PUT(kpr_control_interface); + return (rc); +} + +static int +kportal_unregister_router (void) +{ + int rc; + kpr_control_interface_t *ci; + + ci = (kpr_control_interface_t *)PORTAL_SYMBOL_GET(kpr_control_interface); + if (ci == NULL) + return (0); + + rc = kportal_nal_unregister(ROUTER); + + PORTAL_SYMBOL_PUT(kpr_control_interface); + return (rc); +} + int kportal_nal_cmd(struct portals_cfg *pcfg) { @@ -242,6 +320,8 @@ kportal_get_ni (int nal) return (PORTAL_SYMBOL_GET(ktoenal_ni)); case GMNAL: return (PORTAL_SYMBOL_GET(kgmnal_ni)); + case IBNAL: + return (PORTAL_SYMBOL_GET(kibnal_ni)); case TCPNAL: /* userspace NAL */ return (NULL); @@ -272,6 +352,9 @@ kportal_put_ni (int nal) case GMNAL: PORTAL_SYMBOL_PUT(kgmnal_ni); break; + case IBNAL: + PORTAL_SYMBOL_PUT(kibnal_ni); + break; case TCPNAL: /* A lesson to a malicious caller */ LBUG (); @@ -326,6 +409,7 @@ static int kportal_ioctl(struct inode *inode, struct file *file, int err = 0; char buf[1024]; struct portal_ioctl_data *data; + char str[PTL_NALFMT_SIZE]; ENTRY; @@ -379,8 +463,9 @@ static int kportal_ioctl(struct inode *inode, struct file *file, case IOC_PORTAL_PING: { void (*ping)(struct portal_ioctl_data *); - CDEBUG(D_IOCTL, "doing %d pings to nid "LPU64"\n", - data->ioc_count, data->ioc_nid); + CDEBUG(D_IOCTL, "doing %d pings to nid "LPX64" (%s)\n", + data->ioc_count, data->ioc_nid, + portals_nid2str(data->ioc_nal, data->ioc_nid, str)); ping = PORTAL_SYMBOL_GET(kping_client); if (!ping) CERROR("PORTAL_SYMBOL_GET failed\n"); @@ -391,50 +476,11 @@ static int kportal_ioctl(struct inode *inode, struct file *file, RETURN(0); } - case IOC_PORTAL_ADD_ROUTE: - CDEBUG(D_IOCTL, "Adding route: [%d] "LPU64" : "LPU64" - "LPU64"\n", - data->ioc_nal, data->ioc_nid, - data->ioc_nid2, data->ioc_nid3); - err = kportal_add_route(data->ioc_nal, data->ioc_nid, - data->ioc_nid2, data->ioc_nid3); - break; - - case IOC_PORTAL_DEL_ROUTE: - CDEBUG (D_IOCTL, "Removing routes via [%d] "LPU64" : "LPU64" - "LPU64"\n", - data->ioc_nal, data->ioc_nid, - data->ioc_nid2, data->ioc_nid3); - err = kportal_del_route (data->ioc_nal, data->ioc_nid, - data->ioc_nid2, data->ioc_nid3); - break; - - case IOC_PORTAL_NOTIFY_ROUTER: { - CDEBUG (D_IOCTL, "Notifying peer [%d] "LPU64" %s @ %ld\n", - data->ioc_nal, data->ioc_nid, - data->ioc_flags ? 
"Enabling" : "Disabling", - (time_t)data->ioc_nid3); - - err = kportal_notify_router (data->ioc_nal, data->ioc_nid, - data->ioc_flags, - (time_t)data->ioc_nid3); - break; - } - - case IOC_PORTAL_GET_ROUTE: - CDEBUG (D_IOCTL, "Getting route [%d]\n", data->ioc_count); - err = kportal_get_route(data->ioc_count, &data->ioc_nal, - &data->ioc_nid, - &data->ioc_nid2, &data->ioc_nid3, - &data->ioc_flags); - if (err == 0) - if (copy_to_user((char *)arg, data, sizeof (*data))) - err = -EFAULT; - break; - case IOC_PORTAL_GET_NID: { const ptl_handle_ni_t *nip; ptl_process_id_t pid; - CDEBUG (D_IOCTL, "Getting nid [%d]\n", data->ioc_nal); + CDEBUG (D_IOCTL, "Getting nid for nal [%d]\n", data->ioc_nal); nip = kportal_get_ni (data->ioc_nal); if (nip == NULL) @@ -573,9 +619,17 @@ static int init_kportals_module(void) goto cleanup_fini; } + rc = kportal_register_router(); + if (rc) { + CERROR("kportals_register_router: error %d\n", rc); + goto cleanup_proc; + } + CDEBUG (D_OTHER, "portals setup OK\n"); return (0); + cleanup_proc: + remove_proc(); cleanup_fini: PtlFini(); cleanup_deregister: @@ -593,6 +647,7 @@ static void exit_kportals_module(void) { int rc; + kportal_unregister_router(); remove_proc(); PtlFini(); diff --git a/lnet/lnet/api-init.c b/lnet/lnet/api-init.c index f77a439..020a2a9 100644 --- a/lnet/lnet/api-init.c +++ b/lnet/lnet/api-init.c @@ -26,7 +26,7 @@ #include int ptl_init; -unsigned int portal_subsystem_debug = ~0 - (S_PORTALS | S_QSWNAL | S_SOCKNAL | S_GMNAL); +unsigned int portal_subsystem_debug = ~0 - (S_PORTALS | S_QSWNAL | S_SOCKNAL | S_GMNAL | S_IBNAL); unsigned int portal_debug = ~0; unsigned int portal_cerror = 1; unsigned int portal_printk; diff --git a/lnet/lnet/lib-init.c b/lnet/lnet/lib-init.c index 99c4d32..ab223d6 100644 --- a/lnet/lnet/lib-init.c +++ b/lnet/lnet/lib-init.c @@ -127,6 +127,8 @@ kportal_descriptor_setup (nal_cb_t *nal) void kportal_descriptor_cleanup (nal_cb_t *nal) { + int rc; + if (--ptl_slab_users != 0) return; @@ -135,14 +137,26 @@ kportal_descriptor_cleanup (nal_cb_t *nal) LASSERT (atomic_read (&eq_in_use_count) == 0); LASSERT (atomic_read (&msg_in_use_count) == 0); - if (ptl_md_slab != NULL) - kmem_cache_destroy(ptl_md_slab); - if (ptl_msg_slab != NULL) - kmem_cache_destroy(ptl_msg_slab); - if (ptl_me_slab != NULL) - kmem_cache_destroy(ptl_me_slab); - if (ptl_eq_slab != NULL) - kmem_cache_destroy(ptl_eq_slab); + if (ptl_md_slab != NULL) { + rc = kmem_cache_destroy(ptl_md_slab); + if (rc != 0) + CERROR("unable to free MD slab\n"); + } + if (ptl_msg_slab != NULL) { + rc = kmem_cache_destroy(ptl_msg_slab); + if (rc != 0) + CERROR("unable to free MSG slab\n"); + } + if (ptl_me_slab != NULL) { + rc = kmem_cache_destroy(ptl_me_slab); + if (rc != 0) + CERROR("unable to free ME slab\n"); + } + if (ptl_eq_slab != NULL) { + rc = kmem_cache_destroy(ptl_eq_slab); + if (rc != 0) + CERROR("unable to free EQ slab\n"); + } } #else diff --git a/lnet/lnet/lib-move.c b/lnet/lnet/lib-move.c index e73cbb8..6e904ba 100644 --- a/lnet/lnet/lib-move.c +++ b/lnet/lnet/lib-move.c @@ -583,7 +583,7 @@ static int parse_put(nal_cb_t * nal, ptl_hdr_t * hdr, void *private) me = lib_find_me(nal, hdr->msg.put.ptl_index, PTL_MD_OP_PUT, hdr->src_nid, hdr->src_pid, - PTL_HDR_LENGTH (hdr), hdr->msg.put.offset, + hdr->payload_length, hdr->msg.put.offset, hdr->msg.put.match_bits, &mlength, &offset, &unlink); if (me == NULL) @@ -592,7 +592,7 @@ static int parse_put(nal_cb_t * nal, ptl_hdr_t * hdr, void *private) md = me->md; CDEBUG(D_NET, "Incoming put index %x from "LPU64"/%u of length %d/%d 
" "into md "LPX64" [%d] + %d\n", hdr->msg.put.ptl_index, - hdr->src_nid, hdr->src_pid, mlength, PTL_HDR_LENGTH(hdr), + hdr->src_nid, hdr->src_pid, mlength, hdr->payload_length, md->md_lh.lh_cookie, md->md_niov, offset); msg = get_new_msg (nal, md); @@ -617,7 +617,7 @@ static int parse_put(nal_cb_t * nal, ptl_hdr_t * hdr, void *private) msg->ev.initiator.pid = hdr->src_pid; msg->ev.portal = hdr->msg.put.ptl_index; msg->ev.match_bits = hdr->msg.put.match_bits; - msg->ev.rlength = PTL_HDR_LENGTH(hdr); + msg->ev.rlength = hdr->payload_length; msg->ev.mlength = mlength; msg->ev.offset = offset; msg->ev.hdr_data = hdr->msg.put.hdr_data; @@ -646,14 +646,14 @@ static int parse_put(nal_cb_t * nal, ptl_hdr_t * hdr, void *private) state_unlock(nal, &flags); - lib_recv (nal, private, msg, md, offset, mlength, PTL_HDR_LENGTH (hdr)); + lib_recv (nal, private, msg, md, offset, mlength, hdr->payload_length); return 0; drop: nal->ni.counters.drop_count++; - nal->ni.counters.drop_length += PTL_HDR_LENGTH(hdr); + nal->ni.counters.drop_length += hdr->payload_length; state_unlock (nal, &flags); - lib_recv (nal, private, NULL, NULL, 0, 0, PTL_HDR_LENGTH (hdr)); + lib_recv (nal, private, NULL, NULL, 0, 0, hdr->payload_length); return -1; } @@ -676,11 +676,6 @@ static int parse_get(nal_cb_t * nal, ptl_hdr_t * hdr, void *private) hdr->msg.get.sink_length = NTOH__u32 (hdr->msg.get.sink_length); hdr->msg.get.src_offset = NTOH__u32 (hdr->msg.get.src_offset); - /* compatibility check until field is deleted */ - if (hdr->msg.get.return_offset != 0) - CERROR("Unexpected non-zero get.return_offset %x from " - LPU64"\n", hdr->msg.get.return_offset, hdr->src_nid); - state_lock(nal, &flags); me = lib_find_me(nal, hdr->msg.get.ptl_index, PTL_MD_OP_GET, @@ -694,7 +689,7 @@ static int parse_get(nal_cb_t * nal, ptl_hdr_t * hdr, void *private) md = me->md; CDEBUG(D_NET, "Incoming get index %d from "LPU64".%u of length %d/%d " "from md "LPX64" [%d] + %d\n", hdr->msg.get.ptl_index, - hdr->src_nid, hdr->src_pid, mlength, PTL_HDR_LENGTH(hdr), + hdr->src_nid, hdr->src_pid, mlength, hdr->payload_length, md->md_lh.lh_cookie, md->md_niov, offset); msg = get_new_msg (nal, md); @@ -710,7 +705,7 @@ static int parse_get(nal_cb_t * nal, ptl_hdr_t * hdr, void *private) msg->ev.initiator.pid = hdr->src_pid; msg->ev.portal = hdr->msg.get.ptl_index; msg->ev.match_bits = hdr->msg.get.match_bits; - msg->ev.rlength = PTL_HDR_LENGTH(hdr); + msg->ev.rlength = hdr->payload_length; msg->ev.mlength = mlength; msg->ev.offset = offset; msg->ev.hdr_data = 0; @@ -745,7 +740,7 @@ static int parse_get(nal_cb_t * nal, ptl_hdr_t * hdr, void *private) reply.src_nid = HTON__u64 (ni->nid); reply.dest_pid = HTON__u32 (hdr->src_pid); reply.src_pid = HTON__u32 (ni->pid); - PTL_HDR_LENGTH(&reply) = HTON__u32 (mlength); + reply.payload_length = HTON__u32 (mlength); reply.msg.reply.dst_wmd = hdr->msg.get.return_wmd; @@ -763,13 +758,13 @@ static int parse_get(nal_cb_t * nal, ptl_hdr_t * hdr, void *private) } /* Complete the incoming message */ - lib_recv (nal, private, NULL, NULL, 0, 0, PTL_HDR_LENGTH (hdr)); + lib_recv (nal, private, NULL, NULL, 0, 0, hdr->payload_length); return (rc); drop: ni->counters.drop_count++; ni->counters.drop_length += hdr->msg.get.sink_length; state_unlock(nal, &flags); - lib_recv (nal, private, NULL, NULL, 0, 0, PTL_HDR_LENGTH (hdr)); + lib_recv (nal, private, NULL, NULL, 0, 0, hdr->payload_length); return -1; } @@ -782,11 +777,6 @@ static int parse_reply(nal_cb_t * nal, ptl_hdr_t * hdr, void *private) lib_msg_t *msg; unsigned long flags; - 
/* compatibility check until field is deleted */ - if (hdr->msg.reply.dst_offset != 0) - CERROR("Unexpected non-zero reply.dst_offset %x from "LPU64"\n", - hdr->msg.reply.dst_offset, hdr->src_nid); - state_lock(nal, &flags); /* NB handles only looked up by creator (no flips) */ @@ -802,7 +792,7 @@ static int parse_reply(nal_cb_t * nal, ptl_hdr_t * hdr, void *private) LASSERT (md->offset == 0); - length = rlength = PTL_HDR_LENGTH(hdr); + length = rlength = hdr->payload_length; if (length > md->length) { if ((md->options & PTL_MD_TRUNCATE) == 0) { @@ -848,9 +838,9 @@ static int parse_reply(nal_cb_t * nal, ptl_hdr_t * hdr, void *private) drop: nal->ni.counters.drop_count++; - nal->ni.counters.drop_length += PTL_HDR_LENGTH(hdr); + nal->ni.counters.drop_length += hdr->payload_length; state_unlock (nal, &flags); - lib_recv (nal, private, NULL, NULL, 0, 0, PTL_HDR_LENGTH (hdr)); + lib_recv (nal, private, NULL, NULL, 0, 0, hdr->payload_length); return -1; } @@ -901,13 +891,13 @@ static int parse_ack(nal_cb_t * nal, ptl_hdr_t * hdr, void *private) ni->counters.recv_count++; state_unlock(nal, &flags); - lib_recv (nal, private, msg, NULL, 0, 0, PTL_HDR_LENGTH (hdr)); + lib_recv (nal, private, msg, NULL, 0, 0, hdr->payload_length); return 0; drop: nal->ni.counters.drop_count++; state_unlock (nal, &flags); - lib_recv (nal, private, NULL, NULL, 0, 0, PTL_HDR_LENGTH (hdr)); + lib_recv (nal, private, NULL, NULL, 0, 0, hdr->payload_length); return -1; } @@ -954,7 +944,7 @@ void print_hdr(nal_cb_t * nal, ptl_hdr_t * hdr) hdr->msg.put.match_bits); nal->cb_printf(nal, " Length %d, offset %d, hdr data "LPX64"\n", - PTL_HDR_LENGTH(hdr), hdr->msg.put.offset, + hdr->payload_length, hdr->msg.put.offset, hdr->msg.put.hdr_data); break; @@ -984,7 +974,7 @@ void print_hdr(nal_cb_t * nal, ptl_hdr_t * hdr) "length %d\n", hdr->msg.reply.dst_wmd.wh_interface_cookie, hdr->msg.reply.dst_wmd.wh_object_cookie, - PTL_HDR_LENGTH(hdr)); + hdr->payload_length); } } /* end of print_hdr() */ @@ -994,21 +984,13 @@ int lib_parse(nal_cb_t * nal, ptl_hdr_t * hdr, void *private) { unsigned long flags; - /* NB static check; optimizer will elide this if it's right */ - LASSERT (offsetof (ptl_hdr_t, msg.ack.length) == - offsetof (ptl_hdr_t, msg.put.length)); - LASSERT (offsetof (ptl_hdr_t, msg.ack.length) == - offsetof (ptl_hdr_t, msg.get.length)); - LASSERT (offsetof (ptl_hdr_t, msg.ack.length) == - offsetof (ptl_hdr_t, msg.reply.length)); - /* convert common fields to host byte order */ hdr->dest_nid = NTOH__u64 (hdr->dest_nid); hdr->src_nid = NTOH__u64 (hdr->src_nid); hdr->dest_pid = NTOH__u32 (hdr->dest_pid); hdr->src_pid = NTOH__u32 (hdr->src_pid); hdr->type = NTOH__u32 (hdr->type); - PTL_HDR_LENGTH(hdr) = NTOH__u32 (PTL_HDR_LENGTH(hdr)); + hdr->payload_length = NTOH__u32(hdr->payload_length); #if 0 nal->cb_printf(nal, "%d: lib_parse: nal=%p hdr=%p type=%d\n", nal->ni.nid, nal, hdr, hdr->type); @@ -1023,7 +1005,7 @@ int lib_parse(nal_cb_t * nal, ptl_hdr_t * hdr, void *private) nal->ni.nid, mv->magic, mv->version_major, mv->version_minor, hdr->src_nid); - lib_recv (nal, private, NULL, NULL, 0, 0, PTL_HDR_LENGTH (hdr)); + lib_recv (nal, private, NULL, NULL, 0, 0, hdr->payload_length); return (-1); } @@ -1034,10 +1016,10 @@ int lib_parse(nal_cb_t * nal, ptl_hdr_t * hdr, void *private) state_lock (nal, &flags); nal->ni.counters.drop_count++; - nal->ni.counters.drop_length += PTL_HDR_LENGTH(hdr); + nal->ni.counters.drop_length += hdr->payload_length; state_unlock (nal, &flags); - lib_recv (nal, private, NULL, NULL, 0, 0, PTL_HDR_LENGTH 
(hdr)); + lib_recv (nal, private, NULL, NULL, 0, 0, hdr->payload_length); return (-1); } @@ -1048,7 +1030,7 @@ int lib_parse(nal_cb_t * nal, ptl_hdr_t * hdr, void *private) ": simulated failure\n", nal->ni.nid, hdr_type_string (hdr), hdr->src_nid); - lib_recv (nal, private, NULL, NULL, 0, 0, PTL_HDR_LENGTH (hdr)); + lib_recv (nal, private, NULL, NULL, 0, 0, hdr->payload_length); return (-1); } @@ -1069,7 +1051,7 @@ int lib_parse(nal_cb_t * nal, ptl_hdr_t * hdr, void *private) ": Bad type=0x%x\n", nal->ni.nid, hdr->src_nid, hdr->type); - lib_recv (nal, private, NULL, NULL, 0, 0, PTL_HDR_LENGTH (hdr)); + lib_recv (nal, private, NULL, NULL, 0, 0, hdr->payload_length); return (-1); } } @@ -1126,7 +1108,7 @@ int do_PtlPut(nal_cb_t * nal, void *private, void *v_args, void *v_ret) hdr.src_nid = HTON__u64 (ni->nid); hdr.dest_pid = HTON__u32 (id->pid); hdr.src_pid = HTON__u32 (ni->pid); - PTL_HDR_LENGTH(&hdr) = HTON__u32 (md->length); + hdr.payload_length = HTON__u32 (md->length); /* NB handles only looked up by creator (no flips) */ if (args->ack_req_in == PTL_ACK_REQ) { @@ -1303,7 +1285,7 @@ int do_PtlGet(nal_cb_t * nal, void *private, void *v_args, void *v_ret) hdr.src_nid = HTON__u64 (ni->nid); hdr.dest_pid = HTON__u32 (id->pid); hdr.src_pid = HTON__u32 (ni->pid); - PTL_HDR_LENGTH(&hdr) = 0; + hdr.payload_length = 0; /* NB handles only looked up by creator (no flips) */ hdr.msg.get.return_wmd.wh_interface_cookie = ni->ni_interface_cookie; @@ -1364,12 +1346,15 @@ int do_PtlGet(nal_cb_t * nal, void *private, void *v_args, void *v_ret) void lib_assert_wire_constants (void) { - /* Wire protocol assertions generated by 'wirecheck' */ + /* Wire protocol assertions generated by 'wirecheck' + * running on Linux robert.bartonsoftware.com 2.4.20-18.9 #1 Thu May 29 06:54:41 EDT 2003 i68 + * with gcc version 3.2.2 20030222 (Red Hat Linux 3.2.2-5) */ + /* Constants... 
*/ LASSERT (PORTALS_PROTO_MAGIC == 0xeebc0ded); LASSERT (PORTALS_PROTO_VERSION_MAJOR == 0); - LASSERT (PORTALS_PROTO_VERSION_MINOR == 1); + LASSERT (PORTALS_PROTO_VERSION_MINOR == 3); LASSERT (PTL_MSG_ACK == 0); LASSERT (PTL_MSG_PUT == 1); LASSERT (PTL_MSG_GET == 2); @@ -1377,79 +1362,77 @@ void lib_assert_wire_constants (void) LASSERT (PTL_MSG_HELLO == 4); /* Checks for struct ptl_handle_wire_t */ - LASSERT (sizeof (ptl_handle_wire_t) == 16); - LASSERT (offsetof (ptl_handle_wire_t, wh_interface_cookie) == 0); - LASSERT (sizeof (((ptl_handle_wire_t *)0)->wh_interface_cookie) == 8); - LASSERT (offsetof (ptl_handle_wire_t, wh_object_cookie) == 8); - LASSERT (sizeof (((ptl_handle_wire_t *)0)->wh_object_cookie) == 8); + LASSERT ((int)sizeof(ptl_handle_wire_t) == 16); + LASSERT (offsetof(ptl_handle_wire_t, wh_interface_cookie) == 0); + LASSERT ((int)sizeof(((ptl_handle_wire_t *)0)->wh_interface_cookie) == 8); + LASSERT (offsetof(ptl_handle_wire_t, wh_object_cookie) == 8); + LASSERT ((int)sizeof(((ptl_handle_wire_t *)0)->wh_object_cookie) == 8); /* Checks for struct ptl_magicversion_t */ - LASSERT (sizeof (ptl_magicversion_t) == 8); - LASSERT (offsetof (ptl_magicversion_t, magic) == 0); - LASSERT (sizeof (((ptl_magicversion_t *)0)->magic) == 4); - LASSERT (offsetof (ptl_magicversion_t, version_major) == 4); - LASSERT (sizeof (((ptl_magicversion_t *)0)->version_major) == 2); - LASSERT (offsetof (ptl_magicversion_t, version_minor) == 6); - LASSERT (sizeof (((ptl_magicversion_t *)0)->version_minor) == 2); + LASSERT ((int)sizeof(ptl_magicversion_t) == 8); + LASSERT (offsetof(ptl_magicversion_t, magic) == 0); + LASSERT ((int)sizeof(((ptl_magicversion_t *)0)->magic) == 4); + LASSERT (offsetof(ptl_magicversion_t, version_major) == 4); + LASSERT ((int)sizeof(((ptl_magicversion_t *)0)->version_major) == 2); + LASSERT (offsetof(ptl_magicversion_t, version_minor) == 6); + LASSERT ((int)sizeof(((ptl_magicversion_t *)0)->version_minor) == 2); /* Checks for struct ptl_hdr_t */ - LASSERT (sizeof (ptl_hdr_t) == 72); - LASSERT (offsetof (ptl_hdr_t, dest_nid) == 0); - LASSERT (sizeof (((ptl_hdr_t *)0)->dest_nid) == 8); - LASSERT (offsetof (ptl_hdr_t, src_nid) == 8); - LASSERT (sizeof (((ptl_hdr_t *)0)->src_nid) == 8); - LASSERT (offsetof (ptl_hdr_t, dest_pid) == 16); - LASSERT (sizeof (((ptl_hdr_t *)0)->dest_pid) == 4); - LASSERT (offsetof (ptl_hdr_t, src_pid) == 20); - LASSERT (sizeof (((ptl_hdr_t *)0)->src_pid) == 4); - LASSERT (offsetof (ptl_hdr_t, type) == 24); - LASSERT (sizeof (((ptl_hdr_t *)0)->type) == 4); + LASSERT ((int)sizeof(ptl_hdr_t) == 72); + LASSERT (offsetof(ptl_hdr_t, dest_nid) == 0); + LASSERT ((int)sizeof(((ptl_hdr_t *)0)->dest_nid) == 8); + LASSERT (offsetof(ptl_hdr_t, src_nid) == 8); + LASSERT ((int)sizeof(((ptl_hdr_t *)0)->src_nid) == 8); + LASSERT (offsetof(ptl_hdr_t, dest_pid) == 16); + LASSERT ((int)sizeof(((ptl_hdr_t *)0)->dest_pid) == 4); + LASSERT (offsetof(ptl_hdr_t, src_pid) == 20); + LASSERT ((int)sizeof(((ptl_hdr_t *)0)->src_pid) == 4); + LASSERT (offsetof(ptl_hdr_t, type) == 24); + LASSERT ((int)sizeof(((ptl_hdr_t *)0)->type) == 4); + LASSERT (offsetof(ptl_hdr_t, payload_length) == 28); + LASSERT ((int)sizeof(((ptl_hdr_t *)0)->payload_length) == 4); + LASSERT (offsetof(ptl_hdr_t, msg) == 32); + LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg) == 40); /* Ack */ - LASSERT (offsetof (ptl_hdr_t, msg.ack.mlength) == 28); - LASSERT (sizeof (((ptl_hdr_t *)0)->msg.ack.mlength) == 4); - LASSERT (offsetof (ptl_hdr_t, msg.ack.dst_wmd) == 32); - LASSERT (sizeof (((ptl_hdr_t *)0)->msg.ack.dst_wmd) 
== 16); - LASSERT (offsetof (ptl_hdr_t, msg.ack.match_bits) == 48); - LASSERT (sizeof (((ptl_hdr_t *)0)->msg.ack.match_bits) == 8); - LASSERT (offsetof (ptl_hdr_t, msg.ack.length) == 56); - LASSERT (sizeof (((ptl_hdr_t *)0)->msg.ack.length) == 4); + LASSERT (offsetof(ptl_hdr_t, msg.ack.dst_wmd) == 32); + LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.ack.dst_wmd) == 16); + LASSERT (offsetof(ptl_hdr_t, msg.ack.match_bits) == 48); + LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.ack.match_bits) == 8); + LASSERT (offsetof(ptl_hdr_t, msg.ack.mlength) == 56); + LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.ack.mlength) == 4); /* Put */ - LASSERT (offsetof (ptl_hdr_t, msg.put.ptl_index) == 28); - LASSERT (sizeof (((ptl_hdr_t *)0)->msg.put.ptl_index) == 4); - LASSERT (offsetof (ptl_hdr_t, msg.put.ack_wmd) == 32); - LASSERT (sizeof (((ptl_hdr_t *)0)->msg.put.ack_wmd) == 16); - LASSERT (offsetof (ptl_hdr_t, msg.put.match_bits) == 48); - LASSERT (sizeof (((ptl_hdr_t *)0)->msg.put.match_bits) == 8); - LASSERT (offsetof (ptl_hdr_t, msg.put.length) == 56); - LASSERT (sizeof (((ptl_hdr_t *)0)->msg.put.length) == 4); - LASSERT (offsetof (ptl_hdr_t, msg.put.offset) == 60); - LASSERT (sizeof (((ptl_hdr_t *)0)->msg.put.offset) == 4); - LASSERT (offsetof (ptl_hdr_t, msg.put.hdr_data) == 64); - LASSERT (sizeof (((ptl_hdr_t *)0)->msg.put.hdr_data) == 8); + LASSERT (offsetof(ptl_hdr_t, msg.put.ack_wmd) == 32); + LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.put.ack_wmd) == 16); + LASSERT (offsetof(ptl_hdr_t, msg.put.match_bits) == 48); + LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.put.match_bits) == 8); + LASSERT (offsetof(ptl_hdr_t, msg.put.hdr_data) == 56); + LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.put.hdr_data) == 8); + LASSERT (offsetof(ptl_hdr_t, msg.put.ptl_index) == 64); + LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.put.ptl_index) == 4); + LASSERT (offsetof(ptl_hdr_t, msg.put.offset) == 68); + LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.put.offset) == 4); /* Get */ - LASSERT (offsetof (ptl_hdr_t, msg.get.ptl_index) == 28); - LASSERT (sizeof (((ptl_hdr_t *)0)->msg.get.ptl_index) == 4); - LASSERT (offsetof (ptl_hdr_t, msg.get.return_wmd) == 32); - LASSERT (sizeof (((ptl_hdr_t *)0)->msg.get.return_wmd) == 16); - LASSERT (offsetof (ptl_hdr_t, msg.get.match_bits) == 48); - LASSERT (sizeof (((ptl_hdr_t *)0)->msg.get.match_bits) == 8); - LASSERT (offsetof (ptl_hdr_t, msg.get.length) == 56); - LASSERT (sizeof (((ptl_hdr_t *)0)->msg.get.length) == 4); - LASSERT (offsetof (ptl_hdr_t, msg.get.src_offset) == 60); - LASSERT (sizeof (((ptl_hdr_t *)0)->msg.get.src_offset) == 4); - LASSERT (offsetof (ptl_hdr_t, msg.get.return_offset) == 64); - LASSERT (sizeof (((ptl_hdr_t *)0)->msg.get.return_offset) == 4); - LASSERT (offsetof (ptl_hdr_t, msg.get.sink_length) == 68); - LASSERT (sizeof (((ptl_hdr_t *)0)->msg.get.sink_length) == 4); + LASSERT (offsetof(ptl_hdr_t, msg.get.return_wmd) == 32); + LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.get.return_wmd) == 16); + LASSERT (offsetof(ptl_hdr_t, msg.get.match_bits) == 48); + LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.get.match_bits) == 8); + LASSERT (offsetof(ptl_hdr_t, msg.get.ptl_index) == 56); + LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.get.ptl_index) == 4); + LASSERT (offsetof(ptl_hdr_t, msg.get.src_offset) == 60); + LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.get.src_offset) == 4); + LASSERT (offsetof(ptl_hdr_t, msg.get.sink_length) == 64); + LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.get.sink_length) == 4); /* Reply */ - LASSERT (offsetof (ptl_hdr_t, msg.reply.dst_wmd) == 32); - LASSERT 
(sizeof (((ptl_hdr_t *)0)->msg.reply.dst_wmd) == 16); - LASSERT (offsetof (ptl_hdr_t, msg.reply.dst_offset) == 48); - LASSERT (sizeof (((ptl_hdr_t *)0)->msg.reply.dst_offset) == 4); - LASSERT (offsetof (ptl_hdr_t, msg.reply.length) == 56); - LASSERT (sizeof (((ptl_hdr_t *)0)->msg.reply.length) == 4); + LASSERT (offsetof(ptl_hdr_t, msg.reply.dst_wmd) == 32); + LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.reply.dst_wmd) == 16); + + /* Hello */ + LASSERT (offsetof(ptl_hdr_t, msg.hello.incarnation) == 32); + LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.hello.incarnation) == 8); + LASSERT (offsetof(ptl_hdr_t, msg.hello.type) == 40); + LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.hello.type) == 4); } diff --git a/lnet/lnet/lib-msg.c b/lnet/lnet/lib-msg.c index 9363251..9840ff5 100644 --- a/lnet/lnet/lib-msg.c +++ b/lnet/lnet/lib-msg.c @@ -59,7 +59,7 @@ int lib_finalize(nal_cb_t * nal, void *private, lib_msg_t *msg) ack.src_nid = HTON__u64 (nal->ni.nid); ack.dest_pid = HTON__u32 (msg->pid); ack.src_pid = HTON__u32 (nal->ni.pid); - PTL_HDR_LENGTH(&ack) = 0; + ack.payload_length = 0; ack.msg.ack.dst_wmd = msg->ack_wmd; ack.msg.ack.match_bits = msg->ev.match_bits; diff --git a/lnet/router/router.c b/lnet/router/router.c index b5bab2c..e29f628 100644 --- a/lnet/router/router.c +++ b/lnet/router/router.c @@ -126,12 +126,16 @@ kpr_do_upcall (void *arg) void kpr_upcall (int gw_nalid, ptl_nid_t gw_nid, int alive, time_t when) { + char str[PTL_NALFMT_SIZE]; + /* May be in arbitrary context */ kpr_upcall_t *u = kmalloc (sizeof (kpr_upcall_t), GFP_ATOMIC); if (u == NULL) { - CERROR ("Upcall out of memory: nal %d nid "LPX64" %s\n", - gw_nalid, gw_nid, alive ? "up" : "down"); + CERROR ("Upcall out of memory: nal %d nid "LPX64" (%s) %s\n", + gw_nalid, gw_nid, + portals_nid2str(gw_nalid, gw_nid, str), + alive ? "up" : "down"); return; } @@ -155,6 +159,7 @@ kpr_do_notify (int byNal, int gateway_nalid, ptl_nid_t gateway_nid, struct timeval now; struct list_head *e; struct list_head *n; + char str[PTL_NALFMT_SIZE]; CDEBUG (D_NET, "%s notifying [%d] "LPX64": %s\n", byNal ? "NAL" : "userspace", @@ -253,8 +258,9 @@ kpr_do_notify (int byNal, int gateway_nalid, ptl_nid_t gateway_nid, if (byNal) { /* It wasn't userland that notified me... */ - CWARN ("Upcall: NAL %d NID "LPX64" is %s\n", + CWARN ("Upcall: NAL %d NID "LPX64" (%s) is %s\n", gateway_nalid, gateway_nid, + portals_nid2str(gateway_nalid, gateway_nid, str), alive ? 
"alive" : "dead"); kpr_upcall (gateway_nalid, gateway_nid, alive, when); } else { diff --git a/lnet/tests/ping_cli.c b/lnet/tests/ping_cli.c index 22bdb45..85c0d71 100644 --- a/lnet/tests/ping_cli.c +++ b/lnet/tests/ping_cli.c @@ -111,11 +111,15 @@ pingcli_start(struct portal_ioctl_data *args) unsigned ping_bulk_magic = PING_BULK_MAGIC; int rc; struct timeval tv1, tv2; + char str[PTL_NALFMT_SIZE]; + client->tsk = current; client->args = args; - CDEBUG (D_OTHER, "pingcli_setup args: nid "LPX64", \ + CDEBUG (D_OTHER, "pingcli_setup args: nid "LPX64" (%s), \ nal %d, size %u, count: %u, timeout: %u\n", - args->ioc_nid, args->ioc_nal, args->ioc_size, + args->ioc_nid, + portals_nid2str(args->ioc_nal, args->ioc_nid, str), + args->ioc_nal, args->ioc_size, args->ioc_count, args->ioc_timeout); diff --git a/lnet/tests/sping_cli.c b/lnet/tests/sping_cli.c index c37db4c..64a1dd2 100644 --- a/lnet/tests/sping_cli.c +++ b/lnet/tests/sping_cli.c @@ -104,14 +104,17 @@ pingcli_start(struct portal_ioctl_data *args) { const ptl_handle_ni_t *nip; unsigned ping_head_magic = PING_HEADER_MAGIC; + char str[PTL_NALFMT_SIZE]; int rc; client->tsk = current; client->args = args; - CDEBUG (D_OTHER, "pingcli_setup args: nid "LPX64", \ + CDEBUG (D_OTHER, "pingcli_setup args: nid "LPX64" (%s), \ nal %d, size %u, count: %u, timeout: %u\n", - args->ioc_nid, args->ioc_nal, args->ioc_size, + args->ioc_nid, + portals_nid2str(args->ioc_nid, args->ioc_nal, str), + args->ioc_nal, args->ioc_size, args->ioc_count, args->ioc_timeout); diff --git a/lnet/utils/acceptor.c b/lnet/utils/acceptor.c index 9fb2759..29b8d1e 100644 --- a/lnet/utils/acceptor.c +++ b/lnet/utils/acceptor.c @@ -19,6 +19,7 @@ #include #include #include +#include /* should get this from autoconf somehow */ #ifndef PIDFILE_DIR @@ -100,7 +101,7 @@ parse_size (int *sizep, char *str) } void -show_connection (int fd, __u32 net_ip, ptl_nid_t nid) +show_connection (int fd, __u32 net_ip) { struct hostent *h = gethostbyaddr ((char *)&net_ip, sizeof net_ip, AF_INET); __u32 host_ip = ntohl (net_ip); @@ -128,136 +129,8 @@ show_connection (int fd, __u32 net_ip, ptl_nid_t nid) else snprintf (host, sizeof(host), "%s", h->h_name); - syslog (LOG_INFO, "Accepted host: %s NID: "LPX64" snd: %d rcv %d nagle: %s\n", - host, nid, txmem, rxmem, nonagle ? 
"disabled" : "enabled"); -} - -int -sock_write (int cfd, void *buffer, int nob) -{ - while (nob > 0) - { - int rc = write (cfd, buffer, nob); - - if (rc < 0) - { - if (errno == EINTR) - continue; - - return (rc); - } - - if (rc == 0) - { - fprintf (stderr, "Unexpected zero sock_write\n"); - abort(); - } - - nob -= rc; - buffer = (char *)buffer + nob; - } - - return (0); -} - -int -sock_read (int cfd, void *buffer, int nob) -{ - while (nob > 0) - { - int rc = read (cfd, buffer, nob); - - if (rc < 0) - { - if (errno == EINTR) - continue; - - return (rc); - } - - if (rc == 0) /* EOF */ - { - errno = ECONNABORTED; - return (-1); - } - - nob -= rc; - buffer = (char *)buffer + nob; - } - - return (0); -} - -int -exchange_nids (int cfd, ptl_nid_t my_nid, ptl_nid_t *peer_nid) -{ - int rc; - ptl_hdr_t hdr; - ptl_magicversion_t *hmv = (ptl_magicversion_t *)&hdr.dest_nid; - - LASSERT (sizeof (*hmv) == sizeof (hdr.dest_nid)); - - memset (&hdr, 0, sizeof (hdr)); - - hmv->magic = __cpu_to_le32 (PORTALS_PROTO_MAGIC); - hmv->version_major = __cpu_to_le16 (PORTALS_PROTO_VERSION_MAJOR); - hmv->version_minor = __cpu_to_le16 (PORTALS_PROTO_VERSION_MINOR); - - hdr.src_nid = __cpu_to_le64 (my_nid); - hdr.type = __cpu_to_le32 (PTL_MSG_HELLO); - - /* Assume there's sufficient socket buffering for a portals HELLO header */ - rc = sock_write (cfd, &hdr, sizeof (hdr)); - if (rc != 0) { - perror ("Can't send initial HELLO"); - return (-1); - } - - /* First few bytes down the wire are the portals protocol magic and - * version, no matter what protocol version we're running. */ - - rc = sock_read (cfd, hmv, sizeof (*hmv)); - if (rc != 0) { - perror ("Can't read from peer"); - return (-1); - } - - if (__cpu_to_le32 (hmv->magic) != PORTALS_PROTO_MAGIC) { - fprintf (stderr, "Bad magic %#08x (%#08x expected)\n", - __cpu_to_le32 (hmv->magic), PORTALS_PROTO_MAGIC); - return (-1); - } - - if (__cpu_to_le16 (hmv->version_major) != PORTALS_PROTO_VERSION_MAJOR || - __cpu_to_le16 (hmv->version_minor) != PORTALS_PROTO_VERSION_MINOR) { - fprintf (stderr, "Incompatible protocol version %d.%d (%d.%d expected)\n", - __cpu_to_le16 (hmv->version_major), - __cpu_to_le16 (hmv->version_minor), - PORTALS_PROTO_VERSION_MAJOR, - PORTALS_PROTO_VERSION_MINOR); - } - - /* version 0 sends magic/version as the dest_nid of a 'hello' header, - * so read the rest of it in now... */ - LASSERT (PORTALS_PROTO_VERSION_MAJOR == 0); - rc = sock_read (cfd, hmv + 1, sizeof (hdr) - sizeof (*hmv)); - if (rc != 0) { - perror ("Can't read rest of HELLO hdr"); - return (-1); - } - - /* ...and check we got what we expected */ - if (__cpu_to_le32 (hdr.type) != PTL_MSG_HELLO || - __cpu_to_le32 (PTL_HDR_LENGTH (&hdr)) != 0) { - fprintf (stderr, "Expecting a HELLO hdr with 0 payload," - " but got type %d with %d payload\n", - __cpu_to_le32 (hdr.type), - __cpu_to_le32 (PTL_HDR_LENGTH (&hdr))); - return (-1); - } - - *peer_nid = __le64_to_cpu (hdr.src_nid); - return (0); + syslog (LOG_INFO, "Accepted host: %s snd: %d rcv %d nagle: %s\n", + host, txmem, rxmem, nonagle ? 
"disabled" : "enabled"); } void @@ -277,10 +150,9 @@ int main(int argc, char **argv) int noclose = 0; int nonagle = 1; int nal = SOCKNAL; - int xchg_nids = 0; int bind_irq = 0; - while ((c = getopt (argc, argv, "N:r:s:nlxi")) != -1) + while ((c = getopt (argc, argv, "N:r:s:nli")) != -1) switch (c) { case 'r': @@ -301,10 +173,6 @@ int main(int argc, char **argv) noclose = 1; break; - case 'x': - xchg_nids = 1; - break; - case 'i': bind_irq = 1; break; @@ -410,7 +278,6 @@ int main(int argc, char **argv) int cfd; struct portal_ioctl_data data; struct portals_cfg pcfg; - ptl_nid_t peer_nid; cfd = accept(fd, (struct sockaddr *)&clntaddr, &len); if ( cfd < 0 ) { @@ -419,43 +286,20 @@ int main(int argc, char **argv) continue; } - if (!xchg_nids) - peer_nid = ntohl (clntaddr.sin_addr.s_addr); /* HOST byte order */ - else - { - PORTAL_IOC_INIT (data); - data.ioc_nal = nal; - rc = ioctl (pfd, IOC_PORTAL_GET_NID, &data); - if (rc < 0) - { - perror ("Can't get my NID"); - close (cfd); - continue; - } - - rc = exchange_nids (cfd, data.ioc_nid, &peer_nid); - if (rc != 0) - { - close (cfd); - continue; - } - } - - show_connection (cfd, clntaddr.sin_addr.s_addr, peer_nid); + show_connection (cfd, clntaddr.sin_addr.s_addr); PCFG_INIT(pcfg, NAL_CMD_REGISTER_PEER_FD); pcfg.pcfg_nal = nal; pcfg.pcfg_fd = cfd; - pcfg.pcfg_nid = peer_nid; pcfg.pcfg_flags = bind_irq; - + pcfg.pcfg_misc = SOCKNAL_CONN_NONE; /* == incoming connection */ + PORTAL_IOC_INIT(data); data.ioc_pbuf1 = (char*)&pcfg; data.ioc_plen1 = sizeof(pcfg); if (ioctl(pfd, IOC_PORTAL_NAL_CMD, &data) < 0) { perror("ioctl failed"); - } else { printf("client registered\n"); } diff --git a/lnet/utils/portals.c b/lnet/utils/portals.c index 4beac34..147d132 100644 --- a/lnet/utils/portals.c +++ b/lnet/utils/portals.c @@ -55,6 +55,7 @@ #include #include #include +#include #include "parser.h" unsigned int portal_debug; @@ -80,6 +81,7 @@ static name2num_t nalnames[] = { {"toe", TOENAL}, {"elan", QSWNAL}, {"gm", GMNAL}, + {"ib", IBNAL}, {"scimac", SCIMACNAL}, {NULL, -1} }; @@ -453,14 +455,13 @@ jt_ptl_print_autoconnects (int argc, char **argv) if (rc != 0) break; - printf (LPX64"@%s:%d #%d buffer %d nonagle %s xchg %s " - "affinity %s eager %s share %d\n", + printf (LPX64"@%s:%d #%d buffer %d " + "nonagle %s affinity %s eager %s share %d\n", pcfg.pcfg_nid, ptl_ipaddr_2_str (pcfg.pcfg_id, buffer), pcfg.pcfg_misc, pcfg.pcfg_count, pcfg.pcfg_size, (pcfg.pcfg_flags & 1) ? "on" : "off", (pcfg.pcfg_flags & 2) ? "on" : "off", (pcfg.pcfg_flags & 4) ? "on" : "off", - (pcfg.pcfg_flags & 8) ? "on" : "off", pcfg.pcfg_wait); } @@ -476,14 +477,13 @@ jt_ptl_add_autoconnect (int argc, char **argv) ptl_nid_t nid; __u32 ip; int port; - int xchange_nids = 0; int irq_affinity = 0; int share = 0; int eager = 0; int rc; if (argc < 4 || argc > 5) { - fprintf (stderr, "usage: %s nid ipaddr port [ixse]\n", argv[0]); + fprintf (stderr, "usage: %s nid ipaddr port [ise]\n", argv[0]); return 0; } @@ -511,9 +511,6 @@ jt_ptl_add_autoconnect (int argc, char **argv) while (*opts != 0) switch (*opts++) { - case 'x': - xchange_nids = 1; - break; case 'i': irq_affinity = 1; break; @@ -537,10 +534,9 @@ jt_ptl_add_autoconnect (int argc, char **argv) /* only passing one buffer size! */ pcfg.pcfg_size = MAX (g_socket_rxmem, g_socket_txmem); pcfg.pcfg_flags = (g_socket_nonagle ? 0x01 : 0) | - (xchange_nids ? 0x02 : 0) | - (irq_affinity ? 0x04 : 0) | - (share ? 0x08 : 0) | - (eager ? 0x10 : 0); + (irq_affinity ? 0x02 : 0) | + (share ? 0x04 : 0) | + (eager ? 
0x08 : 0); rc = pcfg_ioctl (&pcfg); if (rc != 0) { @@ -636,10 +632,14 @@ jt_ptl_print_connections (int argc, char **argv) if (rc != 0) break; - printf (LPX64"@%s:%d\n", + printf (LPX64"@%s:%d:%s\n", pcfg.pcfg_nid, ptl_ipaddr_2_str (pcfg.pcfg_id, buffer), - pcfg.pcfg_misc); + pcfg.pcfg_misc, + (pcfg.pcfg_flags == SOCKNAL_CONN_ANY) ? "A" : + (pcfg.pcfg_flags == SOCKNAL_CONN_CONTROL) ? "C" : + (pcfg.pcfg_flags == SOCKNAL_CONN_BULK_IN) ? "I" : + (pcfg.pcfg_flags == SOCKNAL_CONN_BULK_OUT) ? "O" : "?"); } if (index == 0) @@ -647,82 +647,8 @@ jt_ptl_print_connections (int argc, char **argv) return 0; } -int -exchange_nids (int cfd, ptl_nid_t my_nid, ptl_nid_t *peer_nid) -{ - int rc; - ptl_hdr_t hdr; - ptl_magicversion_t *hmv = (ptl_magicversion_t *)&hdr.dest_nid; - - LASSERT (sizeof (*hmv) == sizeof (hdr.dest_nid)); - - memset (&hdr, 0, sizeof (hdr)); - - hmv->magic = __cpu_to_le32 (PORTALS_PROTO_MAGIC); - hmv->version_major = __cpu_to_le16 (PORTALS_PROTO_VERSION_MAJOR); - hmv->version_minor = __cpu_to_le16 (PORTALS_PROTO_VERSION_MINOR); - - hdr.src_nid = __cpu_to_le64 (my_nid); - hdr.type = __cpu_to_le32 (PTL_MSG_HELLO); - - /* Assume there's sufficient socket buffering for a portals HELLO header */ - rc = sock_write (cfd, &hdr, sizeof (hdr)); - if (rc != 0) { - perror ("Can't send initial HELLO"); - return (-1); - } - - /* First few bytes down the wire are the portals protocol magic and - * version, no matter what protocol version we're running. */ - - rc = sock_read (cfd, hmv, sizeof (*hmv)); - if (rc != 0) { - perror ("Can't read from peer"); - return (-1); - } - - if (hmv->magic != __cpu_to_le32 (PORTALS_PROTO_MAGIC)) { - fprintf (stderr, "Bad magic %#08x (%#08x expected)\n", - __le32_to_cpu (hmv->magic), PORTALS_PROTO_MAGIC); - return (-1); - } - - if (hmv->version_major != __cpu_to_le16 (PORTALS_PROTO_VERSION_MAJOR) || - hmv->version_minor != __cpu_to_le16 (PORTALS_PROTO_VERSION_MINOR)) { - fprintf (stderr, "Incompatible protocol version %d.%d (%d.%d expected)\n", - __le16_to_cpu (hmv->version_major), - __le16_to_cpu (hmv->version_minor), - PORTALS_PROTO_VERSION_MAJOR, - PORTALS_PROTO_VERSION_MINOR); - } - - /* version 0 sends magic/version as the dest_nid of a 'hello' header, - * so read the rest of it in now... 
*/ - LASSERT (PORTALS_PROTO_VERSION_MAJOR == 0); - rc = sock_read (cfd, hmv + 1, sizeof (hdr) - sizeof (*hmv)); - if (rc != 0) { - perror ("Can't read rest of HELLO hdr"); - return (-1); - } - - /* ...and check we got what we expected */ - if (hdr.type != __cpu_to_le32 (PTL_MSG_HELLO) || - PTL_HDR_LENGTH (&hdr) != __cpu_to_le32 (0)) { - fprintf (stderr, "Expecting a HELLO hdr with 0 payload," - " but got type %d with %d payload\n", - __le32_to_cpu (hdr.type), - __le32_to_cpu (PTL_HDR_LENGTH (&hdr))); - return (-1); - } - - *peer_nid = __le64_to_cpu (hdr.src_nid); - return (0); -} - int jt_ptl_connect(int argc, char **argv) { - ptl_nid_t peer_nid; - struct portal_ioctl_data data; struct portals_cfg pcfg; struct sockaddr_in srvaddr; __u32 ipaddr; @@ -732,13 +658,13 @@ int jt_ptl_connect(int argc, char **argv) int rxmem = 0; int txmem = 0; int bind_irq = 0; - int xchange_nids = 0; + int type = SOCKNAL_CONN_ANY; int port; int o; int olen; if (argc < 3) { - fprintf(stderr, "usage: %s ip port [xi]\n", argv[0]); + fprintf(stderr, "usage: %s ip port [iIOC]\n", argv[0]); return 0; } @@ -764,8 +690,28 @@ int jt_ptl_connect(int argc, char **argv) bind_irq = 1; break; - case 'x': - xchange_nids = 1; + case 'I': + if (type != SOCKNAL_CONN_ANY) { + fprintf(stderr, "Can't flag type twice\n"); + return -1; + } + type = SOCKNAL_CONN_BULK_IN; + break; + + case 'O': + if (type != SOCKNAL_CONN_ANY) { + fprintf(stderr, "Can't flag type twice\n"); + return -1; + } + type = SOCKNAL_CONN_BULK_OUT; + break; + + case 'C': + if (type != SOCKNAL_CONN_ANY) { + fprintf(stderr, "Can't flag type twice\n"); + return -1; + } + type = SOCKNAL_CONN_CONTROL; break; default: @@ -826,33 +772,19 @@ int jt_ptl_connect(int argc, char **argv) if (getsockopt (fd, IPPROTO_TCP, TCP_NODELAY, &nonagle, &olen) != 0) fprintf (stderr, "Can't get nagle: %s\n", strerror (errno)); - if (!xchange_nids) - peer_nid = ipaddr; - else { - PORTAL_IOC_INIT (data); - data.ioc_nal = g_nal; - rc = l_ioctl(PORTALS_DEV_ID, IOC_PORTAL_GET_NID, &data); - if (rc != 0) { - fprintf (stderr, "failed to get my nid: %s\n", - strerror (errno)); - close (fd); - return (-1); - } - - rc = exchange_nids (fd, data.ioc_nid, &peer_nid); - if (rc != 0) { - close (fd); - return (-1); - } - } - printf("Connected host: %s NID "LPX64" snd: %d rcv: %d nagle: %s\n", argv[1], - peer_nid, txmem, rxmem, nonagle ? "Disabled" : "Enabled"); + printf("Connected host: %s snd: %d rcv: %d nagle: %s type: %s\n", + argv[1], txmem, rxmem, nonagle ? "Disabled" : "Enabled", + (type == SOCKNAL_CONN_ANY) ? "A" : + (type == SOCKNAL_CONN_CONTROL) ? "C" : + (type == SOCKNAL_CONN_BULK_IN) ? "I" : + (type == SOCKNAL_CONN_BULK_OUT) ? 
"O" : "?"); PCFG_INIT(pcfg, NAL_CMD_REGISTER_PEER_FD); + pcfg.pcfg_nal = g_nal; pcfg.pcfg_fd = fd; - pcfg.pcfg_nid = peer_nid; pcfg.pcfg_flags = bind_irq; - + pcfg.pcfg_misc = type; + rc = pcfg_ioctl(&pcfg); if (rc) { fprintf(stderr, "failed to register fd with portals: %s\n", @@ -861,7 +793,7 @@ int jt_ptl_connect(int argc, char **argv) return -1; } - printf("Connection to "LPX64" registered with socknal\n", peer_nid); + printf("Connection to %s registered with socknal\n", argv[1]); rc = close(fd); if (rc) @@ -1219,7 +1151,7 @@ jt_ptl_nagle (int argc, char **argv) int jt_ptl_add_route (int argc, char **argv) { - struct portal_ioctl_data data; + struct portals_cfg pcfg; ptl_nid_t nid1; ptl_nid_t nid2; ptl_nid_t gateway_nid; @@ -1254,13 +1186,13 @@ jt_ptl_add_route (int argc, char **argv) return (-1); } - PORTAL_IOC_INIT(data); - data.ioc_nid = gateway_nid; - data.ioc_nal = g_nal; - data.ioc_nid2 = MIN (nid1, nid2); - data.ioc_nid3 = MAX (nid1, nid2); + PCFG_INIT(pcfg, IOC_PORTAL_ADD_ROUTE); + pcfg.pcfg_nid = gateway_nid; + pcfg.pcfg_nal = g_nal; + pcfg.pcfg_nid2 = MIN (nid1, nid2); + pcfg.pcfg_nid3 = MAX (nid1, nid2); - rc = l_ioctl(PORTALS_DEV_ID, IOC_PORTAL_ADD_ROUTE, &data); + rc = pcfg_ioctl(&pcfg); if (rc != 0) { fprintf (stderr, "IOC_PORTAL_ADD_ROUTE failed: %s\n", strerror (errno)); @@ -1273,7 +1205,7 @@ jt_ptl_add_route (int argc, char **argv) int jt_ptl_del_route (int argc, char **argv) { - struct portal_ioctl_data data; + struct portals_cfg pcfg; ptl_nid_t nid; ptl_nid_t nid1 = PTL_NID_ANY; ptl_nid_t nid2 = PTL_NID_ANY; @@ -1317,13 +1249,13 @@ jt_ptl_del_route (int argc, char **argv) } } - PORTAL_IOC_INIT(data); - data.ioc_nal = g_nal; - data.ioc_nid = nid; - data.ioc_nid2 = nid1; - data.ioc_nid3 = nid2; + PCFG_INIT(pcfg, IOC_PORTAL_DEL_ROUTE); + pcfg.pcfg_nal = g_nal; + pcfg.pcfg_nid = nid; + pcfg.pcfg_nid2 = nid1; + pcfg.pcfg_nid3 = nid2; - rc = l_ioctl(PORTALS_DEV_ID, IOC_PORTAL_DEL_ROUTE, &data); + rc = pcfg_ioctl(&pcfg); if (rc != 0) { fprintf (stderr, "IOC_PORTAL_DEL_ROUTE ("LPX64") failed: %s\n", nid, strerror (errno)); @@ -1336,7 +1268,7 @@ jt_ptl_del_route (int argc, char **argv) int jt_ptl_notify_router (int argc, char **argv) { - struct portal_ioctl_data data; + struct portals_cfg pcfg; int enable; ptl_nid_t nid; int rc; @@ -1376,14 +1308,14 @@ jt_ptl_notify_router (int argc, char **argv) return (-1); } - PORTAL_IOC_INIT(data); - data.ioc_nal = g_nal; - data.ioc_nid = nid; - data.ioc_flags = enable; + PCFG_INIT(pcfg, IOC_PORTAL_NOTIFY_ROUTER); + pcfg.pcfg_nal = g_nal; + pcfg.pcfg_nid = nid; + pcfg.pcfg_flags = enable; /* Yeuch; 'cept I need a __u64 on 64 bit machines... 
*/ - data.ioc_nid3 = (__u64)when; + pcfg.pcfg_nid3 = (__u64)when; - rc = l_ioctl(PORTALS_DEV_ID, IOC_PORTAL_NOTIFY_ROUTER, &data); + rc = pcfg_ioctl(&pcfg); if (rc != 0) { fprintf (stderr, "IOC_PORTAL_NOTIFY_ROUTER ("LPX64") failed: %s\n", @@ -1398,7 +1330,7 @@ int jt_ptl_print_routes (int argc, char **argv) { char buffer[3][128]; - struct portal_ioctl_data data; + struct portals_cfg pcfg; int rc; int index; int gateway_nal; @@ -1409,18 +1341,18 @@ jt_ptl_print_routes (int argc, char **argv) for (index = 0;;index++) { - PORTAL_IOC_INIT(data); - data.ioc_count = index; + PCFG_INIT(pcfg, IOC_PORTAL_GET_ROUTE); + pcfg.pcfg_count = index; - rc = l_ioctl(PORTALS_DEV_ID, IOC_PORTAL_GET_ROUTE, &data); + rc = pcfg_ioctl(&pcfg); if (rc != 0) break; - gateway_nal = data.ioc_nal; - gateway_nid = data.ioc_nid; - nid1 = data.ioc_nid2; - nid2 = data.ioc_nid3; - alive = data.ioc_flags; + gateway_nal = pcfg.pcfg_nal; + gateway_nid = pcfg.pcfg_nid; + nid1 = pcfg.pcfg_nid2; + nid2 = pcfg.pcfg_nid3; + alive = pcfg.pcfg_flags; printf ("%8s %18s : %s - %s, %s\n", nal2name (gateway_nal), diff --git a/lnet/utils/ptlctl.c b/lnet/utils/ptlctl.c index 1a8e637..c65ecb2 100644 --- a/lnet/utils/ptlctl.c +++ b/lnet/utils/ptlctl.c @@ -31,10 +31,10 @@ command_t list[] = { {"network", jt_ptl_network, 0,"setup the NAL (args: nal name)"}, {"print_autoconns", jt_ptl_print_autoconnects, 0, "print autoconnect entries (no args)"}, - {"add_autoconn", jt_ptl_add_autoconnect, 0, "add autoconnect entry (args: nid host [ixse])"}, + {"add_autoconn", jt_ptl_add_autoconnect, 0, "add autoconnect entry (args: nid host [ise])"}, {"del_autoconn", jt_ptl_del_autoconnect, 0, "delete autoconnect entry (args: [nid] [host] [ks])"}, {"print_conns", jt_ptl_print_connections, 0, "print connections (no args)"}, - {"connect", jt_ptl_connect, 0, "connect to a remote nid (args: host port [xi])"}, + {"connect", jt_ptl_connect, 0, "connect to a remote nid (args: host port [iIOC])"}, {"disconnect", jt_ptl_disconnect, 0, "disconnect from a remote nid (args: [nid] [host]"}, {"push", jt_ptl_push_connection, 0, "flush connection to a remote nid (args: [nid]"}, {"active_tx", jt_ptl_print_active_txs, 0, "print active transmits (no args)"}, diff --git a/lustre/kernel_patches/patches/iod-stock-exports-2.4.22.patch b/lustre/kernel_patches/patches/iod-stock-exports-2.4.22.patch new file mode 100644 index 0000000..47a6ff9 --- /dev/null +++ b/lustre/kernel_patches/patches/iod-stock-exports-2.4.22.patch @@ -0,0 +1,52 @@ + fs/Makefile | 2 +- + fs/inode.c | 4 +++- + mm/page_alloc.c | 1 + + 3 files changed, 5 insertions(+), 2 deletions(-) + +Index: linux-2.4.22-vanilla/fs/inode.c +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/inode.c 2003-11-03 23:22:24.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/inode.c 2003-11-03 23:25:00.000000000 +0300 +@@ -5,6 +5,7 @@ + */ + + #include ++#include + #include + #include + #include +@@ -66,7 +67,8 @@ + * NOTE! You also have to own the lock if you change + * the i_state of an inode while it is in use.. + */ +-static spinlock_t inode_lock = SPIN_LOCK_UNLOCKED; ++spinlock_t inode_lock = SPIN_LOCK_UNLOCKED; ++EXPORT_SYMBOL(inode_lock); + + /* + * Statistics gathering.. 
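Aside on the fs/inode.c hunk above: un-staticizing and exporting inode_lock lets an out-of-tree module take the VFS inode-list lock directly. As a minimal sketch of such a consumer, assuming the stock 2.4 VFS; the helper name and its walk of sb->s_dirty are hypothetical illustration, not the actual Lustre iod code:

    /* Hypothetical out-of-tree consumer of the exported inode_lock.
     * Counts inodes on a superblock's dirty list; 2.4 VFS only. */
    #include <linux/fs.h>
    #include <linux/list.h>
    #include <linux/module.h>
    #include <linux/spinlock.h>

    extern spinlock_t inode_lock;   /* exported by the hunk above */

    static int count_dirty_inodes(struct super_block *sb)
    {
            struct list_head *pos;
            int n = 0;

            spin_lock(&inode_lock);          /* guards sb->s_dirty */
            list_for_each(pos, &sb->s_dirty)
                    n++;
            spin_unlock(&inode_lock);
            return n;
    }

The pgdat_list export in the mm/page_alloc.c hunk below serves the same purpose: making a symbol that stock 2.4.22 keeps private available to modules.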
+Index: linux-2.4.22-vanilla/fs/Makefile +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/Makefile 2003-11-03 23:22:11.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/Makefile 2003-11-03 23:25:24.000000000 +0300 +@@ -7,7 +7,7 @@ + + O_TARGET := fs.o + +-export-objs := filesystems.o open.o dcache.o buffer.o dquot.o ++export-objs := filesystems.o open.o dcache.o buffer.o dquot.o inode.o + mod-subdirs := nls + + obj-y := open.o read_write.o devices.o file_table.o buffer.o \ +Index: linux-2.4.22-vanilla/mm/page_alloc.c +=================================================================== +--- linux-2.4.22-vanilla.orig/mm/page_alloc.c 2003-11-03 23:21:29.000000000 +0300 ++++ linux-2.4.22-vanilla/mm/page_alloc.c 2003-11-03 23:25:00.000000000 +0300 +@@ -28,6 +28,7 @@ + LIST_HEAD(inactive_list); + LIST_HEAD(active_list); + pg_data_t *pgdat_list; ++EXPORT_SYMBOL(pgdat_list); + + /* + * diff --git a/lustre/kernel_patches/patches/kernel_text_address-2.4.20-rh.patch b/lustre/kernel_patches/patches/kernel_text_address-2.4.20-rh.patch new file mode 100644 index 0000000..6e78be1 --- /dev/null +++ b/lustre/kernel_patches/patches/kernel_text_address-2.4.20-rh.patch @@ -0,0 +1,68 @@ +Index: linux-2.4.20-20.9/kernel/kksymoops.c +=================================================================== +--- linux-2.4.20-20.9.orig/kernel/kksymoops.c 2003-08-18 23:16:51.000000000 +0800 ++++ linux-2.4.20-20.9/kernel/kksymoops.c 2003-11-06 18:38:12.000000000 +0800 +@@ -80,3 +80,5 @@ + printk("%s\n",modlist); + #endif + } ++ ++EXPORT_SYMBOL(lookup_symbol); +Index: linux-2.4.20-20.9/kernel/Makefile +=================================================================== +--- linux-2.4.20-20.9.orig/kernel/Makefile 2003-11-06 18:35:56.000000000 +0800 ++++ linux-2.4.20-20.9/kernel/Makefile 2003-11-06 18:42:57.000000000 +0800 +@@ -9,7 +9,7 @@ + + O_TARGET := kernel.o + +-export-objs = signal.o sys.o kmod.o context.o ksyms.o pm.o exec_domain.o printk.o cpufreq.o profile.o ++export-objs = signal.o sys.o kmod.o context.o ksyms.o pm.o exec_domain.o printk.o cpufreq.o profile.o kksymoops.o + + obj-y = sched.o dma.o fork.o exec_domain.o panic.o printk.o lowlat.o profile.o \ + module.o exit.o itimer.o info.o time.o softirq.o resource.o \ +Index: linux-2.4.20-20.9/arch/i386/kernel/traps.c +=================================================================== +--- linux-2.4.20-20.9.orig/arch/i386/kernel/traps.c 2003-11-06 18:35:56.000000000 +0800 ++++ linux-2.4.20-20.9/arch/i386/kernel/traps.c 2003-11-06 18:43:26.000000000 +0800 +@@ -1027,9 +1019,39 @@ + #endif + } + ++#ifdef CONFIG_MODULES ++extern struct module *module_list; ++extern struct module kernel_module; ++#endif ++ ++int is_kernel_text_address(unsigned long addr) ++{ ++ int retval = 0; ++#ifdef CONFIG_MODULES ++ struct module *mod; ++#endif ++ if (addr >= (unsigned long) &_stext && ++ addr <= (unsigned long) &_etext) ++ return 1; ++ ++#ifdef CONFIG_MODULES ++ for (mod = module_list; mod != &kernel_module; mod = mod->next) { ++ /* mod_bound tests for addr being inside the vmalloc'ed ++ * module area. Of course it'd be better to test only ++ * for the .text subset... 
*/ ++ if (mod_bound(addr, 0, mod)) { ++ retval = 1; ++ break; ++ } ++ } ++#endif ++ ++ return retval; ++} ++ + EXPORT_SYMBOL_GPL(netdump_func); + EXPORT_SYMBOL_GPL(netdump_mode); + #if CONFIG_X86_LOCAL_APIC + EXPORT_SYMBOL_GPL(nmi_watchdog); + #endif +- ++EXPORT_SYMBOL_GPL(is_kernel_text_address); diff --git a/lustre/kernel_patches/patches/kernel_text_address-2.4.20-vanilla.patch b/lustre/kernel_patches/patches/kernel_text_address-2.4.20-vanilla.patch new file mode 100644 index 0000000..14767592 --- /dev/null +++ b/lustre/kernel_patches/patches/kernel_text_address-2.4.20-vanilla.patch @@ -0,0 +1,56 @@ +Index: linux-2.4.20/arch/um/kernel/Makefile +=================================================================== +--- linux-2.4.20.orig/arch/um/kernel/Makefile 2003-11-07 15:54:41.000000000 +0800 ++++ linux-2.4.20/arch/um/kernel/Makefile 2003-11-07 15:57:08.000000000 +0800 +@@ -37,7 +37,8 @@ + export-objs-$(CONFIG_GPROF) += gprof_syms.o + export-objs-$(CONFIG_GCOV) += gmon_syms.o + +-export-objs = ksyms.o process_kern.o signal_kern.o user_syms.o $(export-objs-y) ++export-objs = ksyms.o process_kern.o signal_kern.o user_syms.o sysrq.o \ ++ $(export-objs-y) + + CFLAGS_user_syms.o = -D__AUTOCONF_INCLUDED__ $(DMODULES-y) $(DMODVERSIONS-y) \ + -I/usr/include -I../include +Index: linux-2.4.20/arch/um/kernel/sysrq.c +=================================================================== +--- linux-2.4.20.orig/arch/um/kernel/sysrq.c 2003-11-07 15:54:41.000000000 +0800 ++++ linux-2.4.20/arch/um/kernel/sysrq.c 2003-11-07 16:02:48.000000000 +0800 +@@ -86,6 +86,37 @@ + show_trace((unsigned long *)esp); + } + ++#ifdef CONFIG_MODULES ++extern struct module *module_list; ++extern struct module kernel_module; ++#endif ++ ++int is_kernel_text_address(unsigned long addr) ++{ ++ int retval = 0; ++#ifdef CONFIG_MODULES ++ struct module *mod; ++#endif ++ if (addr >= (unsigned long) &_stext && ++ addr <= (unsigned long) &_etext) ++ return 1; ++ ++#ifdef CONFIG_MODULES ++ for (mod = module_list; mod != &kernel_module; mod = mod->next) { ++ /* mod_bound tests for addr being inside the vmalloc'ed ++ * module area. Of course it'd be better to test only ++ * for the .text subset... */ ++ if (mod_bound(addr, 0, mod)) { ++ retval = 1; ++ break; ++ } ++ } ++#endif ++ return retval; ++} ++ ++EXPORT_SYMBOL(is_kernel_text_address); ++ + /* + * Overrides for Emacs so that we follow Linus's tabbing style. 
+ * Emacs will notice this stuff at the end of the file and automatically diff --git a/lustre/kernel_patches/patches/linux-2.4.22-xattr-0.8.54.patch b/lustre/kernel_patches/patches/linux-2.4.22-xattr-0.8.54.patch new file mode 100644 index 0000000..c5abbf3 --- /dev/null +++ b/lustre/kernel_patches/patches/linux-2.4.22-xattr-0.8.54.patch @@ -0,0 +1,5460 @@ + Documentation/Configure.help | 66 ++ + arch/alpha/defconfig | 7 + arch/alpha/kernel/entry.S | 12 + arch/arm/defconfig | 7 + arch/arm/kernel/calls.S | 24 + arch/i386/defconfig | 7 + arch/ia64/defconfig | 7 + arch/ia64/kernel/entry.S | 24 + arch/m68k/defconfig | 7 + arch/mips/defconfig | 7 + arch/mips64/defconfig | 7 + arch/ppc/defconfig | 14 + arch/ppc64/kernel/misc.S | 2 + arch/s390/defconfig | 7 + arch/s390/kernel/entry.S | 24 + arch/s390x/defconfig | 7 + arch/s390x/kernel/entry.S | 24 + arch/s390x/kernel/wrapper32.S | 92 +++ + arch/sparc/defconfig | 7 + arch/sparc/kernel/systbls.S | 10 + arch/sparc64/defconfig | 7 + arch/sparc64/kernel/systbls.S | 20 + fs/Config.in | 14 + fs/Makefile | 3 + fs/ext2/Makefile | 4 + fs/ext2/file.c | 5 + fs/ext2/ialloc.c | 2 + fs/ext2/inode.c | 34 - + fs/ext2/namei.c | 14 + fs/ext2/super.c | 29 + fs/ext2/symlink.c | 14 + fs/ext2/xattr.c | 1212 +++++++++++++++++++++++++++++++++++++++++ + fs/ext2/xattr_user.c | 103 +++ + fs/ext3/Makefile | 10 + fs/ext3/file.c | 5 + fs/ext3/ialloc.c | 2 + fs/ext3/inode.c | 35 - + fs/ext3/namei.c | 21 + fs/ext3/super.c | 36 + + fs/ext3/symlink.c | 14 + fs/ext3/xattr.c | 1225 ++++++++++++++++++++++++++++++++++++++++++ + fs/ext3/xattr_user.c | 111 +++ + fs/jfs/jfs_xattr.h | 6 + fs/jfs/xattr.c | 6 + fs/mbcache.c | 648 ++++++++++++++++++++++ + include/asm-arm/unistd.h | 2 + include/asm-ia64/unistd.h | 13 + include/asm-ppc64/unistd.h | 2 + include/asm-s390/unistd.h | 15 + include/asm-s390x/unistd.h | 15 + include/asm-sparc/unistd.h | 24 + include/asm-sparc64/unistd.h | 24 + include/linux/cache_def.h | 15 + include/linux/errno.h | 4 + include/linux/ext2_fs.h | 31 - + include/linux/ext2_xattr.h | 157 +++++ + include/linux/ext3_fs.h | 31 - + include/linux/ext3_jbd.h | 8 + include/linux/ext3_xattr.h | 157 +++++ + include/linux/fs.h | 2 + include/linux/mbcache.h | 69 ++ + kernel/ksyms.c | 4 + mm/vmscan.c | 35 + + fs/ext3/ext3-exports.c | 14 + + 64 files changed, 4355 insertions(+), 195 deletions(-) + +Index: linux-2.4.22-vanilla/Documentation/Configure.help +=================================================================== +--- linux-2.4.22-vanilla.orig/Documentation/Configure.help 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/Documentation/Configure.help 2003-11-03 23:41:29.000000000 +0300 +@@ -15613,6 +15613,39 @@ + be compiled as a module, and so this could be dangerous. Most + everyone wants to say Y here. + ++Ext2 extended attributes ++CONFIG_EXT2_FS_XATTR ++ Extended attributes are name:value pairs associated with inodes by ++ the kernel or by users (see the attr(5) manual page, or visit ++ <http://acl.bestbits.at/> for details). ++ ++ If unsure, say N. ++ ++Ext2 extended attribute block sharing ++CONFIG_EXT2_FS_XATTR_SHARING ++ This option enables code for sharing identical extended attribute ++ blocks among multiple inodes. ++ ++ Usually, say Y. ++ ++Ext2 extended user attributes ++CONFIG_EXT2_FS_XATTR_USER ++ This option enables extended user attributes on ext2. Processes can ++ associate extended user attributes with inodes to store additional ++ information such as the character encoding of files, etc. (see the ++ attr(5) manual page, or visit <http://acl.bestbits.at/> for details). ++ ++ If unsure, say N. 
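The help entries above describe the feature in prose; as a short user-space sketch of the interface these patches enable, the file path and attribute name below are made up, and on 2.4-era systems the declarations come from <attr/xattr.h> in the attr package rather than <sys/xattr.h>:

    /* Set and read back a "user." extended attribute on an
     * xattr-enabled filesystem. Path and name are hypothetical. */
    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/xattr.h>    /* <attr/xattr.h> on 2.4-era systems */

    int main(void)
    {
            const char *path = "/mnt/ext2/somefile";
            char buf[64];
            ssize_t len;

            if (setxattr(path, "user.charset", "utf-8", 5, 0) != 0)
                    perror("setxattr");

            len = getxattr(path, "user.charset", buf, sizeof(buf));
            if (len >= 0)
                    printf("user.charset = %.*s\n", (int)len, buf);
            return 0;
    }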
++ ++Ext2 trusted extended attributes ++CONFIG_EXT2_FS_XATTR_TRUSTED ++ This option enables extended attributes on ext2 that are accessible ++ (and visible) only to users capable of CAP_SYS_ADMIN. Usually this ++ is only the super user. Trusted extended attributes are meant for ++ implementing system/security services. ++ ++ If unsure, say N. ++ + Ext3 journalling file system support (EXPERIMENTAL) + CONFIG_EXT3_FS + This is the journalling version of the Second extended file system +@@ -15645,6 +15678,39 @@ + of your root partition (the one containing the directory /) cannot + be compiled as a module, and so this may be dangerous. + ++Ext3 extended attributes ++CONFIG_EXT3_FS_XATTR ++ Extended attributes are name:value pairs associated with inodes by ++ the kernel or by users (see the attr(5) manual page, or visit ++ <http://acl.bestbits.at/> for details). ++ ++ If unsure, say N. ++ ++Ext3 extended attribute block sharing ++CONFIG_EXT3_FS_XATTR_SHARING ++ This option enables code for sharing identical extended attribute ++ blocks among multiple inodes. ++ ++ Usually, say Y. ++ ++Ext3 extended user attributes ++CONFIG_EXT3_FS_XATTR_USER ++ This option enables extended user attributes on ext3. Processes can ++ associate extended user attributes with inodes to store additional ++ information such as the character encoding of files, etc. (see the ++ attr(5) manual page, or visit <http://acl.bestbits.at/> for details). ++ ++ If unsure, say N. ++ ++Ext3 trusted extended attributes ++CONFIG_EXT3_FS_XATTR_TRUSTED ++ This option enables extended attributes on ext3 that are accessible ++ (and visible) only to users capable of CAP_SYS_ADMIN. Usually this ++ is only the super user. Trusted extended attributes are meant for ++ implementing system/security services. ++ ++ If unsure, say N. ++ + Journal Block Device support (JBD for ext3) (EXPERIMENTAL) + CONFIG_JBD + This is a generic journalling layer for block devices. It is +Index: linux-2.4.22-vanilla/arch/alpha/defconfig +=================================================================== +--- linux-2.4.22-vanilla.orig/arch/alpha/defconfig 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/arch/alpha/defconfig 2003-11-03 23:41:29.000000000 +0300 +@@ -1,6 +1,13 @@ + # + # Automatically generated make config: don't edit + # ++# CONFIG_EXT3_FS_XATTR is not set ++# CONFIG_EXT3_FS_XATTR_SHARING is not set ++# CONFIG_EXT3_FS_XATTR_USER is not set ++# CONFIG_EXT2_FS_XATTR is not set ++# CONFIG_EXT2_FS_XATTR_SHARING is not set ++# CONFIG_EXT2_FS_XATTR_USER is not set ++# CONFIG_FS_MBCACHE is not set + CONFIG_ALPHA=y + # CONFIG_UID16 is not set + # CONFIG_RWSEM_GENERIC_SPINLOCK is not set +Index: linux-2.4.22-vanilla/arch/alpha/kernel/entry.S +=================================================================== +--- linux-2.4.22-vanilla.orig/arch/alpha/kernel/entry.S 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/arch/alpha/kernel/entry.S 2003-11-03 23:41:29.000000000 +0300 +@@ -1154,6 +1154,18 @@ + .quad sys_readahead + .quad sys_ni_syscall /* 380, sys_security */ + .quad sys_tkill ++ .quad sys_setxattr ++ .quad sys_lsetxattr ++ .quad sys_fsetxattr ++ .quad sys_getxattr /* 385 */ ++ .quad sys_lgetxattr ++ .quad sys_fgetxattr ++ .quad sys_listxattr ++ .quad sys_llistxattr ++ .quad sys_flistxattr /* 390 */ ++ .quad sys_removexattr ++ .quad sys_lremovexattr ++ .quad sys_fremovexattr + + /* Remember to update everything, kids. */ + .ifne (. 
- sys_call_table) - (NR_SYSCALLS * 8) +Index: linux-2.4.22-vanilla/arch/arm/defconfig +=================================================================== +--- linux-2.4.22-vanilla.orig/arch/arm/defconfig 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/arch/arm/defconfig 2003-11-03 23:41:29.000000000 +0300 +@@ -1,6 +1,13 @@ + # + # Automatically generated make config: don't edit + # ++# CONFIG_EXT3_FS_XATTR is not set ++# CONFIG_EXT3_FS_XATTR_SHARING is not set ++# CONFIG_EXT3_FS_XATTR_USER is not set ++# CONFIG_EXT2_FS_XATTR is not set ++# CONFIG_EXT2_FS_XATTR_SHARING is not set ++# CONFIG_EXT2_FS_XATTR_USER is not set ++# CONFIG_FS_MBCACHE is not set + CONFIG_ARM=y + # CONFIG_EISA is not set + # CONFIG_SBUS is not set +Index: linux-2.4.22-vanilla/arch/arm/kernel/calls.S +=================================================================== +--- linux-2.4.22-vanilla.orig/arch/arm/kernel/calls.S 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/arch/arm/kernel/calls.S 2003-11-03 23:41:29.000000000 +0300 +@@ -240,18 +240,18 @@ + .long SYMBOL_NAME(sys_ni_syscall) /* Security */ + .long SYMBOL_NAME(sys_gettid) + /* 225 */ .long SYMBOL_NAME(sys_readahead) +- .long SYMBOL_NAME(sys_ni_syscall) /* setxattr */ +- .long SYMBOL_NAME(sys_ni_syscall) /* lsetxattr */ +- .long SYMBOL_NAME(sys_ni_syscall) /* fsetxattr */ +- .long SYMBOL_NAME(sys_ni_syscall) /* getxattr */ +-/* 230 */ .long SYMBOL_NAME(sys_ni_syscall) /* lgetxattr */ +- .long SYMBOL_NAME(sys_ni_syscall) /* fgetxattr */ +- .long SYMBOL_NAME(sys_ni_syscall) /* listxattr */ +- .long SYMBOL_NAME(sys_ni_syscall) /* llistxattr */ +- .long SYMBOL_NAME(sys_ni_syscall) /* flistxattr */ +-/* 235 */ .long SYMBOL_NAME(sys_ni_syscall) /* removexattr */ +- .long SYMBOL_NAME(sys_ni_syscall) /* lremovexattr */ +- .long SYMBOL_NAME(sys_ni_syscall) /* fremovexattr */ ++ .long SYMBOL_NAME(sys_setxattr) ++ .long SYMBOL_NAME(sys_lsetxattr) ++ .long SYMBOL_NAME(sys_fsetxattr) ++ .long SYMBOL_NAME(sys_getxattr) ++/* 230 */ .long SYMBOL_NAME(sys_lgetxattr) ++ .long SYMBOL_NAME(sys_fgetxattr) ++ .long SYMBOL_NAME(sys_listxattr) ++ .long SYMBOL_NAME(sys_llistxattr) ++ .long SYMBOL_NAME(sys_flistxattr) ++/* 235 */ .long SYMBOL_NAME(sys_removexattr) ++ .long SYMBOL_NAME(sys_lremovexattr) ++ .long SYMBOL_NAME(sys_fremovexattr) + .long SYMBOL_NAME(sys_tkill) + .long SYMBOL_NAME(sys_ni_syscall) /* sendfile64 */ + /* 240 */ .long SYMBOL_NAME(sys_ni_syscall) /* futex */ +Index: linux-2.4.22-vanilla/arch/i386/defconfig +=================================================================== +--- linux-2.4.22-vanilla.orig/arch/i386/defconfig 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/arch/i386/defconfig 2003-11-03 23:41:29.000000000 +0300 +@@ -1,6 +1,13 @@ + # + # Automatically generated make config: don't edit + # ++# CONFIG_EXT3_FS_XATTR is not set ++# CONFIG_EXT3_FS_XATTR_SHARING is not set ++# CONFIG_EXT3_FS_XATTR_USER is not set ++# CONFIG_EXT2_FS_XATTR is not set ++# CONFIG_EXT2_FS_XATTR_SHARING is not set ++# CONFIG_EXT2_FS_XATTR_USER is not set ++# CONFIG_FS_MBCACHE is not set + CONFIG_X86=y + CONFIG_ISA=y + # CONFIG_SBUS is not set +Index: linux-2.4.22-vanilla/arch/ia64/defconfig +=================================================================== +--- linux-2.4.22-vanilla.orig/arch/ia64/defconfig 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/arch/ia64/defconfig 2003-11-03 23:41:29.000000000 +0300 +@@ -1,6 +1,13 @@ + # + # Automatically generated make config: don't edit + # ++# CONFIG_EXT3_FS_XATTR is not set 
++# CONFIG_EXT3_FS_XATTR_SHARING is not set ++# CONFIG_EXT3_FS_XATTR_USER is not set ++# CONFIG_EXT2_FS_XATTR is not set ++# CONFIG_EXT2_FS_XATTR_SHARING is not set ++# CONFIG_EXT2_FS_XATTR_USER is not set ++# CONFIG_FS_MBCACHE is not set + + # + # Code maturity level options +Index: linux-2.4.22-vanilla/arch/m68k/defconfig +=================================================================== +--- linux-2.4.22-vanilla.orig/arch/m68k/defconfig 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/arch/m68k/defconfig 2003-11-03 23:41:29.000000000 +0300 +@@ -1,6 +1,13 @@ + # + # Automatically generated make config: don't edit + # ++# CONFIG_EXT3_FS_XATTR is not set ++# CONFIG_EXT3_FS_XATTR_SHARING is not set ++# CONFIG_EXT3_FS_XATTR_USER is not set ++# CONFIG_EXT2_FS_XATTR is not set ++# CONFIG_EXT2_FS_XATTR_SHARING is not set ++# CONFIG_EXT2_FS_XATTR_USER is not set ++# CONFIG_FS_MBCACHE is not set + CONFIG_UID16=y + + # +Index: linux-2.4.22-vanilla/arch/mips/defconfig +=================================================================== +--- linux-2.4.22-vanilla.orig/arch/mips/defconfig 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/arch/mips/defconfig 2003-11-03 23:41:29.000000000 +0300 +@@ -1,6 +1,13 @@ + # + # Automatically generated make config: don't edit + # ++# CONFIG_EXT3_FS_XATTR is not set ++# CONFIG_EXT3_FS_XATTR_SHARING is not set ++# CONFIG_EXT3_FS_XATTR_USER is not set ++# CONFIG_EXT2_FS_XATTR is not set ++# CONFIG_EXT2_FS_XATTR_SHARING is not set ++# CONFIG_EXT2_FS_XATTR_USER is not set ++# CONFIG_FS_MBCACHE is not set + CONFIG_MIPS=y + CONFIG_MIPS32=y + # CONFIG_MIPS64 is not set +Index: linux-2.4.22-vanilla/arch/mips64/defconfig +=================================================================== +--- linux-2.4.22-vanilla.orig/arch/mips64/defconfig 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/arch/mips64/defconfig 2003-11-03 23:41:29.000000000 +0300 +@@ -1,6 +1,13 @@ + # + # Automatically generated make config: don't edit + # ++# CONFIG_EXT3_FS_XATTR is not set ++# CONFIG_EXT3_FS_XATTR_SHARING is not set ++# CONFIG_EXT3_FS_XATTR_USER is not set ++# CONFIG_EXT2_FS_XATTR is not set ++# CONFIG_EXT2_FS_XATTR_SHARING is not set ++# CONFIG_EXT2_FS_XATTR_USER is not set ++# CONFIG_FS_MBCACHE is not set + CONFIG_MIPS=y + # CONFIG_MIPS32 is not set + CONFIG_MIPS64=y +Index: linux-2.4.22-vanilla/arch/s390/defconfig +=================================================================== +--- linux-2.4.22-vanilla.orig/arch/s390/defconfig 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/arch/s390/defconfig 2003-11-03 23:41:29.000000000 +0300 +@@ -1,6 +1,13 @@ + # + # Automatically generated make config: don't edit + # ++# CONFIG_EXT3_FS_XATTR is not set ++# CONFIG_EXT3_FS_XATTR_SHARING is not set ++# CONFIG_EXT3_FS_XATTR_USER is not set ++# CONFIG_EXT2_FS_XATTR is not set ++# CONFIG_EXT2_FS_XATTR_SHARING is not set ++# CONFIG_EXT2_FS_XATTR_USER is not set ++# CONFIG_FS_MBCACHE is not set + # CONFIG_ISA is not set + # CONFIG_EISA is not set + # CONFIG_MCA is not set +Index: linux-2.4.22-vanilla/arch/s390/kernel/entry.S +=================================================================== +--- linux-2.4.22-vanilla.orig/arch/s390/kernel/entry.S 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/arch/s390/kernel/entry.S 2003-11-03 23:41:29.000000000 +0300 +@@ -558,18 +558,18 @@ + .long sys_fcntl64 + .long sys_readahead + .long sys_ni_syscall +- .long sys_ni_syscall /* 224 - reserved for setxattr */ +- .long sys_ni_syscall /* 225 - 
reserved for lsetxattr */ +- .long sys_ni_syscall /* 226 - reserved for fsetxattr */ +- .long sys_ni_syscall /* 227 - reserved for getxattr */ +- .long sys_ni_syscall /* 228 - reserved for lgetxattr */ +- .long sys_ni_syscall /* 229 - reserved for fgetxattr */ +- .long sys_ni_syscall /* 230 - reserved for listxattr */ +- .long sys_ni_syscall /* 231 - reserved for llistxattr */ +- .long sys_ni_syscall /* 232 - reserved for flistxattr */ +- .long sys_ni_syscall /* 233 - reserved for removexattr */ +- .long sys_ni_syscall /* 234 - reserved for lremovexattr */ +- .long sys_ni_syscall /* 235 - reserved for fremovexattr */ ++ .long sys_setxattr ++ .long sys_lsetxattr /* 225 */ ++ .long sys_fsetxattr ++ .long sys_getxattr ++ .long sys_lgetxattr ++ .long sys_fgetxattr ++ .long sys_listxattr /* 230 */ ++ .long sys_llistxattr ++ .long sys_flistxattr ++ .long sys_removexattr ++ .long sys_lremovexattr ++ .long sys_fremovexattr /* 235 */ + .long sys_gettid + .long sys_tkill + .rept 255-237 +Index: linux-2.4.22-vanilla/arch/s390x/defconfig +=================================================================== +--- linux-2.4.22-vanilla.orig/arch/s390x/defconfig 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/arch/s390x/defconfig 2003-11-03 23:41:29.000000000 +0300 +@@ -1,6 +1,13 @@ + # + # Automatically generated make config: don't edit + # ++# CONFIG_EXT3_FS_XATTR is not set ++# CONFIG_EXT3_FS_XATTR_SHARING is not set ++# CONFIG_EXT3_FS_XATTR_USER is not set ++# CONFIG_EXT2_FS_XATTR is not set ++# CONFIG_EXT2_FS_XATTR_SHARING is not set ++# CONFIG_EXT2_FS_XATTR_USER is not set ++# CONFIG_FS_MBCACHE is not set + # CONFIG_ISA is not set + # CONFIG_EISA is not set + # CONFIG_MCA is not set +Index: linux-2.4.22-vanilla/arch/s390x/kernel/entry.S +=================================================================== +--- linux-2.4.22-vanilla.orig/arch/s390x/kernel/entry.S 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/arch/s390x/kernel/entry.S 2003-11-03 23:41:29.000000000 +0300 +@@ -591,18 +591,18 @@ + .long SYSCALL(sys_ni_syscall,sys32_fcntl64_wrapper) + .long SYSCALL(sys_readahead,sys32_readahead) + .long SYSCALL(sys_ni_syscall,sys_ni_syscall) +- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 224 - reserved for setxattr */ +- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 225 - reserved for lsetxattr */ +- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 226 - reserved for fsetxattr */ +- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 227 - reserved for getxattr */ +- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 228 - reserved for lgetxattr */ +- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 229 - reserved for fgetxattr */ +- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 230 - reserved for listxattr */ +- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 231 - reserved for llistxattr */ +- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 232 - reserved for flistxattr */ +- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 233 - reserved for removexattr */ +- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 234 - reserved for lremovexattr */ +- .long SYSCALL(sys_ni_syscall,sys_ni_syscall) /* 235 - reserved for fremovexattr */ ++ .long SYSCALL(sys_setxattr,sys32_setxattr_wrapper) ++ .long SYSCALL(sys_lsetxattr,sys32_lsetxattr_wrapper) /* 225 */ ++ .long SYSCALL(sys_fsetxattr,sys32_fsetxattr_wrapper) ++ .long SYSCALL(sys_getxattr,sys32_getxattr_wrapper) ++ .long SYSCALL(sys_lgetxattr,sys32_lgetxattr_wrapper) ++ .long SYSCALL(sys_fgetxattr,sys32_fgetxattr_wrapper) ++ .long 
SYSCALL(sys_listxattr,sys32_listxattr_wrapper) /* 230 */ ++ .long SYSCALL(sys_llistxattr,sys32_llistxattr_wrapper) ++ .long SYSCALL(sys_flistxattr,sys32_flistxattr_wrapper) ++ .long SYSCALL(sys_removexattr,sys32_removexattr_wrapper) ++ .long SYSCALL(sys_lremovexattr,sys32_lremovexattr_wrapper) ++ .long SYSCALL(sys_fremovexattr,sys32_fremovexattr_wrapper)/* 235 */ + .long SYSCALL(sys_gettid,sys_gettid) + .long SYSCALL(sys_tkill,sys_tkill) + .rept 255-237 +Index: linux-2.4.22-vanilla/arch/s390x/kernel/wrapper32.S +=================================================================== +--- linux-2.4.22-vanilla.orig/arch/s390x/kernel/wrapper32.S 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/arch/s390x/kernel/wrapper32.S 2003-11-03 23:41:29.000000000 +0300 +@@ -1098,6 +1098,98 @@ + llgfr %r4,%r4 # long + jg sys32_fstat64 # branch to system call + ++ .globl sys32_setxattr_wrapper ++sys32_setxattr_wrapper: ++ llgtr %r2,%r2 # char * ++ llgtr %r3,%r3 # char * ++ llgtr %r4,%r4 # void * ++ llgfr %r5,%r5 # size_t ++ lgfr %r6,%r6 # int ++ jg sys_setxattr ++ ++ .globl sys32_lsetxattr_wrapper ++sys32_lsetxattr_wrapper: ++ llgtr %r2,%r2 # char * ++ llgtr %r3,%r3 # char * ++ llgtr %r4,%r4 # void * ++ llgfr %r5,%r5 # size_t ++ lgfr %r6,%r6 # int ++ jg sys_lsetxattr ++ ++ .globl sys32_fsetxattr_wrapper ++sys32_fsetxattr_wrapper: ++ lgfr %r2,%r2 # int ++ llgtr %r3,%r3 # char * ++ llgtr %r4,%r4 # void * ++ llgfr %r5,%r5 # size_t ++ lgfr %r6,%r6 # int ++ jg sys_fsetxattr ++ ++ .globl sys32_getxattr_wrapper ++sys32_getxattr_wrapper: ++ llgtr %r2,%r2 # char * ++ llgtr %r3,%r3 # char * ++ llgtr %r4,%r4 # void * ++ llgfr %r5,%r5 # size_t ++ jg sys_getxattr ++ ++ .globl sys32_lgetxattr_wrapper ++sys32_lgetxattr_wrapper: ++ llgtr %r2,%r2 # char * ++ llgtr %r3,%r3 # char * ++ llgtr %r4,%r4 # void * ++ llgfr %r5,%r5 # size_t ++ jg sys_lgetxattr ++ ++ .globl sys32_fgetxattr_wrapper ++sys32_fgetxattr_wrapper: ++ lgfr %r2,%r2 # int ++ llgtr %r3,%r3 # char * ++ llgtr %r4,%r4 # void * ++ llgfr %r5,%r5 # size_t ++ jg sys_fgetxattr ++ ++ .globl sys32_listxattr_wrapper ++sys32_listxattr_wrapper: ++ llgtr %r2,%r2 # char * ++ llgtr %r3,%r3 # char * ++ llgfr %r4,%r4 # size_t ++ jg sys_listxattr ++ ++ .globl sys32_llistxattr_wrapper ++sys32_llistxattr_wrapper: ++ llgtr %r2,%r2 # char * ++ llgtr %r3,%r3 # char * ++ llgfr %r4,%r4 # size_t ++ jg sys_llistxattr ++ ++ .globl sys32_flistxattr_wrapper ++sys32_flistxattr_wrapper: ++ lgfr %r2,%r2 # int ++ llgtr %r3,%r3 # char * ++ llgfr %r4,%r4 # size_t ++ jg sys_flistxattr ++ ++ .globl sys32_removexattr_wrapper ++sys32_removexattr_wrapper: ++ llgtr %r2,%r2 # char * ++ llgtr %r3,%r3 # char * ++ jg sys_removexattr ++ ++ .globl sys32_lremovexattr_wrapper ++sys32_lremovexattr_wrapper: ++ llgtr %r2,%r2 # char * ++ llgtr %r3,%r3 # char * ++ jg sys_lremovexattr ++ ++ .globl sys32_fremovexattr_wrapper ++sys32_fremovexattr_wrapper: ++ lgfr %r2,%r2 # int ++ llgtr %r3,%r3 # char * ++ jg sys_fremovexattr ++ ++ ++ + .globl sys32_stime_wrapper + sys32_stime_wrapper: + llgtr %r2,%r2 # int * +Index: linux-2.4.22-vanilla/arch/sparc64/defconfig +=================================================================== +--- linux-2.4.22-vanilla.orig/arch/sparc64/defconfig 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/arch/sparc64/defconfig 2003-11-03 23:41:29.000000000 +0300 +@@ -1,6 +1,13 @@ + # + # Automatically generated make config: don't edit + # ++# CONFIG_EXT3_FS_XATTR is not set ++# CONFIG_EXT3_FS_XATTR_SHARING is not set ++# CONFIG_EXT3_FS_XATTR_USER is not set ++# 
CONFIG_EXT2_FS_XATTR is not set ++# CONFIG_EXT2_FS_XATTR_SHARING is not set ++# CONFIG_EXT2_FS_XATTR_USER is not set ++# CONFIG_FS_MBCACHE is not set + + # + # Code maturity level options +Index: linux-2.4.22-vanilla/fs/Config.in +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/Config.in 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/Config.in 2003-11-03 23:41:29.000000000 +0300 +@@ -29,6 +29,11 @@ + dep_tristate 'BFS file system support (EXPERIMENTAL)' CONFIG_BFS_FS $CONFIG_EXPERIMENTAL + + tristate 'Ext3 journalling file system support' CONFIG_EXT3_FS ++dep_mbool ' Ext3 extended attributes' CONFIG_EXT3_FS_XATTR $CONFIG_EXT3_FS ++dep_bool ' Ext3 extended attribute block sharing' \ ++ CONFIG_EXT3_FS_XATTR_SHARING $CONFIG_EXT3_FS_XATTR ++dep_bool ' Ext3 extended user attributes' \ ++ CONFIG_EXT3_FS_XATTR_USER $CONFIG_EXT3_FS_XATTR + # CONFIG_JBD could be its own option (even modular), but until there are + # other users than ext3, we will simply make it be the same as CONFIG_EXT3_FS + # dep_tristate ' Journal Block Device support (JBD for ext3)' CONFIG_JBD $CONFIG_EXT3_FS +@@ -88,6 +93,11 @@ + tristate 'ROM file system support' CONFIG_ROMFS_FS + + tristate 'Second extended fs support' CONFIG_EXT2_FS ++dep_mbool ' Ext2 extended attributes' CONFIG_EXT2_FS_XATTR $CONFIG_EXT2_FS ++dep_bool ' Ext2 extended attribute block sharing' \ ++ CONFIG_EXT2_FS_XATTR_SHARING $CONFIG_EXT2_FS_XATTR ++dep_bool ' Ext2 extended user attributes' \ ++ CONFIG_EXT2_FS_XATTR_USER $CONFIG_EXT2_FS_XATTR + + tristate 'System V/Xenix/V7/Coherent file system support' CONFIG_SYSV_FS + +@@ -160,6 +170,10 @@ + define_tristate CONFIG_ZISOFS_FS n + fi + ++# Meta block cache for Extended Attributes (ext2/ext3) ++#tristate 'Meta block cache' CONFIG_FS_MBCACHE ++define_tristate CONFIG_FS_MBCACHE y ++ + mainmenu_option next_comment + comment 'Partition Types' + source fs/partitions/Config.in +Index: linux-2.4.22-vanilla/fs/Makefile +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/Makefile 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/Makefile 2003-11-03 23:41:29.000000000 +0300 +@@ -77,6 +77,9 @@ + + obj-$(CONFIG_BINFMT_ELF) += binfmt_elf.o + ++export-objs += mbcache.o ++obj-$(CONFIG_FS_MBCACHE) += mbcache.o ++ + # persistent filesystems + obj-y += $(join $(subdir-y),$(subdir-y:%=/%.o)) + +Index: linux-2.4.22-vanilla/fs/ext2/Makefile +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/ext2/Makefile 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/ext2/Makefile 2003-11-03 23:41:29.000000000 +0300 +@@ -13,4 +13,8 @@ + ioctl.o namei.o super.o symlink.o + obj-m := $(O_TARGET) + ++export-objs += xattr.o ++obj-$(CONFIG_EXT2_FS_XATTR) += xattr.o ++obj-$(CONFIG_EXT2_FS_XATTR_USER) += xattr_user.o ++ + include $(TOPDIR)/Rules.make +Index: linux-2.4.22-vanilla/fs/ext2/file.c +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/ext2/file.c 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/ext2/file.c 2003-11-03 23:41:29.000000000 +0300 +@@ -20,6 +20,7 @@ + + #include + #include ++#include + #include + + /* +@@ -51,4 +52,8 @@ + + struct inode_operations ext2_file_inode_operations = { + truncate: ext2_truncate, ++ setxattr: ext2_setxattr, ++ getxattr: ext2_getxattr, ++ listxattr: ext2_listxattr, ++ removexattr: ext2_removexattr, + }; +Index: 
linux-2.4.22-vanilla/fs/ext2/ialloc.c +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/ext2/ialloc.c 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/ext2/ialloc.c 2003-11-03 23:41:29.000000000 +0300 +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -167,6 +168,7 @@ + */ + if (!is_bad_inode(inode)) { + /* Quota is already initialized in iput() */ ++ ext2_xattr_delete_inode(inode); + DQUOT_FREE_INODE(inode); + DQUOT_DROP(inode); + } +Index: linux-2.4.22-vanilla/fs/ext2/inode.c +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/ext2/inode.c 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/ext2/inode.c 2003-11-03 23:41:29.000000000 +0300 +@@ -39,6 +39,18 @@ + static int ext2_update_inode(struct inode * inode, int do_sync); + + /* ++ * Test whether an inode is a fast symlink. ++ */ ++static inline int ext2_inode_is_fast_symlink(struct inode *inode) ++{ ++ int ea_blocks = inode->u.ext2_i.i_file_acl ? ++ (inode->i_sb->s_blocksize >> 9) : 0; ++ ++ return (S_ISLNK(inode->i_mode) && ++ inode->i_blocks - ea_blocks == 0); ++} ++ ++/* + * Called at each iput() + */ + void ext2_put_inode (struct inode * inode) +@@ -53,9 +65,7 @@ + { + lock_kernel(); + +- if (is_bad_inode(inode) || +- inode->i_ino == EXT2_ACL_IDX_INO || +- inode->i_ino == EXT2_ACL_DATA_INO) ++ if (is_bad_inode(inode)) + goto no_delete; + inode->u.ext2_i.i_dtime = CURRENT_TIME; + mark_inode_dirty(inode); +@@ -801,6 +811,8 @@ + if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || + S_ISLNK(inode->i_mode))) + return; ++ if (ext2_inode_is_fast_symlink(inode)) ++ return; + if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) + return; + +@@ -903,8 +915,7 @@ + unsigned long offset; + struct ext2_group_desc * gdp; + +- if ((inode->i_ino != EXT2_ROOT_INO && inode->i_ino != EXT2_ACL_IDX_INO && +- inode->i_ino != EXT2_ACL_DATA_INO && ++ if ((inode->i_ino != EXT2_ROOT_INO && + inode->i_ino < EXT2_FIRST_INO(inode->i_sb)) || + inode->i_ino > le32_to_cpu(inode->i_sb->u.ext2_sb.s_es->s_inodes_count)) { + ext2_error (inode->i_sb, "ext2_read_inode", +@@ -989,10 +1000,7 @@ + for (block = 0; block < EXT2_N_BLOCKS; block++) + inode->u.ext2_i.i_data[block] = raw_inode->i_block[block]; + +- if (inode->i_ino == EXT2_ACL_IDX_INO || +- inode->i_ino == EXT2_ACL_DATA_INO) +- /* Nothing to do */ ; +- else if (S_ISREG(inode->i_mode)) { ++ if (S_ISREG(inode->i_mode)) { + inode->i_op = &ext2_file_inode_operations; + inode->i_fop = &ext2_file_operations; + inode->i_mapping->a_ops = &ext2_aops; +@@ -1001,15 +1009,17 @@ + inode->i_fop = &ext2_dir_operations; + inode->i_mapping->a_ops = &ext2_aops; + } else if (S_ISLNK(inode->i_mode)) { +- if (!inode->i_blocks) ++ if (ext2_inode_is_fast_symlink(inode)) + inode->i_op = &ext2_fast_symlink_inode_operations; + else { +- inode->i_op = &page_symlink_inode_operations; ++ inode->i_op = &ext2_symlink_inode_operations; + inode->i_mapping->a_ops = &ext2_aops; + } +- } else ++ } else { ++ inode->i_op = &ext2_special_inode_operations; + init_special_inode(inode, inode->i_mode, + le32_to_cpu(raw_inode->i_block[0])); ++ } + brelse (bh); + inode->i_attr_flags = 0; + ext2_set_inode_flags(inode); +Index: linux-2.4.22-vanilla/fs/ext2/namei.c +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/ext2/namei.c 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/ext2/namei.c 2003-11-03 23:41:29.000000000 +0300 
+@@ -31,6 +31,7 @@ + + #include + #include ++#include + #include + + /* +@@ -136,7 +137,7 @@ + + if (l > sizeof (inode->u.ext2_i.i_data)) { + /* slow symlink */ +- inode->i_op = &page_symlink_inode_operations; ++ inode->i_op = &ext2_symlink_inode_operations; + inode->i_mapping->a_ops = &ext2_aops; + err = block_symlink(inode, symname, l); + if (err) +@@ -345,4 +346,15 @@ + rmdir: ext2_rmdir, + mknod: ext2_mknod, + rename: ext2_rename, ++ setxattr: ext2_setxattr, ++ getxattr: ext2_getxattr, ++ listxattr: ext2_listxattr, ++ removexattr: ext2_removexattr, ++}; ++ ++struct inode_operations ext2_special_inode_operations = { ++ setxattr: ext2_setxattr, ++ getxattr: ext2_getxattr, ++ listxattr: ext2_listxattr, ++ removexattr: ext2_removexattr, + }; +Index: linux-2.4.22-vanilla/fs/ext2/super.c +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/ext2/super.c 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/ext2/super.c 2003-11-03 23:41:29.000000000 +0300 +@@ -21,6 +21,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -125,6 +126,7 @@ + int db_count; + int i; + ++ ext2_xattr_put_super(sb); + if (!(sb->s_flags & MS_RDONLY)) { + struct ext2_super_block *es = EXT2_SB(sb)->s_es; + +@@ -175,6 +177,13 @@ + this_char = strtok (NULL, ",")) { + if ((value = strchr (this_char, '=')) != NULL) + *value++ = 0; ++#ifdef CONFIG_EXT2_FS_XATTR_USER ++ if (!strcmp (this_char, "user_xattr")) ++ set_opt (*mount_options, XATTR_USER); ++ else if (!strcmp (this_char, "nouser_xattr")) ++ clear_opt (*mount_options, XATTR_USER); ++ else ++#endif + if (!strcmp (this_char, "bsddf")) + clear_opt (*mount_options, MINIX_DF); + else if (!strcmp (this_char, "nouid32")) { +@@ -424,6 +433,9 @@ + blocksize = BLOCK_SIZE; + + sb->u.ext2_sb.s_mount_opt = 0; ++#ifdef CONFIG_EXT2_FS_XATTR_USER ++ /* set_opt (sb->u.ext2_sb.s_mount_opt, XATTR_USER); */ ++#endif + if (!parse_options ((char *) data, &sb_block, &resuid, &resgid, + &sb->u.ext2_sb.s_mount_opt)) { + return NULL; +@@ -813,12 +825,27 @@ + + static int __init init_ext2_fs(void) + { +- return register_filesystem(&ext2_fs_type); ++ int error = init_ext2_xattr(); ++ if (error) ++ return error; ++ error = init_ext2_xattr_user(); ++ if (error) ++ goto fail; ++ error = register_filesystem(&ext2_fs_type); ++ if (!error) ++ return 0; ++ ++ exit_ext2_xattr_user(); ++fail: ++ exit_ext2_xattr(); ++ return error; + } + + static void __exit exit_ext2_fs(void) + { + unregister_filesystem(&ext2_fs_type); ++ exit_ext2_xattr_user(); ++ exit_ext2_xattr(); + } + + EXPORT_NO_SYMBOLS; +Index: linux-2.4.22-vanilla/fs/ext2/symlink.c +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/ext2/symlink.c 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/ext2/symlink.c 2003-11-03 23:41:29.000000000 +0300 +@@ -19,6 +19,7 @@ + + #include + #include ++#include + + static int ext2_readlink(struct dentry *dentry, char *buffer, int buflen) + { +@@ -32,7 +33,20 @@ + return vfs_follow_link(nd, s); + } + ++struct inode_operations ext2_symlink_inode_operations = { ++ readlink: page_readlink, ++ follow_link: page_follow_link, ++ setxattr: ext2_setxattr, ++ getxattr: ext2_getxattr, ++ listxattr: ext2_listxattr, ++ removexattr: ext2_removexattr, ++}; ++ + struct inode_operations ext2_fast_symlink_inode_operations = { + readlink: ext2_readlink, + follow_link: ext2_follow_link, ++ setxattr: ext2_setxattr, ++ getxattr: ext2_getxattr, ++ listxattr: ext2_listxattr, 
++	removexattr:	ext2_removexattr,
+ };
+Index: linux-2.4.22-vanilla/fs/ext2/xattr.c
+===================================================================
+--- linux-2.4.22-vanilla.orig/fs/ext2/xattr.c	2003-11-03 23:41:29.000000000 +0300
++++ linux-2.4.22-vanilla/fs/ext2/xattr.c	2003-11-03 23:41:29.000000000 +0300
+@@ -0,0 +1,1212 @@
++/*
++ * linux/fs/ext2/xattr.c
++ *
++ * Copyright (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
++ *
++ * Fix by Harrison Xing .
++ * Extended attributes for symlinks and special files added per
++ * suggestion of Luka Renko .
++ */
++
++/*
++ * Extended attributes are stored on disk blocks allocated outside of
++ * any inode. The i_file_acl field is then made to point to this allocated
++ * block. If all extended attributes of an inode are identical, these
++ * inodes may share the same extended attribute block. Such situations
++ * are automatically detected by keeping a cache of recent attribute block
++ * numbers and hashes over the block's contents in memory.
++ *
++ *
++ * Extended attribute block layout:
++ *
++ *   +------------------+
++ *   | header           |
++ *   | entry 1          | |
++ *   | entry 2          | | growing downwards
++ *   | entry 3          | v
++ *   | four null bytes  |
++ *   | . . .            |
++ *   | value 1          | ^
++ *   | value 3          | | growing upwards
++ *   | value 2          | |
++ *   +------------------+
++ *
++ * The block header is followed by multiple entry descriptors. These entry
++ * descriptors are variable in size, and aligned to EXT2_XATTR_PAD
++ * byte boundaries. The entry descriptors are sorted by attribute name,
++ * so that two extended attribute blocks can be compared efficiently.
++ *
++ * Attribute values are aligned to the end of the block, stored in
++ * no specific order. They are also padded to EXT2_XATTR_PAD byte
++ * boundaries. No additional gaps are left between them.
++ *
++ * Locking strategy
++ * ----------------
++ * The VFS already holds the BKL and the inode->i_sem semaphore when any of
++ * the xattr inode operations are called, so we are guaranteed that only one
++ * process accesses extended attributes of an inode at any time.
++ *
++ * For writing we also grab the ext2_xattr_sem semaphore. This ensures that
++ * only a single process is modifying an extended attribute block, even
++ * if the block is shared among inodes.
++ *
++ * Note for porting to 2.5
++ * -----------------------
++ * The BKL will no longer be held in the xattr inode operations.
++ */
++
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++
++/* These symbols may be needed by a module. */
++EXPORT_SYMBOL(ext2_xattr_register);
++EXPORT_SYMBOL(ext2_xattr_unregister);
++EXPORT_SYMBOL(ext2_xattr_get);
++EXPORT_SYMBOL(ext2_xattr_list);
++EXPORT_SYMBOL(ext2_xattr_set);
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
++# define mark_buffer_dirty(bh) mark_buffer_dirty(bh, 1)
++#endif
++
++#define HDR(bh) ((struct ext2_xattr_header *)((bh)->b_data))
++#define ENTRY(ptr) ((struct ext2_xattr_entry *)(ptr))
++#define FIRST_ENTRY(bh) ENTRY(HDR(bh)+1)
++#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)
++
++#ifdef EXT2_XATTR_DEBUG
++# define ea_idebug(inode, f...) do { \
++		printk(KERN_DEBUG "inode %s:%ld: ", \
++			kdevname(inode->i_dev), inode->i_ino); \
++		printk(f); \
++		printk("\n"); \
++	} while (0)
++# define ea_bdebug(bh, f...) do { \
++		printk(KERN_DEBUG "block %s:%ld: ", \
++			kdevname(bh->b_dev), bh->b_blocknr); \
++		printk(f); \
++		printk("\n"); \
++	} while (0)
++#else
++# define ea_idebug(f...)
++# define ea_bdebug(f...)
++#endif
++
++static int ext2_xattr_set2(struct inode *, struct buffer_head *,
++			   struct ext2_xattr_header *);
++
++#ifdef CONFIG_EXT2_FS_XATTR_SHARING
++
++static int ext2_xattr_cache_insert(struct buffer_head *);
++static struct buffer_head *ext2_xattr_cache_find(struct inode *,
++						 struct ext2_xattr_header *);
++static void ext2_xattr_cache_remove(struct buffer_head *);
++static void ext2_xattr_rehash(struct ext2_xattr_header *,
++			      struct ext2_xattr_entry *);
++
++static struct mb_cache *ext2_xattr_cache;
++
++#else
++# define ext2_xattr_cache_insert(bh) 0
++# define ext2_xattr_cache_find(inode, header) NULL
++# define ext2_xattr_cache_remove(bh) while(0) {}
++# define ext2_xattr_rehash(header, entry) while(0) {}
++#endif
++
++/*
++ * If a file system does not share extended attributes among inodes,
++ * we should not need the ext2_xattr_sem semaphore. However, the
++ * filesystem may still contain shared blocks, so we always take
++ * the lock.
++ */
++
++DECLARE_MUTEX(ext2_xattr_sem);
++
++static inline int
++ext2_xattr_new_block(struct inode *inode, int * errp, int force)
++{
++	struct super_block *sb = inode->i_sb;
++	int goal = le32_to_cpu(EXT2_SB(sb)->s_es->s_first_data_block) +
++		EXT2_I(inode)->i_block_group * EXT2_BLOCKS_PER_GROUP(sb);
++
++	/* How can we enforce the allocation? */
++	int block = ext2_new_block(inode, goal, 0, 0, errp);
++#ifdef OLD_QUOTAS
++	if (!*errp)
++		inode->i_blocks += inode->i_sb->s_blocksize >> 9;
++#endif
++	return block;
++}
++
++static inline int
++ext2_xattr_quota_alloc(struct inode *inode, int force)
++{
++	/* How can we enforce the allocation? */
++#ifdef OLD_QUOTAS
++	int error = DQUOT_ALLOC_BLOCK(inode->i_sb, inode, 1);
++	if (!error)
++		inode->i_blocks += inode->i_sb->s_blocksize >> 9;
++#else
++	int error = DQUOT_ALLOC_BLOCK(inode, 1);
++#endif
++	return error;
++}
++
++#ifdef OLD_QUOTAS
++
++static inline void
++ext2_xattr_quota_free(struct inode *inode)
++{
++	DQUOT_FREE_BLOCK(inode->i_sb, inode, 1);
++	inode->i_blocks -= inode->i_sb->s_blocksize >> 9;
++}
++
++static inline void
++ext2_xattr_free_block(struct inode * inode, unsigned long block)
++{
++	ext2_free_blocks(inode, block, 1);
++	inode->i_blocks -= inode->i_sb->s_blocksize >> 9;
++}
++
++#else
++# define ext2_xattr_quota_free(inode) \
++	DQUOT_FREE_BLOCK(inode, 1)
++# define ext2_xattr_free_block(inode, block) \
++	ext2_free_blocks(inode, block, 1)
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18)
++
++static inline struct buffer_head *
++sb_bread(struct super_block *sb, int block)
++{
++	return bread(sb->s_dev, block, sb->s_blocksize);
++}
++
++static inline struct buffer_head *
++sb_getblk(struct super_block *sb, int block)
++{
++	return getblk(sb->s_dev, block, sb->s_blocksize);
++}
++
++#endif
++
++struct ext2_xattr_handler *ext2_xattr_handlers[EXT2_XATTR_INDEX_MAX];
++rwlock_t ext2_handler_lock = RW_LOCK_UNLOCKED;
++
++int
++ext2_xattr_register(int name_index, struct ext2_xattr_handler *handler)
++{
++	int error = -EINVAL;
++
++	if (name_index > 0 && name_index <= EXT2_XATTR_INDEX_MAX) {
++		write_lock(&ext2_handler_lock);
++		if (!ext2_xattr_handlers[name_index-1]) {
++			ext2_xattr_handlers[name_index-1] = handler;
++			error = 0;
++		}
++		write_unlock(&ext2_handler_lock);
++	}
++	return error;
++}
++
++void
++ext2_xattr_unregister(int name_index, struct ext2_xattr_handler *handler)
++{
++	if (name_index > 0 && name_index <= EXT2_XATTR_INDEX_MAX) {
++		write_lock(&ext2_handler_lock);
++		ext2_xattr_handlers[name_index-1] = NULL;
++		write_unlock(&ext2_handler_lock);
++	}
++}
++
++static inline const char *
++strcmp_prefix(const char *a, const char *a_prefix)
++{
++	while (*a_prefix && *a == *a_prefix) {
++		a++;
++		a_prefix++;
++	}
++	return *a_prefix ? NULL : a;
++}
++
++/*
++ * Decode the extended attribute name, and translate it into
++ * the name_index and name suffix.
++ */
++static struct ext2_xattr_handler *
++ext2_xattr_resolve_name(const char **name)
++{
++	struct ext2_xattr_handler *handler = NULL;
++	int i;
++
++	if (!*name)
++		return NULL;
++	read_lock(&ext2_handler_lock);
++	for (i=0; i<EXT2_XATTR_INDEX_MAX; i++) {
++		if (ext2_xattr_handlers[i]) {
++			const char *n = strcmp_prefix(*name,
++				ext2_xattr_handlers[i]->prefix);
++			if (n) {
++				handler = ext2_xattr_handlers[i];
++				*name = n;
++				break;
++			}
++		}
++	}
++	read_unlock(&ext2_handler_lock);
++	return handler;
++}
++
++static inline struct ext2_xattr_handler *
++ext2_xattr_handler(int name_index)
++{
++	struct ext2_xattr_handler *handler = NULL;
++	if (name_index > 0 && name_index <= EXT2_XATTR_INDEX_MAX) {
++		read_lock(&ext2_handler_lock);
++		handler = ext2_xattr_handlers[name_index-1];
++		read_unlock(&ext2_handler_lock);
++	}
++	return handler;
++}
++
++/*
++ * Inode operation getxattr()
++ *
++ * dentry->d_inode->i_sem down
++ * BKL held [before 2.5.x]
++ */
++ssize_t
++ext2_getxattr(struct dentry *dentry, const char *name,
++	      void *buffer, size_t size)
++{
++	struct ext2_xattr_handler *handler;
++	struct inode *inode = dentry->d_inode;
++
++	handler = ext2_xattr_resolve_name(&name);
++	if (!handler)
++		return -ENOTSUP;
++	return handler->get(inode, name, buffer, size);
++}
++
++/*
++ * Inode operation listxattr()
++ *
++ * dentry->d_inode->i_sem down
++ * BKL held [before 2.5.x]
++ */
++ssize_t
++ext2_listxattr(struct dentry *dentry, char *buffer, size_t size)
++{
++	return ext2_xattr_list(dentry->d_inode, buffer, size);
++}
++
++/*
++ * Inode operation setxattr()
++ *
++ * dentry->d_inode->i_sem down
++ * BKL held [before 2.5.x]
++ */
++int
++ext2_setxattr(struct dentry *dentry, const char *name,
++	      const void *value, size_t size, int flags)
++{
++	struct ext2_xattr_handler *handler;
++	struct inode *inode = dentry->d_inode;
++
++	if (size == 0)
++		value = "";  /* empty EA, do not remove */
++	handler = ext2_xattr_resolve_name(&name);
++	if (!handler)
++		return -ENOTSUP;
++	return handler->set(inode, name, value, size, flags);
++}
++
++/*
++ * Inode operation removexattr()
++ *
++ * dentry->d_inode->i_sem down
++ * BKL held [before 2.5.x]
++ */
++int
++ext2_removexattr(struct dentry *dentry, const char *name)
++{
++	struct ext2_xattr_handler *handler;
++	struct inode *inode = dentry->d_inode;
++
++	handler = ext2_xattr_resolve_name(&name);
++	if (!handler)
++		return -ENOTSUP;
++	return handler->set(inode, name, NULL, 0, XATTR_REPLACE);
++}
++
++/*
++ * ext2_xattr_get()
++ *
++ * Copy an extended attribute into the buffer
++ * provided, or compute the buffer size required.
++ * Buffer is NULL to compute the size of the buffer required.
++ *
++ * Returns a negative error number on failure, or the number of bytes
++ * used / required on success.
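++ *
++ * Minimal usage sketch (hypothetical caller; the name "foo" and the
++ * 64-byte buffer are illustrative, not part of this patch):
++ *
++ *	char buf[64];
++ *	int n = ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER, "foo",
++ *			       buf, sizeof(buf));
++ *
++ * n >= 0 is the value length; -ENOATTR means the attribute is absent,
++ * -ERANGE that the buffer is too small.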
++ */ ++int ++ext2_xattr_get(struct inode *inode, int name_index, const char *name, ++ void *buffer, size_t buffer_size) ++{ ++ struct buffer_head *bh = NULL; ++ struct ext2_xattr_entry *entry; ++ unsigned int block, size; ++ char *end; ++ int name_len, error; ++ ++ ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld", ++ name_index, name, buffer, (long)buffer_size); ++ ++ if (name == NULL) ++ return -EINVAL; ++ if (!EXT2_I(inode)->i_file_acl) ++ return -ENOATTR; ++ block = EXT2_I(inode)->i_file_acl; ++ ea_idebug(inode, "reading block %d", block); ++ bh = sb_bread(inode->i_sb, block); ++ if (!bh) ++ return -EIO; ++ ea_bdebug(bh, "b_count=%d, refcount=%d", ++ atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount)); ++ end = bh->b_data + bh->b_size; ++ if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) || ++ HDR(bh)->h_blocks != cpu_to_le32(1)) { ++bad_block: ext2_error(inode->i_sb, "ext2_xattr_get", ++ "inode %ld: bad block %d", inode->i_ino, block); ++ error = -EIO; ++ goto cleanup; ++ } ++ /* find named attribute */ ++ name_len = strlen(name); ++ ++ error = -ERANGE; ++ if (name_len > 255) ++ goto cleanup; ++ entry = FIRST_ENTRY(bh); ++ while (!IS_LAST_ENTRY(entry)) { ++ struct ext2_xattr_entry *next = ++ EXT2_XATTR_NEXT(entry); ++ if ((char *)next >= end) ++ goto bad_block; ++ if (name_index == entry->e_name_index && ++ name_len == entry->e_name_len && ++ memcmp(name, entry->e_name, name_len) == 0) ++ goto found; ++ entry = next; ++ } ++ /* Check the remaining name entries */ ++ while (!IS_LAST_ENTRY(entry)) { ++ struct ext2_xattr_entry *next = ++ EXT2_XATTR_NEXT(entry); ++ if ((char *)next >= end) ++ goto bad_block; ++ entry = next; ++ } ++ if (ext2_xattr_cache_insert(bh)) ++ ea_idebug(inode, "cache insert failed"); ++ error = -ENOATTR; ++ goto cleanup; ++found: ++ /* check the buffer size */ ++ if (entry->e_value_block != 0) ++ goto bad_block; ++ size = le32_to_cpu(entry->e_value_size); ++ if (size > inode->i_sb->s_blocksize || ++ le16_to_cpu(entry->e_value_offs) + size > inode->i_sb->s_blocksize) ++ goto bad_block; ++ ++ if (ext2_xattr_cache_insert(bh)) ++ ea_idebug(inode, "cache insert failed"); ++ if (buffer) { ++ error = -ERANGE; ++ if (size > buffer_size) ++ goto cleanup; ++ /* return value of attribute */ ++ memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs), ++ size); ++ } ++ error = size; ++ ++cleanup: ++ brelse(bh); ++ ++ return error; ++} ++ ++/* ++ * ext2_xattr_list() ++ * ++ * Copy a list of attribute names into the buffer ++ * provided, or compute the buffer size required. ++ * Buffer is NULL to compute the size of the buffer required. ++ * ++ * Returns a negative error number on failure, or the number of bytes ++ * used / required on success. 
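++ *
++ * The usual caller is a two-pass sketch (illustrative only, not part
++ * of this patch): a sizing call with a NULL buffer, then the copy:
++ *
++ *	int size = ext2_xattr_list(inode, NULL, 0);
++ *	char *names;
++ *	if (size > 0 && (names = kmalloc(size, GFP_KERNEL)) != NULL)
++ *		size = ext2_xattr_list(inode, names, size);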
++ */ ++int ++ext2_xattr_list(struct inode *inode, char *buffer, size_t buffer_size) ++{ ++ struct buffer_head *bh = NULL; ++ struct ext2_xattr_entry *entry; ++ unsigned int block, size = 0; ++ char *buf, *end; ++ int error; ++ ++ ea_idebug(inode, "buffer=%p, buffer_size=%ld", ++ buffer, (long)buffer_size); ++ ++ if (!EXT2_I(inode)->i_file_acl) ++ return 0; ++ block = EXT2_I(inode)->i_file_acl; ++ ea_idebug(inode, "reading block %d", block); ++ bh = sb_bread(inode->i_sb, block); ++ if (!bh) ++ return -EIO; ++ ea_bdebug(bh, "b_count=%d, refcount=%d", ++ atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount)); ++ end = bh->b_data + bh->b_size; ++ if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) || ++ HDR(bh)->h_blocks != cpu_to_le32(1)) { ++bad_block: ext2_error(inode->i_sb, "ext2_xattr_list", ++ "inode %ld: bad block %d", inode->i_ino, block); ++ error = -EIO; ++ goto cleanup; ++ } ++ /* compute the size required for the list of attribute names */ ++ for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry); ++ entry = EXT2_XATTR_NEXT(entry)) { ++ struct ext2_xattr_handler *handler; ++ struct ext2_xattr_entry *next = ++ EXT2_XATTR_NEXT(entry); ++ if ((char *)next >= end) ++ goto bad_block; ++ ++ handler = ext2_xattr_handler(entry->e_name_index); ++ if (handler) ++ size += handler->list(NULL, inode, entry->e_name, ++ entry->e_name_len); ++ } ++ ++ if (ext2_xattr_cache_insert(bh)) ++ ea_idebug(inode, "cache insert failed"); ++ if (!buffer) { ++ error = size; ++ goto cleanup; ++ } else { ++ error = -ERANGE; ++ if (size > buffer_size) ++ goto cleanup; ++ } ++ ++ /* list the attribute names */ ++ buf = buffer; ++ for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry); ++ entry = EXT2_XATTR_NEXT(entry)) { ++ struct ext2_xattr_handler *handler; ++ ++ handler = ext2_xattr_handler(entry->e_name_index); ++ if (handler) ++ buf += handler->list(buf, inode, entry->e_name, ++ entry->e_name_len); ++ } ++ error = size; ++ ++cleanup: ++ brelse(bh); ++ ++ return error; ++} ++ ++/* ++ * If the EXT2_FEATURE_COMPAT_EXT_ATTR feature of this file system is ++ * not set, set it. ++ */ ++static void ext2_xattr_update_super_block(struct super_block *sb) ++{ ++ if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR)) ++ return; ++ ++ lock_super(sb); ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) ++ EXT2_SB(sb)->s_feature_compat |= EXT2_FEATURE_COMPAT_EXT_ATTR; ++#endif ++ EXT2_SB(sb)->s_es->s_feature_compat |= ++ cpu_to_le32(EXT2_FEATURE_COMPAT_EXT_ATTR); ++ sb->s_dirt = 1; ++ mark_buffer_dirty(EXT2_SB(sb)->s_sbh); ++ unlock_super(sb); ++} ++ ++/* ++ * ext2_xattr_set() ++ * ++ * Create, replace or remove an extended attribute for this inode. Buffer ++ * is NULL to remove an existing extended attribute, and non-NULL to ++ * either replace an existing extended attribute, or create a new extended ++ * attribute. The flags XATTR_REPLACE and XATTR_CREATE ++ * specify that an extended attribute must exist and must not exist ++ * previous to the call, respectively. ++ * ++ * Returns 0, or a negative error number on failure. 
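++ *
++ * Sketch (the attribute name "foo" and its value are illustrative):
++ *
++ *	error = ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, "foo",
++ *			       "bar", 3, XATTR_CREATE);
++ *
++ * fails with -EEXIST if user.foo already exists, while passing
++ * XATTR_REPLACE with a NULL value removes the attribute.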
++ */ ++int ++ext2_xattr_set(struct inode *inode, int name_index, const char *name, ++ const void *value, size_t value_len, int flags) ++{ ++ struct super_block *sb = inode->i_sb; ++ struct buffer_head *bh = NULL; ++ struct ext2_xattr_header *header = NULL; ++ struct ext2_xattr_entry *here, *last; ++ unsigned int name_len; ++ int block = EXT2_I(inode)->i_file_acl; ++ int min_offs = sb->s_blocksize, not_found = 1, free, error; ++ char *end; ++ ++ /* ++ * header -- Points either into bh, or to a temporarily ++ * allocated buffer. ++ * here -- The named entry found, or the place for inserting, within ++ * the block pointed to by header. ++ * last -- Points right after the last named entry within the block ++ * pointed to by header. ++ * min_offs -- The offset of the first value (values are aligned ++ * towards the end of the block). ++ * end -- Points right after the block pointed to by header. ++ */ ++ ++ ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld", ++ name_index, name, value, (long)value_len); ++ ++ if (IS_RDONLY(inode)) ++ return -EROFS; ++ if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) ++ return -EPERM; ++ if (value == NULL) ++ value_len = 0; ++ if (name == NULL) ++ return -EINVAL; ++ name_len = strlen(name); ++ if (name_len > 255 || value_len > sb->s_blocksize) ++ return -ERANGE; ++ down(&ext2_xattr_sem); ++ ++ if (block) { ++ /* The inode already has an extended attribute block. */ ++ ++ bh = sb_bread(sb, block); ++ error = -EIO; ++ if (!bh) ++ goto cleanup; ++ ea_bdebug(bh, "b_count=%d, refcount=%d", ++ atomic_read(&(bh->b_count)), ++ le32_to_cpu(HDR(bh)->h_refcount)); ++ header = HDR(bh); ++ end = bh->b_data + bh->b_size; ++ if (header->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) || ++ header->h_blocks != cpu_to_le32(1)) { ++bad_block: ext2_error(sb, "ext2_xattr_set", ++ "inode %ld: bad block %d", inode->i_ino, block); ++ error = -EIO; ++ goto cleanup; ++ } ++ /* Find the named attribute. */ ++ here = FIRST_ENTRY(bh); ++ while (!IS_LAST_ENTRY(here)) { ++ struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(here); ++ if ((char *)next >= end) ++ goto bad_block; ++ if (!here->e_value_block && here->e_value_size) { ++ int offs = le16_to_cpu(here->e_value_offs); ++ if (offs < min_offs) ++ min_offs = offs; ++ } ++ not_found = name_index - here->e_name_index; ++ if (!not_found) ++ not_found = name_len - here->e_name_len; ++ if (!not_found) ++ not_found = memcmp(name, here->e_name,name_len); ++ if (not_found <= 0) ++ break; ++ here = next; ++ } ++ last = here; ++ /* We still need to compute min_offs and last. */ ++ while (!IS_LAST_ENTRY(last)) { ++ struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(last); ++ if ((char *)next >= end) ++ goto bad_block; ++ if (!last->e_value_block && last->e_value_size) { ++ int offs = le16_to_cpu(last->e_value_offs); ++ if (offs < min_offs) ++ min_offs = offs; ++ } ++ last = next; ++ } ++ ++ /* Check whether we have enough space left. */ ++ free = min_offs - ((char*)last - (char*)header) - sizeof(__u32); ++ } else { ++ /* We will use a new extended attribute block. */ ++ free = sb->s_blocksize - ++ sizeof(struct ext2_xattr_header) - sizeof(__u32); ++ here = last = NULL; /* avoid gcc uninitialized warning. */ ++ } ++ ++ if (not_found) { ++ /* Request to remove a nonexistent attribute? */ ++ error = -ENOATTR; ++ if (flags & XATTR_REPLACE) ++ goto cleanup; ++ error = 0; ++ if (value == NULL) ++ goto cleanup; ++ else ++ free -= EXT2_XATTR_LEN(name_len); ++ } else { ++ /* Request to create an existing attribute? 
*/ ++ error = -EEXIST; ++ if (flags & XATTR_CREATE) ++ goto cleanup; ++ if (!here->e_value_block && here->e_value_size) { ++ unsigned int size = le32_to_cpu(here->e_value_size); ++ ++ if (le16_to_cpu(here->e_value_offs) + size > ++ sb->s_blocksize || size > sb->s_blocksize) ++ goto bad_block; ++ free += EXT2_XATTR_SIZE(size); ++ } ++ } ++ free -= EXT2_XATTR_SIZE(value_len); ++ error = -ENOSPC; ++ if (free < 0) ++ goto cleanup; ++ ++ /* Here we know that we can set the new attribute. */ ++ ++ if (header) { ++ if (header->h_refcount == cpu_to_le32(1)) { ++ ea_bdebug(bh, "modifying in-place"); ++ ext2_xattr_cache_remove(bh); ++ } else { ++ int offset; ++ ++ ea_bdebug(bh, "cloning"); ++ header = kmalloc(bh->b_size, GFP_KERNEL); ++ error = -ENOMEM; ++ if (header == NULL) ++ goto cleanup; ++ memcpy(header, HDR(bh), bh->b_size); ++ header->h_refcount = cpu_to_le32(1); ++ offset = (char *)header - bh->b_data; ++ here = ENTRY((char *)here + offset); ++ last = ENTRY((char *)last + offset); ++ } ++ } else { ++ /* Allocate a buffer where we construct the new block. */ ++ header = kmalloc(sb->s_blocksize, GFP_KERNEL); ++ error = -ENOMEM; ++ if (header == NULL) ++ goto cleanup; ++ memset(header, 0, sb->s_blocksize); ++ end = (char *)header + sb->s_blocksize; ++ header->h_magic = cpu_to_le32(EXT2_XATTR_MAGIC); ++ header->h_blocks = header->h_refcount = cpu_to_le32(1); ++ last = here = ENTRY(header+1); ++ } ++ ++ if (not_found) { ++ /* Insert the new name. */ ++ int size = EXT2_XATTR_LEN(name_len); ++ int rest = (char *)last - (char *)here; ++ memmove((char *)here + size, here, rest); ++ memset(here, 0, size); ++ here->e_name_index = name_index; ++ here->e_name_len = name_len; ++ memcpy(here->e_name, name, name_len); ++ } else { ++ /* Remove the old value. */ ++ if (!here->e_value_block && here->e_value_size) { ++ char *first_val = (char *)header + min_offs; ++ int offs = le16_to_cpu(here->e_value_offs); ++ char *val = (char *)header + offs; ++ size_t size = EXT2_XATTR_SIZE( ++ le32_to_cpu(here->e_value_size)); ++ memmove(first_val + size, first_val, val - first_val); ++ memset(first_val, 0, size); ++ here->e_value_offs = 0; ++ min_offs += size; ++ ++ /* Adjust all value offsets. */ ++ last = ENTRY(header+1); ++ while (!IS_LAST_ENTRY(last)) { ++ int o = le16_to_cpu(last->e_value_offs); ++ if (!last->e_value_block && o < offs) ++ last->e_value_offs = ++ cpu_to_le16(o + size); ++ last = EXT2_XATTR_NEXT(last); ++ } ++ } ++ if (value == NULL) { ++ /* Remove this attribute. */ ++ if (EXT2_XATTR_NEXT(ENTRY(header+1)) == last) { ++ /* This block is now empty. */ ++ error = ext2_xattr_set2(inode, bh, NULL); ++ goto cleanup; ++ } else { ++ /* Remove the old name. */ ++ int size = EXT2_XATTR_LEN(name_len); ++ last = ENTRY((char *)last - size); ++ memmove(here, (char*)here + size, ++ (char*)last - (char*)here); ++ memset(last, 0, size); ++ } ++ } ++ } ++ ++ if (value != NULL) { ++ /* Insert the new value. */ ++ here->e_value_size = cpu_to_le32(value_len); ++ if (value_len) { ++ size_t size = EXT2_XATTR_SIZE(value_len); ++ char *val = (char *)header + min_offs - size; ++ here->e_value_offs = ++ cpu_to_le16((char *)val - (char *)header); ++ memset(val + size - EXT2_XATTR_PAD, 0, ++ EXT2_XATTR_PAD); /* Clear the pad bytes. 
*/ ++ memcpy(val, value, value_len); ++ } ++ } ++ ext2_xattr_rehash(header, here); ++ ++ error = ext2_xattr_set2(inode, bh, header); ++ ++cleanup: ++ brelse(bh); ++ if (!(bh && header == HDR(bh))) ++ kfree(header); ++ up(&ext2_xattr_sem); ++ ++ return error; ++} ++ ++/* ++ * Second half of ext2_xattr_set(): Update the file system. ++ */ ++static int ++ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh, ++ struct ext2_xattr_header *header) ++{ ++ struct super_block *sb = inode->i_sb; ++ struct buffer_head *new_bh = NULL; ++ int error; ++ ++ if (header) { ++ new_bh = ext2_xattr_cache_find(inode, header); ++ if (new_bh) { ++ /* ++ * We found an identical block in the cache. ++ * The old block will be released after updating ++ * the inode. ++ */ ++ ea_bdebug(old_bh, "reusing block %ld", ++ new_bh->b_blocknr); ++ ++ error = -EDQUOT; ++ if (ext2_xattr_quota_alloc(inode, 1)) ++ goto cleanup; ++ ++ HDR(new_bh)->h_refcount = cpu_to_le32( ++ le32_to_cpu(HDR(new_bh)->h_refcount) + 1); ++ ea_bdebug(new_bh, "refcount now=%d", ++ le32_to_cpu(HDR(new_bh)->h_refcount)); ++ } else if (old_bh && header == HDR(old_bh)) { ++ /* Keep this block. */ ++ new_bh = old_bh; ++ ext2_xattr_cache_insert(new_bh); ++ } else { ++ /* We need to allocate a new block */ ++ int force = EXT2_I(inode)->i_file_acl != 0; ++ int block = ext2_xattr_new_block(inode, &error, force); ++ if (error) ++ goto cleanup; ++ ea_idebug(inode, "creating block %d", block); ++ ++ new_bh = sb_getblk(sb, block); ++ if (!new_bh) { ++ ext2_xattr_free_block(inode, block); ++ error = -EIO; ++ goto cleanup; ++ } ++ lock_buffer(new_bh); ++ memcpy(new_bh->b_data, header, new_bh->b_size); ++ mark_buffer_uptodate(new_bh, 1); ++ unlock_buffer(new_bh); ++ ext2_xattr_cache_insert(new_bh); ++ ++ ext2_xattr_update_super_block(sb); ++ } ++ mark_buffer_dirty(new_bh); ++ if (IS_SYNC(inode)) { ++ ll_rw_block(WRITE, 1, &new_bh); ++ wait_on_buffer(new_bh); ++ error = -EIO; ++ if (buffer_req(new_bh) && !buffer_uptodate(new_bh)) ++ goto cleanup; ++ } ++ } ++ ++ /* Update the inode. */ ++ EXT2_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0; ++ inode->i_ctime = CURRENT_TIME; ++ if (IS_SYNC(inode)) { ++ error = ext2_sync_inode (inode); ++ if (error) ++ goto cleanup; ++ } else ++ mark_inode_dirty(inode); ++ ++ error = 0; ++ if (old_bh && old_bh != new_bh) { ++ /* ++ * If there was an old block, and we are not still using it, ++ * we now release the old block. ++ */ ++ unsigned int refcount = le32_to_cpu(HDR(old_bh)->h_refcount); ++ ++ if (refcount == 1) { ++ /* Free the old block. */ ++ ea_bdebug(old_bh, "freeing"); ++ ext2_xattr_free_block(inode, old_bh->b_blocknr); ++ mark_buffer_clean(old_bh); ++ } else { ++ /* Decrement the refcount only. */ ++ refcount--; ++ HDR(old_bh)->h_refcount = cpu_to_le32(refcount); ++ ext2_xattr_quota_free(inode); ++ mark_buffer_dirty(old_bh); ++ ea_bdebug(old_bh, "refcount now=%d", refcount); ++ } ++ } ++ ++cleanup: ++ if (old_bh != new_bh) ++ brelse(new_bh); ++ ++ return error; ++} ++ ++/* ++ * ext2_xattr_delete_inode() ++ * ++ * Free extended attribute resources associated with this inode. This ++ * is called immediately before an inode is freed. 
++ */ ++void ++ext2_xattr_delete_inode(struct inode *inode) ++{ ++ struct buffer_head *bh; ++ unsigned int block = EXT2_I(inode)->i_file_acl; ++ ++ if (!block) ++ return; ++ down(&ext2_xattr_sem); ++ ++ bh = sb_bread(inode->i_sb, block); ++ if (!bh) { ++ ext2_error(inode->i_sb, "ext2_xattr_delete_inode", ++ "inode %ld: block %d read error", inode->i_ino, block); ++ goto cleanup; ++ } ++ ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count))); ++ if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) || ++ HDR(bh)->h_blocks != cpu_to_le32(1)) { ++ ext2_error(inode->i_sb, "ext2_xattr_delete_inode", ++ "inode %ld: bad block %d", inode->i_ino, block); ++ goto cleanup; ++ } ++ ea_bdebug(bh, "refcount now=%d", le32_to_cpu(HDR(bh)->h_refcount) - 1); ++ if (HDR(bh)->h_refcount == cpu_to_le32(1)) { ++ ext2_xattr_cache_remove(bh); ++ ext2_xattr_free_block(inode, block); ++ bforget(bh); ++ bh = NULL; ++ } else { ++ HDR(bh)->h_refcount = cpu_to_le32( ++ le32_to_cpu(HDR(bh)->h_refcount) - 1); ++ mark_buffer_dirty(bh); ++ if (IS_SYNC(inode)) { ++ ll_rw_block(WRITE, 1, &bh); ++ wait_on_buffer(bh); ++ } ++ ext2_xattr_quota_free(inode); ++ } ++ EXT2_I(inode)->i_file_acl = 0; ++ ++cleanup: ++ brelse(bh); ++ up(&ext2_xattr_sem); ++} ++ ++/* ++ * ext2_xattr_put_super() ++ * ++ * This is called when a file system is unmounted. ++ */ ++void ++ext2_xattr_put_super(struct super_block *sb) ++{ ++#ifdef CONFIG_EXT2_FS_XATTR_SHARING ++ mb_cache_shrink(ext2_xattr_cache, sb->s_dev); ++#endif ++} ++ ++#ifdef CONFIG_EXT2_FS_XATTR_SHARING ++ ++/* ++ * ext2_xattr_cache_insert() ++ * ++ * Create a new entry in the extended attribute cache, and insert ++ * it unless such an entry is already in the cache. ++ * ++ * Returns 0, or a negative error number on failure. ++ */ ++static int ++ext2_xattr_cache_insert(struct buffer_head *bh) ++{ ++ __u32 hash = le32_to_cpu(HDR(bh)->h_hash); ++ struct mb_cache_entry *ce; ++ int error; ++ ++ ce = mb_cache_entry_alloc(ext2_xattr_cache); ++ if (!ce) ++ return -ENOMEM; ++ error = mb_cache_entry_insert(ce, bh->b_dev, bh->b_blocknr, &hash); ++ if (error) { ++ mb_cache_entry_free(ce); ++ if (error == -EBUSY) { ++ ea_bdebug(bh, "already in cache (%d cache entries)", ++ atomic_read(&ext2_xattr_cache->c_entry_count)); ++ error = 0; ++ } ++ } else { ++ ea_bdebug(bh, "inserting [%x] (%d cache entries)", (int)hash, ++ atomic_read(&ext2_xattr_cache->c_entry_count)); ++ mb_cache_entry_release(ce); ++ } ++ return error; ++} ++ ++/* ++ * ext2_xattr_cmp() ++ * ++ * Compare two extended attribute blocks for equality. ++ * ++ * Returns 0 if the blocks are equal, 1 if they differ, and ++ * a negative error number on errors. 
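++ *
++ * For example, two blocks each holding exactly user.foo = "bar" (an
++ * illustrative attribute) compare equal here, which is what lets
++ * ext2_xattr_cache_find() share a single block between their inodes.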
++ */ ++static int ++ext2_xattr_cmp(struct ext2_xattr_header *header1, ++ struct ext2_xattr_header *header2) ++{ ++ struct ext2_xattr_entry *entry1, *entry2; ++ ++ entry1 = ENTRY(header1+1); ++ entry2 = ENTRY(header2+1); ++ while (!IS_LAST_ENTRY(entry1)) { ++ if (IS_LAST_ENTRY(entry2)) ++ return 1; ++ if (entry1->e_hash != entry2->e_hash || ++ entry1->e_name_len != entry2->e_name_len || ++ entry1->e_value_size != entry2->e_value_size || ++ memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len)) ++ return 1; ++ if (entry1->e_value_block != 0 || entry2->e_value_block != 0) ++ return -EIO; ++ if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs), ++ (char *)header2 + le16_to_cpu(entry2->e_value_offs), ++ le32_to_cpu(entry1->e_value_size))) ++ return 1; ++ ++ entry1 = EXT2_XATTR_NEXT(entry1); ++ entry2 = EXT2_XATTR_NEXT(entry2); ++ } ++ if (!IS_LAST_ENTRY(entry2)) ++ return 1; ++ return 0; ++} ++ ++/* ++ * ext2_xattr_cache_find() ++ * ++ * Find an identical extended attribute block. ++ * ++ * Returns a pointer to the block found, or NULL if such a block was ++ * not found or an error occurred. ++ */ ++static struct buffer_head * ++ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header) ++{ ++ __u32 hash = le32_to_cpu(header->h_hash); ++ struct mb_cache_entry *ce; ++ ++ if (!header->h_hash) ++ return NULL; /* never share */ ++ ea_idebug(inode, "looking for cached blocks [%x]", (int)hash); ++ ce = mb_cache_entry_find_first(ext2_xattr_cache, 0, inode->i_dev, hash); ++ while (ce) { ++ struct buffer_head *bh = sb_bread(inode->i_sb, ce->e_block); ++ ++ if (!bh) { ++ ext2_error(inode->i_sb, "ext2_xattr_cache_find", ++ "inode %ld: block %ld read error", ++ inode->i_ino, ce->e_block); ++ } else if (le32_to_cpu(HDR(bh)->h_refcount) > ++ EXT2_XATTR_REFCOUNT_MAX) { ++ ea_idebug(inode, "block %ld refcount %d>%d",ce->e_block, ++ le32_to_cpu(HDR(bh)->h_refcount), ++ EXT2_XATTR_REFCOUNT_MAX); ++ } else if (!ext2_xattr_cmp(header, HDR(bh))) { ++ ea_bdebug(bh, "b_count=%d",atomic_read(&(bh->b_count))); ++ mb_cache_entry_release(ce); ++ return bh; ++ } ++ brelse(bh); ++ ce = mb_cache_entry_find_next(ce, 0, inode->i_dev, hash); ++ } ++ return NULL; ++} ++ ++/* ++ * ext2_xattr_cache_remove() ++ * ++ * Remove the cache entry of a block from the cache. Called when a ++ * block becomes invalid. ++ */ ++static void ++ext2_xattr_cache_remove(struct buffer_head *bh) ++{ ++ struct mb_cache_entry *ce; ++ ++ ce = mb_cache_entry_get(ext2_xattr_cache, bh->b_dev, bh->b_blocknr); ++ if (ce) { ++ ea_bdebug(bh, "removing (%d cache entries remaining)", ++ atomic_read(&ext2_xattr_cache->c_entry_count)-1); ++ mb_cache_entry_free(ce); ++ } else ++ ea_bdebug(bh, "no cache entry"); ++} ++ ++#define NAME_HASH_SHIFT 5 ++#define VALUE_HASH_SHIFT 16 ++ ++/* ++ * ext2_xattr_hash_entry() ++ * ++ * Compute the hash of an extended attribute. 
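++ *
++ * The name bytes are folded in with a 5-bit rotate (sketch; hash is
++ * 32 bits wide, so 8*sizeof(hash) - NAME_HASH_SHIFT == 27):
++ *
++ *	hash = (hash << 5) ^ (hash >> 27) ^ next_name_byte;
++ *
++ * and the value words are then folded in with the analogous 16-bit
++ * rotate below.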
++ */ ++static inline void ext2_xattr_hash_entry(struct ext2_xattr_header *header, ++ struct ext2_xattr_entry *entry) ++{ ++ __u32 hash = 0; ++ char *name = entry->e_name; ++ int n; ++ ++ for (n=0; n < entry->e_name_len; n++) { ++ hash = (hash << NAME_HASH_SHIFT) ^ ++ (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^ ++ *name++; ++ } ++ ++ if (entry->e_value_block == 0 && entry->e_value_size != 0) { ++ __u32 *value = (__u32 *)((char *)header + ++ le16_to_cpu(entry->e_value_offs)); ++ for (n = (le32_to_cpu(entry->e_value_size) + ++ EXT2_XATTR_ROUND) >> EXT2_XATTR_PAD_BITS; n; n--) { ++ hash = (hash << VALUE_HASH_SHIFT) ^ ++ (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^ ++ le32_to_cpu(*value++); ++ } ++ } ++ entry->e_hash = cpu_to_le32(hash); ++} ++ ++#undef NAME_HASH_SHIFT ++#undef VALUE_HASH_SHIFT ++ ++#define BLOCK_HASH_SHIFT 16 ++ ++/* ++ * ext2_xattr_rehash() ++ * ++ * Re-compute the extended attribute hash value after an entry has changed. ++ */ ++static void ext2_xattr_rehash(struct ext2_xattr_header *header, ++ struct ext2_xattr_entry *entry) ++{ ++ struct ext2_xattr_entry *here; ++ __u32 hash = 0; ++ ++ ext2_xattr_hash_entry(header, entry); ++ here = ENTRY(header+1); ++ while (!IS_LAST_ENTRY(here)) { ++ if (!here->e_hash) { ++ /* Block is not shared if an entry's hash value == 0 */ ++ hash = 0; ++ break; ++ } ++ hash = (hash << BLOCK_HASH_SHIFT) ^ ++ (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^ ++ le32_to_cpu(here->e_hash); ++ here = EXT2_XATTR_NEXT(here); ++ } ++ header->h_hash = cpu_to_le32(hash); ++} ++ ++#undef BLOCK_HASH_SHIFT ++ ++int __init ++init_ext2_xattr(void) ++{ ++ ext2_xattr_cache = mb_cache_create("ext2_xattr", NULL, ++ sizeof(struct mb_cache_entry) + ++ sizeof(struct mb_cache_entry_index), 1, 61); ++ if (!ext2_xattr_cache) ++ return -ENOMEM; ++ ++ return 0; ++} ++ ++void ++exit_ext2_xattr(void) ++{ ++ mb_cache_destroy(ext2_xattr_cache); ++} ++ ++#else /* CONFIG_EXT2_FS_XATTR_SHARING */ ++ ++int __init ++init_ext2_xattr(void) ++{ ++ return 0; ++} ++ ++void ++exit_ext2_xattr(void) ++{ ++} ++ ++#endif /* CONFIG_EXT2_FS_XATTR_SHARING */ +Index: linux-2.4.22-vanilla/fs/ext2/xattr_user.c +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/ext2/xattr_user.c 2003-11-03 23:41:29.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/ext2/xattr_user.c 2003-11-03 23:41:29.000000000 +0300 +@@ -0,0 +1,103 @@ ++/* ++ * linux/fs/ext2/xattr_user.c ++ * Handler for extended user attributes. ++ * ++ * Copyright (C) 2001 by Andreas Gruenbacher, ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#ifdef CONFIG_EXT2_FS_POSIX_ACL ++# include ++#endif ++ ++#define XATTR_USER_PREFIX "user." 
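++
++/*
++ * Resolution sketch: a request for "user.mime_type" ("mime_type" is
++ * illustrative) matches this prefix in ext2_xattr_resolve_name(), so
++ * the handler functions below only ever see the suffix "mime_type".
++ */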
++ ++static size_t ++ext2_xattr_user_list(char *list, struct inode *inode, ++ const char *name, int name_len) ++{ ++ const int prefix_len = sizeof(XATTR_USER_PREFIX)-1; ++ ++ if (!test_opt(inode->i_sb, XATTR_USER)) ++ return 0; ++ ++ if (list) { ++ memcpy(list, XATTR_USER_PREFIX, prefix_len); ++ memcpy(list+prefix_len, name, name_len); ++ list[prefix_len + name_len] = '\0'; ++ } ++ return prefix_len + name_len + 1; ++} ++ ++static int ++ext2_xattr_user_get(struct inode *inode, const char *name, ++ void *buffer, size_t size) ++{ ++ int error; ++ ++ if (strcmp(name, "") == 0) ++ return -EINVAL; ++ if (!test_opt(inode->i_sb, XATTR_USER)) ++ return -ENOTSUP; ++#ifdef CONFIG_EXT2_FS_POSIX_ACL ++ error = ext2_permission_locked(inode, MAY_READ); ++#else ++ error = permission(inode, MAY_READ); ++#endif ++ if (error) ++ return error; ++ ++ return ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER, name, ++ buffer, size); ++} ++ ++static int ++ext2_xattr_user_set(struct inode *inode, const char *name, ++ const void *value, size_t size, int flags) ++{ ++ int error; ++ ++ if (strcmp(name, "") == 0) ++ return -EINVAL; ++ if (!test_opt(inode->i_sb, XATTR_USER)) ++ return -ENOTSUP; ++ if ( !S_ISREG(inode->i_mode) && ++ (!S_ISDIR(inode->i_mode) || inode->i_mode & S_ISVTX)) ++ return -EPERM; ++#ifdef CONFIG_EXT2_FS_POSIX_ACL ++ error = ext2_permission_locked(inode, MAY_WRITE); ++#else ++ error = permission(inode, MAY_WRITE); ++#endif ++ if (error) ++ return error; ++ ++ return ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, name, ++ value, size, flags); ++} ++ ++struct ext2_xattr_handler ext2_xattr_user_handler = { ++ prefix: XATTR_USER_PREFIX, ++ list: ext2_xattr_user_list, ++ get: ext2_xattr_user_get, ++ set: ext2_xattr_user_set, ++}; ++ ++int __init ++init_ext2_xattr_user(void) ++{ ++ return ext2_xattr_register(EXT2_XATTR_INDEX_USER, ++ &ext2_xattr_user_handler); ++} ++ ++void ++exit_ext2_xattr_user(void) ++{ ++ ext2_xattr_unregister(EXT2_XATTR_INDEX_USER, ++ &ext2_xattr_user_handler); ++} +Index: linux-2.4.22-vanilla/fs/ext3/Makefile +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/ext3/Makefile 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/ext3/Makefile 2003-11-03 23:41:29.000000000 +0300 +@@ -1,5 +1,5 @@ + # +-# Makefile for the linux ext2-filesystem routines. ++# Makefile for the linux ext3-filesystem routines. + # + # Note! Dependencies are done automagically by 'make dep', which also + # removes any old dependencies. 
DON'T put your own dependencies here +@@ -9,10 +9,14 @@ + + O_TARGET := ext3.o + +-export-objs := super.o inode.o ++export-objs := ext3-exports.o + + obj-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \ +- ioctl.o namei.o super.o symlink.o hash.o ++ ioctl.o namei.o super.o symlink.o hash.o ext3-exports.o + obj-m := $(O_TARGET) + ++export-objs += xattr.o ++obj-$(CONFIG_EXT3_FS_XATTR) += xattr.o ++obj-$(CONFIG_EXT3_FS_XATTR_USER) += xattr_user.o ++ + include $(TOPDIR)/Rules.make +Index: linux-2.4.22-vanilla/fs/ext3/file.c +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/ext3/file.c 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/ext3/file.c 2003-11-03 23:41:29.000000000 +0300 +@@ -23,6 +23,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -127,5 +128,9 @@ + struct inode_operations ext3_file_inode_operations = { + truncate: ext3_truncate, /* BKL held */ + setattr: ext3_setattr, /* BKL held */ ++ setxattr: ext3_setxattr, /* BKL held */ ++ getxattr: ext3_getxattr, /* BKL held */ ++ listxattr: ext3_listxattr, /* BKL held */ ++ removexattr: ext3_removexattr, /* BKL held */ + }; + +Index: linux-2.4.22-vanilla/fs/ext3/ialloc.c +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/ext3/ialloc.c 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/ext3/ialloc.c 2003-11-03 23:41:29.000000000 +0300 +@@ -17,6 +17,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -216,6 +217,7 @@ + * as writing the quota to disk may need the lock as well. + */ + DQUOT_INIT(inode); ++ ext3_xattr_delete_inode(handle, inode); + DQUOT_FREE_INODE(inode); + DQUOT_DROP(inode); + +Index: linux-2.4.22-vanilla/fs/ext3/inode.c +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/ext3/inode.c 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/ext3/inode.c 2003-11-03 23:41:29.000000000 +0300 +@@ -39,6 +39,18 @@ + */ + #undef SEARCH_FROM_ZERO + ++/* ++ * Test whether an inode is a fast symlink. ++ */ ++static inline int ext3_inode_is_fast_symlink(struct inode *inode) ++{ ++ int ea_blocks = inode->u.ext3_i.i_file_acl ? ++ (inode->i_sb->s_blocksize >> 9) : 0; ++ ++ return (S_ISLNK(inode->i_mode) && ++ inode->i_blocks - ea_blocks == 0); ++} ++ + /* The ext3 forget function must perform a revoke if we are freeing data + * which has been journaled. Metadata (eg. indirect blocks) must be + * revoked in all cases. +@@ -48,7 +60,7 @@ + * still needs to be revoked. 
+ */ + +-static int ext3_forget(handle_t *handle, int is_metadata, ++int ext3_forget(handle_t *handle, int is_metadata, + struct inode *inode, struct buffer_head *bh, + int blocknr) + { +@@ -179,9 +191,7 @@ + { + handle_t *handle; + +- if (is_bad_inode(inode) || +- inode->i_ino == EXT3_ACL_IDX_INO || +- inode->i_ino == EXT3_ACL_DATA_INO) ++ if (is_bad_inode(inode)) + goto no_delete; + + lock_kernel(); +@@ -1870,6 +1880,8 @@ + if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || + S_ISLNK(inode->i_mode))) + return; ++ if (ext3_inode_is_fast_symlink(inode)) ++ return; + if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) + return; + +@@ -2017,8 +2029,6 @@ + struct ext3_group_desc * gdp; + + if ((inode->i_ino != EXT3_ROOT_INO && +- inode->i_ino != EXT3_ACL_IDX_INO && +- inode->i_ino != EXT3_ACL_DATA_INO && + inode->i_ino != EXT3_JOURNAL_INO && + inode->i_ino < EXT3_FIRST_INO(inode->i_sb)) || + inode->i_ino > le32_to_cpu( +@@ -2159,10 +2169,7 @@ + inode->u.ext3_i.i_data[block] = iloc.raw_inode->i_block[block]; + INIT_LIST_HEAD(&inode->u.ext3_i.i_orphan); + +- if (inode->i_ino == EXT3_ACL_IDX_INO || +- inode->i_ino == EXT3_ACL_DATA_INO) +- /* Nothing to do */ ; +- else if (S_ISREG(inode->i_mode)) { ++ if (S_ISREG(inode->i_mode)) { + inode->i_op = &ext3_file_inode_operations; + inode->i_fop = &ext3_file_operations; + inode->i_mapping->a_ops = &ext3_aops; +@@ -2170,15 +2177,17 @@ + inode->i_op = &ext3_dir_inode_operations; + inode->i_fop = &ext3_dir_operations; + } else if (S_ISLNK(inode->i_mode)) { +- if (!inode->i_blocks) ++ if (ext3_inode_is_fast_symlink(inode)) + inode->i_op = &ext3_fast_symlink_inode_operations; + else { +- inode->i_op = &page_symlink_inode_operations; ++ inode->i_op = &ext3_symlink_inode_operations; + inode->i_mapping->a_ops = &ext3_aops; + } +- } else ++ } else { ++ inode->i_op = &ext3_special_inode_operations; + init_special_inode(inode, inode->i_mode, + le32_to_cpu(iloc.raw_inode->i_block[0])); ++ } + brelse(iloc.bh); + ext3_set_inode_flags(inode); + return; +Index: linux-2.4.22-vanilla/fs/ext3/namei.c +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/ext3/namei.c 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/ext3/namei.c 2003-11-03 23:41:29.000000000 +0300 +@@ -29,6 +29,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -1613,7 +1614,7 @@ + if (IS_SYNC(dir)) + handle->h_sync = 1; + +- inode = ext3_new_inode (handle, dir, S_IFDIR); ++ inode = ext3_new_inode (handle, dir, S_IFDIR | mode); + err = PTR_ERR(inode); + if (IS_ERR(inode)) + goto out_stop; +@@ -1621,7 +1622,6 @@ + inode->i_op = &ext3_dir_inode_operations; + inode->i_fop = &ext3_dir_operations; + inode->i_size = EXT3_I(inode)->i_disksize = inode->i_sb->s_blocksize; +- inode->i_blocks = 0; + dir_block = ext3_bread (handle, inode, 0, 1, &err); + if (!dir_block) { + inode->i_nlink--; /* is this nlink == 0? 
*/ +@@ -1648,9 +1648,6 @@ + BUFFER_TRACE(dir_block, "call ext3_journal_dirty_metadata"); + ext3_journal_dirty_metadata(handle, dir_block); + brelse (dir_block); +- inode->i_mode = S_IFDIR | mode; +- if (dir->i_mode & S_ISGID) +- inode->i_mode |= S_ISGID; + ext3_mark_inode_dirty(handle, inode); + err = ext3_add_entry (handle, dentry, inode); + if (err) { +@@ -2019,7 +2016,7 @@ + goto out_stop; + + if (l > sizeof (EXT3_I(inode)->i_data)) { +- inode->i_op = &page_symlink_inode_operations; ++ inode->i_op = &ext3_symlink_inode_operations; + inode->i_mapping->a_ops = &ext3_aops; + /* + * block_symlink() calls back into ext3_prepare/commit_write. +@@ -2244,4 +2241,16 @@ + rmdir: ext3_rmdir, /* BKL held */ + mknod: ext3_mknod, /* BKL held */ + rename: ext3_rename, /* BKL held */ ++ setxattr: ext3_setxattr, /* BKL held */ ++ getxattr: ext3_getxattr, /* BKL held */ ++ listxattr: ext3_listxattr, /* BKL held */ ++ removexattr: ext3_removexattr, /* BKL held */ + }; ++ ++struct inode_operations ext3_special_inode_operations = { ++ setxattr: ext3_setxattr, /* BKL held */ ++ getxattr: ext3_getxattr, /* BKL held */ ++ listxattr: ext3_listxattr, /* BKL held */ ++ removexattr: ext3_removexattr, /* BKL held */ ++}; ++ +Index: linux-2.4.22-vanilla/fs/ext3/super.c +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/ext3/super.c 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/ext3/super.c 2003-11-03 23:41:29.000000000 +0300 +@@ -24,6 +24,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -406,6 +407,7 @@ + kdev_t j_dev = sbi->s_journal->j_dev; + int i; + ++ ext3_xattr_put_super(sb); + journal_destroy(sbi->s_journal); + if (!(sb->s_flags & MS_RDONLY)) { + EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); +@@ -505,6 +507,7 @@ + int is_remount) + { + unsigned long *mount_options = &sbi->s_mount_opt; ++ + uid_t *resuid = &sbi->s_resuid; + gid_t *resgid = &sbi->s_resgid; + char * this_char; +@@ -517,6 +520,13 @@ + this_char = strtok (NULL, ",")) { + if ((value = strchr (this_char, '=')) != NULL) + *value++ = 0; ++#ifdef CONFIG_EXT3_FS_XATTR_USER ++ if (!strcmp (this_char, "user_xattr")) ++ set_opt (*mount_options, XATTR_USER); ++ else if (!strcmp (this_char, "nouser_xattr")) ++ clear_opt (*mount_options, XATTR_USER); ++ else ++#endif + if (!strcmp (this_char, "bsddf")) + clear_opt (*mount_options, MINIX_DF); + else if (!strcmp (this_char, "nouid32")) { +@@ -934,6 +944,12 @@ + sbi->s_mount_opt = 0; + sbi->s_resuid = EXT3_DEF_RESUID; + sbi->s_resgid = EXT3_DEF_RESGID; ++ ++ /* Default extended attribute flags */ ++#ifdef CONFIG_EXT3_FS_XATTR_USER ++ /* set_opt(sbi->s_mount_opt, XATTR_USER); */ ++#endif ++ + if (!parse_options ((char *) data, &sb_block, sbi, &journal_inum, 0)) { + sb->s_dev = 0; + goto out_fail; +@@ -1827,17 +1843,29 @@ + old_sync_dquot = ext3_qops.sync_dquot; + ext3_qops.sync_dquot = ext3_sync_dquot; + #endif +- return register_filesystem(&ext3_fs_type); ++ int error = init_ext3_xattr(); ++ if (error) ++ return error; ++ error = init_ext3_xattr_user(); ++ if (error) ++ goto fail; ++ error = register_filesystem(&ext3_fs_type); ++ if (!error) ++ return 0; ++ ++ exit_ext3_xattr_user(); ++fail: ++ exit_ext3_xattr(); ++ return error; + } + + static void __exit exit_ext3_fs(void) + { + unregister_filesystem(&ext3_fs_type); ++ exit_ext3_xattr_user(); ++ exit_ext3_xattr(); + } + +-EXPORT_SYMBOL(ext3_force_commit); +-EXPORT_SYMBOL(ext3_bread); +- + MODULE_AUTHOR("Remy Card, Stephen Tweedie, 
Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
+ MODULE_DESCRIPTION("Second Extended Filesystem with journaling extensions");
+ MODULE_LICENSE("GPL");
+Index: linux-2.4.22-vanilla/fs/ext3/symlink.c
+===================================================================
+--- linux-2.4.22-vanilla.orig/fs/ext3/symlink.c	2003-11-03 23:41:26.000000000 +0300
++++ linux-2.4.22-vanilla/fs/ext3/symlink.c	2003-11-03 23:41:29.000000000 +0300
+@@ -20,6 +20,7 @@
+ #include
+ #include
+ #include
++#include <linux/ext3_xattr.h>
+ 
+ static int ext3_readlink(struct dentry *dentry, char *buffer, int buflen)
+ {
+@@ -33,7 +34,20 @@
+ 	return vfs_follow_link(nd, s);
+ }
+ 
++struct inode_operations ext3_symlink_inode_operations = {
++	readlink:	page_readlink,		/* BKL not held.  Don't need */
++	follow_link:	page_follow_link,	/* BKL not held.  Don't need */
++	setxattr:	ext3_setxattr,		/* BKL held */
++	getxattr:	ext3_getxattr,		/* BKL held */
++	listxattr:	ext3_listxattr,		/* BKL held */
++	removexattr:	ext3_removexattr,	/* BKL held */
++};
++
+ struct inode_operations ext3_fast_symlink_inode_operations = {
+ 	readlink:	ext3_readlink,		/* BKL not held.  Don't need */
+ 	follow_link:	ext3_follow_link,	/* BKL not held.  Don't need */
++	setxattr:	ext3_setxattr,		/* BKL held */
++	getxattr:	ext3_getxattr,		/* BKL held */
++	listxattr:	ext3_listxattr,		/* BKL held */
++	removexattr:	ext3_removexattr,	/* BKL held */
+ };
+Index: linux-2.4.22-vanilla/fs/ext3/xattr.c
+===================================================================
+--- linux-2.4.22-vanilla.orig/fs/ext3/xattr.c	2003-11-03 23:41:29.000000000 +0300
++++ linux-2.4.22-vanilla/fs/ext3/xattr.c	2003-11-03 23:41:29.000000000 +0300
+@@ -0,0 +1,1225 @@
++/*
++ * linux/fs/ext3/xattr.c
++ *
++ * Copyright (C) 2001 by Andreas Gruenbacher,
++ *
++ * Fix by Harrison Xing .
++ * Ext3 code with a lot of help from Eric Jarman .
++ * Extended attributes for symlinks and special files added per
++ *  suggestion of Luka Renko .
++ */
++
++/*
++ * Extended attributes are stored on disk blocks allocated outside of
++ * any inode. The i_file_acl field is then made to point to this allocated
++ * block. If all extended attributes of an inode are identical, these
++ * inodes may share the same extended attribute block. Such situations
++ * are automatically detected by keeping a cache of recent attribute block
++ * numbers and hashes over the block's contents in memory.
++ *
++ *
++ * Extended attribute block layout:
++ *
++ *   +------------------+
++ *   | header           |
++ *   | entry 1          | |
++ *   | entry 2          | | growing downwards
++ *   | entry 3          | v
++ *   | four null bytes  |
++ *   | . . .            |
++ *   | value 1          | ^
++ *   | value 3          | | growing upwards
++ *   | value 2          | |
++ *   +------------------+
++ *
++ * The block header is followed by multiple entry descriptors. These entry
++ * descriptors are variable in size, and aligned to EXT3_XATTR_PAD
++ * byte boundaries. The entry descriptors are sorted by attribute name,
++ * so that two extended attribute blocks can be compared efficiently.
++ *
++ * Attribute values are aligned to the end of the block, stored in
++ * no specific order. They are also padded to EXT3_XATTR_PAD byte
++ * boundaries. No additional gaps are left between them.
++ *
++ * Locking strategy
++ * ----------------
++ * The VFS already holds the BKL and the inode->i_sem semaphore when any of
++ * the xattr inode operations are called, so we are guaranteed that only one
++ * process accesses extended attributes of an inode at any time.
++ *
++ * For writing we also grab the ext3_xattr_sem semaphore.
This ensures that ++ * only a single process is modifying an extended attribute block, even ++ * if the block is shared among inodes. ++ * ++ * Note for porting to 2.5 ++ * ----------------------- ++ * The BKL will no longer be held in the xattr inode operations. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define EXT3_EA_USER "user." ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) ++# define mark_buffer_dirty(bh) mark_buffer_dirty(bh, 1) ++#endif ++ ++#define HDR(bh) ((struct ext3_xattr_header *)((bh)->b_data)) ++#define ENTRY(ptr) ((struct ext3_xattr_entry *)(ptr)) ++#define FIRST_ENTRY(bh) ENTRY(HDR(bh)+1) ++#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0) ++ ++#ifdef EXT3_XATTR_DEBUG ++# define ea_idebug(inode, f...) do { \ ++ printk(KERN_DEBUG "inode %s:%ld: ", \ ++ kdevname(inode->i_dev), inode->i_ino); \ ++ printk(f); \ ++ printk("\n"); \ ++ } while (0) ++# define ea_bdebug(bh, f...) do { \ ++ printk(KERN_DEBUG "block %s:%ld: ", \ ++ kdevname(bh->b_dev), bh->b_blocknr); \ ++ printk(f); \ ++ printk("\n"); \ ++ } while (0) ++#else ++# define ea_idebug(f...) ++# define ea_bdebug(f...) ++#endif ++ ++static int ext3_xattr_set2(handle_t *, struct inode *, struct buffer_head *, ++ struct ext3_xattr_header *); ++ ++#ifdef CONFIG_EXT3_FS_XATTR_SHARING ++ ++static int ext3_xattr_cache_insert(struct buffer_head *); ++static struct buffer_head *ext3_xattr_cache_find(struct inode *, ++ struct ext3_xattr_header *); ++static void ext3_xattr_cache_remove(struct buffer_head *); ++static void ext3_xattr_rehash(struct ext3_xattr_header *, ++ struct ext3_xattr_entry *); ++ ++static struct mb_cache *ext3_xattr_cache; ++ ++#else ++# define ext3_xattr_cache_insert(bh) 0 ++# define ext3_xattr_cache_find(inode, header) NULL ++# define ext3_xattr_cache_remove(bh) while(0) {} ++# define ext3_xattr_rehash(header, entry) while(0) {} ++#endif ++ ++/* ++ * If a file system does not share extended attributes among inodes, ++ * we should not need the ext3_xattr_sem semaphore. However, the ++ * filesystem may still contain shared blocks, so we always take ++ * the lock. ++ */ ++ ++DECLARE_MUTEX(ext3_xattr_sem); ++ ++static inline int ++ext3_xattr_new_block(handle_t *handle, struct inode *inode, ++ int * errp, int force) ++{ ++ struct super_block *sb = inode->i_sb; ++ int goal = le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) + ++ EXT3_I(inode)->i_block_group * EXT3_BLOCKS_PER_GROUP(sb); ++ ++ /* How can we enforce the allocation? */ ++ int block = ext3_new_block(handle, inode, goal, 0, 0, errp); ++#ifdef OLD_QUOTAS ++ if (!*errp) ++ inode->i_blocks += inode->i_sb->s_blocksize >> 9; ++#endif ++ return block; ++} ++ ++static inline int ++ext3_xattr_quota_alloc(struct inode *inode, int force) ++{ ++ /* How can we enforce the allocation? 
*/
++#ifdef OLD_QUOTAS
++	int error = DQUOT_ALLOC_BLOCK(inode->i_sb, inode, 1);
++	if (!error)
++		inode->i_blocks += inode->i_sb->s_blocksize >> 9;
++#else
++	int error = DQUOT_ALLOC_BLOCK(inode, 1);
++#endif
++	return error;
++}
++
++#ifdef OLD_QUOTAS
++
++static inline void
++ext3_xattr_quota_free(struct inode *inode)
++{
++	DQUOT_FREE_BLOCK(inode->i_sb, inode, 1);
++	inode->i_blocks -= inode->i_sb->s_blocksize >> 9;
++}
++
++static inline void
++ext3_xattr_free_block(handle_t *handle, struct inode * inode,
++		      unsigned long block)
++{
++	ext3_free_blocks(handle, inode, block, 1);
++	inode->i_blocks -= inode->i_sb->s_blocksize >> 9;
++}
++
++#else
++# define ext3_xattr_quota_free(inode) \
++	DQUOT_FREE_BLOCK(inode, 1)
++# define ext3_xattr_free_block(handle, inode, block) \
++	ext3_free_blocks(handle, inode, block, 1)
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18)
++
++static inline struct buffer_head *
++sb_bread(struct super_block *sb, int block)
++{
++	return bread(sb->s_dev, block, sb->s_blocksize);
++}
++
++static inline struct buffer_head *
++sb_getblk(struct super_block *sb, int block)
++{
++	return getblk(sb->s_dev, block, sb->s_blocksize);
++}
++
++#endif
++
++struct ext3_xattr_handler *ext3_xattr_handlers[EXT3_XATTR_INDEX_MAX];
++rwlock_t ext3_handler_lock = RW_LOCK_UNLOCKED;
++
++int
++ext3_xattr_register(int name_index, struct ext3_xattr_handler *handler)
++{
++	int error = -EINVAL;
++
++	if (name_index > 0 && name_index <= EXT3_XATTR_INDEX_MAX) {
++		write_lock(&ext3_handler_lock);
++		if (!ext3_xattr_handlers[name_index-1]) {
++			ext3_xattr_handlers[name_index-1] = handler;
++			error = 0;
++		}
++		write_unlock(&ext3_handler_lock);
++	}
++	return error;
++}
++
++void
++ext3_xattr_unregister(int name_index, struct ext3_xattr_handler *handler)
++{
++	if (name_index > 0 && name_index <= EXT3_XATTR_INDEX_MAX) {
++		write_lock(&ext3_handler_lock);
++		ext3_xattr_handlers[name_index-1] = NULL;
++		write_unlock(&ext3_handler_lock);
++	}
++}
++
++static inline const char *
++strcmp_prefix(const char *a, const char *a_prefix)
++{
++	while (*a_prefix && *a == *a_prefix) {
++		a++;
++		a_prefix++;
++	}
++	return *a_prefix ? NULL : a;
++}
++
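++/*
++ * A minimal sketch of what the prefix match returns (illustrative
++ * attribute names, not part of the API): with the "user." handler
++ * registered,
++ *
++ *	strcmp_prefix("user.mime_type", "user.")  returns "mime_type"
++ *	strcmp_prefix("trusted.md5sum", "user.")  returns NULL
++ *
++ * ext3_xattr_resolve_name() below uses this to strip the prefix before
++ * the per-handler get/set routines ever see the name.
++ */
++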
++/*
++ * Decode the extended attribute name, and translate it into
++ * the name_index and name suffix.
++ */
++static inline struct ext3_xattr_handler *
++ext3_xattr_resolve_name(const char **name)
++{
++	struct ext3_xattr_handler *handler = NULL;
++	int i;
++
++	if (!*name)
++		return NULL;
++	read_lock(&ext3_handler_lock);
++	for (i=0; i<EXT3_XATTR_INDEX_MAX; i++) {
++		if (ext3_xattr_handlers[i]) {
++			const char *n = strcmp_prefix(*name,
++				ext3_xattr_handlers[i]->prefix);
++			if (n) {
++				handler = ext3_xattr_handlers[i];
++				*name = n;
++				break;
++			}
++		}
++	}
++	read_unlock(&ext3_handler_lock);
++	return handler;
++}
++
++static inline struct ext3_xattr_handler *
++ext3_xattr_handler(int name_index)
++{
++	struct ext3_xattr_handler *handler = NULL;
++	if (name_index > 0 && name_index <= EXT3_XATTR_INDEX_MAX) {
++		read_lock(&ext3_handler_lock);
++		handler = ext3_xattr_handlers[name_index-1];
++		read_unlock(&ext3_handler_lock);
++	}
++	return handler;
++}
++
++/*
++ * Inode operation getxattr()
++ *
++ * dentry->d_inode->i_sem down
++ * BKL held [before 2.5.x]
++ */
++ssize_t
++ext3_getxattr(struct dentry *dentry, const char *name,
++	      void *buffer, size_t size)
++{
++	struct ext3_xattr_handler *handler;
++	struct inode *inode = dentry->d_inode;
++
++	handler = ext3_xattr_resolve_name(&name);
++	if (!handler)
++		return -ENOTSUP;
++	return handler->get(inode, name, buffer, size);
++}
++
++/*
++ * Inode operation listxattr()
++ *
++ * dentry->d_inode->i_sem down
++ * BKL held [before 2.5.x]
++ */
++ssize_t
++ext3_listxattr(struct dentry *dentry, char *buffer, size_t size)
++{
++	return ext3_xattr_list(dentry->d_inode, buffer, size);
++}
++
++/*
++ * Inode operation setxattr()
++ *
++ * dentry->d_inode->i_sem down
++ * BKL held [before 2.5.x]
++ */
++int
++ext3_setxattr(struct dentry *dentry, const char *name,
++	      const void *value, size_t size, int flags)
++{
++	struct ext3_xattr_handler *handler;
++	struct inode *inode = dentry->d_inode;
++
++	if (size == 0)
++		value = "";  /* empty EA, do not remove */
++	handler = ext3_xattr_resolve_name(&name);
++	if (!handler)
++		return -ENOTSUP;
++	return handler->set(inode, name, value, size, flags);
++}
++
++/*
++ * Inode operation removexattr()
++ *
++ * dentry->d_inode->i_sem down
++ * BKL held [before 2.5.x]
++ */
++int
++ext3_removexattr(struct dentry *dentry, const char *name)
++{
++	struct ext3_xattr_handler *handler;
++	struct inode *inode = dentry->d_inode;
++
++	handler = ext3_xattr_resolve_name(&name);
++	if (!handler)
++		return -ENOTSUP;
++	return handler->set(inode, name, NULL, 0, XATTR_REPLACE);
++}
++
++/*
++ * ext3_xattr_get()
++ *
++ * Copy an extended attribute into the buffer
++ * provided, or compute the buffer size required.
++ * Buffer is NULL to compute the size of the buffer required.
++ *
++ * Returns a negative error number on failure, or the number of bytes
++ * used / required on success.
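++ *
++ * The usual calling pattern is therefore two passes (a sketch, with
++ * error handling omitted):
++ *
++ *	int size = ext3_xattr_get(inode, EXT3_XATTR_INDEX_USER,
++ *				  "mime_type", NULL, 0);
++ *	char *buf = kmalloc(size, GFP_KERNEL);
++ *	size = ext3_xattr_get(inode, EXT3_XATTR_INDEX_USER,
++ *			      "mime_type", buf, size);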
++ */ ++int ++ext3_xattr_get(struct inode *inode, int name_index, const char *name, ++ void *buffer, size_t buffer_size) ++{ ++ struct buffer_head *bh = NULL; ++ struct ext3_xattr_entry *entry; ++ unsigned int block, size; ++ char *end; ++ int name_len, error; ++ ++ ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld", ++ name_index, name, buffer, (long)buffer_size); ++ ++ if (name == NULL) ++ return -EINVAL; ++ if (!EXT3_I(inode)->i_file_acl) ++ return -ENOATTR; ++ block = EXT3_I(inode)->i_file_acl; ++ ea_idebug(inode, "reading block %d", block); ++ bh = sb_bread(inode->i_sb, block); ++ if (!bh) ++ return -EIO; ++ ea_bdebug(bh, "b_count=%d, refcount=%d", ++ atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount)); ++ end = bh->b_data + bh->b_size; ++ if (HDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) || ++ HDR(bh)->h_blocks != cpu_to_le32(1)) { ++bad_block: ext3_error(inode->i_sb, "ext3_xattr_get", ++ "inode %ld: bad block %d", inode->i_ino, block); ++ error = -EIO; ++ goto cleanup; ++ } ++ /* find named attribute */ ++ name_len = strlen(name); ++ ++ error = -ERANGE; ++ if (name_len > 255) ++ goto cleanup; ++ entry = FIRST_ENTRY(bh); ++ while (!IS_LAST_ENTRY(entry)) { ++ struct ext3_xattr_entry *next = ++ EXT3_XATTR_NEXT(entry); ++ if ((char *)next >= end) ++ goto bad_block; ++ if (name_index == entry->e_name_index && ++ name_len == entry->e_name_len && ++ memcmp(name, entry->e_name, name_len) == 0) ++ goto found; ++ entry = next; ++ } ++ /* Check the remaining name entries */ ++ while (!IS_LAST_ENTRY(entry)) { ++ struct ext3_xattr_entry *next = ++ EXT3_XATTR_NEXT(entry); ++ if ((char *)next >= end) ++ goto bad_block; ++ entry = next; ++ } ++ if (ext3_xattr_cache_insert(bh)) ++ ea_idebug(inode, "cache insert failed"); ++ error = -ENOATTR; ++ goto cleanup; ++found: ++ /* check the buffer size */ ++ if (entry->e_value_block != 0) ++ goto bad_block; ++ size = le32_to_cpu(entry->e_value_size); ++ if (size > inode->i_sb->s_blocksize || ++ le16_to_cpu(entry->e_value_offs) + size > inode->i_sb->s_blocksize) ++ goto bad_block; ++ ++ if (ext3_xattr_cache_insert(bh)) ++ ea_idebug(inode, "cache insert failed"); ++ if (buffer) { ++ error = -ERANGE; ++ if (size > buffer_size) ++ goto cleanup; ++ /* return value of attribute */ ++ memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs), ++ size); ++ } ++ error = size; ++ ++cleanup: ++ brelse(bh); ++ ++ return error; ++} ++ ++/* ++ * ext3_xattr_list() ++ * ++ * Copy a list of attribute names into the buffer ++ * provided, or compute the buffer size required. ++ * Buffer is NULL to compute the size of the buffer required. ++ * ++ * Returns a negative error number on failure, or the number of bytes ++ * used / required on success. 
++ */ ++int ++ext3_xattr_list(struct inode *inode, char *buffer, size_t buffer_size) ++{ ++ struct buffer_head *bh = NULL; ++ struct ext3_xattr_entry *entry; ++ unsigned int block, size = 0; ++ char *buf, *end; ++ int error; ++ ++ ea_idebug(inode, "buffer=%p, buffer_size=%ld", ++ buffer, (long)buffer_size); ++ ++ if (!EXT3_I(inode)->i_file_acl) ++ return 0; ++ block = EXT3_I(inode)->i_file_acl; ++ ea_idebug(inode, "reading block %d", block); ++ bh = sb_bread(inode->i_sb, block); ++ if (!bh) ++ return -EIO; ++ ea_bdebug(bh, "b_count=%d, refcount=%d", ++ atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount)); ++ end = bh->b_data + bh->b_size; ++ if (HDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) || ++ HDR(bh)->h_blocks != cpu_to_le32(1)) { ++bad_block: ext3_error(inode->i_sb, "ext3_xattr_list", ++ "inode %ld: bad block %d", inode->i_ino, block); ++ error = -EIO; ++ goto cleanup; ++ } ++ /* compute the size required for the list of attribute names */ ++ for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry); ++ entry = EXT3_XATTR_NEXT(entry)) { ++ struct ext3_xattr_handler *handler; ++ struct ext3_xattr_entry *next = ++ EXT3_XATTR_NEXT(entry); ++ if ((char *)next >= end) ++ goto bad_block; ++ ++ handler = ext3_xattr_handler(entry->e_name_index); ++ if (handler) ++ size += handler->list(NULL, inode, entry->e_name, ++ entry->e_name_len); ++ } ++ ++ if (ext3_xattr_cache_insert(bh)) ++ ea_idebug(inode, "cache insert failed"); ++ if (!buffer) { ++ error = size; ++ goto cleanup; ++ } else { ++ error = -ERANGE; ++ if (size > buffer_size) ++ goto cleanup; ++ } ++ ++ /* list the attribute names */ ++ buf = buffer; ++ for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry); ++ entry = EXT3_XATTR_NEXT(entry)) { ++ struct ext3_xattr_handler *handler; ++ ++ handler = ext3_xattr_handler(entry->e_name_index); ++ if (handler) ++ buf += handler->list(buf, inode, entry->e_name, ++ entry->e_name_len); ++ } ++ error = size; ++ ++cleanup: ++ brelse(bh); ++ ++ return error; ++} ++ ++/* ++ * If the EXT3_FEATURE_COMPAT_EXT_ATTR feature of this file system is ++ * not set, set it. ++ */ ++static void ext3_xattr_update_super_block(handle_t *handle, ++ struct super_block *sb) ++{ ++ if (EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_EXT_ATTR)) ++ return; ++ ++ lock_super(sb); ++ ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh); ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) ++ EXT3_SB(sb)->s_feature_compat |= EXT3_FEATURE_COMPAT_EXT_ATTR; ++#endif ++ EXT3_SB(sb)->s_es->s_feature_compat |= ++ cpu_to_le32(EXT3_FEATURE_COMPAT_EXT_ATTR); ++ sb->s_dirt = 1; ++ ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh); ++ unlock_super(sb); ++} ++ ++/* ++ * ext3_xattr_set() ++ * ++ * Create, replace or remove an extended attribute for this inode. Buffer ++ * is NULL to remove an existing extended attribute, and non-NULL to ++ * either replace an existing extended attribute, or create a new extended ++ * attribute. The flags XATTR_REPLACE and XATTR_CREATE ++ * specify that an extended attribute must exist and must not exist ++ * previous to the call, respectively. ++ * ++ * Returns 0, or a negative error number on failure. 
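++ *
++ * For example (a sketch; "handle" is a running journal handle and
++ * "user.foo"/"bar" are illustrative values):
++ *
++ *	ext3_xattr_set(handle, inode, EXT3_XATTR_INDEX_USER,
++ *		       "foo", "bar", 3, XATTR_CREATE);  creates user.foo
++ *	ext3_xattr_set(handle, inode, EXT3_XATTR_INDEX_USER,
++ *		       "foo", NULL, 0, 0);              removes it again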
++ */ ++int ++ext3_xattr_set(handle_t *handle, struct inode *inode, int name_index, ++ const char *name, const void *value, size_t value_len, int flags) ++{ ++ struct super_block *sb = inode->i_sb; ++ struct buffer_head *bh = NULL; ++ struct ext3_xattr_header *header = NULL; ++ struct ext3_xattr_entry *here, *last; ++ unsigned int name_len; ++ int block = EXT3_I(inode)->i_file_acl; ++ int min_offs = sb->s_blocksize, not_found = 1, free, error; ++ char *end; ++ ++ /* ++ * header -- Points either into bh, or to a temporarily ++ * allocated buffer. ++ * here -- The named entry found, or the place for inserting, within ++ * the block pointed to by header. ++ * last -- Points right after the last named entry within the block ++ * pointed to by header. ++ * min_offs -- The offset of the first value (values are aligned ++ * towards the end of the block). ++ * end -- Points right after the block pointed to by header. ++ */ ++ ++ ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld", ++ name_index, name, value, (long)value_len); ++ ++ if (IS_RDONLY(inode)) ++ return -EROFS; ++ if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) ++ return -EPERM; ++ if (value == NULL) ++ value_len = 0; ++ if (name == NULL) ++ return -EINVAL; ++ name_len = strlen(name); ++ if (name_len > 255 || value_len > sb->s_blocksize) ++ return -ERANGE; ++ down(&ext3_xattr_sem); ++ ++ if (block) { ++ /* The inode already has an extended attribute block. */ ++ bh = sb_bread(sb, block); ++ error = -EIO; ++ if (!bh) ++ goto cleanup; ++ ea_bdebug(bh, "b_count=%d, refcount=%d", ++ atomic_read(&(bh->b_count)), ++ le32_to_cpu(HDR(bh)->h_refcount)); ++ header = HDR(bh); ++ end = bh->b_data + bh->b_size; ++ if (header->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) || ++ header->h_blocks != cpu_to_le32(1)) { ++bad_block: ext3_error(sb, "ext3_xattr_set", ++ "inode %ld: bad block %d", inode->i_ino, block); ++ error = -EIO; ++ goto cleanup; ++ } ++ /* Find the named attribute. */ ++ here = FIRST_ENTRY(bh); ++ while (!IS_LAST_ENTRY(here)) { ++ struct ext3_xattr_entry *next = EXT3_XATTR_NEXT(here); ++ if ((char *)next >= end) ++ goto bad_block; ++ if (!here->e_value_block && here->e_value_size) { ++ int offs = le16_to_cpu(here->e_value_offs); ++ if (offs < min_offs) ++ min_offs = offs; ++ } ++ not_found = name_index - here->e_name_index; ++ if (!not_found) ++ not_found = name_len - here->e_name_len; ++ if (!not_found) ++ not_found = memcmp(name, here->e_name,name_len); ++ if (not_found <= 0) ++ break; ++ here = next; ++ } ++ last = here; ++ /* We still need to compute min_offs and last. */ ++ while (!IS_LAST_ENTRY(last)) { ++ struct ext3_xattr_entry *next = EXT3_XATTR_NEXT(last); ++ if ((char *)next >= end) ++ goto bad_block; ++ if (!last->e_value_block && last->e_value_size) { ++ int offs = le16_to_cpu(last->e_value_offs); ++ if (offs < min_offs) ++ min_offs = offs; ++ } ++ last = next; ++ } ++ ++ /* Check whether we have enough space left. */ ++ free = min_offs - ((char*)last - (char*)header) - sizeof(__u32); ++ } else { ++ /* We will use a new extended attribute block. */ ++ free = sb->s_blocksize - ++ sizeof(struct ext3_xattr_header) - sizeof(__u32); ++ here = last = NULL; /* avoid gcc uninitialized warning. */ ++ } ++ ++ if (not_found) { ++ /* Request to remove a nonexistent attribute? */ ++ error = -ENOATTR; ++ if (flags & XATTR_REPLACE) ++ goto cleanup; ++ error = 0; ++ if (value == NULL) ++ goto cleanup; ++ else ++ free -= EXT3_XATTR_LEN(name_len); ++ } else { ++ /* Request to create an existing attribute? 
*/ ++ error = -EEXIST; ++ if (flags & XATTR_CREATE) ++ goto cleanup; ++ if (!here->e_value_block && here->e_value_size) { ++ unsigned int size = le32_to_cpu(here->e_value_size); ++ ++ if (le16_to_cpu(here->e_value_offs) + size > ++ sb->s_blocksize || size > sb->s_blocksize) ++ goto bad_block; ++ free += EXT3_XATTR_SIZE(size); ++ } ++ } ++ free -= EXT3_XATTR_SIZE(value_len); ++ error = -ENOSPC; ++ if (free < 0) ++ goto cleanup; ++ ++ /* Here we know that we can set the new attribute. */ ++ ++ if (header) { ++ if (header->h_refcount == cpu_to_le32(1)) { ++ ea_bdebug(bh, "modifying in-place"); ++ ext3_xattr_cache_remove(bh); ++ error = ext3_journal_get_write_access(handle, bh); ++ if (error) ++ goto cleanup; ++ } else { ++ int offset; ++ ++ ea_bdebug(bh, "cloning"); ++ header = kmalloc(bh->b_size, GFP_KERNEL); ++ error = -ENOMEM; ++ if (header == NULL) ++ goto cleanup; ++ memcpy(header, HDR(bh), bh->b_size); ++ header->h_refcount = cpu_to_le32(1); ++ offset = (char *)header - bh->b_data; ++ here = ENTRY((char *)here + offset); ++ last = ENTRY((char *)last + offset); ++ } ++ } else { ++ /* Allocate a buffer where we construct the new block. */ ++ header = kmalloc(sb->s_blocksize, GFP_KERNEL); ++ error = -ENOMEM; ++ if (header == NULL) ++ goto cleanup; ++ memset(header, 0, sb->s_blocksize); ++ end = (char *)header + sb->s_blocksize; ++ header->h_magic = cpu_to_le32(EXT3_XATTR_MAGIC); ++ header->h_blocks = header->h_refcount = cpu_to_le32(1); ++ last = here = ENTRY(header+1); ++ } ++ ++ if (not_found) { ++ /* Insert the new name. */ ++ int size = EXT3_XATTR_LEN(name_len); ++ int rest = (char *)last - (char *)here; ++ memmove((char *)here + size, here, rest); ++ memset(here, 0, size); ++ here->e_name_index = name_index; ++ here->e_name_len = name_len; ++ memcpy(here->e_name, name, name_len); ++ } else { ++ /* Remove the old value. */ ++ if (!here->e_value_block && here->e_value_size) { ++ char *first_val = (char *)header + min_offs; ++ int offs = le16_to_cpu(here->e_value_offs); ++ char *val = (char *)header + offs; ++ size_t size = EXT3_XATTR_SIZE( ++ le32_to_cpu(here->e_value_size)); ++ memmove(first_val + size, first_val, val - first_val); ++ memset(first_val, 0, size); ++ here->e_value_offs = 0; ++ min_offs += size; ++ ++ /* Adjust all value offsets. */ ++ last = ENTRY(header+1); ++ while (!IS_LAST_ENTRY(last)) { ++ int o = le16_to_cpu(last->e_value_offs); ++ if (!last->e_value_block && o < offs) ++ last->e_value_offs = ++ cpu_to_le16(o + size); ++ last = EXT3_XATTR_NEXT(last); ++ } ++ } ++ if (value == NULL) { ++ /* Remove this attribute. */ ++ if (EXT3_XATTR_NEXT(ENTRY(header+1)) == last) { ++ /* This block is now empty. */ ++ error = ext3_xattr_set2(handle, inode, bh,NULL); ++ goto cleanup; ++ } else { ++ /* Remove the old name. */ ++ int size = EXT3_XATTR_LEN(name_len); ++ last = ENTRY((char *)last - size); ++ memmove(here, (char*)here + size, ++ (char*)last - (char*)here); ++ memset(last, 0, size); ++ } ++ } ++ } ++ ++ if (value != NULL) { ++ /* Insert the new value. */ ++ here->e_value_size = cpu_to_le32(value_len); ++ if (value_len) { ++ size_t size = EXT3_XATTR_SIZE(value_len); ++ char *val = (char *)header + min_offs - size; ++ here->e_value_offs = ++ cpu_to_le16((char *)val - (char *)header); ++ memset(val + size - EXT3_XATTR_PAD, 0, ++ EXT3_XATTR_PAD); /* Clear the pad bytes. 
*/ ++ memcpy(val, value, value_len); ++ } ++ } ++ ext3_xattr_rehash(header, here); ++ ++ error = ext3_xattr_set2(handle, inode, bh, header); ++ ++cleanup: ++ brelse(bh); ++ if (!(bh && header == HDR(bh))) ++ kfree(header); ++ up(&ext3_xattr_sem); ++ ++ return error; ++} ++ ++/* ++ * Second half of ext3_xattr_set(): Update the file system. ++ */ ++static int ++ext3_xattr_set2(handle_t *handle, struct inode *inode, ++ struct buffer_head *old_bh, struct ext3_xattr_header *header) ++{ ++ struct super_block *sb = inode->i_sb; ++ struct buffer_head *new_bh = NULL; ++ int error; ++ ++ if (header) { ++ new_bh = ext3_xattr_cache_find(inode, header); ++ if (new_bh) { ++ /* ++ * We found an identical block in the cache. ++ * The old block will be released after updating ++ * the inode. ++ */ ++ ea_bdebug(old_bh, "reusing block %ld", ++ new_bh->b_blocknr); ++ ++ error = -EDQUOT; ++ if (ext3_xattr_quota_alloc(inode, 1)) ++ goto cleanup; ++ ++ error = ext3_journal_get_write_access(handle, new_bh); ++ if (error) ++ goto cleanup; ++ HDR(new_bh)->h_refcount = cpu_to_le32( ++ le32_to_cpu(HDR(new_bh)->h_refcount) + 1); ++ ea_bdebug(new_bh, "refcount now=%d", ++ le32_to_cpu(HDR(new_bh)->h_refcount)); ++ } else if (old_bh && header == HDR(old_bh)) { ++ /* Keep this block. */ ++ new_bh = old_bh; ++ ext3_xattr_cache_insert(new_bh); ++ } else { ++ /* We need to allocate a new block */ ++ int force = EXT3_I(inode)->i_file_acl != 0; ++ int block = ext3_xattr_new_block(handle, inode, ++ &error, force); ++ if (error) ++ goto cleanup; ++ ea_idebug(inode, "creating block %d", block); ++ ++ new_bh = sb_getblk(sb, block); ++ if (!new_bh) { ++getblk_failed: ext3_xattr_free_block(handle, inode, block); ++ error = -EIO; ++ goto cleanup; ++ } ++ lock_buffer(new_bh); ++ error = ext3_journal_get_create_access(handle, new_bh); ++ if (error) { ++ unlock_buffer(new_bh); ++ goto getblk_failed; ++ } ++ memcpy(new_bh->b_data, header, new_bh->b_size); ++ mark_buffer_uptodate(new_bh, 1); ++ unlock_buffer(new_bh); ++ ext3_xattr_cache_insert(new_bh); ++ ++ ext3_xattr_update_super_block(handle, sb); ++ } ++ error = ext3_journal_dirty_metadata(handle, new_bh); ++ if (error) ++ goto cleanup; ++ } ++ ++ /* Update the inode. */ ++ EXT3_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0; ++ inode->i_ctime = CURRENT_TIME; ++ ext3_mark_inode_dirty(handle, inode); ++ if (IS_SYNC(inode)) ++ handle->h_sync = 1; ++ ++ error = 0; ++ if (old_bh && old_bh != new_bh) { ++ /* ++ * If there was an old block, and we are not still using it, ++ * we now release the old block. ++ */ ++ unsigned int refcount = le32_to_cpu(HDR(old_bh)->h_refcount); ++ ++ error = ext3_journal_get_write_access(handle, old_bh); ++ if (error) ++ goto cleanup; ++ if (refcount == 1) { ++ /* Free the old block. */ ++ ea_bdebug(old_bh, "freeing"); ++ ext3_xattr_free_block(handle, inode, old_bh->b_blocknr); ++ ++ /* ext3_forget() calls bforget() for us, but we ++ let our caller release old_bh, so we need to ++ duplicate the handle before. */ ++ get_bh(old_bh); ++ ext3_forget(handle, 1, inode, old_bh,old_bh->b_blocknr); ++ } else { ++ /* Decrement the refcount only. */ ++ refcount--; ++ HDR(old_bh)->h_refcount = cpu_to_le32(refcount); ++ ext3_xattr_quota_free(inode); ++ ext3_journal_dirty_metadata(handle, old_bh); ++ ea_bdebug(old_bh, "refcount now=%d", refcount); ++ } ++ } ++ ++cleanup: ++ if (old_bh != new_bh) ++ brelse(new_bh); ++ ++ return error; ++} ++ ++/* ++ * ext3_xattr_delete_inode() ++ * ++ * Free extended attribute resources associated with this inode. 
This ++ * is called immediately before an inode is freed. ++ */ ++void ++ext3_xattr_delete_inode(handle_t *handle, struct inode *inode) ++{ ++ struct buffer_head *bh; ++ unsigned int block = EXT3_I(inode)->i_file_acl; ++ ++ if (!block) ++ return; ++ down(&ext3_xattr_sem); ++ ++ bh = sb_bread(inode->i_sb, block); ++ if (!bh) { ++ ext3_error(inode->i_sb, "ext3_xattr_delete_inode", ++ "inode %ld: block %d read error", inode->i_ino, block); ++ goto cleanup; ++ } ++ ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count))); ++ if (HDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) || ++ HDR(bh)->h_blocks != cpu_to_le32(1)) { ++ ext3_error(inode->i_sb, "ext3_xattr_delete_inode", ++ "inode %ld: bad block %d", inode->i_ino, block); ++ goto cleanup; ++ } ++ ext3_journal_get_write_access(handle, bh); ++ ea_bdebug(bh, "refcount now=%d", le32_to_cpu(HDR(bh)->h_refcount) - 1); ++ if (HDR(bh)->h_refcount == cpu_to_le32(1)) { ++ ext3_xattr_cache_remove(bh); ++ ext3_xattr_free_block(handle, inode, block); ++ ext3_forget(handle, 1, inode, bh, block); ++ bh = NULL; ++ } else { ++ HDR(bh)->h_refcount = cpu_to_le32( ++ le32_to_cpu(HDR(bh)->h_refcount) - 1); ++ ext3_journal_dirty_metadata(handle, bh); ++ if (IS_SYNC(inode)) ++ handle->h_sync = 1; ++ ext3_xattr_quota_free(inode); ++ } ++ EXT3_I(inode)->i_file_acl = 0; ++ ++cleanup: ++ brelse(bh); ++ up(&ext3_xattr_sem); ++} ++ ++/* ++ * ext3_xattr_put_super() ++ * ++ * This is called when a file system is unmounted. ++ */ ++void ++ext3_xattr_put_super(struct super_block *sb) ++{ ++#ifdef CONFIG_EXT3_FS_XATTR_SHARING ++ mb_cache_shrink(ext3_xattr_cache, sb->s_dev); ++#endif ++} ++ ++#ifdef CONFIG_EXT3_FS_XATTR_SHARING ++ ++/* ++ * ext3_xattr_cache_insert() ++ * ++ * Create a new entry in the extended attribute cache, and insert ++ * it unless such an entry is already in the cache. ++ * ++ * Returns 0, or a negative error number on failure. ++ */ ++static int ++ext3_xattr_cache_insert(struct buffer_head *bh) ++{ ++ __u32 hash = le32_to_cpu(HDR(bh)->h_hash); ++ struct mb_cache_entry *ce; ++ int error; ++ ++ ce = mb_cache_entry_alloc(ext3_xattr_cache); ++ if (!ce) ++ return -ENOMEM; ++ error = mb_cache_entry_insert(ce, bh->b_dev, bh->b_blocknr, &hash); ++ if (error) { ++ mb_cache_entry_free(ce); ++ if (error == -EBUSY) { ++ ea_bdebug(bh, "already in cache (%d cache entries)", ++ atomic_read(&ext3_xattr_cache->c_entry_count)); ++ error = 0; ++ } ++ } else { ++ ea_bdebug(bh, "inserting [%x] (%d cache entries)", (int)hash, ++ atomic_read(&ext3_xattr_cache->c_entry_count)); ++ mb_cache_entry_release(ce); ++ } ++ return error; ++} ++ ++/* ++ * ext3_xattr_cmp() ++ * ++ * Compare two extended attribute blocks for equality. ++ * ++ * Returns 0 if the blocks are equal, 1 if they differ, and ++ * a negative error number on errors. 
++ */ ++static int ++ext3_xattr_cmp(struct ext3_xattr_header *header1, ++ struct ext3_xattr_header *header2) ++{ ++ struct ext3_xattr_entry *entry1, *entry2; ++ ++ entry1 = ENTRY(header1+1); ++ entry2 = ENTRY(header2+1); ++ while (!IS_LAST_ENTRY(entry1)) { ++ if (IS_LAST_ENTRY(entry2)) ++ return 1; ++ if (entry1->e_hash != entry2->e_hash || ++ entry1->e_name_len != entry2->e_name_len || ++ entry1->e_value_size != entry2->e_value_size || ++ memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len)) ++ return 1; ++ if (entry1->e_value_block != 0 || entry2->e_value_block != 0) ++ return -EIO; ++ if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs), ++ (char *)header2 + le16_to_cpu(entry2->e_value_offs), ++ le32_to_cpu(entry1->e_value_size))) ++ return 1; ++ ++ entry1 = EXT3_XATTR_NEXT(entry1); ++ entry2 = EXT3_XATTR_NEXT(entry2); ++ } ++ if (!IS_LAST_ENTRY(entry2)) ++ return 1; ++ return 0; ++} ++ ++/* ++ * ext3_xattr_cache_find() ++ * ++ * Find an identical extended attribute block. ++ * ++ * Returns a pointer to the block found, or NULL if such a block was ++ * not found or an error occurred. ++ */ ++static struct buffer_head * ++ext3_xattr_cache_find(struct inode *inode, struct ext3_xattr_header *header) ++{ ++ __u32 hash = le32_to_cpu(header->h_hash); ++ struct mb_cache_entry *ce; ++ ++ if (!header->h_hash) ++ return NULL; /* never share */ ++ ea_idebug(inode, "looking for cached blocks [%x]", (int)hash); ++ ce = mb_cache_entry_find_first(ext3_xattr_cache, 0, inode->i_dev, hash); ++ while (ce) { ++ struct buffer_head *bh = sb_bread(inode->i_sb, ce->e_block); ++ ++ if (!bh) { ++ ext3_error(inode->i_sb, "ext3_xattr_cache_find", ++ "inode %ld: block %ld read error", ++ inode->i_ino, ce->e_block); ++ } else if (le32_to_cpu(HDR(bh)->h_refcount) > ++ EXT3_XATTR_REFCOUNT_MAX) { ++ ea_idebug(inode, "block %ld refcount %d>%d",ce->e_block, ++ le32_to_cpu(HDR(bh)->h_refcount), ++ EXT3_XATTR_REFCOUNT_MAX); ++ } else if (!ext3_xattr_cmp(header, HDR(bh))) { ++ ea_bdebug(bh, "b_count=%d",atomic_read(&(bh->b_count))); ++ mb_cache_entry_release(ce); ++ return bh; ++ } ++ brelse(bh); ++ ce = mb_cache_entry_find_next(ce, 0, inode->i_dev, hash); ++ } ++ return NULL; ++} ++ ++/* ++ * ext3_xattr_cache_remove() ++ * ++ * Remove the cache entry of a block from the cache. Called when a ++ * block becomes invalid. ++ */ ++static void ++ext3_xattr_cache_remove(struct buffer_head *bh) ++{ ++ struct mb_cache_entry *ce; ++ ++ ce = mb_cache_entry_get(ext3_xattr_cache, bh->b_dev, bh->b_blocknr); ++ if (ce) { ++ ea_bdebug(bh, "removing (%d cache entries remaining)", ++ atomic_read(&ext3_xattr_cache->c_entry_count)-1); ++ mb_cache_entry_free(ce); ++ } else ++ ea_bdebug(bh, "no cache entry"); ++} ++ ++#define NAME_HASH_SHIFT 5 ++#define VALUE_HASH_SHIFT 16 ++ ++/* ++ * ext3_xattr_hash_entry() ++ * ++ * Compute the hash of an extended attribute. 
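++ *
++ * The hash is a rolling shift-and-xor over the name bytes and, for
++ * attributes whose value lives in the block, over the little-endian
++ * value words; each step computes roughly
++ *
++ *	hash = (hash << SHIFT) ^ (hash >> (32 - SHIFT)) ^ next_datum;
++ *
++ * so that identical blocks hash identically and can be found in the
++ * mb_cache.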
++ */ ++static inline void ext3_xattr_hash_entry(struct ext3_xattr_header *header, ++ struct ext3_xattr_entry *entry) ++{ ++ __u32 hash = 0; ++ char *name = entry->e_name; ++ int n; ++ ++ for (n=0; n < entry->e_name_len; n++) { ++ hash = (hash << NAME_HASH_SHIFT) ^ ++ (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^ ++ *name++; ++ } ++ ++ if (entry->e_value_block == 0 && entry->e_value_size != 0) { ++ __u32 *value = (__u32 *)((char *)header + ++ le16_to_cpu(entry->e_value_offs)); ++ for (n = (le32_to_cpu(entry->e_value_size) + ++ EXT3_XATTR_ROUND) >> EXT3_XATTR_PAD_BITS; n; n--) { ++ hash = (hash << VALUE_HASH_SHIFT) ^ ++ (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^ ++ le32_to_cpu(*value++); ++ } ++ } ++ entry->e_hash = cpu_to_le32(hash); ++} ++ ++#undef NAME_HASH_SHIFT ++#undef VALUE_HASH_SHIFT ++ ++#define BLOCK_HASH_SHIFT 16 ++ ++/* ++ * ext3_xattr_rehash() ++ * ++ * Re-compute the extended attribute hash value after an entry has changed. ++ */ ++static void ext3_xattr_rehash(struct ext3_xattr_header *header, ++ struct ext3_xattr_entry *entry) ++{ ++ struct ext3_xattr_entry *here; ++ __u32 hash = 0; ++ ++ ext3_xattr_hash_entry(header, entry); ++ here = ENTRY(header+1); ++ while (!IS_LAST_ENTRY(here)) { ++ if (!here->e_hash) { ++ /* Block is not shared if an entry's hash value == 0 */ ++ hash = 0; ++ break; ++ } ++ hash = (hash << BLOCK_HASH_SHIFT) ^ ++ (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^ ++ le32_to_cpu(here->e_hash); ++ here = EXT3_XATTR_NEXT(here); ++ } ++ header->h_hash = cpu_to_le32(hash); ++} ++ ++#undef BLOCK_HASH_SHIFT ++ ++int __init ++init_ext3_xattr(void) ++{ ++ ext3_xattr_cache = mb_cache_create("ext3_xattr", NULL, ++ sizeof(struct mb_cache_entry) + ++ sizeof(struct mb_cache_entry_index), 1, 61); ++ if (!ext3_xattr_cache) ++ return -ENOMEM; ++ ++ return 0; ++} ++ ++void ++exit_ext3_xattr(void) ++{ ++ if (ext3_xattr_cache) ++ mb_cache_destroy(ext3_xattr_cache); ++ ext3_xattr_cache = NULL; ++} ++ ++#else /* CONFIG_EXT3_FS_XATTR_SHARING */ ++ ++int __init ++init_ext3_xattr(void) ++{ ++ return 0; ++} ++ ++void ++exit_ext3_xattr(void) ++{ ++} ++ ++#endif /* CONFIG_EXT3_FS_XATTR_SHARING */ +Index: linux-2.4.22-vanilla/fs/ext3/xattr_user.c +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/ext3/xattr_user.c 2003-11-03 23:41:29.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/ext3/xattr_user.c 2003-11-03 23:41:29.000000000 +0300 +@@ -0,0 +1,111 @@ ++/* ++ * linux/fs/ext3/xattr_user.c ++ * Handler for extended user attributes. ++ * ++ * Copyright (C) 2001 by Andreas Gruenbacher, ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#ifdef CONFIG_EXT3_FS_POSIX_ACL ++# include ++#endif ++ ++#define XATTR_USER_PREFIX "user." 
++ ++static size_t ++ext3_xattr_user_list(char *list, struct inode *inode, ++ const char *name, int name_len) ++{ ++ const int prefix_len = sizeof(XATTR_USER_PREFIX)-1; ++ ++ if (!test_opt(inode->i_sb, XATTR_USER)) ++ return 0; ++ ++ if (list) { ++ memcpy(list, XATTR_USER_PREFIX, prefix_len); ++ memcpy(list+prefix_len, name, name_len); ++ list[prefix_len + name_len] = '\0'; ++ } ++ return prefix_len + name_len + 1; ++} ++ ++static int ++ext3_xattr_user_get(struct inode *inode, const char *name, ++ void *buffer, size_t size) ++{ ++ int error; ++ ++ if (strcmp(name, "") == 0) ++ return -EINVAL; ++ if (!test_opt(inode->i_sb, XATTR_USER)) ++ return -ENOTSUP; ++#ifdef CONFIG_EXT3_FS_POSIX_ACL ++ error = ext3_permission_locked(inode, MAY_READ); ++#else ++ error = permission(inode, MAY_READ); ++#endif ++ if (error) ++ return error; ++ ++ return ext3_xattr_get(inode, EXT3_XATTR_INDEX_USER, name, ++ buffer, size); ++} ++ ++static int ++ext3_xattr_user_set(struct inode *inode, const char *name, ++ const void *value, size_t size, int flags) ++{ ++ handle_t *handle; ++ int error; ++ ++ if (strcmp(name, "") == 0) ++ return -EINVAL; ++ if (!test_opt(inode->i_sb, XATTR_USER)) ++ return -ENOTSUP; ++ if ( !S_ISREG(inode->i_mode) && ++ (!S_ISDIR(inode->i_mode) || inode->i_mode & S_ISVTX)) ++ return -EPERM; ++#ifdef CONFIG_EXT3_FS_POSIX_ACL ++ error = ext3_permission_locked(inode, MAY_WRITE); ++#else ++ error = permission(inode, MAY_WRITE); ++#endif ++ if (error) ++ return error; ++ ++ handle = ext3_journal_start(inode, EXT3_XATTR_TRANS_BLOCKS); ++ if (IS_ERR(handle)) ++ return PTR_ERR(handle); ++ error = ext3_xattr_set(handle, inode, EXT3_XATTR_INDEX_USER, name, ++ value, size, flags); ++ ext3_journal_stop(handle, inode); ++ ++ return error; ++} ++ ++struct ext3_xattr_handler ext3_xattr_user_handler = { ++ prefix: XATTR_USER_PREFIX, ++ list: ext3_xattr_user_list, ++ get: ext3_xattr_user_get, ++ set: ext3_xattr_user_set, ++}; ++ ++int __init ++init_ext3_xattr_user(void) ++{ ++ return ext3_xattr_register(EXT3_XATTR_INDEX_USER, ++ &ext3_xattr_user_handler); ++} ++ ++void ++exit_ext3_xattr_user(void) ++{ ++ ext3_xattr_unregister(EXT3_XATTR_INDEX_USER, ++ &ext3_xattr_user_handler); ++} +Index: linux-2.4.22-vanilla/fs/jfs/jfs_xattr.h +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/jfs/jfs_xattr.h 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/jfs/jfs_xattr.h 2003-11-03 23:41:29.000000000 +0300 +@@ -52,8 +52,10 @@ + #define END_EALIST(ealist) \ + ((struct jfs_ea *) (((char *) (ealist)) + EALIST_SIZE(ealist))) + +-extern int __jfs_setxattr(struct inode *, const char *, void *, size_t, int); +-extern int jfs_setxattr(struct dentry *, const char *, void *, size_t, int); ++extern int __jfs_setxattr(struct inode *, const char *, const void *, size_t, ++ int); ++extern int jfs_setxattr(struct dentry *, const char *, const void *, size_t, ++ int); + extern ssize_t __jfs_getxattr(struct inode *, const char *, void *, size_t); + extern ssize_t jfs_getxattr(struct dentry *, const char *, void *, size_t); + extern ssize_t jfs_listxattr(struct dentry *, char *, size_t); +Index: linux-2.4.22-vanilla/fs/jfs/xattr.c +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/jfs/xattr.c 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/jfs/xattr.c 2003-11-03 23:41:29.000000000 +0300 +@@ -641,7 +641,7 @@ + } + + static int can_set_xattr(struct inode *inode, const char *name, +- void *value, size_t 
value_len) ++ const void *value, size_t value_len) + { + if (IS_RDONLY(inode)) + return -EROFS; +@@ -660,7 +660,7 @@ + return permission(inode, MAY_WRITE); + } + +-int __jfs_setxattr(struct inode *inode, const char *name, void *value, ++int __jfs_setxattr(struct inode *inode, const char *name, const void *value, + size_t value_len, int flags) + { + struct jfs_ea_list *ealist; +@@ -799,7 +799,7 @@ + return rc; + } + +-int jfs_setxattr(struct dentry *dentry, const char *name, void *value, ++int jfs_setxattr(struct dentry *dentry, const char *name, const void *value, + size_t value_len, int flags) + { + if (value == NULL) { /* empty EA, do not remove */ +Index: linux-2.4.22-vanilla/fs/mbcache.c +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/mbcache.c 2003-11-03 23:41:29.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/mbcache.c 2003-11-03 23:41:29.000000000 +0300 +@@ -0,0 +1,648 @@ ++/* ++ * linux/fs/mbcache.c ++ * (C) 2001-2002 Andreas Gruenbacher, ++ */ ++ ++/* ++ * Filesystem Meta Information Block Cache (mbcache) ++ * ++ * The mbcache caches blocks of block devices that need to be located ++ * by their device/block number, as well as by other criteria (such ++ * as the block's contents). ++ * ++ * There can only be one cache entry in a cache per device and block number. ++ * Additional indexes need not be unique in this sense. The number of ++ * additional indexes (=other criteria) can be hardwired at compile time ++ * or specified at cache create time. ++ * ++ * Each cache entry is of fixed size. An entry may be `valid' or `invalid' ++ * in the cache. A valid entry is in the main hash tables of the cache, ++ * and may also be in the lru list. An invalid entry is not in any hashes ++ * or lists. ++ * ++ * A valid cache entry is only in the lru list if no handles refer to it. ++ * Invalid cache entries will be freed when the last handle to the cache ++ * entry is released. Entries that cannot be freed immediately are put ++ * back on the lru list. ++ */ ++ ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++ ++#ifdef MB_CACHE_DEBUG ++# define mb_debug(f...) do { \ ++ printk(KERN_DEBUG f); \ ++ printk("\n"); \ ++ } while (0) ++#define mb_assert(c) do { if (!(c)) \ ++ printk(KERN_ERR "assertion " #c " failed\n"); \ ++ } while(0) ++#else ++# define mb_debug(f...) do { } while(0) ++# define mb_assert(c) do { } while(0) ++#endif ++#define mb_error(f...) do { \ ++ printk(KERN_ERR f); \ ++ printk("\n"); \ ++ } while(0) ++ ++MODULE_AUTHOR("Andreas Gruenbacher "); ++MODULE_DESCRIPTION("Meta block cache (for extended attributes)"); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) ++MODULE_LICENSE("GPL"); ++#endif ++ ++EXPORT_SYMBOL(mb_cache_create); ++EXPORT_SYMBOL(mb_cache_shrink); ++EXPORT_SYMBOL(mb_cache_destroy); ++EXPORT_SYMBOL(mb_cache_entry_alloc); ++EXPORT_SYMBOL(mb_cache_entry_insert); ++EXPORT_SYMBOL(mb_cache_entry_release); ++EXPORT_SYMBOL(mb_cache_entry_takeout); ++EXPORT_SYMBOL(mb_cache_entry_free); ++EXPORT_SYMBOL(mb_cache_entry_dup); ++EXPORT_SYMBOL(mb_cache_entry_get); ++#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) ++EXPORT_SYMBOL(mb_cache_entry_find_first); ++EXPORT_SYMBOL(mb_cache_entry_find_next); ++#endif ++ ++ ++/* ++ * Global data: list of all mbcache's, lru list, and a spinlock for ++ * accessing cache data structures on SMP machines. The lru list is ++ * global across all mbcaches. 
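++ * All hash chains and the lru list are protected by the single
++ * mb_cache_spinlock; the helpers suffixed _unlock (such as
++ * __mb_cache_entry_release_unlock() below) expect it to be held on
++ * entry and drop it before returning.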
++ */
++
++static LIST_HEAD(mb_cache_list);
++static LIST_HEAD(mb_cache_lru_list);
++static spinlock_t mb_cache_spinlock = SPIN_LOCK_UNLOCKED;
++
++static inline int
++mb_cache_indexes(struct mb_cache *cache)
++{
++#ifdef MB_CACHE_INDEXES_COUNT
++	return MB_CACHE_INDEXES_COUNT;
++#else
++	return cache->c_indexes_count;
++#endif
++}
++
++/*
++ * What the mbcache registers as to get shrunk dynamically.
++ */
++
++static void
++mb_cache_memory_pressure(int priority, unsigned int gfp_mask);
++
++static struct cache_definition mb_cache_definition = {
++	"mb_cache",
++	mb_cache_memory_pressure
++};
++
++
++static inline int
++__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
++{
++	return !list_empty(&ce->e_block_list);
++}
++
++
++static inline void
++__mb_cache_entry_unhash(struct mb_cache_entry *ce)
++{
++	int n;
++
++	if (__mb_cache_entry_is_hashed(ce)) {
++		list_del_init(&ce->e_block_list);
++		for (n=0; n<mb_cache_indexes(ce->e_cache); n++)
++			list_del(&ce->e_indexes[n].o_list);
++	}
++}
++
++
++static inline void
++__mb_cache_entry_forget(struct mb_cache_entry *ce, int gfp_mask)
++{
++	struct mb_cache *cache = ce->e_cache;
++
++	mb_assert(atomic_read(&ce->e_used) == 0);
++	if (cache->c_op.free && cache->c_op.free(ce, gfp_mask)) {
++		/* free failed -- put back on the lru list
++		   for freeing later. */
++		spin_lock(&mb_cache_spinlock);
++		list_add(&ce->e_lru_list, &mb_cache_lru_list);
++		spin_unlock(&mb_cache_spinlock);
++	} else {
++		kmem_cache_free(cache->c_entry_cache, ce);
++		atomic_dec(&cache->c_entry_count);
++	}
++}
++
++
++static inline void
++__mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
++{
++	if (atomic_dec_and_test(&ce->e_used)) {
++		if (__mb_cache_entry_is_hashed(ce))
++			list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
++		else {
++			spin_unlock(&mb_cache_spinlock);
++			__mb_cache_entry_forget(ce, GFP_KERNEL);
++			return;
++		}
++	}
++	spin_unlock(&mb_cache_spinlock);
++}
++
++
++/*
++ * mb_cache_memory_pressure()   memory pressure callback
++ *
++ * This function is called by the kernel memory management when memory
++ * gets low.
++ *
++ * @priority: Amount by which to shrink the cache (0 = highest priority)
++ * @gfp_mask: (ignored)
++ */
++static void
++mb_cache_memory_pressure(int priority, unsigned int gfp_mask)
++{
++	LIST_HEAD(free_list);
++	struct list_head *l, *ltmp;
++	int count = 0;
++
++	spin_lock(&mb_cache_spinlock);
++	list_for_each(l, &mb_cache_list) {
++		struct mb_cache *cache =
++			list_entry(l, struct mb_cache, c_cache_list);
++		mb_debug("cache %s (%d)", cache->c_name,
++			 atomic_read(&cache->c_entry_count));
++		count += atomic_read(&cache->c_entry_count);
++	}
++	mb_debug("trying to free %d of %d entries",
++		 count / (priority ? priority : 1), count);
++	if (priority)
++		count /= priority;
++	while (count-- && !list_empty(&mb_cache_lru_list)) {
++		struct mb_cache_entry *ce =
++			list_entry(mb_cache_lru_list.next,
++				   struct mb_cache_entry, e_lru_list);
++		list_del(&ce->e_lru_list);
++		__mb_cache_entry_unhash(ce);
++		list_add_tail(&ce->e_lru_list, &free_list);
++	}
++	spin_unlock(&mb_cache_spinlock);
++	list_for_each_safe(l, ltmp, &free_list) {
++		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
++						   e_lru_list), gfp_mask);
++	}
++}
++
++
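++/*
++ * A minimal creation sketch, mirroring the ext3 xattr usage later in
++ * this patch (one additional index, 61 hash buckets):
++ *
++ *	struct mb_cache *cache =
++ *		mb_cache_create("ext3_xattr", NULL,
++ *				sizeof(struct mb_cache_entry) +
++ *				sizeof(struct mb_cache_entry_index),
++ *				1, 61);
++ */
++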
++/*
++ * mb_cache_create()  create a new cache
++ *
++ * All entries in one cache are equal size. Cache entries may be from
++ * multiple devices. If this is the first mbcache created, registers
++ * the cache with kernel memory management. Returns NULL if no more
++ * memory was available.
++ *
++ * @name: name of the cache (informal)
++ * @cache_op: contains the callback called when freeing a cache entry
++ * @entry_size: The size of a cache entry, including
++ *              struct mb_cache_entry
++ * @indexes_count: number of additional indexes in the cache. Must equal
++ *                 MB_CACHE_INDEXES_COUNT if the number of indexes is
++ *                 hardwired.
++ * @bucket_count: number of hash buckets
++ */
++struct mb_cache *
++mb_cache_create(const char *name, struct mb_cache_op *cache_op,
++		size_t entry_size, int indexes_count, int bucket_count)
++{
++	int m=0, n;
++	struct mb_cache *cache = NULL;
++
++	if (entry_size < sizeof(struct mb_cache_entry) +
++	    indexes_count * sizeof(struct mb_cache_entry_index))
++		return NULL;
++
++	MOD_INC_USE_COUNT;
++	cache = kmalloc(sizeof(struct mb_cache) +
++			indexes_count * sizeof(struct list_head), GFP_KERNEL);
++	if (!cache)
++		goto fail;
++	cache->c_name = name;
++	cache->c_op.free = NULL;
++	if (cache_op)
++		cache->c_op.free = cache_op->free;
++	atomic_set(&cache->c_entry_count, 0);
++	cache->c_bucket_count = bucket_count;
++#ifdef MB_CACHE_INDEXES_COUNT
++	mb_assert(indexes_count == MB_CACHE_INDEXES_COUNT);
++#else
++	cache->c_indexes_count = indexes_count;
++#endif
++	cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
++				      GFP_KERNEL);
++	if (!cache->c_block_hash)
++		goto fail;
++	for (n=0; n<bucket_count; n++)
++		INIT_LIST_HEAD(&cache->c_block_hash[n]);
++	for (m=0; m<indexes_count; m++) {
++		cache->c_indexes_hash[m] = kmalloc(bucket_count *
++						   sizeof(struct list_head),
++						   GFP_KERNEL);
++		if (!cache->c_indexes_hash[m])
++			goto fail;
++		for (n=0; n<bucket_count; n++)
++			INIT_LIST_HEAD(&cache->c_indexes_hash[m][n]);
++	}
++	cache->c_entry_cache = kmem_cache_create(name, entry_size, 0,
++		0 /*SLAB_POISON | SLAB_RED_ZONE*/, NULL, NULL);
++	if (!cache->c_entry_cache)
++		goto fail;
++
++	spin_lock(&mb_cache_spinlock);
++	list_add(&cache->c_cache_list, &mb_cache_list);
++	spin_unlock(&mb_cache_spinlock);
++	return cache;
++
++fail:
++	if (cache) {
++		while (--m >= 0)
++			kfree(cache->c_indexes_hash[m]);
++		if (cache->c_block_hash)
++			kfree(cache->c_block_hash);
++		kfree(cache);
++	}
++	MOD_DEC_USE_COUNT;
++	return NULL;
++}
++
++
++/*
++ * mb_cache_shrink()
++ *
++ * Removes all cache entries of a device from the cache. All cache entries
++ * currently in use cannot be freed, and thus remain in the cache.
++ *
++ * @cache: which cache to shrink
++ * @dev: which device's cache entries to shrink
++ */
++void
++mb_cache_shrink(struct mb_cache *cache, kdev_t dev)
++{
++	LIST_HEAD(free_list);
++	struct list_head *l, *ltmp;
++
++	spin_lock(&mb_cache_spinlock);
++	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
++		struct mb_cache_entry *ce =
++			list_entry(l, struct mb_cache_entry, e_lru_list);
++		if (ce->e_dev == dev) {
++			list_del(&ce->e_lru_list);
++			list_add_tail(&ce->e_lru_list, &free_list);
++			__mb_cache_entry_unhash(ce);
++		}
++	}
++	spin_unlock(&mb_cache_spinlock);
++	list_for_each_safe(l, ltmp, &free_list) {
++		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
++						   e_lru_list), GFP_KERNEL);
++	}
++}
++
++
++/*
++ * mb_cache_destroy()
++ *
++ * Shrinks the cache to its minimum possible size (hopefully 0 entries),
++ * and then destroys it. If this was the last mbcache, un-registers the
++ * mbcache from kernel memory management.
++ */ ++void ++mb_cache_destroy(struct mb_cache *cache) ++{ ++ LIST_HEAD(free_list); ++ struct list_head *l, *ltmp; ++ int n; ++ ++ spin_lock(&mb_cache_spinlock); ++ list_for_each_safe(l, ltmp, &mb_cache_lru_list) { ++ struct mb_cache_entry *ce = ++ list_entry(l, struct mb_cache_entry, e_lru_list); ++ if (ce->e_cache == cache) { ++ list_del(&ce->e_lru_list); ++ list_add_tail(&ce->e_lru_list, &free_list); ++ __mb_cache_entry_unhash(ce); ++ } ++ } ++ list_del(&cache->c_cache_list); ++ spin_unlock(&mb_cache_spinlock); ++ list_for_each_safe(l, ltmp, &free_list) { ++ __mb_cache_entry_forget(list_entry(l, struct mb_cache_entry, ++ e_lru_list), GFP_KERNEL); ++ } ++ ++ if (atomic_read(&cache->c_entry_count) > 0) { ++ mb_error("cache %s: %d orphaned entries", ++ cache->c_name, ++ atomic_read(&cache->c_entry_count)); ++ } ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)) ++ /* We don't have kmem_cache_destroy() in 2.2.x */ ++ kmem_cache_shrink(cache->c_entry_cache); ++#else ++ kmem_cache_destroy(cache->c_entry_cache); ++#endif ++ for (n=0; n < mb_cache_indexes(cache); n++) ++ kfree(cache->c_indexes_hash[n]); ++ kfree(cache->c_block_hash); ++ kfree(cache); ++ ++ MOD_DEC_USE_COUNT; ++} ++ ++ ++/* ++ * mb_cache_entry_alloc() ++ * ++ * Allocates a new cache entry. The new entry will not be valid initially, ++ * and thus cannot be looked up yet. It should be filled with data, and ++ * then inserted into the cache using mb_cache_entry_insert(). Returns NULL ++ * if no more memory was available. ++ */ ++struct mb_cache_entry * ++mb_cache_entry_alloc(struct mb_cache *cache) ++{ ++ struct mb_cache_entry *ce; ++ ++ atomic_inc(&cache->c_entry_count); ++ ce = kmem_cache_alloc(cache->c_entry_cache, GFP_KERNEL); ++ if (ce) { ++ INIT_LIST_HEAD(&ce->e_lru_list); ++ INIT_LIST_HEAD(&ce->e_block_list); ++ ce->e_cache = cache; ++ atomic_set(&ce->e_used, 1); ++ } ++ return ce; ++} ++ ++ ++/* ++ * mb_cache_entry_insert() ++ * ++ * Inserts an entry that was allocated using mb_cache_entry_alloc() into ++ * the cache. After this, the cache entry can be looked up, but is not yet ++ * in the lru list as the caller still holds a handle to it. Returns 0 on ++ * success, or -EBUSY if a cache entry for that device + inode exists ++ * already (this may happen after a failed lookup, if another process has ++ * inserted the same cache entry in the meantime). ++ * ++ * @dev: device the cache entry belongs to ++ * @block: block number ++ * @keys: array of additional keys. There must be indexes_count entries ++ * in the array (as specified when creating the cache). 
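++ *
++ * Callers are expected to treat -EBUSY as "someone else cached this
++ * block first" rather than as a hard failure; see
++ * ext3_xattr_cache_insert() in fs/ext3/xattr.c above for an example.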
++ */
++int
++mb_cache_entry_insert(struct mb_cache_entry *ce, kdev_t dev,
++		      unsigned long block, unsigned int keys[])
++{
++	struct mb_cache *cache = ce->e_cache;
++	unsigned int bucket = (HASHDEV(dev) + block) % cache->c_bucket_count;
++	struct list_head *l;
++	int error = -EBUSY, n;
++
++	spin_lock(&mb_cache_spinlock);
++	list_for_each(l, &cache->c_block_hash[bucket]) {
++		struct mb_cache_entry *ce =
++			list_entry(l, struct mb_cache_entry, e_block_list);
++		if (ce->e_dev == dev && ce->e_block == block)
++			goto out;
++	}
++	__mb_cache_entry_unhash(ce);
++	ce->e_dev = dev;
++	ce->e_block = block;
++	list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
++	for (n=0; n<mb_cache_indexes(cache); n++) {
++		ce->e_indexes[n].o_key = keys[n];
++		bucket = keys[n] % cache->c_bucket_count;
++		list_add(&ce->e_indexes[n].o_list,
++			 &cache->c_indexes_hash[n][bucket]);
++	}
++	error = 0;
++out:
++	spin_unlock(&mb_cache_spinlock);
++	return error;
++}
++
++
++/*
++ * mb_cache_entry_release()
++ *
++ * Release a handle to a cache entry. When the last handle to a cache entry
++ * is released it is either freed (if it is invalid) or otherwise inserted
++ * into the lru list.
++ */
++void
++mb_cache_entry_release(struct mb_cache_entry *ce)
++{
++	spin_lock(&mb_cache_spinlock);
++	__mb_cache_entry_release_unlock(ce);
++}
++
++
++/*
++ * mb_cache_entry_takeout()
++ *
++ * Take a cache entry out of the cache, making it invalid. The entry can later
++ * be re-inserted using mb_cache_entry_insert(), or released using
++ * mb_cache_entry_release().
++ */
++void
++mb_cache_entry_takeout(struct mb_cache_entry *ce)
++{
++	spin_lock(&mb_cache_spinlock);
++	mb_assert(list_empty(&ce->e_lru_list));
++	__mb_cache_entry_unhash(ce);
++	spin_unlock(&mb_cache_spinlock);
++}
++
++
++/*
++ * mb_cache_entry_free()
++ *
++ * This is equivalent to the sequence mb_cache_entry_takeout() --
++ * mb_cache_entry_release().
++ */
++void
++mb_cache_entry_free(struct mb_cache_entry *ce)
++{
++	spin_lock(&mb_cache_spinlock);
++	mb_assert(list_empty(&ce->e_lru_list));
++	__mb_cache_entry_unhash(ce);
++	__mb_cache_entry_release_unlock(ce);
++}
++
++
++/*
++ * mb_cache_entry_dup()
++ *
++ * Duplicate a handle to a cache entry (does not duplicate the cache entry
++ * itself). After the call, both the old and the new handle must be released.
++ */
++struct mb_cache_entry *
++mb_cache_entry_dup(struct mb_cache_entry *ce)
++{
++	atomic_inc(&ce->e_used);
++	return ce;
++}
++
++
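++/*
++ * Handle lifecycle sketch (illustrative): every successful get/find
++ * returns a handle that holds a reference, which the caller must drop:
++ *
++ *	struct mb_cache_entry *ce = mb_cache_entry_get(cache, dev, block);
++ *	if (ce) {
++ *		... examine ce ...
++ *		mb_cache_entry_release(ce);   (or mb_cache_entry_free())
++ *	}
++ */
++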
++
++/*
++ * mb_cache_entry_get()
++ *
++ * Get a cache entry by device / block number. (There can only be one entry
++ * in the cache per device and block.) Returns NULL if no such cache entry
++ * exists.
++ */
++struct mb_cache_entry *
++mb_cache_entry_get(struct mb_cache *cache, kdev_t dev, unsigned long block)
++{
++	unsigned int bucket = (HASHDEV(dev) + block) % cache->c_bucket_count;
++	struct list_head *l;
++	struct mb_cache_entry *ce;
++
++	spin_lock(&mb_cache_spinlock);
++	list_for_each(l, &cache->c_block_hash[bucket]) {
++		ce = list_entry(l, struct mb_cache_entry, e_block_list);
++		if (ce->e_dev == dev && ce->e_block == block) {
++			if (!list_empty(&ce->e_lru_list))
++				list_del_init(&ce->e_lru_list);
++			atomic_inc(&ce->e_used);
++			goto cleanup;
++		}
++	}
++	ce = NULL;
++
++cleanup:
++	spin_unlock(&mb_cache_spinlock);
++	return ce;
++}
++
++#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
++
++static struct mb_cache_entry *
++__mb_cache_entry_find(struct list_head *l, struct list_head *head,
++                      int index, kdev_t dev, unsigned int key)
++{
++	while (l != head) {
++		struct mb_cache_entry *ce =
++			list_entry(l, struct mb_cache_entry,
++			           e_indexes[index].o_list);
++		if (ce->e_dev == dev && ce->e_indexes[index].o_key == key) {
++			if (!list_empty(&ce->e_lru_list))
++				list_del_init(&ce->e_lru_list);
++			atomic_inc(&ce->e_used);
++			return ce;
++		}
++		l = l->next;
++	}
++	return NULL;
++}
++
++
++/*
++ * mb_cache_entry_find_first()
++ *
++ * Find the first cache entry on a given device with a certain key in
++ * an additional index. Additional matches can be found with
++ * mb_cache_entry_find_next(). Returns NULL if no match was found.
++ *
++ * @cache: the cache to search
++ * @index: the number of the additional index to search (0<=index<indexes_count)
++ * @dev: the device the cache entry should belong to
++ * @key: the key in the additional index
++ */
++struct mb_cache_entry *
++mb_cache_entry_find_first(struct mb_cache *cache, int index, kdev_t dev,
++                          unsigned int key)
++{
++	unsigned int bucket = key % cache->c_bucket_count;
++	struct list_head *l;
++	struct mb_cache_entry *ce;
++
++	mb_assert(index < mb_cache_indexes(cache));
++	spin_lock(&mb_cache_spinlock);
++	l = cache->c_indexes_hash[index][bucket].next;
++	ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
++	                           index, dev, key);
++	spin_unlock(&mb_cache_spinlock);
++	return ce;
++}
++
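[Note on the search pair above and below: mb_cache_entry_find_next() releases the handle on its @prev argument, so a loop that runs to completion leaks nothing, but a caller that breaks out early still owns the final handle and must release it. A sketch, assuming index 0 and hypothetical dev/key/block values:

/* Sketch: scan one index chain for an entry with a given block. */
static struct mb_cache_entry *
example_scan_index(struct mb_cache *cache, kdev_t dev, unsigned int key,
                   unsigned long wanted_block)
{
        struct mb_cache_entry *ce;

        ce = mb_cache_entry_find_first(cache, 0, dev, key);
        while (ce != NULL) {
                if (ce->e_block == wanted_block)
                        return ce;   /* still pinned: caller must release */
                /* find_next() releases 'ce' before returning the next hit */
                ce = mb_cache_entry_find_next(ce, 0, dev, key);
        }
        return NULL;
}]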
++
++/*
++ * mb_cache_entry_find_next()
++ *
++ * Find the next cache entry on a given device with a certain key in an
++ * additional index. Returns NULL if no match could be found. The previous
++ * entry is automatically released, so that mb_cache_entry_find_next() can
++ * be called like this:
++ *
++ * entry = mb_cache_entry_find_first();
++ * while (entry) {
++ * 	...
++ * 	entry = mb_cache_entry_find_next(entry, ...);
++ * }
++ *
++ * @prev: The previous match
++ * @index: the number of the additional index to search (0<=index<indexes_count)
++ * @dev: the device the cache entry should belong to
++ * @key: the key in the additional index
++ */
++struct mb_cache_entry *
++mb_cache_entry_find_next(struct mb_cache_entry *prev, int index, kdev_t dev,
++                         unsigned int key)
++{
++	struct mb_cache *cache = prev->e_cache;
++	unsigned int bucket = key % cache->c_bucket_count;
++	struct list_head *l;
++	struct mb_cache_entry *ce;
++
++	mb_assert(index < mb_cache_indexes(cache));
++	spin_lock(&mb_cache_spinlock);
++	l = prev->e_indexes[index].o_list.next;
++	ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
++	                           index, dev, key);
++	__mb_cache_entry_release_unlock(prev);
++	return ce;
++}
++
++#endif /* !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) */
++
++static int __init init_mbcache(void)
++{
++	register_cache(&mb_cache_definition);
++	return 0;
++}
++
++static void __exit exit_mbcache(void)
++{
++	unregister_cache(&mb_cache_definition);
++}
++
++module_init(init_mbcache)
++module_exit(exit_mbcache)
++
+Index: linux-2.4.22-vanilla/include/asm-arm/unistd.h
+===================================================================
+--- linux-2.4.22-vanilla.orig/include/asm-arm/unistd.h 2003-11-03 23:41:26.000000000 +0300
++++ linux-2.4.22-vanilla/include/asm-arm/unistd.h 2003-11-03 23:41:29.000000000 +0300
+@@ -250,7 +250,6 @@
+ #define __NR_security (__NR_SYSCALL_BASE+223)
+ #define __NR_gettid (__NR_SYSCALL_BASE+224)
+ #define __NR_readahead (__NR_SYSCALL_BASE+225)
+-#if 0 /* allocated in 2.5 */
+ #define __NR_setxattr (__NR_SYSCALL_BASE+226)
+ #define __NR_lsetxattr (__NR_SYSCALL_BASE+227)
+ #define __NR_fsetxattr (__NR_SYSCALL_BASE+228)
+@@ -263,7 +262,6 @@
+ #define __NR_removexattr (__NR_SYSCALL_BASE+235)
+ #define __NR_lremovexattr (__NR_SYSCALL_BASE+236)
+ #define __NR_fremovexattr (__NR_SYSCALL_BASE+237)
+-#endif
+ #define __NR_tkill (__NR_SYSCALL_BASE+238)
+ #if 0 /* allocated in 2.5 */
+ #define __NR_sendfile64 (__NR_SYSCALL_BASE+239)
+Index: linux-2.4.22-vanilla/include/asm-ppc64/unistd.h
+===================================================================
+--- linux-2.4.22-vanilla.orig/include/asm-ppc64/unistd.h 2003-11-03 23:41:26.000000000 +0300
++++ linux-2.4.22-vanilla/include/asm-ppc64/unistd.h 2003-11-03 23:41:29.000000000 +0300
+@@ -218,6 +218,7 @@
+ #define __NR_mincore 206
+ #define __NR_gettid 207
+ #define __NR_tkill 208
++#endif
+ #define __NR_setxattr 209
+ #define __NR_lsetxattr 210
+ #define __NR_fsetxattr 211
+@@ -230,6 +231,7 @@
+ #define __NR_removexattr 218
+ #define __NR_lremovexattr 219
+ #define __NR_fremovexattr 220
++#if 0 /* Reserved syscalls */
+ #define __NR_futex 221
+ #define __NR_sched_setaffinity 222
+ #define __NR_sched_getaffinity 223
+Index: linux-2.4.22-vanilla/include/asm-s390/unistd.h
+===================================================================
+--- linux-2.4.22-vanilla.orig/include/asm-s390/unistd.h 2003-11-03 23:41:26.000000000 +0300
++++ linux-2.4.22-vanilla/include/asm-s390/unistd.h 2003-11-03 23:41:29.000000000 +0300
+@@ -213,9 +213,18 @@
+ #define __NR_getdents64 220
+ #define __NR_fcntl64 221
+ #define __NR_readahead 222
+-/*
+- * Numbers 224-235 are reserved for posix acl
+- */
++#define __NR_setxattr 224
++#define __NR_lsetxattr 225
++#define __NR_fsetxattr 226
++#define __NR_getxattr 227
++#define __NR_lgetxattr 228
++#define __NR_fgetxattr 229
++#define __NR_listxattr 230
++#define __NR_llistxattr 231
++#define __NR_flistxattr 232
++#define __NR_removexattr 233
++#define __NR_lremovexattr 234
++#define __NR_fremovexattr 235
+ #define __NR_gettid 236
+ #define __NR_tkill 237
+ 
+Index: linux-2.4.22-vanilla/include/asm-s390x/unistd.h
+=================================================================== +--- linux-2.4.22-vanilla.orig/include/asm-s390x/unistd.h 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/include/asm-s390x/unistd.h 2003-11-03 23:41:29.000000000 +0300 +@@ -181,9 +181,18 @@ + #define __NR_mincore 218 + #define __NR_madvise 219 + #define __NR_readahead 222 +-/* +- * Numbers 224-235 are reserved for posix acl +- */ ++#define __NR_setxattr 224 ++#define __NR_lsetxattr 225 ++#define __NR_fsetxattr 226 ++#define __NR_getxattr 227 ++#define __NR_lgetxattr 228 ++#define __NR_fgetxattr 229 ++#define __NR_listxattr 230 ++#define __NR_llistxattr 231 ++#define __NR_flistxattr 232 ++#define __NR_removexattr 233 ++#define __NR_lremovexattr 234 ++#define __NR_fremovexattr 235 + #define __NR_gettid 236 + #define __NR_tkill 237 + +Index: linux-2.4.22-vanilla/include/linux/cache_def.h +=================================================================== +--- linux-2.4.22-vanilla.orig/include/linux/cache_def.h 2003-11-03 23:41:29.000000000 +0300 ++++ linux-2.4.22-vanilla/include/linux/cache_def.h 2003-11-03 23:41:29.000000000 +0300 +@@ -0,0 +1,15 @@ ++/* ++ * linux/cache_def.h ++ * Handling of caches defined in drivers, filesystems, ... ++ * ++ * Copyright (C) 2002 by Andreas Gruenbacher, ++ */ ++ ++struct cache_definition { ++ const char *name; ++ void (*shrink)(int, unsigned int); ++ struct list_head link; ++}; ++ ++extern void register_cache(struct cache_definition *); ++extern void unregister_cache(struct cache_definition *); +Index: linux-2.4.22-vanilla/include/linux/errno.h +=================================================================== +--- linux-2.4.22-vanilla.orig/include/linux/errno.h 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/include/linux/errno.h 2003-11-03 23:41:29.000000000 +0300 +@@ -23,4 +23,8 @@ + + #endif + ++/* Defined for extended attributes */ ++#define ENOATTR ENODATA /* No such attribute */ ++#define ENOTSUP EOPNOTSUPP /* Operation not supported */ ++ + #endif +Index: linux-2.4.22-vanilla/include/linux/ext2_fs.h +=================================================================== +--- linux-2.4.22-vanilla.orig/include/linux/ext2_fs.h 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/include/linux/ext2_fs.h 2003-11-03 23:41:29.000000000 +0300 +@@ -57,8 +57,6 @@ + */ + #define EXT2_BAD_INO 1 /* Bad blocks inode */ + #define EXT2_ROOT_INO 2 /* Root inode */ +-#define EXT2_ACL_IDX_INO 3 /* ACL inode */ +-#define EXT2_ACL_DATA_INO 4 /* ACL inode */ + #define EXT2_BOOT_LOADER_INO 5 /* Boot loader inode */ + #define EXT2_UNDEL_DIR_INO 6 /* Undelete directory inode */ + +@@ -86,7 +84,6 @@ + #else + # define EXT2_BLOCK_SIZE(s) (EXT2_MIN_BLOCK_SIZE << (s)->s_log_block_size) + #endif +-#define EXT2_ACLE_PER_BLOCK(s) (EXT2_BLOCK_SIZE(s) / sizeof (struct ext2_acl_entry)) + #define EXT2_ADDR_PER_BLOCK(s) (EXT2_BLOCK_SIZE(s) / sizeof (__u32)) + #ifdef __KERNEL__ + # define EXT2_BLOCK_SIZE_BITS(s) ((s)->s_blocksize_bits) +@@ -121,28 +118,6 @@ + #endif + + /* +- * ACL structures +- */ +-struct ext2_acl_header /* Header of Access Control Lists */ +-{ +- __u32 aclh_size; +- __u32 aclh_file_count; +- __u32 aclh_acle_count; +- __u32 aclh_first_acle; +-}; +- +-struct ext2_acl_entry /* Access Control List Entry */ +-{ +- __u32 acle_size; +- __u16 acle_perms; /* Access permissions */ +- __u16 acle_type; /* Type of entry */ +- __u16 acle_tag; /* User or group identity */ +- __u16 acle_pad1; +- __u32 acle_next; /* Pointer on next entry for the */ +- /* same inode or on next 
free entry */ +-}; +- +-/* + * Structure of a blocks group descriptor + */ + struct ext2_group_desc +@@ -314,6 +289,7 @@ + #define EXT2_MOUNT_ERRORS_PANIC 0x0040 /* Panic on errors */ + #define EXT2_MOUNT_MINIX_DF 0x0080 /* Mimics the Minix statfs */ + #define EXT2_MOUNT_NO_UID32 0x0200 /* Disable 32-bit UIDs */ ++#define EXT2_MOUNT_XATTR_USER 0x4000 /* Extended user attributes */ + + #define clear_opt(o, opt) o &= ~EXT2_MOUNT_##opt + #define set_opt(o, opt) o |= EXT2_MOUNT_##opt +@@ -397,6 +373,7 @@ + + #ifdef __KERNEL__ + #define EXT2_SB(sb) (&((sb)->u.ext2_sb)) ++#define EXT2_I(inode) (&((inode)->u.ext2_i)) + #else + /* Assume that user mode programs are passing in an ext2fs superblock, not + * a kernel struct super_block. This will allow us to call the feature-test +@@ -466,7 +443,7 @@ + #define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008 + #define EXT2_FEATURE_INCOMPAT_ANY 0xffffffff + +-#define EXT2_FEATURE_COMPAT_SUPP 0 ++#define EXT2_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR + #define EXT2_FEATURE_INCOMPAT_SUPP EXT2_FEATURE_INCOMPAT_FILETYPE + #define EXT2_FEATURE_RO_COMPAT_SUPP (EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER| \ + EXT2_FEATURE_RO_COMPAT_LARGE_FILE| \ +@@ -624,8 +601,10 @@ + + /* namei.c */ + extern struct inode_operations ext2_dir_inode_operations; ++extern struct inode_operations ext2_special_inode_operations; + + /* symlink.c */ ++extern struct inode_operations ext2_symlink_inode_operations; + extern struct inode_operations ext2_fast_symlink_inode_operations; + + #endif /* __KERNEL__ */ +Index: linux-2.4.22-vanilla/include/linux/ext2_xattr.h +=================================================================== +--- linux-2.4.22-vanilla.orig/include/linux/ext2_xattr.h 2003-11-03 23:41:29.000000000 +0300 ++++ linux-2.4.22-vanilla/include/linux/ext2_xattr.h 2003-11-03 23:41:29.000000000 +0300 +@@ -0,0 +1,157 @@ ++/* ++ File: linux/ext2_xattr.h ++ ++ On-disk format of extended attributes for the ext2 filesystem. 
++
++  (C) 2001 Andreas Gruenbacher, <a.gruenbacher@computer.org>
++*/
++
++#include <linux/config.h>
++#include <linux/init.h>
++#include <linux/xattr.h>
++
++/* Magic value in attribute blocks */
++#define EXT2_XATTR_MAGIC		0xEA020000
++
++/* Maximum number of references to one attribute block */
++#define EXT2_XATTR_REFCOUNT_MAX		1024
++
++/* Name indexes */
++#define EXT2_XATTR_INDEX_MAX			10
++#define EXT2_XATTR_INDEX_USER			1
++#define EXT2_XATTR_INDEX_POSIX_ACL_ACCESS	2
++#define EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT	3
++
++struct ext2_xattr_header {
++	__u32	h_magic;	/* magic number for identification */
++	__u32	h_refcount;	/* reference count */
++	__u32	h_blocks;	/* number of disk blocks used */
++	__u32	h_hash;		/* hash value of all attributes */
++	__u32	h_reserved[4];	/* zero right now */
++};
++
++struct ext2_xattr_entry {
++	__u8	e_name_len;	/* length of name */
++	__u8	e_name_index;	/* attribute name index */
++	__u16	e_value_offs;	/* offset in disk block of value */
++	__u32	e_value_block;	/* disk block attribute is stored on (n/i) */
++	__u32	e_value_size;	/* size of attribute value */
++	__u32	e_hash;		/* hash value of name and value */
++	char	e_name[0];	/* attribute name */
++};
++
++#define EXT2_XATTR_PAD_BITS		2
++#define EXT2_XATTR_PAD		(1<<EXT2_XATTR_PAD_BITS)
++#define EXT2_XATTR_ROUND		(EXT2_XATTR_PAD-1)
++#define EXT2_XATTR_LEN(name_len) \
++	(((name_len) + EXT2_XATTR_ROUND + \
++	sizeof(struct ext2_xattr_entry)) & ~EXT2_XATTR_ROUND)
++#define EXT2_XATTR_NEXT(entry) \
++	( (struct ext2_xattr_entry *)( \
++	  (char *)(entry) + EXT2_XATTR_LEN((entry)->e_name_len)) )
++#define EXT2_XATTR_SIZE(size) \
++	(((size) + EXT2_XATTR_ROUND) & ~EXT2_XATTR_ROUND)
++
++#ifdef __KERNEL__
++
++# ifdef CONFIG_EXT2_FS_XATTR
++
++struct ext2_xattr_handler {
++	char *prefix;
++	size_t (*list)(char *list, struct inode *inode, const char *name,
++		       int name_len);
++	int (*get)(struct inode *inode, const char *name, void *buffer,
++		   size_t size);
++	int (*set)(struct inode *inode, const char *name, const void *buffer,
++		   size_t size, int flags);
++};
++
++extern int ext2_xattr_register(int, struct ext2_xattr_handler *);
++extern void ext2_xattr_unregister(int, struct ext2_xattr_handler *);
++
++extern int ext2_setxattr(struct dentry *, const char *, const void *, size_t, int);
++extern ssize_t ext2_getxattr(struct dentry *, const char *, void *, size_t);
++extern ssize_t ext2_listxattr(struct dentry *, char *, size_t);
++extern int ext2_removexattr(struct dentry *, const char *);
++
++extern int ext2_xattr_get(struct inode *, int, const char *, void *, size_t);
++extern int ext2_xattr_list(struct inode *, char *, size_t);
++extern int ext2_xattr_set(struct inode *, int, const char *, const void *, size_t, int);
++
++extern void ext2_xattr_delete_inode(struct inode *);
++extern void ext2_xattr_put_super(struct super_block *);
++
++extern int init_ext2_xattr(void) __init;
++extern void exit_ext2_xattr(void);
++
++# else /* CONFIG_EXT2_FS_XATTR */
++# define ext2_setxattr NULL
++# define ext2_getxattr NULL
++# define ext2_listxattr NULL
++# define ext2_removexattr NULL
++
++static inline int
++ext2_xattr_get(struct inode *inode, int name_index,
++	       const char *name, void *buffer, size_t size)
++{
++	return -ENOTSUP;
++}
++
++static inline int
++ext2_xattr_list(struct inode *inode, char *buffer, size_t size)
++{
++	return -ENOTSUP;
++}
++
++static inline int
++ext2_xattr_set(struct inode *inode, int name_index, const char *name,
++	       const void *value, size_t size, int flags)
++{
++	return -ENOTSUP;
++}
++
++static inline void
++ext2_xattr_delete_inode(struct inode *inode)
++{
++}
++
++static inline void
++ext2_xattr_put_super(struct super_block *sb)
++{
++}
++
++static inline int
++init_ext2_xattr(void)
++{
++	return 0;
++}
++
++static inline void
++exit_ext2_xattr(void)
++{
++}
++
++# endif /* CONFIG_EXT2_FS_XATTR */
++
++# ifdef CONFIG_EXT2_FS_XATTR_USER
++
++extern int
init_ext2_xattr_user(void) __init; ++extern void exit_ext2_xattr_user(void); ++ ++# else /* CONFIG_EXT2_FS_XATTR_USER */ ++ ++static inline int ++init_ext2_xattr_user(void) ++{ ++ return 0; ++} ++ ++static inline void ++exit_ext2_xattr_user(void) ++{ ++} ++ ++# endif /* CONFIG_EXT2_FS_XATTR_USER */ ++ ++#endif /* __KERNEL__ */ ++ +Index: linux-2.4.22-vanilla/include/linux/ext3_fs.h +=================================================================== +--- linux-2.4.22-vanilla.orig/include/linux/ext3_fs.h 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/include/linux/ext3_fs.h 2003-11-03 23:41:29.000000000 +0300 +@@ -63,8 +63,6 @@ + */ + #define EXT3_BAD_INO 1 /* Bad blocks inode */ + #define EXT3_ROOT_INO 2 /* Root inode */ +-#define EXT3_ACL_IDX_INO 3 /* ACL inode */ +-#define EXT3_ACL_DATA_INO 4 /* ACL inode */ + #define EXT3_BOOT_LOADER_INO 5 /* Boot loader inode */ + #define EXT3_UNDEL_DIR_INO 6 /* Undelete directory inode */ + #define EXT3_RESIZE_INO 7 /* Reserved group descriptors inode */ +@@ -94,7 +92,6 @@ + #else + # define EXT3_BLOCK_SIZE(s) (EXT3_MIN_BLOCK_SIZE << (s)->s_log_block_size) + #endif +-#define EXT3_ACLE_PER_BLOCK(s) (EXT3_BLOCK_SIZE(s) / sizeof (struct ext3_acl_entry)) + #define EXT3_ADDR_PER_BLOCK(s) (EXT3_BLOCK_SIZE(s) / sizeof (__u32)) + #ifdef __KERNEL__ + # define EXT3_BLOCK_SIZE_BITS(s) ((s)->s_blocksize_bits) +@@ -129,28 +126,6 @@ + #endif + + /* +- * ACL structures +- */ +-struct ext3_acl_header /* Header of Access Control Lists */ +-{ +- __u32 aclh_size; +- __u32 aclh_file_count; +- __u32 aclh_acle_count; +- __u32 aclh_first_acle; +-}; +- +-struct ext3_acl_entry /* Access Control List Entry */ +-{ +- __u32 acle_size; +- __u16 acle_perms; /* Access permissions */ +- __u16 acle_type; /* Type of entry */ +- __u16 acle_tag; /* User or group identity */ +- __u16 acle_pad1; +- __u32 acle_next; /* Pointer on next entry for the */ +- /* same inode or on next free entry */ +-}; +- +-/* + * Structure of a blocks group descriptor + */ + struct ext3_group_desc +@@ -344,6 +319,7 @@ + #define EXT3_MOUNT_WRITEBACK_DATA 0x0C00 /* No data ordering */ + #define EXT3_MOUNT_UPDATE_JOURNAL 0x1000 /* Update the journal format */ + #define EXT3_MOUNT_NO_UID32 0x2000 /* Disable 32-bit UIDs */ ++#define EXT3_MOUNT_XATTR_USER 0x4000 /* Extended user attributes */ + + /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */ + #ifndef _LINUX_EXT2_FS_H +@@ -521,7 +497,7 @@ + #define EXT3_FEATURE_INCOMPAT_RECOVER 0x0004 /* Needs recovery */ + #define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008 /* Journal device */ + +-#define EXT3_FEATURE_COMPAT_SUPP 0 ++#define EXT3_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR + #define EXT3_FEATURE_INCOMPAT_SUPP (EXT3_FEATURE_INCOMPAT_FILETYPE| \ + EXT3_FEATURE_INCOMPAT_RECOVER) + #define EXT3_FEATURE_RO_COMPAT_SUPP (EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \ +@@ -704,6 +680,7 @@ + extern unsigned long ext3_count_free (struct buffer_head *, unsigned); + + /* inode.c */ ++extern int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int); + extern struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *); + extern struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *); + +@@ -773,8 +750,10 @@ + + /* namei.c */ + extern struct inode_operations ext3_dir_inode_operations; ++extern struct inode_operations ext3_special_inode_operations; + + /* symlink.c */ ++extern struct inode_operations ext3_symlink_inode_operations; + extern struct inode_operations 
ext3_fast_symlink_inode_operations;
+ 
+ 
+Index: linux-2.4.22-vanilla/include/linux/ext3_jbd.h
+===================================================================
+--- linux-2.4.22-vanilla.orig/include/linux/ext3_jbd.h 2003-11-03 23:41:26.000000000 +0300
++++ linux-2.4.22-vanilla/include/linux/ext3_jbd.h 2003-11-03 23:41:29.000000000 +0300
+@@ -30,13 +30,19 @@
+ 
+ #define EXT3_SINGLEDATA_TRANS_BLOCKS	8U
+ 
++/* Extended attributes may touch two data buffers, two bitmap buffers,
++ * and two group and summaries. */
++
++#define EXT3_XATTR_TRANS_BLOCKS		8
++
+ /* Define the minimum size for a transaction which modifies data. This
+ * needs to take into account the fact that we may end up modifying two
+ * quota files too (one for the group, one for the user quota). The
+ * superblock only gets updated once, of course, so don't bother
+ * counting that again for the quota updates. */
+ 
+-#define EXT3_DATA_TRANS_BLOCKS		(3 * EXT3_SINGLEDATA_TRANS_BLOCKS - 2)
++#define EXT3_DATA_TRANS_BLOCKS		(3 * EXT3_SINGLEDATA_TRANS_BLOCKS + \
++					 EXT3_XATTR_TRANS_BLOCKS - 2)
+ 
+ extern int ext3_writepage_trans_blocks(struct inode *inode);
+ 
+Index: linux-2.4.22-vanilla/include/linux/ext3_xattr.h
+===================================================================
+--- linux-2.4.22-vanilla.orig/include/linux/ext3_xattr.h 2003-11-03 23:41:29.000000000 +0300
++++ linux-2.4.22-vanilla/include/linux/ext3_xattr.h 2003-11-03 23:41:29.000000000 +0300
+@@ -0,0 +1,157 @@
++/*
++  File: linux/ext3_xattr.h
++
++  On-disk format of extended attributes for the ext3 filesystem.
++
++  (C) 2001 Andreas Gruenbacher, <a.gruenbacher@computer.org>
++*/
++
++#include <linux/config.h>
++#include <linux/init.h>
++#include <linux/xattr.h>
++
++/* Magic value in attribute blocks */
++#define EXT3_XATTR_MAGIC		0xEA020000
++
++/* Maximum number of references to one attribute block */
++#define EXT3_XATTR_REFCOUNT_MAX		1024
++
++/* Name indexes */
++#define EXT3_XATTR_INDEX_MAX			10
++#define EXT3_XATTR_INDEX_USER			1
++#define EXT3_XATTR_INDEX_POSIX_ACL_ACCESS	2
++#define EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT	3
++
++struct ext3_xattr_header {
++	__u32	h_magic;	/* magic number for identification */
++	__u32	h_refcount;	/* reference count */
++	__u32	h_blocks;	/* number of disk blocks used */
++	__u32	h_hash;		/* hash value of all attributes */
++	__u32	h_reserved[4];	/* zero right now */
++};
++
++struct ext3_xattr_entry {
++	__u8	e_name_len;	/* length of name */
++	__u8	e_name_index;	/* attribute name index */
++	__u16	e_value_offs;	/* offset in disk block of value */
++	__u32	e_value_block;	/* disk block attribute is stored on (n/i) */
++	__u32	e_value_size;	/* size of attribute value */
++	__u32	e_hash;		/* hash value of name and value */
++	char	e_name[0];	/* attribute name */
++};
++
++#define EXT3_XATTR_PAD_BITS		2
++#define EXT3_XATTR_PAD		(1<<EXT3_XATTR_PAD_BITS)
++#define EXT3_XATTR_ROUND		(EXT3_XATTR_PAD-1)
++#define EXT3_XATTR_LEN(name_len) \
++	(((name_len) + EXT3_XATTR_ROUND + \
++	sizeof(struct ext3_xattr_entry)) & ~EXT3_XATTR_ROUND)
++#define EXT3_XATTR_NEXT(entry) \
++	( (struct ext3_xattr_entry *)( \
++	  (char *)(entry) + EXT3_XATTR_LEN((entry)->e_name_len)) )
++#define EXT3_XATTR_SIZE(size) \
++	(((size) + EXT3_XATTR_ROUND) & ~EXT3_XATTR_ROUND)
++
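[Note on the on-disk format above: entries are packed immediately after the ext3_xattr_header, each descriptor (16 bytes plus the name) rounded up to EXT3_XATTR_PAD bytes so that EXT3_XATTR_NEXT() can step through the block. A sketch of a walk over such a block; the raw buffer argument and the end-of-list test (a zeroed __u32 after the last entry, with byte-order handling omitted) are assumptions for illustration:

/* Sketch: enumerate the entries packed into one attribute block. */
static void example_walk_block(char *block_data)
{
        struct ext3_xattr_header *hdr = (struct ext3_xattr_header *)block_data;
        struct ext3_xattr_entry *entry;

        if (hdr->h_magic != EXT3_XATTR_MAGIC)   /* endianness ignored here */
                return;
        entry = (struct ext3_xattr_entry *)(hdr + 1);   /* first entry */
        while (*(__u32 *)entry != 0) {          /* assumed list terminator */
                /* inspect entry->e_name_index, e_name_len, e_value_size...
                 * EXT3_XATTR_LEN(9) == (9 + 16 + 3) & ~3 == 28, so a
                 * 9-character name costs 28 bytes of descriptor space */
                entry = EXT3_XATTR_NEXT(entry);
        }
}]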
++#ifdef __KERNEL__
++
++# ifdef CONFIG_EXT3_FS_XATTR
++
++struct ext3_xattr_handler {
++	char *prefix;
++	size_t (*list)(char *list, struct inode *inode, const char *name,
++		       int name_len);
++	int (*get)(struct inode *inode, const char *name, void *buffer,
++		   size_t size);
++	int (*set)(struct inode *inode, const char *name, const void *buffer,
++		   size_t size, int flags);
++};
++
++extern int ext3_xattr_register(int, struct ext3_xattr_handler *);
++extern void ext3_xattr_unregister(int, struct ext3_xattr_handler *);
++
++extern int ext3_setxattr(struct dentry *, const char *, const void *, size_t, int);
++extern ssize_t ext3_getxattr(struct dentry *, const char *, void *, size_t);
++extern ssize_t ext3_listxattr(struct dentry *, char *, size_t);
++extern int ext3_removexattr(struct dentry *, const char *);
++
++extern int ext3_xattr_get(struct inode *, int, const char *, void *, size_t);
++extern int ext3_xattr_list(struct inode *, char *, size_t);
++extern int ext3_xattr_set(handle_t *handle, struct inode *, int, const char *, const void *, size_t, int);
++
++extern void ext3_xattr_delete_inode(handle_t *, struct inode *);
++extern void ext3_xattr_put_super(struct super_block *);
++
++extern int init_ext3_xattr(void) __init;
++extern void exit_ext3_xattr(void);
++
++# else /* CONFIG_EXT3_FS_XATTR */
++# define ext3_setxattr NULL
++# define ext3_getxattr NULL
++# define ext3_listxattr NULL
++# define ext3_removexattr NULL
++
++static inline int
++ext3_xattr_get(struct inode *inode, int name_index, const char *name,
++	       void *buffer, size_t size)
++{
++	return -ENOTSUP;
++}
++
++static inline int
++ext3_xattr_list(struct inode *inode, void *buffer, size_t size)
++{
++	return -ENOTSUP;
++}
++
++static inline int
++ext3_xattr_set(handle_t *handle, struct inode *inode, int name_index,
++	       const char *name, const void *value, size_t size, int flags)
++{
++	return -ENOTSUP;
++}
++
++static inline void
++ext3_xattr_delete_inode(handle_t *handle, struct inode *inode)
++{
++}
++
++static inline void
++ext3_xattr_put_super(struct super_block *sb)
++{
++}
++
++static inline int
++init_ext3_xattr(void)
++{
++	return 0;
++}
++
++static inline void
++exit_ext3_xattr(void)
++{
++}
++
++# endif /* CONFIG_EXT3_FS_XATTR */
++
++# ifdef CONFIG_EXT3_FS_XATTR_USER
++
++extern int init_ext3_xattr_user(void) __init;
++extern void exit_ext3_xattr_user(void);
++
++# else /* CONFIG_EXT3_FS_XATTR_USER */
++
++static inline int
++init_ext3_xattr_user(void)
++{
++	return 0;
++}
++
++static inline void
++exit_ext3_xattr_user(void)
++{
++}
++
++#endif /* CONFIG_EXT3_FS_XATTR_USER */
++
++#endif /* __KERNEL__ */
++
+Index: linux-2.4.22-vanilla/include/linux/fs.h
+===================================================================
+--- linux-2.4.22-vanilla.orig/include/linux/fs.h 2003-11-03 23:41:26.000000000 +0300
++++ linux-2.4.22-vanilla/include/linux/fs.h 2003-11-03 23:41:29.000000000 +0300
+@@ -913,7 +913,7 @@
+ 	int (*setattr) (struct dentry *, struct iattr *);
+ 	int (*setattr_raw) (struct inode *, struct iattr *);
+ 	int (*getattr) (struct dentry *, struct iattr *);
+-	int (*setxattr) (struct dentry *, const char *, void *, size_t, int);
++	int (*setxattr) (struct dentry *, const char *, const void *, size_t, int);
+ 	ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
+ 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
+ 	int (*removexattr) (struct dentry *, const char *);
+Index: linux-2.4.22-vanilla/include/linux/mbcache.h
+===================================================================
+--- linux-2.4.22-vanilla.orig/include/linux/mbcache.h 2003-11-03 23:41:29.000000000 +0300
++++ linux-2.4.22-vanilla/include/linux/mbcache.h 2003-11-03 23:41:29.000000000 +0300
+@@ -0,0 +1,69 @@
++/*
++  File: linux/mbcache.h
++
++  (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
++*/
++
++/* Hardwire the number of additional indexes */
++#define MB_CACHE_INDEXES_COUNT 1
++
++struct mb_cache_entry;
++
++struct mb_cache_op {
++	int (*free)(struct mb_cache_entry *, int);
++};
++
++struct mb_cache {
++	struct list_head		c_cache_list;
++	const char			*c_name;
++	struct mb_cache_op		c_op;
++	atomic_t			c_entry_count;
++	int				c_bucket_count;
++#ifndef MB_CACHE_INDEXES_COUNT
++	int				c_indexes_count;
++#endif
++	
kmem_cache_t *c_entry_cache; ++ struct list_head *c_block_hash; ++ struct list_head *c_indexes_hash[0]; ++}; ++ ++struct mb_cache_entry_index { ++ struct list_head o_list; ++ unsigned int o_key; ++}; ++ ++struct mb_cache_entry { ++ struct list_head e_lru_list; ++ struct mb_cache *e_cache; ++ atomic_t e_used; ++ kdev_t e_dev; ++ unsigned long e_block; ++ struct list_head e_block_list; ++ struct mb_cache_entry_index e_indexes[0]; ++}; ++ ++/* Functions on caches */ ++ ++struct mb_cache * mb_cache_create(const char *, struct mb_cache_op *, size_t, ++ int, int); ++void mb_cache_shrink(struct mb_cache *, kdev_t); ++void mb_cache_destroy(struct mb_cache *); ++ ++/* Functions on cache entries */ ++ ++struct mb_cache_entry *mb_cache_entry_alloc(struct mb_cache *); ++int mb_cache_entry_insert(struct mb_cache_entry *, kdev_t, unsigned long, ++ unsigned int[]); ++void mb_cache_entry_rehash(struct mb_cache_entry *, unsigned int[]); ++void mb_cache_entry_release(struct mb_cache_entry *); ++void mb_cache_entry_takeout(struct mb_cache_entry *); ++void mb_cache_entry_free(struct mb_cache_entry *); ++struct mb_cache_entry *mb_cache_entry_dup(struct mb_cache_entry *); ++struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *, kdev_t, ++ unsigned long); ++#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) ++struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache, int, ++ kdev_t, unsigned int); ++struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache_entry *, int, ++ kdev_t, unsigned int); ++#endif +Index: linux-2.4.22-vanilla/kernel/ksyms.c +=================================================================== +--- linux-2.4.22-vanilla.orig/kernel/ksyms.c 2003-11-03 23:41:26.000000000 +0300 ++++ linux-2.4.22-vanilla/kernel/ksyms.c 2003-11-03 23:41:29.000000000 +0300 +@@ -11,6 +11,7 @@ + + #include + #include ++#include + #include + #include + #include +@@ -91,6 +92,7 @@ + EXPORT_SYMBOL(exit_files); + EXPORT_SYMBOL(exit_fs); + EXPORT_SYMBOL(exit_sighand); ++EXPORT_SYMBOL(copy_fs_struct); + + /* internal kernel memory management */ + EXPORT_SYMBOL(_alloc_pages); +@@ -109,6 +111,8 @@ + EXPORT_SYMBOL(kmem_cache_alloc); + EXPORT_SYMBOL(kmem_cache_free); + EXPORT_SYMBOL(kmem_cache_size); ++EXPORT_SYMBOL(register_cache); ++EXPORT_SYMBOL(unregister_cache); + EXPORT_SYMBOL(kmalloc); + EXPORT_SYMBOL(kfree); + EXPORT_SYMBOL(vfree); +Index: linux-2.4.22-vanilla/mm/vmscan.c +=================================================================== +--- linux-2.4.22-vanilla.orig/mm/vmscan.c 2003-11-03 23:41:27.000000000 +0300 ++++ linux-2.4.22-vanilla/mm/vmscan.c 2003-11-03 23:41:29.000000000 +0300 +@@ -18,6 +18,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -34,6 +35,39 @@ + */ + #define DEF_PRIORITY (6) + ++static DECLARE_MUTEX(other_caches_sem); ++static LIST_HEAD(cache_definitions); ++ ++void register_cache(struct cache_definition *cache) ++{ ++ down(&other_caches_sem); ++ list_add(&cache->link, &cache_definitions); ++ up(&other_caches_sem); ++} ++ ++void unregister_cache(struct cache_definition *cache) ++{ ++ down(&other_caches_sem); ++ list_del(&cache->link); ++ up(&other_caches_sem); ++} ++ ++static void shrink_other_caches(unsigned int priority, int gfp_mask) ++{ ++ struct list_head *p; ++ ++ if (down_trylock(&other_caches_sem)) ++ return; ++ ++ list_for_each_prev(p, &cache_definitions) { ++ struct cache_definition *cache = ++ list_entry(p, struct cache_definition, link); ++ ++ cache->shrink(priority, gfp_mask); ++ } ++ 
up(&other_caches_sem); ++} ++ + /* + * The swap-out function returns 1 if it successfully + * scanned all the pages it was asked to (`count'). +@@ -577,6 +611,7 @@ + + shrink_dcache_memory(priority, gfp_mask); + shrink_icache_memory(priority, gfp_mask); ++ shrink_other_caches(priority, gfp_mask); + #ifdef CONFIG_QUOTA + shrink_dqcache_memory(DEF_PRIORITY, gfp_mask); + #endif +Index: linux-2.4.22-vanilla/fs/ext3/ext3-exports.c +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/ext3/ext3-exports.c 2003-11-03 23:41:29.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/ext3/ext3-exports.c 2003-11-03 23:41:29.000000000 +0300 +@@ -0,0 +1,13 @@ ++#include ++#include ++#include ++#include ++#include ++ ++EXPORT_SYMBOL(ext3_force_commit); ++EXPORT_SYMBOL(ext3_bread); ++EXPORT_SYMBOL(ext3_xattr_register); ++EXPORT_SYMBOL(ext3_xattr_unregister); ++EXPORT_SYMBOL(ext3_xattr_get); ++EXPORT_SYMBOL(ext3_xattr_list); ++EXPORT_SYMBOL(ext3_xattr_set); diff --git a/lustre/kernel_patches/patches/nfs_export_kernel-2.4.22.patch b/lustre/kernel_patches/patches/nfs_export_kernel-2.4.22.patch new file mode 100644 index 0000000..3ca1abc --- /dev/null +++ b/lustre/kernel_patches/patches/nfs_export_kernel-2.4.22.patch @@ -0,0 +1,746 @@ +Index: linux-2.4.22-vanilla/fs/Makefile +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/Makefile 2003-11-03 23:41:40.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/Makefile 2003-11-03 23:45:07.000000000 +0300 +@@ -7,7 +7,8 @@ + + O_TARGET := fs.o + +-export-objs := filesystems.o open.o dcache.o buffer.o dquot.o inode.o ++export-objs := filesystems.o open.o dcache.o buffer.o dquot.o inode.o \ ++ namei.o file_table.o + mod-subdirs := nls + + obj-y := open.o read_write.o devices.o file_table.o buffer.o \ +Index: linux-2.4.22-vanilla/fs/file_table.c +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/file_table.c 2003-05-16 05:29:12.000000000 +0400 ++++ linux-2.4.22-vanilla/fs/file_table.c 2003-11-03 23:44:38.000000000 +0300 +@@ -82,7 +82,8 @@ + * and call the open function (if any). The caller must verify that + * inode->i_fop is not NULL. 
+ */ +-int init_private_file(struct file *filp, struct dentry *dentry, int mode) ++int init_private_file_it(struct file *filp, struct dentry *dentry, int mode, ++ struct lookup_intent *it) + { + memset(filp, 0, sizeof(*filp)); + filp->f_mode = mode; +@@ -90,12 +91,20 @@ + filp->f_dentry = dentry; + filp->f_uid = current->fsuid; + filp->f_gid = current->fsgid; ++ if (it) ++ filp->f_it = it; + filp->f_op = dentry->d_inode->i_fop; + if (filp->f_op->open) + return filp->f_op->open(dentry->d_inode, filp); + else + return 0; + } ++EXPORT_SYMBOL(init_private_file_it); ++ ++int init_private_file(struct file *filp, struct dentry *dentry, int mode) ++{ ++ return init_private_file_it(filp, dentry, mode, NULL); ++} + + void fput(struct file * file) + { +Index: linux-2.4.22-vanilla/fs/inode.c +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/inode.c 2003-11-03 23:25:33.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/inode.c 2003-11-03 23:44:38.000000000 +0300 +@@ -970,9 +970,10 @@ + } + + +-struct inode *iget4(struct super_block *sb, unsigned long ino, find_inode_t find_actor, void *opaque) ++static inline struct inode *ifind(struct super_block *sb, unsigned long ino, ++ struct list_head *head, ++ find_inode_t find_actor, void *opaque) + { +- struct list_head * head = inode_hashtable + hash(sb,ino); + struct inode * inode; + + spin_lock(&inode_lock); +@@ -985,6 +986,24 @@ + } + spin_unlock(&inode_lock); + ++ return NULL; ++} ++ ++struct inode *ilookup4(struct super_block *sb, unsigned long ino, ++ find_inode_t find_actor, void *opaque) ++{ ++ struct list_head * head = inode_hashtable + hash(sb,ino); ++ return ifind(sb, ino, head, find_actor, opaque); ++} ++ ++struct inode *iget4(struct super_block *sb, unsigned long ino, ++ find_inode_t find_actor, void *opaque) ++{ ++ struct list_head * head = inode_hashtable + hash(sb,ino); ++ struct inode *inode = ifind(sb, ino, head, find_actor, opaque); ++ if (inode) ++ return inode; ++ + /* + * get_new_inode() will do the right thing, re-trying the search + * in case it had to block at any point. 
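[Note on the inode.c change above: the iget4()/ilookup4() split factors the hash probe into ifind() so that callers can ask whether an inode is already in core without iget4()'s allocate-on-miss path. A sketch of the non-allocating probe; it assumes, as in iget4(), that a hit comes back with its reference count raised (the middle of ifind() is unchanged diff context above), so the caller drops it with iput():

/* Sketch: probe the inode cache without instantiating a new inode. */
static int example_inode_is_cached(struct super_block *sb, unsigned long ino)
{
        struct inode *inode = ilookup4(sb, ino, NULL, NULL);

        if (inode == NULL)
                return 0;       /* miss: nothing was allocated */
        /* ... inspect the in-core inode ... */
        iput(inode);            /* drop the reference the lookup took */
        return 1;
}]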
+Index: linux-2.4.22-vanilla/fs/namei.c +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/namei.c 2003-11-03 23:22:22.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/namei.c 2003-11-03 23:44:38.000000000 +0300 +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -100,6 +101,7 @@ + it->it_op_release(it); + + } ++EXPORT_SYMBOL(intent_release); + + /* In order to reduce some races, while at the same time doing additional + * checking and hopefully speeding things up, we copy filenames to the +@@ -902,7 +904,8 @@ + + + /* SMP-safe */ +-struct dentry * lookup_one_len(const char * name, struct dentry * base, int len) ++struct dentry * lookup_one_len_it(const char * name, struct dentry * base, ++ int len, struct lookup_intent *it) + { + unsigned long hash; + struct qstr this; +@@ -922,11 +925,16 @@ + } + this.hash = end_name_hash(hash); + +- return lookup_hash_it(&this, base, NULL); ++ return lookup_hash_it(&this, base, it); + access: + return ERR_PTR(-EACCES); + } + ++struct dentry * lookup_one_len(const char * name, struct dentry * base, int len) ++{ ++ return lookup_one_len_it(name, base, len, NULL); ++} ++ + /* + * namei() + * +Index: linux-2.4.22-vanilla/fs/nfsd/export.c +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/nfsd/export.c 2003-11-03 23:22:11.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/nfsd/export.c 2003-11-03 23:44:38.000000000 +0300 +@@ -223,6 +223,11 @@ + inode = nd.dentry->d_inode; + dev = inode->i_dev; + ino = inode->i_ino; ++ if ((inode->i_sb->s_type->fs_flags & FS_NFSEXP_FSID) && ++ !(nxp->ex_flags & NFSEXP_FSID)) { ++ nxp->ex_dev = inode->i_sb->s_dev; ++ nxp->ex_flags |= NFSEXP_FSID; ++ } + err = -EINVAL; + + exp = exp_get(clp, dev, ino); +Index: linux-2.4.22-vanilla/fs/nfsd/nfsfh.c +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/nfsd/nfsfh.c 2003-11-03 23:22:11.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/nfsd/nfsfh.c 2003-11-03 23:44:38.000000000 +0300 +@@ -36,6 +36,15 @@ + int sequence; /* sequence counter */ + }; + ++static struct dentry *lookup_it(struct inode *inode, struct dentry * dentry) ++{ ++ if (inode->i_op->lookup_it) ++ return inode->i_op->lookup_it(inode, dentry, NULL, 0); ++ else ++ return inode->i_op->lookup(inode, dentry); ++ ++} ++ + /* + * A rather strange filldir function to capture + * the name matching the specified inode number. +@@ -75,6 +84,8 @@ + int error; + struct file file; + struct nfsd_getdents_callback buffer; ++ struct lookup_intent it; ++ struct file *filp = NULL; + + error = -ENOTDIR; + if (!dir || !S_ISDIR(dir->i_mode)) +@@ -85,9 +96,37 @@ + /* + * Open the directory ... 
+ */ +- error = init_private_file(&file, dentry, FMODE_READ); +- if (error) ++ if (dentry->d_op && dentry->d_op->d_revalidate_it) { ++ if ((dentry->d_flags & DCACHE_NFSD_DISCONNECTED) && ++ (dentry->d_parent == dentry) ) { ++ it.it_op_release = NULL; ++ /* ++ * XXX Temporary Hack: Simulating init_private_file without ++ * f_op->open for disconnected dentry Since we don't have actual ++ * dentry->d_name to revalidate in revalidate_it() ++ */ ++ filp = &file; ++ memset(filp, 0, sizeof(*filp)); ++ filp->f_mode = FMODE_READ; ++ atomic_set(&filp->f_count, 1); ++ filp->f_dentry = dentry; ++ filp->f_uid = current->fsuid; ++ filp->f_gid = current->fsgid; ++ filp->f_op = dentry->d_inode->i_fop; ++ error = 0; ++ } else { ++ intent_init(&it, IT_OPEN, 0); ++ error = revalidate_it(dentry, &it); ++ if (error) ++ goto out; ++ error = init_private_file_it(&file, dentry, FMODE_READ, &it); ++ } ++ } else { ++ error = init_private_file_it(&file, dentry, FMODE_READ, NULL); ++ } ++ if (error) + goto out; ++ + error = -EINVAL; + if (!file.f_op->readdir) + goto out_close; +@@ -113,9 +152,13 @@ + } + + out_close: +- if (file.f_op->release) ++ if (file.f_op->release && !filp) + file.f_op->release(dir, &file); + out: ++ if (dentry->d_op && ++ dentry->d_op->d_revalidate_it && ++ it.it_op_release && !filp) ++ intent_release(&it); + return error; + } + +@@ -274,7 +317,7 @@ + * it is well connected. But nobody returns different dentrys do they? + */ + down(&child->d_inode->i_sem); +- pdentry = child->d_inode->i_op->lookup(child->d_inode, tdentry); ++ pdentry = lookup_it(child->d_inode, tdentry); + up(&child->d_inode->i_sem); + d_drop(tdentry); /* we never want ".." hashed */ + if (!pdentry && tdentry->d_inode == NULL) { +@@ -306,6 +349,8 @@ + igrab(tdentry->d_inode); + pdentry->d_flags |= DCACHE_NFSD_DISCONNECTED; + } ++ if (child->d_op && child->d_op->d_revalidate_it) ++ pdentry->d_op = child->d_op; + } + if (pdentry == NULL) + pdentry = ERR_PTR(-ENOMEM); +@@ -463,6 +508,8 @@ + struct dentry *pdentry; + struct inode *parent; + ++ if (result->d_op && result->d_op->d_revalidate_it) ++ dentry->d_op = result->d_op; + pdentry = nfsd_findparent(dentry); + err = PTR_ERR(pdentry); + if (IS_ERR(pdentry)) +@@ -669,6 +716,11 @@ + + inode = dentry->d_inode; + ++ /* cache coherency for non-device filesystems */ ++ if (inode->i_op && inode->i_op->revalidate_it) { ++ inode->i_op->revalidate_it(dentry, NULL); ++ } ++ + /* Type check. The correct error return for type mismatches + * does not seem to be generally agreed upon. 
SunOS seems to + * use EISDIR if file isn't S_IFREG; a comment in the NFSv3 +@@ -902,8 +954,9 @@ + dentry->d_parent->d_name.name, dentry->d_name.name); + goto out; + out_uptodate: +- printk(KERN_ERR "fh_update: %s/%s already up-to-date!\n", +- dentry->d_parent->d_name.name, dentry->d_name.name); ++ if(!dentry->d_parent->d_inode->i_op->mkdir_raw) ++ printk(KERN_ERR "fh_update: %s/%s already up-to-date!\n", ++ dentry->d_parent->d_name.name, dentry->d_name.name); + goto out; + } + +Index: linux-2.4.22-vanilla/fs/nfsd/vfs.c +=================================================================== +--- linux-2.4.22-vanilla.orig/fs/nfsd/vfs.c 2003-11-03 23:22:11.000000000 +0300 ++++ linux-2.4.22-vanilla/fs/nfsd/vfs.c 2003-11-03 23:47:41.000000000 +0300 +@@ -77,6 +77,128 @@ + static struct raparms * raparml; + static struct raparms * raparm_cache; + ++static int link_raw(struct dentry *dold, struct dentry *ddir, ++ struct dentry *dnew) ++{ ++ int err; ++ ++ struct nameidata old_nd = { .dentry = dold }; ++ struct nameidata nd = { .dentry = ddir, .last = dnew->d_name }; ++ struct inode_operations *op = nd.dentry->d_inode->i_op; ++ err = op->link_raw(&old_nd, &nd); ++ d_instantiate(dnew, dold->d_inode); ++ if(dold->d_inode->i_op && dold->d_inode->i_op->revalidate_it) ++ dold->d_inode->i_op->revalidate_it(dnew, NULL); ++ ++ return err; ++} ++ ++static int unlink_raw(struct dentry *dentry, char *fname, int flen, ++ struct dentry *rdentry) ++{ ++ int err; ++ struct qstr last = { .name = fname, .len = flen }; ++ struct nameidata nd = { .dentry = dentry, .last = last }; ++ struct inode_operations *op = nd.dentry->d_inode->i_op; ++ err = op->unlink_raw(&nd); ++ if (!err) ++ d_delete(rdentry); ++ ++ return err; ++} ++ ++static int rmdir_raw(struct dentry *dentry, char *fname, int flen, ++ struct dentry *rdentry) ++{ ++ int err; ++ struct qstr last = { .name = fname, .len = flen }; ++ struct nameidata nd = { .dentry = dentry, .last = last }; ++ struct inode_operations *op = nd.dentry->d_inode->i_op; ++ err = op->rmdir_raw(&nd); ++ if(!err) { ++ rdentry->d_inode->i_flags |= S_DEAD; ++ d_delete(rdentry); ++ } ++ ++ return err; ++} ++ ++static int symlink_raw(struct dentry *dentry, char *fname, int flen, ++ char *path) ++{ ++ int err; ++ struct qstr last = { .name = fname, .len = flen }; ++ struct nameidata nd = { .dentry = dentry, .last = last }; ++ struct inode_operations *op = nd.dentry->d_inode->i_op; ++ err = op->symlink_raw(&nd, path); ++ ++ return err; ++} ++ ++static int mkdir_raw(struct dentry *dentry, char *fname, int flen, int mode) ++{ ++ int err; ++ struct qstr last = { .name = fname, .len = flen }; ++ struct nameidata nd = { .dentry = dentry, .last = last }; ++ struct inode_operations *op = nd.dentry->d_inode->i_op; ++ err = op->mkdir_raw(&nd, mode); ++ ++ return err; ++} ++ ++static int mknod_raw(struct dentry *dentry, char *fname, int flen, int mode, ++ dev_t dev) ++{ ++ int err; ++ struct qstr last = { .name = fname, .len = flen }; ++ struct nameidata nd = { .dentry = dentry, .last = last }; ++ struct inode_operations *op = nd.dentry->d_inode->i_op; ++ err = op->mknod_raw(&nd, mode, dev); ++ ++ return err; ++} ++ ++static int rename_raw(struct dentry *fdentry, struct dentry *tdentry, ++ struct dentry *odentry, struct dentry *ndentry) ++{ ++ int err; ++ ++ struct nameidata old_nd = { .dentry = fdentry, .last = odentry->d_name}; ++ struct nameidata new_nd = { .dentry = tdentry, .last = ndentry->d_name}; ++ struct inode_operations *op = old_nd.dentry->d_inode->i_op; ++ err = op->rename_raw(&old_nd, 
&new_nd);
++	d_move(odentry, ndentry);
++
++	return err;
++}
++
++static int setattr_raw(struct inode *inode, struct iattr *iap)
++{
++	int err;
++
++	iap->ia_valid |= ATTR_RAW;
++	err = inode->i_op->setattr_raw(inode, iap);
++
++	return err;
++}
++
++int revalidate_it(struct dentry *dentry, struct lookup_intent *it)
++{
++	int err = 0;
++
++	if (dentry && dentry->d_op && dentry->d_op->d_revalidate_it) {
++		if (!dentry->d_op->d_revalidate_it(dentry, 0, it) &&
++		    !d_invalidate(dentry)) {
++			dput(dentry);
++			err = -EINVAL;
++			dentry = NULL;
++			return err;
++		}
++	}
++
++	return err;
++}
++
+ /*
+ * Look up one component of a pathname.
+ * N.B. After this call _both_ fhp and resfh need an fh_put
+@@ -302,7 +424,10 @@
+ 	}
+ 	err = nfserr_notsync;
+ 	if (!check_guard || guardtime == inode->i_ctime) {
+-		err = notify_change(dentry, iap);
++		if ( dentry->d_inode->i_op && dentry->d_inode->i_op->setattr_raw)
++			err = setattr_raw(dentry->d_inode, iap);
++		else
++			err = notify_change(dentry, iap);
+ 		err = nfserrno(err);
+ 	}
+ 	if (size_change) {
+@@ -429,6 +554,7 @@
+ {
+ 	struct dentry	*dentry;
+ 	struct inode	*inode;
++	struct lookup_intent it;
+ 	int		err;
+ 
+ 	/* If we get here, then the client has already done an "open", and (hopefully)
+@@ -475,6 +601,14 @@
+ 		filp->f_mode = FMODE_READ;
+ 	}
+ 
++	intent_init(&it, IT_OPEN, (filp->f_flags & ~O_ACCMODE) | filp->f_mode);
++
++	err = revalidate_it(dentry, &it);
++	if (err)
++		goto out_nfserr;
++
++	filp->f_it = &it;
++
+ 	err = 0;
+ 	if (filp->f_op && filp->f_op->open) {
+ 		err = filp->f_op->open(inode, filp);
+@@ -489,7 +623,11 @@
+ 			atomic_dec(&filp->f_count);
+ 		}
+ 	}
++
+ out_nfserr:
++	if (it.it_op_release)
++		intent_release(&it);
++
+ 	if (err)
+ 		err = nfserrno(err);
+ out:
+@@ -820,7 +958,7 @@
+ {
+ 	struct dentry	*dentry, *dchild;
+ 	struct inode	*dirp;
+-	int		err;
++	int		err, error = -EOPNOTSUPP;
+ 
+ 	err = nfserr_perm;
+ 	if (!flen)
+@@ -836,20 +974,44 @@
+ 	dentry = fhp->fh_dentry;
+ 	dirp = dentry->d_inode;
+ 
++	switch (type) {
++	case S_IFDIR:
++		if (dirp->i_op->mkdir_raw)
++			error = mkdir_raw(dentry, fname, flen, iap->ia_mode);
++		break;
++	case S_IFCHR:
++	case S_IFBLK:
++	case S_IFIFO:
++	case S_IFSOCK:
++	case S_IFREG:
++		if (dirp->i_op->mknod_raw) {
++			if (type == S_IFREG)
++				rdev = 0;
++			error = mknod_raw(dentry, fname, flen, iap->ia_mode, rdev);
++		}
++		break;
++	default:
++		printk("nfsd: bad file type %o in nfsd_create\n", type);
++	}
++
+ 	err = nfserr_notdir;
+-	if(!dirp->i_op || !dirp->i_op->lookup)
++	if(!dirp->i_op || !(dirp->i_op->lookup || dirp->i_op->lookup_it))
+ 		goto out;
+ 	/*
+ 	 * Check whether the response file handle has been verified yet.
+ 	 * If it has, the parent directory should already be locked.
+ 	 */
+-	if (!resfhp->fh_dentry) {
+-		/* called from nfsd_proc_mkdir, or possibly nfsd3_proc_create */
+-		fh_lock(fhp);
++	if (!resfhp->fh_dentry || dirp->i_op->lookup_it) {
++		/* called from nfsd_proc_mkdir, or possibly nfsd3_proc_create
++		   and nfsd_proc_create in case of lustre
++		 */
++		if (!resfhp->fh_dentry)
++			fh_lock(fhp);
+ 		dchild = lookup_one_len(fname, dentry, flen);
+ 		err = PTR_ERR(dchild);
+ 		if (IS_ERR(dchild))
+ 			goto out_nfserr;
++		resfhp->fh_dentry = NULL;
+ 		err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
+ 		if (err)
+ 			goto out;
+@@ -870,10 +1032,12 @@
+ 	 * Make sure the child dentry is still negative ...
+ */ + err = nfserr_exist; +- if (dchild->d_inode) { +- dprintk("nfsd_create: dentry %s/%s not negative!\n", +- dentry->d_name.name, dchild->d_name.name); +- goto out; ++ if ( error == -EOPNOTSUPP) { ++ if (dchild->d_inode) { ++ dprintk("nfsd_create: dentry %s/%s not negative!\n", ++ dentry->d_name.name, dchild->d_name.name); ++ goto out; ++ } + } + + if (!(iap->ia_valid & ATTR_MODE)) +@@ -886,16 +1050,19 @@ + err = nfserr_perm; + switch (type) { + case S_IFREG: +- err = vfs_create(dirp, dchild, iap->ia_mode); ++ if (error == -EOPNOTSUPP) ++ err = vfs_create(dirp, dchild, iap->ia_mode); + break; + case S_IFDIR: +- err = vfs_mkdir(dirp, dchild, iap->ia_mode); ++ if (error == -EOPNOTSUPP) ++ err = vfs_mkdir(dirp, dchild, iap->ia_mode); + break; + case S_IFCHR: + case S_IFBLK: + case S_IFIFO: + case S_IFSOCK: +- err = vfs_mknod(dirp, dchild, iap->ia_mode, rdev); ++ if (error == -EOPNOTSUPP) ++ err = vfs_mknod(dirp, dchild, iap->ia_mode, rdev); + break; + default: + printk("nfsd: bad file type %o in nfsd_create\n", type); +@@ -964,7 +1131,13 @@ + /* Get all the sanity checks out of the way before + * we lock the parent. */ + err = nfserr_notdir; +- if(!dirp->i_op || !dirp->i_op->lookup) ++ if (dirp->i_op->mknod_raw) { ++ err = mknod_raw(dentry, fname, flen, iap->ia_mode, 0); ++ if (err && err != -EOPNOTSUPP) ++ goto out; ++ } ++ ++ if(!dirp->i_op || !(dirp->i_op->lookup || dirp->i_op->lookup_it)) + goto out; + fh_lock(fhp); + +@@ -1015,6 +1188,8 @@ + case NFS3_CREATE_GUARDED: + err = nfserr_exist; + } ++ if(dirp->i_op->mknod_raw) ++ err = 0; + goto out; + } + +@@ -1121,7 +1296,7 @@ + struct iattr *iap) + { + struct dentry *dentry, *dnew; +- int err, cerr; ++ int err, cerr, error = -EOPNOTSUPP; + + err = nfserr_noent; + if (!flen || !plen) +@@ -1135,12 +1310,18 @@ + goto out; + fh_lock(fhp); + dentry = fhp->fh_dentry; ++ ++ if (dentry->d_inode->i_op->symlink_raw) ++ error = symlink_raw(dentry, fname, flen, path); ++ + dnew = lookup_one_len(fname, dentry, flen); + err = PTR_ERR(dnew); + if (IS_ERR(dnew)) + goto out_nfserr; + +- err = vfs_symlink(dentry->d_inode, dnew, path); ++ err = error; ++ if (err == -EOPNOTSUPP || !dentry->d_inode->i_op->symlink_raw) ++ err = vfs_symlink(dentry->d_inode, dnew, path); + if (!err) { + if (EX_ISSYNC(fhp->fh_export)) + nfsd_sync_dir(dentry); +@@ -1150,7 +1331,10 @@ + iap->ia_valid |= ATTR_CTIME; + iap->ia_mode = (iap->ia_mode&S_IALLUGO) + | S_IFLNK; +- err = notify_change(dnew, iap); ++ if (dnew->d_inode->i_op && dnew->d_inode->i_op->setattr_raw) ++ err = setattr_raw(dnew->d_inode, iap); ++ else ++ err = notify_change(dnew, iap); + if (err) + err = nfserrno(err); + else if (EX_ISSYNC(fhp->fh_export)) +@@ -1210,7 +1394,10 @@ + dold = tfhp->fh_dentry; + dest = dold->d_inode; + +- err = vfs_link(dold, dirp, dnew); ++ if (dirp->i_op->link_raw) ++ err = link_raw(dold, ddir, dnew); ++ else ++ err = vfs_link(dold, dirp, dnew); + if (!err) { + if (EX_ISSYNC(ffhp->fh_export)) { + nfsd_sync_dir(ddir); +@@ -1295,7 +1482,10 @@ + err = nfserr_perm; + } else + #endif +- err = vfs_rename(fdir, odentry, tdir, ndentry); ++ if(fdir->i_op->rename_raw) ++ err = rename_raw(fdentry, tdentry, odentry, ndentry); ++ else ++ err = vfs_rename(fdir, odentry, tdir, ndentry); + if (!err && EX_ISSYNC(tfhp->fh_export)) { + nfsd_sync_dir(tdentry); + nfsd_sync_dir(fdentry); +@@ -1316,7 +1506,7 @@ + fill_post_wcc(tfhp); + double_up(&tdir->i_sem, &fdir->i_sem); + ffhp->fh_locked = tfhp->fh_locked = 0; +- ++ + out: + return err; + } +@@ -1362,9 +1552,15 @@ + err = nfserr_perm; + } else + #endif +- 
err = vfs_unlink(dirp, rdentry); ++ if (dirp->i_op->unlink_raw) ++ err = unlink_raw(dentry, fname, flen, rdentry); ++ else ++ err = vfs_unlink(dirp, rdentry); + } else { /* It's RMDIR */ +- err = vfs_rmdir(dirp, rdentry); ++ if (dirp->i_op->rmdir_raw) ++ err = rmdir_raw(dentry, fname, flen, rdentry); ++ else ++ err = vfs_rmdir(dirp, rdentry); + } + + dput(rdentry); +Index: linux-2.4.22-vanilla/include/linux/fs.h +=================================================================== +--- linux-2.4.22-vanilla.orig/include/linux/fs.h 2003-11-03 23:41:40.000000000 +0300 ++++ linux-2.4.22-vanilla/include/linux/fs.h 2003-11-03 23:44:38.000000000 +0300 +@@ -93,6 +93,9 @@ + #define FS_SINGLE 8 /* Filesystem that can have only one superblock */ + #define FS_NOMOUNT 16 /* Never mount from userland */ + #define FS_LITTER 32 /* Keeps the tree in dcache */ ++#define FS_NFSEXP_FSID 64 /* Use file system specific fsid for ++ * exporting non device filesystems. ++ */ + #define FS_ODD_RENAME 32768 /* Temporary stuff; will go away as soon + * as nfs_rename() will be cleaned up + */ +@@ -1115,6 +1118,9 @@ + struct nameidata *nd, struct lookup_intent *it); + extern struct file *dentry_open_it(struct dentry *dentry, struct vfsmount *mnt, + int flags, struct lookup_intent *it); ++extern int revalidate_it(struct dentry *dentry, struct lookup_intent *it); ++extern int init_private_file_it(struct file *, struct dentry *dentry, int mode, ++ struct lookup_intent *it); + extern int filp_close(struct file *, fl_owner_t id); + extern char * getname(const char *); + +@@ -1411,6 +1417,8 @@ + extern int follow_down(struct vfsmount **, struct dentry **); + extern int follow_up(struct vfsmount **, struct dentry **); + extern struct dentry * lookup_one_len(const char *, struct dentry *, int); ++extern struct dentry * lookup_one_len_it(const char *, struct dentry *, int, ++ struct lookup_intent *); + extern struct dentry * lookup_hash(struct qstr *, struct dentry *); + #define user_path_walk(name,nd) __user_walk(name, LOOKUP_FOLLOW|LOOKUP_POSITIVE, nd) + #define user_path_walk_link(name,nd) __user_walk(name, LOOKUP_POSITIVE, nd) +@@ -1425,6 +1433,8 @@ + + typedef int (*find_inode_t)(struct inode *, unsigned long, void *); + extern struct inode * iget4(struct super_block *, unsigned long, find_inode_t, void *); ++extern struct inode * ilookup4(struct super_block *, unsigned long, ++ find_inode_t, void *); + static inline struct inode *iget(struct super_block *sb, unsigned long ino) + { + return iget4(sb, ino, NULL, NULL); +Index: linux-2.4.22-vanilla/kernel/ksyms.c +=================================================================== +--- linux-2.4.22-vanilla.orig/kernel/ksyms.c 2003-11-03 23:41:40.000000000 +0300 ++++ linux-2.4.22-vanilla/kernel/ksyms.c 2003-11-03 23:44:38.000000000 +0300 +@@ -149,6 +149,7 @@ + EXPORT_SYMBOL(igrab); + EXPORT_SYMBOL(iunique); + EXPORT_SYMBOL(iget4); ++EXPORT_SYMBOL(ilookup4); + EXPORT_SYMBOL(iput); + EXPORT_SYMBOL(inode_init_once); + EXPORT_SYMBOL(force_delete); +@@ -160,6 +161,7 @@ + EXPORT_SYMBOL(path_release); + EXPORT_SYMBOL(__user_walk); + EXPORT_SYMBOL(lookup_one_len); ++EXPORT_SYMBOL(lookup_one_len_it); + EXPORT_SYMBOL(lookup_hash); + EXPORT_SYMBOL(sys_close); + EXPORT_SYMBOL(dcache_lock); diff --git a/lustre/kernel_patches/series/vanilla-2.4.22 b/lustre/kernel_patches/series/vanilla-2.4.22 new file mode 100644 index 0000000..a2726cf --- /dev/null +++ b/lustre/kernel_patches/series/vanilla-2.4.22 @@ -0,0 +1,30 @@ +dev_read_only_2.4.20-rh.patch +exports_2.4.20.patch 
+kmem_cache_validate_2.4.20.patch +lustre_version.patch +vfs_intent-2.4.20-vanilla.patch +invalidate_show.patch +export-truncate.patch +iod-stock-exports-2.4.22.patch +ext3-htree-2.4.22-rh.patch +linux-2.4.21-xattr-0.8.54-suse.patch +ext3-orphan_lock-2.4.22-rh.patch +ext3-noread-2.4.20.patch +ext3-delete_thread-suse.patch +extN-wantedi.patch +ext3-san-2.4.20.patch +ext3-map_inode_page.patch +ext3-error-export.patch +iopen-2.4.20.patch +tcp-zero-copy-2.4.22-rh.patch +jbd-dont-account-blocks-twice.patch +jbd-commit-tricks.patch +ext3-no-write-super-chaos.patch +add_page_private.patch +socket-exports-vanilla.patch +nfs_export_kernel-2.4.20.patch +ext3-raw-lookup.patch +ext3-ea-in-inode-2.4.20.patch +listman-2.4.20.patch +ext3-trusted_ea-2.4.20.patch +ext3-inode-reuse-2.4.20.patch diff --git a/lustre/portals/archdep.m4 b/lustre/portals/archdep.m4 index 0aa83b7..a9c4ba8 100644 --- a/lustre/portals/archdep.m4 +++ b/lustre/portals/archdep.m4 @@ -297,6 +297,28 @@ AC_SUBST(with_gm) AC_SUBST(GMNAL) +#fixme: where are the default IB includes? +default_ib_include_dir=/usr/local/ib/include +an_ib_include_file=vapi.h + +AC_ARG_WITH(ib, [ --with-ib=[yes/no/path] Path to IB includes], with_ib=$withval, with_ib=$default_ib) +AC_MSG_CHECKING(if IB headers are present) +if test "$with_ib" = yes; then + with_ib=$default_ib_include_dir +fi +if test "$with_ib" != no -a -f ${with_ib}/${an_ib_include_file}; then + AC_MSG_RESULT(yes) + IBNAL="ibnal" + with_ib="-I${with_ib}" +else + AC_MSG_RESULT(no) + IBNAL="" + with_ib="" +fi +AC_SUBST(IBNAL) +AC_SUBST(with_ib) + + def_scamac=/opt/scali/include AC_ARG_WITH(scamac, [ --with-scamac=[yes/no/path] Path to ScaMAC includes (default=/opt/scali/include)], with_scamac=$withval, with_scamac=$def_scamac) AC_MSG_CHECKING(if ScaMAC headers are present) @@ -317,7 +339,7 @@ AC_SUBST(with_scamac) AC_SUBST(SCIMACNAL) CFLAGS="$KCFLAGS" -CPPFLAGS="$KINCFLAGS $KCPPFLAGS $MFLAGS $enable_zerocopy $enable_affinity $with_quadrics $with_gm $with_scamac " +CPPFLAGS="$KINCFLAGS $KCPPFLAGS $MFLAGS $enable_zerocopy $enable_affinity $with_quadrics $with_gm $with_scamac $with_ib" AM_CONDITIONAL(LIBLUSTRE, test x$host_cpu = xlib) AC_SUBST(MOD_LINK) diff --git a/lustre/portals/include/linux/kp30.h b/lustre/portals/include/linux/kp30.h index 37bf8ce..f676c35 100644 --- a/lustre/portals/include/linux/kp30.h +++ b/lustre/portals/include/linux/kp30.h @@ -43,6 +43,7 @@ extern unsigned int portal_cerror; #define S_GMNAL (1 << 19) #define S_PTLROUTER (1 << 20) #define S_COBD (1 << 21) +#define S_IBNAL (1 << 22) /* If you change these values, please keep portals/utils/debug.c * up to date! 
*/ @@ -77,6 +78,8 @@ extern unsigned int portal_cerror; # define THREAD_SIZE 8192 #endif +#define LUSTRE_TRACE_SIZE (THREAD_SIZE >> 5) + #ifdef __KERNEL__ # ifdef __ia64__ # define CDEBUG_STACK (THREAD_SIZE - \ @@ -595,6 +598,10 @@ extern struct prof_ent prof_ents[MAX_PROFS]; #endif /* PORTALS_PROFILING */ /* debug.c */ +extern spinlock_t stack_backtrace_lock; + +char *portals_debug_dumpstack(void); +char *portals_nid2str(int nal, ptl_nid_t nid, char *str); void portals_run_upcall(char **argv); void portals_run_lbug_upcall(char * file, const char *fn, const int line); void portals_debug_dumplog(void); @@ -1034,6 +1041,8 @@ enum { TOENAL, TCPNAL, SCIMACNAL, + ROUTER, + IBNAL, NAL_ENUM_END_MARKER }; @@ -1042,9 +1051,12 @@ extern ptl_handle_ni_t kqswnal_ni; extern ptl_handle_ni_t ksocknal_ni; extern ptl_handle_ni_t ktoenal_ni; extern ptl_handle_ni_t kgmnal_ni; +extern ptl_handle_ni_t kibnal_ni; extern ptl_handle_ni_t kscimacnal_ni; #endif +#define PTL_NALFMT_SIZE 16 + #define NAL_MAX_NR (NAL_ENUM_END_MARKER - 1) #define NAL_CMD_REGISTER_PEER_FD 100 diff --git a/lustre/portals/include/linux/portals_lib.h b/lustre/portals/include/linux/portals_lib.h index 14d60c6..609290d 100644 --- a/lustre/portals/include/linux/portals_lib.h +++ b/lustre/portals/include/linux/portals_lib.h @@ -47,6 +47,11 @@ static inline int size_round16(int val) return (val + 0xf) & (~0xf); } +static inline int size_round32(int val) +{ + return (val + 0x1f) & (~0x1f); +} + static inline int size_round0(int val) { if (!val) diff --git a/lustre/portals/include/portals/lib-types.h b/lustre/portals/include/portals/lib-types.h index e5447d7..30e56af 100644 --- a/lustre/portals/include/portals/lib-types.h +++ b/lustre/portals/include/portals/lib-types.h @@ -54,72 +54,68 @@ typedef enum { PTL_MSG_HELLO, } ptl_msg_type_t; -/* Each of these structs should start with an odd number of - * __u32, or the compiler could add its own padding and confuse - * everyone. - * - * Also, "length" needs to be at offset 28 of each struct. - */ +/* The variant fields of the portals message header are aligned on an 8 + * byte boundary in the message header. Note that all types used in these + * wire structs MUST be fixed size and the smaller types are placed at the + * end. 
*/ typedef struct ptl_ack { - ptl_size_t mlength; - ptl_handle_wire_t dst_wmd; - ptl_match_bits_t match_bits; - ptl_size_t length; /* common length (0 for acks) moving out RSN */ + ptl_handle_wire_t dst_wmd; + ptl_match_bits_t match_bits; + ptl_size_t mlength; } WIRE_ATTR ptl_ack_t; typedef struct ptl_put { - ptl_pt_index_t ptl_index; - ptl_handle_wire_t ack_wmd; - ptl_match_bits_t match_bits; - ptl_size_t length; /* common length moving out RSN */ - ptl_size_t offset; - ptl_hdr_data_t hdr_data; + ptl_handle_wire_t ack_wmd; + ptl_match_bits_t match_bits; + ptl_hdr_data_t hdr_data; + ptl_pt_index_t ptl_index; + ptl_size_t offset; } WIRE_ATTR ptl_put_t; typedef struct ptl_get { - ptl_pt_index_t ptl_index; - ptl_handle_wire_t return_wmd; - ptl_match_bits_t match_bits; - ptl_size_t length; /* common length (0 for gets) moving out RSN */ - ptl_size_t src_offset; - ptl_size_t return_offset; /* unused: going RSN */ - ptl_size_t sink_length; + ptl_handle_wire_t return_wmd; + ptl_match_bits_t match_bits; + ptl_pt_index_t ptl_index; + ptl_size_t src_offset; + ptl_size_t sink_length; } WIRE_ATTR ptl_get_t; typedef struct ptl_reply { - __u32 unused1; /* unused fields going RSN */ - ptl_handle_wire_t dst_wmd; - ptl_size_t dst_offset; /* unused: going RSN */ - __u32 unused2; - ptl_size_t length; /* common length moving out RSN */ + ptl_handle_wire_t dst_wmd; } WIRE_ATTR ptl_reply_t; +typedef struct ptl_hello { + __u64 incarnation; + __u32 type; +} WIRE_ATTR ptl_hello_t; + typedef struct { - ptl_nid_t dest_nid; - ptl_nid_t src_nid; - ptl_pid_t dest_pid; - ptl_pid_t src_pid; - __u32 type; /* ptl_msg_type_t */ + ptl_nid_t dest_nid; + ptl_nid_t src_nid; + ptl_pid_t dest_pid; + ptl_pid_t src_pid; + __u32 type; /* ptl_msg_type_t */ + __u32 payload_length; /* payload data to follow */ + /*<------__u64 aligned------->*/ union { - ptl_ack_t ack; - ptl_put_t put; - ptl_get_t get; + ptl_ack_t ack; + ptl_put_t put; + ptl_get_t get; ptl_reply_t reply; + ptl_hello_t hello; } msg; } WIRE_ATTR ptl_hdr_t; -/* All length fields in individual unions at same offset */ -/* LASSERT for same in lib-move.c */ -#define PTL_HDR_LENGTH(h) ((h)->msg.ack.length) - /* A HELLO message contains the portals magic number and protocol version * code in the header's dest_nid, the peer's NID in the src_nid, and - * PTL_MSG_HELLO in the type field. All other fields are zero (including - * PTL_HDR_LENGTH; i.e. no payload). + * PTL_MSG_HELLO in the type field. All other common fields are zero + * (including payload_size; i.e. no payload). * This is for use by byte-stream NALs (e.g. TCP/IP) to check the peer is * running the same protocol and to find out its NID, so that hosts with * multiple IP interfaces can have a single NID. These NALs should exchange - * HELLO messages when a connection is first established. */ + * HELLO messages when a connection is first established. + * Individual NALs can put whatever else they fancy in ptl_hdr_t::msg. 
+ */ typedef struct { __u32 magic; /* PORTALS_PROTO_MAGIC */ __u16 version_major; /* increment on incompatible change */ @@ -129,7 +125,7 @@ typedef struct { #define PORTALS_PROTO_MAGIC 0xeebc0ded #define PORTALS_PROTO_VERSION_MAJOR 0 -#define PORTALS_PROTO_VERSION_MINOR 1 +#define PORTALS_PROTO_VERSION_MINOR 3 typedef struct { long recv_count, recv_length, send_count, send_length, drop_count, diff --git a/lustre/portals/knals/Makefile.am b/lustre/portals/knals/Makefile.am index fed2785..25aab9d 100644 --- a/lustre/portals/knals/Makefile.am +++ b/lustre/portals/knals/Makefile.am @@ -3,5 +3,5 @@ # This code is issued under the GNU General Public License. # See the file COPYING in this distribution -DIST_SUBDIRS= socknal toenal qswnal gmnal scimacnal -SUBDIRS= socknal toenal @QSWNAL@ @GMNAL@ @SCIMACNAL@ +DIST_SUBDIRS= socknal toenal qswnal gmnal scimacnal ibnal +SUBDIRS= socknal toenal @QSWNAL@ @GMNAL@ @SCIMACNAL@ @IBNAL@ diff --git a/lustre/portals/knals/ibnal/.cvsignore b/lustre/portals/knals/ibnal/.cvsignore new file mode 100644 index 0000000..e995588 --- /dev/null +++ b/lustre/portals/knals/ibnal/.cvsignore @@ -0,0 +1,3 @@ +.deps +Makefile +Makefile.in diff --git a/lustre/portals/knals/ibnal/Makefile.am b/lustre/portals/knals/ibnal/Makefile.am new file mode 100644 index 0000000..84818dc --- /dev/null +++ b/lustre/portals/knals/ibnal/Makefile.am @@ -0,0 +1,10 @@ +include ../../Rules.linux + +MODULE = kibnal +modulenet_DATA = kibnal.o +EXTRA_PROGRAMS = kibnal + + +DEFS = +CPPFLAGS=@CPPFLAGS@ @with_ib@ +kibnal_SOURCES = ibnal.h ibnal.c ibnal_cb.c diff --git a/lustre/portals/knals/ibnal/ibnal.c b/lustre/portals/knals/ibnal/ibnal.c new file mode 100644 index 0000000..948badf --- /dev/null +++ b/lustre/portals/knals/ibnal/ibnal.c @@ -0,0 +1,2146 @@ +/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- + * vim:expandtab:shiftwidth=8:tabstop=8: + * + * Based on ksocknal, qswnal, and gmnal + * + * Copyright (C) 2003 LANL + * Author: HB Chen + * Los Alamos National Lab + * + * Portals is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * Portals is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Portals; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + */ + +#include "ibnal.h" + +// portal handle ID for this IB-NAL +ptl_handle_ni_t kibnal_ni; + +// message send buffer mutex +spinlock_t MSBuf_mutex[NUM_MBUF]; + +// message recv buffer mutex +spinlock_t MRBuf_mutex[NUM_MBUF]; + +// IB-NAL API information +nal_t kibnal_api; + +// nal's private data +kibnal_data_t kibnal_data; + +int ibnal_debug = 0; +VAPI_pd_hndl_t Pd_hndl; +unsigned int Num_posted_recv_buf; + +// registered send buffer list +Memory_buffer_info MSbuf_list[NUM_MBUF]; + +// registered recv buffer list +Memory_buffer_info MRbuf_list[NUM_MBUF]; + +// +// for router +// currently there is no need fo IBA +// +kpr_nal_interface_t kibnal_router_interface = { + kprni_nalid: IBNAL, + kprni_arg: &kibnal_data, + kprni_fwd: kibnal_fwd_packet, // forward data to router + // is router invloving the + // data transmision +}; + + +// Queue-pair list +QP_info QP_list[NUM_QPS]; + +// information associated with a HCA +HCA_info Hca_data; + +// something about HCA +VAPI_hca_hndl_t Hca_hndl; // assume we only use one HCA now +VAPI_hca_vendor_t Hca_vendor; +VAPI_hca_cap_t Hca_cap; +VAPI_hca_port_t Hca_port_1_props; +VAPI_hca_port_t Hca_port_2_props; +VAPI_hca_attr_t Hca_attr; +VAPI_hca_attr_mask_t Hca_attr_mask; +VAPI_cq_hndl_t Cq_RQ_hndl; // CQ's handle +VAPI_cq_hndl_t Cq_SQ_hndl; // CQ's handle +VAPI_cq_hndl_t Cq_hndl; // CQ's handle +Remote_QP_Info L_QP_data; +Remote_QP_Info R_QP_data; + + +// +// forward API +// +int +kibnal_forward(nal_t *nal, + int id, + void *args, + size_t args_len, + void *ret, + size_t ret_len) +{ + kibnal_data_t *knal_data = nal->nal_data; + nal_cb_t *nal_cb = knal_data->kib_cb; + + // ASSERT checking + LASSERT (nal == &kibnal_api); + LASSERT (knal_data == &kibnal_data); + LASSERT (nal_cb == &kibnal_lib); + + // dispatch forward API function + + CDEBUG(D_NET,"kibnal_forward: function id = %d\n", id); + + lib_dispatch(nal_cb, knal_data, id, args, ret); + + CDEBUG(D_TRACE,"IBNAL- Done kibnal_forward\n"); + + return PTL_OK; // always return PTL_OK +} + +// +// lock API +// +void +kibnal_lock(nal_t *nal, unsigned long *flags) +{ + kibnal_data_t *knal_data = nal->nal_data; + nal_cb_t *nal_cb = knal_data->kib_cb; + + // ASSERT checking + LASSERT (nal == &kibnal_api); + LASSERT (knal_data == &kibnal_data); + LASSERT (nal_cb == &kibnal_lib); + + // disable logical interrrupt + nal_cb->cb_cli(nal_cb,flags); + + CDEBUG(D_TRACE,"IBNAL-Done kibnal_lock\n"); + +} + +// +// unlock API +// +void +kibnal_unlock(nal_t *nal, unsigned long *flags) +{ + kibnal_data_t *k = nal->nal_data; + nal_cb_t *nal_cb = k->kib_cb; + + // ASSERT checking + LASSERT (nal == &kibnal_api); + LASSERT (k == &kibnal_data); + LASSERT (nal_cb == &kibnal_lib); + + // enable logical interrupt + nal_cb->cb_sti(nal_cb,flags); + + CDEBUG(D_TRACE,"IBNAL-Done kibnal_unlock"); + +} + +// +// shutdown API +// showdown this network interface +// +int +kibnal_shutdown(nal_t *nal, int ni) +{ + VAPI_ret_t vstat; + kibnal_data_t *k = nal->nal_data; + nal_cb_t *nal_cb = k->kib_cb; + + // assert checking + LASSERT (nal == &kibnal_api); + LASSERT (k == &kibnal_data); + LASSERT (nal_cb == &kibnal_lib); + + // take down this IB network interface + // there is not corresponding cb function to hande this + // do we actually need this one + // reference to IB network interface shutdown + // + + vstat = IB_Close_HCA(); + + if (vstat != VAPI_OK) { + CERROR("Failed to close HCA - %s\n",VAPI_strerror(vstat)); + return (~PTL_OK); + } + + CDEBUG(D_TRACE,"IBNAL- Done kibnal_shutdown\n"); + + return PTL_OK; +} + +// +// yield +// when 
do we call this yield function +// +void +kibnal_yield( nal_t *nal ) +{ + kibnal_data_t *k = nal->nal_data; + nal_cb_t *nal_cb = k->kib_cb; + + // assert checking + LASSERT (nal == &kibnal_api); + LASSERT (k == &kibnal_data); + LASSERT (nal_cb == &kibnal_lib); + + // check under what condition that we need to + // call schedule() + // who set this need_resched + if (current->need_resched) + schedule(); + + CDEBUG(D_TRACE,"IBNAL-Done kibnal_yield"); + + return; +} + +// +// ibnal init +// +nal_t * +kibnal_init(int interface, // no use here + ptl_pt_index_t ptl_size, + ptl_ac_index_t ac_size, + ptl_pid_t requested_pid // no use here + ) +{ + nal_t *nal = NULL; + nal_cb_t *nal_cb = NULL; + kibnal_data_t *nal_data = NULL; + int rc; + + unsigned int nnids = 1; // number of nids + // do we know how many nodes are in this + // system related to this kib_nid + // + + CDEBUG(D_NET, "kibnal_init:calling lib_init with nid 0x%u\n", + kibnal_data.kib_nid); + + + CDEBUG(D_NET, "kibnal_init: interface [%d], ptl_size [%d], ac_size[%d]\n", + interface, ptl_size, ac_size); + CDEBUG(D_NET, "kibnal_init: &kibnal_lib 0x%X\n", &kibnal_lib); + CDEBUG(D_NET, "kibnal_init: kibnal_data.kib_nid %d\n", kibnal_data.kib_nid); + + rc = lib_init(&kibnal_lib, + kibnal_data.kib_nid, + 0, // process id is set as 0 + nnids, + ptl_size, + ac_size); + + if(rc != PTL_OK) { + CERROR("kibnal_init: Failed lib_init with nid 0x%u, rc=%d\n", + kibnal_data.kib_nid,rc); + } + else { + CDEBUG(D_NET,"kibnal_init: DONE lib_init with nid 0x%x%x\n", + kibnal_data.kib_nid); + } + + return &kibnal_api; + +} + + +// +// called before remove ibnal kernel module +// +void __exit +kibnal_finalize(void) +{ + struct list_head *tmp; + + inter_module_unregister("kibnal_ni"); + + // release resources allocated to this Infiniband network interface + PtlNIFini(kibnal_ni); + + lib_fini(&kibnal_lib); + + IB_Close_HCA(); + + // how much do we need to do here? 
+ list_for_each(tmp, &kibnal_data.kib_list) { + kibnal_rx_t *conn; + conn = list_entry(tmp, kibnal_rx_t, krx_item); + CDEBUG(D_IOCTL, "freeing conn %p\n",conn); + tmp = tmp->next; + list_del(&conn->krx_item); + PORTAL_FREE(conn, sizeof(*conn)); + } + + CDEBUG(D_MALLOC,"done kmem %d\n",atomic_read(&portal_kmemory)); + CDEBUG(D_TRACE,"IBNAL-Done kibnal_finalize\n"); + + return; +} + + +// +// * k_server_thread is a kernel thread +// use a shared memory ro exchange HCA's data with a pthread in user +// address space +// * will be replaced when CM is used to handle communication management +// + +void k_server_thread(Remote_QP_Info *hca_data) +{ + int segment_id; + const int shared_segment_size = sizeof(Remote_QP_Info); + key_t key = HCA_EXCHANGE_SHM_KEY; + unsigned long raddr; + int exchanged_done = NO; + int i; + + Remote_QP_Info *exchange_hca_data; + + long *n; + long *uaddr; + long ret = 0; + + // create a shared memory with pre-agreement key + segment_id = sys_shmget(key, + shared_segment_size, + IPC_CREAT | 0666); + + + // attached to shared memoru + // raddr is pointed to an user address space + // use this address to update shared menory content + ret = sys_shmat(segment_id, 0 , SHM_RND, &raddr); + +#ifdef IBNAL_DEBUG + if(ret >= 0) { + CDEBUG(D_NET,"k_server_thread: Shared memory attach success ret = 0X%d,&raddr" + " 0X%x (*(&raddr))=0x%x \n", ret, &raddr, (*(&raddr))); + printk("k_server_thread: Shared memory attach success ret = 0X%d, &raddr" + " 0X%x (*(&raddr))=0x%x \n", ret, &raddr, (*(&raddr))); + } + else { + CERROR("k_server_thread: Shared memory attach failed ret = 0x%d \n", ret); + printk("k_server_thread: Shared memory attach failed ret = 0x%d \n", ret); + return; + } +#endif + + n = &raddr; + uaddr = *n; // get the U-address + /* cast uaddr to exchange_hca_data */ + exchange_hca_data = (Remote_QP_Info *) uaddr; + + /* copy data from local HCA to shared memory */ + exchange_hca_data->opcode = hca_data->opcode; + exchange_hca_data->length = hca_data->length; + + for(i=0; i < NUM_QPS; i++) { + exchange_hca_data->dlid[i] = hca_data->dlid[i]; + exchange_hca_data->rqp_num[i] = hca_data->rqp_num[i]; + } + + // periodically check shared memory until get updated + // remote HCA's data from user mode pthread + while(exchanged_done == NO) { + if(exchange_hca_data->opcode == RECV_QP_INFO){ + exchanged_done = YES; + /* copy data to local buffer from shared memory */ + hca_data->opcode = exchange_hca_data->opcode; + hca_data->length = exchange_hca_data->length; + + for(i=0; i < NUM_QPS; i++) { + hca_data->dlid[i] = exchange_hca_data->dlid[i]; + hca_data->rqp_num[i] = exchange_hca_data->rqp_num[i]; + } + break; + } + else { + schedule_timeout(1000); + } + } + + // detached shared memory + sys_shmdt(uaddr); + + CDEBUG(D_NET, "Exit from kernel thread: k_server_thread \n"); + printk("Exit from kernel thread: k_server_thread \n"); + + return; + +} + +// +// create QP +// +VAPI_ret_t +create_qp(QP_info *qp, int qp_index) +{ + + VAPI_ret_t vstat; + VAPI_qp_init_attr_t qp_init_attr; + VAPI_qp_prop_t qp_prop; + + qp->hca_hndl = Hca_hndl; + qp->port = 1; // default + qp->slid = Hca_port_1_props.lid; + qp->hca_port = Hca_port_1_props; + + + /* Queue Pair Creation Attributes */ + qp_init_attr.cap.max_oust_wr_rq = NUM_WQE; + qp_init_attr.cap.max_oust_wr_sq = NUM_WQE; + qp_init_attr.cap.max_sg_size_rq = NUM_SG; + qp_init_attr.cap.max_sg_size_sq = NUM_SG; + qp_init_attr.pd_hndl = qp->pd_hndl; + qp_init_attr.rdd_hndl = 0; + qp_init_attr.rq_cq_hndl = qp->rq_cq_hndl; + /* we use here polling */ + 
//qp_init_attr.rq_sig_type = VAPI_SIGNAL_REQ_WR; + qp_init_attr.rq_sig_type = VAPI_SIGNAL_ALL_WR; + qp_init_attr.sq_cq_hndl = qp->sq_cq_hndl; + /* we use here polling */ + //qp_init_attr.sq_sig_type = VAPI_SIGNAL_REQ_WR; + qp_init_attr.sq_sig_type = VAPI_SIGNAL_ALL_WR; + // transport servce - reliable connection + + qp_init_attr.ts_type = VAPI_TS_RC; + + vstat = VAPI_create_qp(qp->hca_hndl, + &qp_init_attr, + &qp->qp_hndl, &qp_prop); + + if (vstat != VAPI_OK) { + CERROR("Failed creating QP. Return Failed - %s\n",VAPI_strerror(vstat)); + return vstat; + } + + qp->qp_num = qp_prop.qp_num; // the qp number + qp->last_posted_send_id = 0; // user defined work request ID + qp->last_posted_rcv_id = 0; // user defined work request ID + qp->cur_send_outstanding = 0; + qp->cur_posted_rcv_bufs = 0; + qp->snd_rcv_balance = 0; + + CDEBUG(D_OTHER, "create_qp: qp_num = %d, slid = %d, qp_hndl = 0X%X", + qp->qp_num, qp->slid, qp->qp_hndl); + + // initialize spin-lock mutex variables + spin_lock_init(&(qp->snd_mutex)); + spin_lock_init(&(qp->rcv_mutex)); + spin_lock_init(&(qp->bl_mutex)); + spin_lock_init(&(qp->cln_mutex)); + // number of outstanding requests on the send Q + qp->cur_send_outstanding = 0; + // number of posted receive buffers + qp->cur_posted_rcv_bufs = 0; + qp->snd_rcv_balance = 0; + + return(VAPI_OK); + +} + +// +// initialize a UD qp state to RTR and RTS +// +VAPI_ret_t +init_qp_UD(QP_info *qp, int qp_index) +{ + VAPI_qp_attr_t qp_attr; + VAPI_qp_init_attr_t qp_init_attr; + VAPI_qp_attr_mask_t qp_attr_mask; + VAPI_qp_cap_t qp_cap; + VAPI_ret_t vstat; + + /* Move from RST to INIT */ + /* Change QP to INIT */ + + CDEBUG(D_OTHER, "Changing QP state to INIT qp-index = %d\n", qp_index); + + QP_ATTR_MASK_CLR_ALL(qp_attr_mask); + + qp_attr.qp_state = VAPI_INIT; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE); + + CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask); + + qp_attr.pkey_ix = 0; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PKEY_IX); + + CDEBUG(D_OTHER, "pkey_ix qp_attr_mask = 0X%x\n", qp_attr_mask); + + qp_attr.port = qp->port; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PORT); + + CDEBUG(D_OTHER, "port qp_attr_mask = 0X%x\n", qp_attr_mask); + + qp_attr.qkey = 0; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QKEY); + + CDEBUG(D_OTHER, "qkey qp_attr_mask = 0X%x\n", qp_attr_mask); + + /* If I do not set this mask, I get an error from HH. QPM should catch it */ + + vstat = VAPI_modify_qp(qp->hca_hndl, + qp->qp_hndl, + &qp_attr, + &qp_attr_mask, + &qp_cap); + + if (vstat != VAPI_OK) { + CERROR("Failed modifying QP from RST to INIT. %s\n",VAPI_strerror(vstat)); + return(vstat); + } + + CDEBUG(D_OTHER, "Modifying QP from RST to INIT.\n"); + + vstat= VAPI_query_qp(qp->hca_hndl, + qp->qp_hndl, + &qp_attr, + &qp_attr_mask, + &qp_init_attr); + + if (vstat != VAPI_OK) { + CERROR("Failed query QP. %s\n",VAPI_strerror(vstat)); + return(vstat); + } + + /* Move from INIT to RTR */ + /* Change QP to RTR */ + CDEBUG(D_OTHER, "Changing QP state to RTR\n"); + + QP_ATTR_MASK_CLR_ALL(qp_attr_mask); + + qp_attr.qp_state = VAPI_RTR; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE); + + CDEBUG(D_OTHER, "INIT to RTR- qp_state : qp_attr_mask = 0X%x\n", qp_attr_mask); + + vstat = VAPI_modify_qp(qp->hca_hndl, + qp->qp_hndl, + &qp_attr, + &qp_attr_mask, + &qp_cap); + + if (vstat != VAPI_OK) { + CERROR("Failed modifying QP from INIT to RTR. 
%s\n",VAPI_strerror(vstat)); + return(vstat); + } + + CDEBUG(D_OTHER, "Modifying QP from INIT to RTR.\n"); + + vstat= VAPI_query_qp(qp->hca_hndl, + qp->qp_hndl, + &qp_attr, + &qp_attr_mask, + &qp_init_attr); + + if (vstat != VAPI_OK) { + CERROR("Failed query QP. %s\n",VAPI_strerror(vstat)); + return(vstat); + } + + /* RTR to RTS - Change QP to RTS */ + CDEBUG(D_OTHER, "Changing QP state to RTS\n"); + + QP_ATTR_MASK_CLR_ALL(qp_attr_mask); + + qp_attr.qp_state = VAPI_RTS; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE); + + qp_attr.sq_psn = START_SQ_PSN; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_SQ_PSN); + + vstat = VAPI_modify_qp(qp->hca_hndl, + qp->qp_hndl, + &qp_attr, + &qp_attr_mask, + &qp_cap); + + if (vstat != VAPI_OK) { + CERROR("Failed modifying QP from RTR to RTS. %s:%s\n", + VAPI_strerror_sym(vstat), + VAPI_strerror(vstat)); + return(vstat); + } + + CDEBUG(D_OTHER, "Modifying QP from RTR to RTS. \n"); + + vstat= VAPI_query_qp(qp->hca_hndl, + qp->qp_hndl, + &qp_attr, + &qp_attr_mask, + &qp_init_attr); + + if (vstat != VAPI_OK) { + CERROR("Failed query QP. %s\n",VAPI_strerror(vstat)); + return(vstat); + } + + // + // a QP is at RTS state NOW + // + + CDEBUG(D_OTHER, "IBNAL- UD qp is at RTS NOW\n"); + + return(vstat); + +} + + + +// +// initialize a RC qp state to RTR and RTS +// RC transport service +// +VAPI_ret_t +init_qp_RC(QP_info *qp, int qp_index) +{ + VAPI_qp_attr_t qp_attr; + VAPI_qp_init_attr_t qp_init_attr; + VAPI_qp_attr_mask_t qp_attr_mask; + VAPI_qp_cap_t qp_cap; + VAPI_ret_t vstat; + + /* Move from RST to INIT */ + /* Change QP to INIT */ + + CDEBUG(D_OTHER, "Changing QP state to INIT qp-index = %d\n", qp_index); + + QP_ATTR_MASK_CLR_ALL(qp_attr_mask); + + qp_attr.qp_state = VAPI_INIT; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE); + + CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask); + + qp_attr.pkey_ix = 0; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PKEY_IX); + + CDEBUG(D_OTHER, "pkey_ix qp_attr_mask = 0X%x\n", qp_attr_mask); + + qp_attr.port = qp->port; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PORT); + + CDEBUG(D_OTHER, "port qp_attr_mask = 0X%x\n", qp_attr_mask); + + qp_attr.remote_atomic_flags = VAPI_EN_REM_WRITE | VAPI_EN_REM_READ; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_REMOTE_ATOMIC_FLAGS); + + CDEBUG(D_OTHER, "remote_atomic_flags qp_attr_mask = 0X%x\n", qp_attr_mask); + + /* If I do not set this mask, I get an error from HH. QPM should catch it */ + + vstat = VAPI_modify_qp(qp->hca_hndl, + qp->qp_hndl, + &qp_attr, + &qp_attr_mask, + &qp_cap); + + if (vstat != VAPI_OK) { + CERROR("Failed modifying QP from RST to INIT. %s\n",VAPI_strerror(vstat)); + return(vstat); + } + + vstat= VAPI_query_qp(qp->hca_hndl, + qp->qp_hndl, + &qp_attr, + &qp_attr_mask, + &qp_init_attr); + + if (vstat != VAPI_OK) { + CERROR("Failed query QP. 
%s\n",VAPI_strerror(vstat)); + return(vstat); + } + + /* Move from INIT to RTR */ + /* Change QP to RTR */ + CDEBUG(D_OTHER, "Changing QP state to RTR qp_indexi %d\n", qp_index); + + QP_ATTR_MASK_CLR_ALL(qp_attr_mask); + qp_attr.qp_state = VAPI_RTR; + + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE); + + CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask); + + qp_attr.av.sl = 0;/* RESPONDER_SL */ + qp_attr.av.grh_flag = FALSE; + qp_attr.av.dlid = qp->dlid;/*RESPONDER_LID;*/ + qp_attr.av.static_rate = 0; + qp_attr.av.src_path_bits = 0; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_AV); + + CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask); + + qp_attr.path_mtu = MTU_2048;// default is MTU_2048 + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PATH_MTU); + + CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask); + + qp_attr.rq_psn = START_RQ_PSN; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_RQ_PSN); + + CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask); + + qp_attr.qp_ous_rd_atom = NUM_WQE; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_OUS_RD_ATOM); + + CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask); + + qp_attr.pkey_ix = 0; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PKEY_IX); + + CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask); + + qp_attr.min_rnr_timer = 10; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_MIN_RNR_TIMER); + + CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask); + + qp_attr.dest_qp_num = qp->rqp_num; + + CDEBUG(D_OTHER, "remore qp num %d\n", qp->rqp_num); + + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_DEST_QP_NUM); + + CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask); + + vstat = VAPI_modify_qp(qp->hca_hndl, + qp->qp_hndl, + &qp_attr, + &qp_attr_mask, + &qp_cap); + + + if (vstat != VAPI_OK) { + CERROR("Failed modifying QP from INIT to RTR. qp_index %d - %s\n", + qp_index, VAPI_strerror(vstat)); + return(vstat); + } + + vstat= VAPI_query_qp(qp->hca_hndl, + qp->qp_hndl, + &qp_attr, + &qp_attr_mask, + &qp_init_attr); + + if (vstat != VAPI_OK) { + CERROR("Failed query QP. %s\n",VAPI_strerror(vstat)); + return(vstat); + } + + /* RTR to RTS - Change QP to RTS */ + CDEBUG(D_OTHER, "Changing QP state to RTS\n"); + + QP_ATTR_MASK_CLR_ALL(qp_attr_mask); + + qp_attr.qp_state = VAPI_RTS; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE); + + qp_attr.sq_psn = START_SQ_PSN; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_SQ_PSN); + + qp_attr.timeout = 0x18; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_TIMEOUT); + + qp_attr.retry_count = 10; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_RETRY_COUNT); + + qp_attr.rnr_retry = 14; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_RNR_RETRY); + + qp_attr.ous_dst_rd_atom = 100; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_OUS_DST_RD_ATOM); + + qp_attr.min_rnr_timer = 5; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_MIN_RNR_TIMER); + + vstat = VAPI_modify_qp(qp->hca_hndl, + qp->qp_hndl, + &qp_attr, + &qp_attr_mask, + &qp_cap); + + if (vstat != VAPI_OK) { + CERROR("Failed modifying QP from RTR to RTS. %s:%s\n", + VAPI_strerror_sym(vstat), VAPI_strerror(vstat)); + return(vstat); + } + + vstat= VAPI_query_qp(qp->hca_hndl, + qp->qp_hndl, + &qp_attr, + &qp_attr_mask, + &qp_init_attr); + + if (vstat != VAPI_OK) { + CERROR("Failed query QP. 
%s\n",VAPI_strerror(vstat)); + return(vstat); + } + + // + // a QP is at RTS state NOW + // + + CDEBUG(D_OTHER, "IBNAL- RC qp is at RTS NOW\n"); + + return(vstat); +} + + + +VAPI_ret_t +IB_Open_HCA(kibnal_data_t *kib_data) +{ + + VAPI_ret_t vstat; + VAPI_cqe_num_t cqe_active_num; + QP_info *qp; + int i; + int Num_posted_recv_buf; + + /* Open HCA */ + CDEBUG(D_PORTALS, "Opening an HCA\n"); + + vstat = VAPI_open_hca(HCA_ID, &Hca_hndl); + vstat = EVAPI_get_hca_hndl(HCA_ID, &Hca_hndl); + if (vstat != VAPI_OK) { + CERROR("Failed opening the HCA: %s. %s...\n",HCA_ID,VAPI_strerror(vstat)); + return(vstat); + } + + /* Get HCA CAP */ + vstat = VAPI_query_hca_cap(Hca_hndl, &Hca_vendor, &Hca_cap); + if (vstat != VAPI_OK) { + CERROR("Failed query hca cap %s\n",VAPI_strerror(vstat)); + return(vstat); + } + + /* Get port 1 info */ + vstat = VAPI_query_hca_port_prop(Hca_hndl, HCA_PORT_1 , &Hca_port_1_props); + if (vstat != VAPI_OK) { + CERROR("Failed query port cap %s\n",VAPI_strerror(vstat)); + return(vstat); + } + + /* Get port 2 info */ + vstat = VAPI_query_hca_port_prop(Hca_hndl, HCA_PORT_2, &Hca_port_2_props); + if (vstat != VAPI_OK) { + CERROR("Failed query port cap %s\n",VAPI_strerror(vstat)); + return(vstat); + } + + // Get a PD + CDEBUG(D_PORTALS, "Allocating PD \n"); + vstat = VAPI_alloc_pd(Hca_hndl,&Pd_hndl); + if (vstat != VAPI_OK) { + CERROR("Failed allocating a PD. %s\n",VAPI_strerror(vstat)); + return(vstat); + } + + vstat = createMemRegion(Hca_hndl, Pd_hndl); + if (vstat != VAPI_OK) { + CERROR("Failed registering a memory region.%s\n",VAPI_strerror(vstat)); + return(vstat); + } + + /* Create CQ for RQ*/ + CDEBUG(D_PORTALS, "Creating a send completion queue\n"); + + vstat = VAPI_create_cq(Hca_hndl, + NUM_CQE, + &Cq_hndl, + &cqe_active_num); + + if (vstat != VAPI_OK) { + CERROR("Failed creating a CQ. 
%s\n",VAPI_strerror(vstat)); + return(vstat); + } + + if(NUM_CQE == cqe_active_num) { + CERROR("VAPI_create_cq: NUM_CQE EQ cqe_active_num \n"); + } + else { + CDEBUG(D_NET, "VAPI_create_cq: NUM_CQE %d , actual cqe_active_num %d \n", + NUM_CQE, cqe_active_num); + } + + Cq_SQ_hndl = Cq_hndl; + Cq_RQ_hndl = Cq_hndl; + + // + // create QPs + // + for(i=0; i < NUM_QPS; i++) { + QP_list[i].pd_hndl = Pd_hndl; + QP_list[i].hca_hndl = Hca_hndl; + // sq rq use the same Cq_hndl + QP_list[i].sq_cq_hndl = Cq_hndl; + QP_list[i].rq_cq_hndl = Cq_hndl; + vstat = create_qp(&QP_list[i], i); + if (vstat != VAPI_OK) { + CERROR("Failed creating a QP %d %s\n",i, VAPI_strerror(vstat)); + return(vstat); + } + } + + // + // record HCA data + // + + Hca_data.hca_hndl = Hca_hndl; // HCA handle + Hca_data.pd_hndl = Pd_hndl; // protection domain + Hca_data.port = 1; // port number + Hca_data.num_qp = NUM_QPS; // number of qp used + + for(i=0; i < NUM_QPS; i++) { + Hca_data.qp_ptr[i] = &QP_list[i]; // point to QP_list + } + + Hca_data.num_cq = NUM_CQ; // number of cq used + Hca_data.cq_hndl = Cq_hndl; // + Hca_data.sq_cq_hndl = Cq_SQ_hndl; // + Hca_data.rq_cq_hndl = Cq_RQ_hndl; // + Hca_data.kib_data = kib_data; // + Hca_data.slid = QP_list[0].slid;// + + // prepare L_QP_data + +#ifdef USE_SHARED_MEMORY_AND_SOCKET + + /* + * + use a shared-memory between a user thread and a kernel thread + * for HCA's data exchange on the same node + * + use socket in user mode to exhange HCA's data with a remote node + */ + + + R_QP_data.opcode = SEND_QP_INFO; + R_QP_data.length = sizeof(L_QP_data); + + for(i=0; i < NUM_QPS; i++) { + // my slid will be used in a remote node as dlid + R_QP_data.dlid[i] = QP_list[i].slid; + // my qp_num will be used in remode node as remote_qp_number + // RC is used here so we need dlid and rqp_num + R_QP_data.rqp_num[i] = QP_list[i].qp_num ; + } + + // create a kernel thread for exchanging HCA's data + // R_QP_data will be exchanged with a remoe node + + kernel_thread(k_server_thread, &R_QP_data, 0); // + // check if the HCA'data have been updated by kernel_thread + // loop until the HCA's data is updated + // make sure that uagent is running + + // QP info is exchanged with a remote node + while (1) { + schedule_timeout(1000); + if(R_QP_data.opcode == RECV_QP_INFO) { + CDEBUG(D_NET, "HCA's data is being updated\n"); + break; + } + } + +#endif + +#ifdef USE_SHARED_MEMORY_AND_MULTICAST + + /* + * + use a shared-memory between a user thread and a kernel thread + * for HCA's data exchange on the same node + * + use Infinoband UR/multicast in user mode to exhange HCA's data with i + * a remote node + */ + + // use CM, opemSM + +#endif + + // + for(i=0; i < NUM_QPS; i++) { + qp = (QP_info *) &QP_list[i]; + QP_list[i].rqp_num = R_QP_data.rqp_num[i]; // remoter qp number + QP_list[i].dlid = R_QP_data.dlid[i]; // remote dlid + } + + // already have remote_qp_num adn dlid information + // initialize QP to RTR/RTS state + // + for(i=0; i < NUM_QPS; i++) { + vstat = init_qp_RC(&QP_list[i], i); + if (vstat != VAPI_OK) { + CERROR("Failed change a QP %d to RTS state%s\n", + i,VAPI_strerror(vstat)); + return(vstat); + } + } + + // post receiving buffer before any send happened + + Num_posted_recv_buf = post_recv_bufs( (VAPI_wr_id_t ) START_RECV_WRQ_ID); + + // for irregular completion event or some unexpected failure event + vstat = IB_Set_Async_Event_Handler(Hca_data, &kibnal_data); + if (vstat != VAPI_OK) { + CERROR("IB_Set_Async_Event_Handler failed: %d\n", vstat); + return vstat; + } + + + CDEBUG(D_PORTALS, 
"IBNAL- done with IB_Open_HCA\n"); + + for(i=0; i < NUM_MBUF; i++) { + spin_lock_init(&MSB_mutex[i]); + } + + return(VAPI_OK); + +} + + +/* + Function: IB_Set_Event_Handler() + + IN Hca_info hca_data + IN kibnal_data_t *kib_data -- private data + OUT NONE + + return: VAPI_OK - success + else - fail + +*/ + +VAPI_ret_t +IB_Set_Event_Handler(HCA_info hca_data, kibnal_data_t *kib_data) +{ + VAPI_ret_t vstat; + EVAPI_compl_handler_hndl_t comp_handler_hndl; + + // register CQE_Event_Hnadler + // VAPI function + vstat = VAPI_set_comp_event_handler(hca_data.hca_hndl, + CQE_event_handler, + &hca_data); + + /* + or use extended VAPI function + vstat = EVAPI_set_comp_eventh(hca_data.hca_hndl, + hca_data.cq_hndl, + CQE_event_handler, + &hca_data, + &comp_handler_hndl + ); + */ + + if (vstat != VAPI_OK) { + CERROR("IB_Set_Event_Handler: failed EVAPI_set_comp_eventh for" + " HCA ID = %s (%s).\n", HCA_ID, VAPI_strerror(vstat)); + return vstat; + } + + // issue a request for completion ievent notification + vstat = VAPI_req_comp_notif(hca_data.hca_hndl, + hca_data.cq_hndl, + VAPI_NEXT_COMP); + + if (vstat != VAPI_OK) { + CERROR("IB_Set_Event_Handler: failed VAPI_req_comp_notif for HCA ID" + " = %s (%s).\n", HCA_ID, VAPI_strerror(vstat)); + } + + return vstat; +} + + + +/* + Function: IB_Set_Async_Event_Handler() + + IN HCA_info hca_data + IN kibnal_data_t *kib_data -- private data + OUT NONE + + return: VAPI_OK - success + else - fail + +*/ + + +VAPI_ret_t +IB_Set_Async_Event_Handler(HCA_info hca_data, kibnal_data_t *kib_data) +{ + VAPI_ret_t vstat; + + // + // register an asynchronous event handler for this HCA + // + + vstat= VAPI_set_async_event_handler(hca_data.hca_hndl, + async_event_handler, + kib_data); + + if (vstat != VAPI_OK) { + CERROR("IB_Set_Async_Event_Handler: failed VAPI_set_async_comp_event_handler" + " for HCA ID = %s (%s).\n", HCA_ID, VAPI_strerror(vstat)); + } + + return vstat; +} + +// +// IB_Close_HCA +// close this Infiniband HCA interface +// release allocated resources to system +// +VAPI_ret_t +IB_Close_HCA(void ) +{ + + VAPI_ret_t vstat; + int ok = 1; + int i; + + /* Destroy QP */ + CDEBUG(D_PORTALS, "Destroying QP\n"); + + for(i=0; i < NUM_QPS; i++) { + vstat = VAPI_destroy_qp(QP_list[i].hca_hndl, QP_list[i].qp_hndl); + if (vstat != VAPI_OK) { + CERROR("Failed destroying QP %d. %s\n", i, VAPI_strerror(vstat)); + ok = 0; + } + } + + if (ok) { + /* Destroy CQ */ + CDEBUG(D_PORTALS, "Destroying CQ\n"); + for(i=0; i < NUM_QPS; i++) { + // send_cq adn receive_cq are shared the same CQ + // so only destroy one of them + vstat = VAPI_destroy_cq(QP_list[i].hca_hndl, QP_list[i].sq_cq_hndl); + if (vstat != VAPI_OK) { + CERROR("Failed destroying CQ %d. %s\n", i, VAPI_strerror(vstat)); + ok = 0; + } + } + } + + if (ok) { + /* Destroy Memory Region */ + CDEBUG(D_PORTALS, "Deregistering MR\n"); + for(i=0; i < NUM_QPS; i++) { + vstat = deleteMemRegion(&QP_list[i], i); + if (vstat != VAPI_OK) { + CERROR("Failed deregister mem reg %d. %s\n",i, VAPI_strerror(vstat)); + ok = 0; + break; + } + } + } + + if (ok) { + // finally + /* Close HCA */ + CDEBUG(D_PORTALS, "Closing HCA\n"); + vstat = VAPI_close_hca(Hca_hndl); + if (vstat != VAPI_OK) { + CERROR("Failed to close HCA. 
%s\n", VAPI_strerror(vstat)); + ok = 0; + } + } + + CDEBUG(D_PORTALS, "IBNAL- Done with closing HCA \n"); + + return vstat; +} + + +VAPI_ret_t +createMemRegion(VAPI_hca_hndl_t hca_hndl, + VAPI_pd_hndl_t pd_hndl) +{ + VAPI_ret_t vstat; + VAPI_mrw_t mrw; + VAPI_mrw_t rep_mr; + VAPI_mr_hndl_t rep_mr_hndl; + int buf_size; + char *bufptr; + int i; + + // send registered memory region + for(i=0; i < NUM_ENTRY; i++) { + MSbuf_list[i].buf_size = KB_32; + PORTAL_ALLOC(bufptr, MSbuf_list[i].buf_size); + if(bufptr == NULL) { + CDEBUG(D_MALLOC,"Failed to malloc a block of send memory, qix %d size %d\n", + i, MSbuf_list[i].buf_size); + CERROR("Failed to malloc a block of send memory, qix %d size %d\n", + i, MSbuf_list[i].buf_size); + return(VAPI_ENOMEM); + } + + mrw.type = VAPI_MR; + mrw.pd_hndl= pd_hndl; + mrw.start = MSbuf_list[i].buf_addr = (VAPI_virt_addr_t)(MT_virt_addr_t) bufptr; + mrw.size = MSbuf_list[i].buf_size; + mrw.acl = VAPI_EN_LOCAL_WRITE | + VAPI_EN_REMOTE_WRITE | + VAPI_EN_REMOTE_READ; + + // register send memory region + vstat = VAPI_register_mr(hca_hndl, + &mrw, + &rep_mr_hndl, + &rep_mr); + + // this memory region is going to be reused until deregister is called + if(vstat != VAPI_OK) { + CERROR("Failed registering a mem region qix %d Addr=%p, Len=%d. %s\n", + i, mrw.start, mrw.size, VAPI_strerror(vstat)); + return(vstat); + } + + MSbuf_list[i].mr = rep_mr; + MSbuf_list[i].mr_hndl = rep_mr_hndl; + MSbuf_list[i].bufptr = bufptr; + MSbuf_list[i].buf_addr = rep_mr.start; + MSbuf_list[i].status = BUF_REGISTERED; + MSbuf_list[i].ref_count = 0; + MSbuf_list[i].buf_type = REG_BUF; + MSbuf_list[i].raddr = 0x0; + MSbuf_list[i].rkey = 0x0; + } + + // RDAM buffer is not reserved for RDAM WRITE/READ + + for(i=NUM_ENTRY; i< NUM_MBUF; i++) { + MSbuf_list[i].status = BUF_UNREGISTERED; + MSbuf_list[i].buf_type = RDMA_BUF; + } + + + // recv registered memory region + for(i=0; i < NUM_ENTRY; i++) { + MRbuf_list[i].buf_size = KB_32; + PORTAL_ALLOC(bufptr, MRbuf_list[i].buf_size); + + if(bufptr == NULL) { + CDEBUG(D_MALLOC, "Failed to malloc a block of send memory, qix %d size %d\n", + i, MRbuf_list[i].buf_size); + return(VAPI_ENOMEM); + } + + mrw.type = VAPI_MR; + mrw.pd_hndl= pd_hndl; + mrw.start = (VAPI_virt_addr_t)(MT_virt_addr_t) bufptr; + mrw.size = MRbuf_list[i].buf_size; + mrw.acl = VAPI_EN_LOCAL_WRITE | + VAPI_EN_REMOTE_WRITE | + VAPI_EN_REMOTE_READ; + + // register send memory region + vstat = VAPI_register_mr(hca_hndl, + &mrw, + &rep_mr_hndl, + &rep_mr); + + // this memory region is going to be reused until deregister is called + if(vstat != VAPI_OK) { + CERROR("Failed registering a mem region qix %d Addr=%p, Len=%d. 
%s\n", + i, mrw.start, mrw.size, VAPI_strerror(vstat)); + return(vstat); + } + + MRbuf_list[i].mr = rep_mr; + MRbuf_list[i].mr_hndl = rep_mr_hndl; + MRbuf_list[i].bufptr = bufptr; + MRbuf_list[i].buf_addr = rep_mr.start; + MRbuf_list[i].status = BUF_REGISTERED; + MRbuf_list[i].ref_count = 0; + MRbuf_list[i].buf_type = REG_BUF; + MRbuf_list[i].raddr = 0x0; + MRbuf_list[i].rkey = rep_mr.r_key; + MRbuf_list[i].lkey = rep_mr.l_key; + + } + + // keep extra information for a qp + for(i=0; i < NUM_QPS; i++) { + QP_list[i].mr_hndl = MSbuf_list[i].mr_hndl; + QP_list[i].mr = MSbuf_list[i].mr; + QP_list[i].bufptr = MSbuf_list[i].bufptr; + QP_list[i].buf_addr = MSbuf_list[i].buf_addr; + QP_list[i].buf_size = MSbuf_list[i].buf_size; + QP_list[i].raddr = MSbuf_list[i].raddr; + QP_list[i].rkey = MSbuf_list[i].rkey; + QP_list[i].lkey = MSbuf_list[i].lkey; + } + + CDEBUG(D_PORTALS, "IBNAL- done VAPI_ret_t createMemRegion \n"); + + return vstat; + +} /* createMemRegion */ + + + +VAPI_ret_t +deleteMemRegion(QP_info *qp, int qix) +{ + VAPI_ret_t vstat; + + // + // free send memory assocaited with this memory region + // + PORTAL_FREE(MSbuf_list[qix].bufptr, MSbuf_list[qix].buf_size); + + // de-register it + vstat = VAPI_deregister_mr(qp->hca_hndl, MSbuf_list[qix].mr_hndl); + + if(vstat != VAPI_OK) { + CERROR("Failed deregistering a send mem region qix %d %s\n", + qix, VAPI_strerror(vstat)); + return vstat; + } + + // + // free recv memory assocaited with this memory region + // + PORTAL_FREE(MRbuf_list[qix].bufptr, MRbuf_list[qix].buf_size); + + // de-register it + vstat = VAPI_deregister_mr(qp->hca_hndl, MRbuf_list[qix].mr_hndl); + + if(vstat != VAPI_OK) { + CERROR("Failed deregistering a recv mem region qix %d %s\n", + qix, VAPI_strerror(vstat)); + return vstat; + } + + return vstat; +} + + +// +// polling based event handling +// + a daemon process +// + poll the CQ and check what is in the CQ +// + process incoming CQ event +// + +// + + +RDMA_Info_Exchange Rdma_info; +int Cts_Message_arrived = NO; + +void k_recv_thread(HCA_info *hca_data) +{ + VAPI_ret_t vstat; + VAPI_wc_desc_t comp_desc; + unsigned long polling_count = 0; + u_int32_t timeout_usec; + unsigned int priority = 100; + unsigned int length; + VAPI_wr_id_t wrq_id; + u_int32_t transferred_data_length; /* Num. 
of bytes transferred */ + void *bufdata; + VAPI_virt_addr_t bufaddr; + unsigned long buf_size = 0; + QP_info *qp; // point to QP_list + + kportal_daemonize("k_recv_thread"); // make it as a daemon process + + // tuning variable + timeout_usec = 100; // how is the impact on the performance + + // send Q and receive Q are using the same CQ + // so only poll one CQ for both operations + + CDEBUG(D_NET, "IBNAL- enter kibnal_recv_thread\n"); + CDEBUG(D_NET, "hca_hndl = 0X%x, cq_hndl=0X%x\n", + hca_data->hca_hndl,hca_data->cq_hndl); + + qp = hca_data->qp_ptr; + if(qp == NULL) { + CDEBUG(D_NET, "in recv_thread qp is NULL\n"); + CDEBUG(D_NET, "Exit from recv_thread qp is NULL\n"); + return; + } + else { + CDEBUG(D_NET, "in recv_thread qp is 0X%X\n", qp); + } + + CDEBUG(D_NET, "kibnal_recv_thread - enter event driver polling loop\n"); + + // + // use event driver + // + + + + while(1) { + polling_count++; + + // + // send Q and receive Q are using the same CQ + // so only poll one CQ for both operations + // + + vstat = VAPI_poll_cq(hca_data->hca_hndl,hca_data->cq_hndl, &comp_desc); + + if (vstat == VAPI_CQ_EMPTY) { + // there is no event in CQE + continue; + } + else { + if (vstat != (VAPI_OK)) { + CERROR("error while polling completion queuei vstat %d \n", vstat); + return; + } + } + + // process the complete event + switch(comp_desc.opcode) { + case VAPI_CQE_SQ_SEND_DATA: + // about the Send Q ,POST SEND completion + // who needs this information + // get wrq_id + // mark MSbuf_list[wr_id].status = BUF_REGISTERED + + wrq_id = comp_desc.id; + + if(RDMA_OP_ID < wrq_id) { + // this RDMA message id, adjust it to the right entry + wrq_id = wrq_id - RDMA_OP_ID; + vstat = VAPI_deregister_mr(qp->hca_hndl, Local_rdma_info.send_rdma_mr_hndl); + } + + if(vstat != VAPI_OK) { + CERROR("VAPI_CQE_SQ_SEND_DATA: Failed deregistering a RDMAi recv" " mem region %s\n", VAPI_strerror(vstat)); + } + + if((RDMA_CTS_ID <= wrq_id) && (RDMA_OP_ID < wrq_id)) { + // RTS or CTS send complete, release send buffer + if(wrq_id >= RDMA_RTS_ID) + wrq_id = wrq_id - RDMA_RTS_ID; + else + wrq_id = wrq_id - RDMA_CTS_ID; + } + + spin_lock(&MSB_mutex[(int) wrq_id]); + MRbuf_list[wrq_id].status = BUF_REGISTERED; + spin_unlock(&MSB_mutex[(int) wrq_id]); + + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_SEND_DATA\n"); + break; + + case VAPI_CQE_SQ_RDMA_WRITE: + // about the Send Q, RDMA write completion + // who needs this information + // data is successfully write from pource to destionation + + // get wr_id + // mark MSbuf_list[wr_id].status = BUF_REGISTERED + // de-register rdma buffer + // + + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_RDMA_WRITE\n"); + break; + + case VAPI_CQE_SQ_RDMA_READ: + // about the Send Q + // RDMA read completion + // who needs this information + // data is successfully read from destionation to source + CDEBUG(D_NET, "CQE opcode- VAPI_CQE_SQ_RDMA_READ\n"); + break; + + case VAPI_CQE_SQ_COMP_SWAP: + // about the Send Q + // RDMA write completion + // who needs this information + + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_COMP_SWAP\n"); + break; + + case VAPI_CQE_SQ_FETCH_ADD: + // about the Send Q + // RDMA write completion + // who needs this information + + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_FETCH_ADD\n"); + break; + + case VAPI_CQE_SQ_BIND_MRW: + // about the Send Q + // RDMA write completion + // who needs this information + + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_BIND_MRW\n"); + break; + + case VAPI_CQE_RQ_SEND_DATA: + // about the Receive Q + // process the incoming data and + // forward it to ..... 
+ // a completion recevie event is arriving at CQ + // issue a recevie to get this arriving data out from CQ + // pass the receiving data for further processing + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_RQ_SEND_DATA\n"); + wrq_id = comp_desc.id ; + transferred_data_length = comp_desc.byte_len; + + if((wrq_id >= RDMA_CTS_ID) && (wrq_id < RDMA_OP_ID)) { + // this is RTS/CTS message + // process it locally and don't pass it to portals layer + // adjust wrq_id to get the right entry in MRbfu_list + + if(wrq_id >= RDMA_RTS_ID) + wrq_id = wrq_id - RDMA_RTS_ID; + else + wrq_id = wrq_id - RDMA_CTS_ID; + + bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t) MRbuf_list[wrq_id].buf_addr; + MRbuf_list[wrq_id].status = BUF_INUSE; + memcpy(&Rdma_info, &bufaddr, sizeof(RDMA_Info_Exchange)); + + if(Ready_To_send == Rdma_info.opcode) + // an RTS request message from remote node + // prepare local RDMA buffer and send local rdma info to + // remote node + CTS_handshaking_protocol(&Rdma_info); + else + if((Clear_To_send == Rdma_info.opcode) && + (RDMA_BUFFER_RESERVED == Rdma_info.flag)) + Cts_Message_arrived = YES; + else + if(RDMA_BUFFER_UNAVAILABLE == Rdma_info.flag) + CERROR("RDMA operation abort-RDMA_BUFFER_UNAVAILABLE\n"); + } + else { + // + // this is an incoming mesage for portals layer + // move to PORTALS layer for further processing + // + + bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t) + MRbuf_list[wrq_id].buf_addr; + + MRbuf_list[wrq_id].status = BUF_INUSE; + transferred_data_length = comp_desc.byte_len; + + kibnal_rx(hca_data->kib_data, + bufaddr, + transferred_data_length, + MRbuf_list[wrq_id].buf_size, + priority); + } + + // repost this receiving buffer and makr it at BUF_REGISTERED + + vstat = repost_recv_buf(qp, wrq_id); + if(vstat != (VAPI_OK)) { + CERROR("error while polling completion queue\n"); + } + else { + MRbuf_list[wrq_id].status = BUF_REGISTERED; + } + + break; + + case VAPI_CQE_RQ_RDMA_WITH_IMM: + // about the Receive Q + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_RQ_RDMA_WITH_IMM\n"); + + wrq_id = comp_desc.id ; + transferred_data_length = comp_desc.byte_len; + + if(wrq_id == RDMA_OP_ID) { + // this is RDAM op , locate the RDAM memory buffer address + + bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t) Local_rdma_info.raddr; + + transferred_data_length = comp_desc.byte_len; + + kibnal_rx(hca_data->kib_data, + bufaddr, + transferred_data_length, + Local_rdma_info.buf_length, + priority); + + // de-regiser this RDAM receiving memory buffer + // too early ?? test & check + vstat = VAPI_deregister_mr(qp->hca_hndl, Local_rdma_info.recv_rdma_mr_hndl); + if(vstat != VAPI_OK) { + CERROR("VAPI_CQE_RQ_RDMA_WITH_IMM: Failed deregistering a RDMA" + " recv mem region %s\n", VAPI_strerror(vstat)); + } + } + + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_RQ_RDMA_WITH_IMM\n"); + break; + + case VAPI_CQE_INVAL_OPCODE: + // + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_INVAL_OPCODE\n"); + break; + + default : + CDEBUG(D_NET, "CQE opcode-unknown opcode\n"); + break; + } // switch + + schedule_timeout(RECEIVING_THREAD_TIMEOUT);//how often do we need to poll CQ + + }// receiving while loop + + +} + + +void CQE_event_handler(VAPI_hca_hndl_t hca_hndl, + VAPI_cq_hndl_t cq_hndl, + void *private) +{ + VAPI_ret_t vstat; + VAPI_wc_desc_t comp_desc; + unsigned long polling_count = 0; + u_int32_t timeout_usec; + unsigned int priority = 100; + unsigned int length; + VAPI_wr_id_t wrq_id; + u_int32_t transferred_data_length; /* Num. 
of bytes transferred */ + void *bufdata; + VAPI_virt_addr_t bufaddr; + unsigned long buf_size = 0; + QP_info *qp; // point to QP_list + HCA_info *hca_data; + + // send Q and receive Q are using the same CQ + // so only poll one CQ for both operations + + CDEBUG(D_NET, "IBNAL- enter CQE_event_handler\n"); + printk("IBNAL- enter CQE_event_handler\n"); + + hca_data = (HCA_info *) private; + + // + // use event driven + // + + + vstat = VAPI_poll_cq(hca_data->hca_hndl,hca_data->cq_hndl, &comp_desc); + + if (vstat == VAPI_CQ_EMPTY) { + CDEBUG(D_NET, "CQE_event_handler: there is no event in CQE, how could" + " this " "happened \n"); + printk("CQE_event_handler: there is no event in CQE, how could" + " this " "happened \n"); + + } + else { + if (vstat != (VAPI_OK)) { + CDEBUG(D_NET, "error while polling completion queue vstat %d - %s\n", + vstat, VAPI_strerror(vstat)); + printk("error while polling completion queue vstat %d - %s\n", + vstat, VAPI_strerror(vstat)); + return; + } + } + + // process the complete event + switch(comp_desc.opcode) { + case VAPI_CQE_SQ_SEND_DATA: + // about the Send Q ,POST SEND completion + // who needs this information + // get wrq_id + // mark MSbuf_list[wr_id].status = BUF_REGISTERED + + wrq_id = comp_desc.id; + +#ifdef IBNAL_SELF_TESTING + if(wrq_id == SEND_RECV_TEST_ID) { + printk("IBNAL_SELF_TESTING - VAPI_CQE_SQ_SEND_DATA \n"); + } +#else + if(RDMA_OP_ID < wrq_id) { + // this RDMA message id, adjust it to the right entry + wrq_id = wrq_id - RDMA_OP_ID; + vstat = VAPI_deregister_mr(qp->hca_hndl, + Local_rdma_info.send_rdma_mr_hndl); + } + + if(vstat != VAPI_OK) { + CERROR(" VAPI_CQE_SQ_SEND_DATA: Failed deregistering a RDMA" + " recv mem region %s\n", VAPI_strerror(vstat)); + } + + if((RDMA_CTS_ID <= wrq_id) && (RDMA_OP_ID < wrq_id)) { + // RTS or CTS send complete, release send buffer + if(wrq_id >= RDMA_RTS_ID) + wrq_id = wrq_id - RDMA_RTS_ID; + else + wrq_id = wrq_id - RDMA_CTS_ID; + } + + spin_lock(&MSB_mutex[(int) wrq_id]); + MRbuf_list[wrq_id].status = BUF_REGISTERED; + spin_unlock(&MSB_mutex[(int) wrq_id]); +#endif + + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_SEND_DATA\n"); + + break; + + case VAPI_CQE_SQ_RDMA_WRITE: + // about the Send Q, RDMA write completion + // who needs this information + // data is successfully write from pource to destionation + + // get wr_id + // mark MSbuf_list[wr_id].status = BUF_REGISTERED + // de-register rdma buffer + // + + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_RDMA_WRITE\n"); + break; + + case VAPI_CQE_SQ_RDMA_READ: + // about the Send Q + // RDMA read completion + // who needs this information + // data is successfully read from destionation to source + CDEBUG(D_NET, "CQE opcode- VAPI_CQE_SQ_RDMA_READ\n"); + break; + + case VAPI_CQE_SQ_COMP_SWAP: + // about the Send Q + // RDMA write completion + // who needs this information + + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_COMP_SWAP\n"); + break; + + case VAPI_CQE_SQ_FETCH_ADD: + // about the Send Q + // RDMA write completion + // who needs this information + + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_FETCH_ADD\n"); + break; + + case VAPI_CQE_SQ_BIND_MRW: + // about the Send Q + // RDMA write completion + // who needs this information + + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_BIND_MRW\n"); + break; + + case VAPI_CQE_RQ_SEND_DATA: + // about the Receive Q + // process the incoming data and + // forward it to ..... 
+ // a completion recevie event is arriving at CQ + // issue a recevie to get this arriving data out from CQ + // pass the receiving data for further processing + + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_RQ_SEND_DATA\n"); + + wrq_id = comp_desc.id ; + +#ifdef IBNAL_SELF_TESTING + + char rbuf[KB_32]; + int i; + + if(wrq_id == SEND_RECV_TEST_ID) { + printk("IBNAL_SELF_TESTING - VAPI_CQE_RQ_SEND_DATA\n"); + } + + bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t) + MRbuf_list[ SEND_RECV_TEST_BUF_ID].buf_addr; + MRbuf_list[SEND_RECV_TEST_BUF_ID].status = BUF_INUSE; + memcpy(&rbuf, &bufaddr, KB_32); + + + for(i=0; i < 16; i++) + printk("rbuf[%d]=%c, ", rbuf[i]); + printk("\n"); + + // repost this receiving buffer and makr it at BUF_REGISTERED + vstat = repost_recv_buf(qp,SEND_RECV_TEST_BUF_ID); + if(vstat != (VAPI_OK)) { + printk("error while polling completion queue\n"); + } + else { + MRbuf_list[SEND_RECV_TEST_BUF_ID].status = BUF_REGISTERED; + } +#else + transferred_data_length = comp_desc.byte_len; + + if((wrq_id >= RDMA_CTS_ID) && (wrq_id < RDMA_OP_ID)) { + // this is RTS/CTS message + // process it locally and don't pass it to portals layer + // adjust wrq_id to get the right entry in MRbfu_list + + if(wrq_id >= RDMA_RTS_ID) + wrq_id = wrq_id - RDMA_RTS_ID; + else + wrq_id = wrq_id - RDMA_CTS_ID; + + bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t) + MRbuf_list[wrq_id].buf_addr; + MRbuf_list[wrq_id].status = BUF_INUSE; + memcpy(&Rdma_info, &bufaddr, sizeof(RDMA_Info_Exchange)); + + if(Ready_To_send == Rdma_info.opcode) + // an RTS request message from remote node + // prepare local RDMA buffer and send local rdma info to + // remote node + CTS_handshaking_protocol(&Rdma_info); + else + if((Clear_To_send == Rdma_info.opcode) && + (RDMA_BUFFER_RESERVED == Rdma_info.flag)) + Cts_Message_arrived = YES; + else + if(RDMA_BUFFER_UNAVAILABLE == Rdma_info.flag) + CERROR("RDMA operation abort-RDMA_BUFFER_UNAVAILABLE\n"); + } + else { + // + // this is an incoming mesage for portals layer + // move to PORTALS layer for further processing + // + + bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t) + MRbuf_list[wrq_id].buf_addr; + + MRbuf_list[wrq_id].status = BUF_INUSE; + transferred_data_length = comp_desc.byte_len; + + kibnal_rx(hca_data->kib_data, + bufaddr, + transferred_data_length, + MRbuf_list[wrq_id].buf_size, + priority); + } + + // repost this receiving buffer and makr it at BUF_REGISTERED + vstat = repost_recv_buf(qp, wrq_id); + if(vstat != (VAPI_OK)) { + CERROR("error while polling completion queue\n"); + } + else { + MRbuf_list[wrq_id].status = BUF_REGISTERED; + } +#endif + + break; + + case VAPI_CQE_RQ_RDMA_WITH_IMM: + // about the Receive Q + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_RQ_RDMA_WITH_IMM\n"); + + wrq_id = comp_desc.id ; + transferred_data_length = comp_desc.byte_len; + + if(wrq_id == RDMA_OP_ID) { + // this is RDAM op , locate the RDAM memory buffer address + + bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t) Local_rdma_info.raddr; + + transferred_data_length = comp_desc.byte_len; + + kibnal_rx(hca_data->kib_data, + bufaddr, + transferred_data_length, + Local_rdma_info.buf_length, + priority); + + // de-regiser this RDAM receiving memory buffer + // too early ?? 
test & check + vstat = VAPI_deregister_mr(qp->hca_hndl, Local_rdma_info.recv_rdma_mr_hndl); + if(vstat != VAPI_OK) { + CERROR("VAPI_CQE_RQ_RDMA_WITH_IMM: Failed deregistering a RDMA" + " recv mem region %s\n", VAPI_strerror(vstat)); + } + } + + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_RQ_RDMA_WITH_IMM\n"); + break; + + case VAPI_CQE_INVAL_OPCODE: + // + CDEBUG(D_NET, "CQE opcode-VAPI_CQE_INVAL_OPCODE\n"); + break; + + default : + CDEBUG(D_NET, "CQE opcode-unknown opcode\n"); + + break; + } // switch + + // issue a new request for completion ievent notification + vstat = VAPI_req_comp_notif(hca_data->hca_hndl, + hca_data->cq_hndl, + VAPI_NEXT_COMP); + + + if(vstat != VAPI_OK) { + CERROR("PI_req_comp_notif: Failed %s\n", VAPI_strerror(vstat)); + } + + return; // end of event handler + +} + + + +int +kibnal_cmd(struct portal_ioctl_data * data, void * private) +{ + int rc ; + + CDEBUG(D_NET, "kibnal_cmd \n"); + + return YES; +} + + + +void ibnal_send_recv_self_testing(int *my_role) +{ + VAPI_ret_t vstat; + VAPI_sr_desc_t sr_desc; + VAPI_sg_lst_entry_t sr_sg; + QP_info *qp; + VAPI_wr_id_t send_id; + int buf_id; + char sbuf[KB_32]; + char rbuf[KB_32]; + int i; + int buf_length = KB_32; + VAPI_wc_desc_t comp_desc; + int num_send = 1; + int loop_count = 0; + + // make it as a daemon process + // kportal_daemonize("ibnal_send_recv_self_testing"); + + printk("My role is 0X%X\n", *my_role); + +if(*my_role == TEST_SEND_MESSAGE) { + printk("Enter ibnal_send_recv_self_testing\n"); + + memset(&sbuf, 'a', KB_32); + memset(&rbuf, ' ', KB_32); + + send_id = SEND_RECV_TEST_ID; + buf_id = SEND_RECV_TEST_BUF_ID; + + qp = &QP_list[buf_id]; + + sr_desc.opcode = VAPI_SEND; + sr_desc.comp_type = VAPI_SIGNALED; + sr_desc.id = send_id; + + // scatter and gather info + sr_sg.len = KB_32; + sr_sg.lkey = MSbuf_list[buf_id].mr.l_key; // use send MR + sr_sg.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MSbuf_list[buf_id].buf_addr; + + // copy data to register send buffer + memcpy(&sr_sg.addr, &sbuf, buf_length); + + sr_desc.sg_lst_p = &sr_sg; + sr_desc.sg_lst_len = 1; // only 1 entry is used + sr_desc.fence = TRUE; + sr_desc.set_se = FALSE; + + /* + // call VAPI_post_sr to send out this data + vstat = VAPI_post_sr(qp->hca_hndl, qp->qp_hndl, &sr_desc); + + if (vstat != VAPI_OK) { + printk("VAPI_post_sr failed (%s).\n",VAPI_strerror(vstat)); + } + + printk("VAPI_post_sr success.\n"); + */ + + } +else { + printk("I am a receiver and doing nothing here\n"); +} + + printk("ibnal_send_recv_self_testing thread exit \n"); + + return; + +} + + +// +// ibnal initialize process +// +// 1. Bring up Infiniband network interface +// * +// 2. 
Initialize a PORTALS nal interface +// +// +int __init +kibnal_initialize(void) +{ + int rc; + int ntok; + unsigned long sizemask; + unsigned int nid; + VAPI_ret_t vstat; + + + portals_debug_set_level(IBNAL_DEBUG_LEVEL_1); + + CDEBUG(D_MALLOC, "start kmem %d\n", atomic_read (&portal_kmemory)); + + CDEBUG(D_PORTALS, "kibnal_initialize: Enter kibnal_initialize\n"); + + // set api functional pointers + kibnal_api.forward = kibnal_forward; + kibnal_api.shutdown = kibnal_shutdown; + kibnal_api.yield = kibnal_yield; + kibnal_api.validate = NULL; /* our api validate is a NOOP */ + kibnal_api.lock = kibnal_lock; + kibnal_api.unlock = kibnal_unlock; + kibnal_api.nal_data = &kibnal_data; // this is so called private data + kibnal_api.refct = 1; + kibnal_api.timeout = NULL; + kibnal_lib.nal_data = &kibnal_data; + + memset(&kibnal_data, 0, sizeof(kibnal_data)); + + // initialize kib_list list data structure + INIT_LIST_HEAD(&kibnal_data.kib_list); + + kibnal_data.kib_cb = &kibnal_lib; + + spin_lock_init(&kibnal_data.kib_dispatch_lock); + + + // + // bring up the IB inter-connect network interface + // setup QP, CQ + // + vstat = IB_Open_HCA(&kibnal_data); + + if(vstat != VAPI_OK) { + CERROR("kibnal_initialize: IB_Open_HCA failed: %d- %s\n", + vstat, VAPI_strerror(vstat)); + + printk("kibnal_initialize: IB_Open_HCA failed: %d- %s\n", + vstat, VAPI_strerror(vstat)); + return NO; + } + + kibnal_data.kib_nid = (__u64 )Hca_hndl;//convert Hca_hndl to 64-bit format + kibnal_data.kib_init = 1; + + CDEBUG(D_NET, " kibnal_data.kib_nid 0x%x%x\n", kibnal_data.kib_nid); + printk(" kibnal_data.kib_nid 0x%x%x\n", kibnal_data.kib_nid); + + /* Network interface ready to initialise */ + // get an entery in the PORTALS table for this IB protocol + + CDEBUG(D_PORTALS,"Call PtlNIInit to register this Infiniband Interface\n"); + printk("Call PtlNIInit to register this Infiniband Interface\n"); + + rc = PtlNIInit(kibnal_init, 32, 4, 0, &kibnal_ni); + + if(rc != PTL_OK) { + CERROR("kibnal_initialize: PtlNIInit failed %d\n", rc); + printk("kibnal_initialize: PtlNIInit failed %d\n", rc); + kibnal_finalize(); + return (-ENOMEM); + } + + CDEBUG(D_PORTALS,"kibnal_initialize: PtlNIInit DONE\n"); + printk("kibnal_initialize: PtlNIInit DONE\n"); + + + +#ifdef POLL_BASED_CQE_HANDLING + // create a receiving thread: main loopa + // this is polling based mail loop + kernel_thread(k_recv_thread, &Hca_data, 0); +#endif + +#ifdef EVENT_BASED_CQE_HANDLING + // for completion event handling, this is event based CQE handling + vstat = IB_Set_Event_Handler(Hca_data, &kibnal_data); + + if (vstat != VAPI_OK) { + CERROR("IB_Set_Event_Handler failed: %d - %s \n", + vstat, VAPI_strerror(vstat)); + return vstat; + } + + CDEBUG(D_PORTALS,"IB_Set_Event_Handler Done \n"); + printk("IB_Set_Event_Handler Done \n"); + +#endif + + PORTAL_SYMBOL_REGISTER(kibnal_ni); + +#ifdef IBNAL_SELF_TESTING + // + // test HCA send recv before normal event handling + // + int my_role; + my_role = TEST_SEND_MESSAGE; + + printk("my role is TEST_RECV_MESSAGE\n"); + + // kernel_thread(ibnal_send_recv_self_testing, &my_role, 0); + + ibnal_send_recv_self_testing(&my_role); + +#endif + + return 0; + +} + + + +MODULE_AUTHOR("Hsingbung(HB) Chen "); +MODULE_DESCRIPTION("Kernel Infiniband NAL v0.1"); +MODULE_LICENSE("GPL"); + +module_init (kibnal_initialize); +module_exit (kibnal_finalize); + +EXPORT_SYMBOL(kibnal_ni); + diff --git a/lustre/portals/knals/ibnal/ibnal.h b/lustre/portals/knals/ibnal/ibnal.h new file mode 100644 index 0000000..ff5aeb3 --- /dev/null +++ 
b/lustre/portals/knals/ibnal/ibnal.h @@ -0,0 +1,564 @@ +#ifndef _IBNAL_H +#define _IBNAL_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DEBUG_SUBSYSTEM S_IBNAL + +#include +#include +#include + +// Infiniband VAPI/EVAPI header files +// Mellanox MT23108 VAPI +#include +#include +#include +#include + +// pick a port for this RDMA information exhange between two hosts +#define HOST_PORT 11211 +#define QUEUE_SIZE 1024 +#define HCA_PORT_1 1 +#define HCA_PORT_2 2 +#define DEBUG_SUBSYSTEM S_IBNAL + +#define START_SEND_WRQ_ID 0 +#define START_RECV_WRQ_ID 0 +#define START_RDMA_WRQ_ID 0 + +#define DEFAULT_PRIORITY 100 + +#define WAIT_FOT_R_RDMA_TIMEOUT 10000 +#define MAX_NUM_TRY 3000 + +#define MAX_NUM_POLL 300 +#define MAX_LOOP_COUNT 500 + +#define MAX_GID 32 +#define MCG_BUF_LENGTH 128 + +#define SHARED_SEGMENT_SIZE 0x10000 +#define HCA_EXCHANGE_SHM_KEY 999 // shared memory key for HCA data exchange + +// some internals opcodes for IB operations used in IBNAL +#define SEND_QP_INFO 0X00000001 +#define RECV_QP_INFO 0X00000010 + +// Mellanox InfiniHost MT23108 +// QP/CQ related information +// + +#define MTU_256 1 /* 1-256,2-512,3-1024,4-2048 */ +#define MTU_512 2 /* 1-256,2-512,3-1024,4-2048 */ +#define MTU_1024 3 /* 1-256,2-512,3-1024,4-2048 */ +#define MTU_2048 4 /* 1-256,2-512,3-1024,4-2048 */ + +// number of entries for each CQ and WQ +// how much do we need ? +#define NUM_CQE 1024 +#define NUM_WQE 1024 +#define MAX_OUT_SQ 64 +#define MAX_OUT_RQ 64 + +#define NUM_MBUF 256 +#define NUM_RDMA_RESERVED_ENTRY 128 +#define NUM_QPS 256 + +#define INVALID_WR_ID ((VAPI_wr_id_t) -1) + + +// for Vector IO +// scatter and gather +// Portals can support upto 64 IO-Vectors +// how much do we need ? 
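+// A note on NUM_SGE: the send paths below copy the portals header and
+// the payload into one contiguous pre-registered send buffer before
+// posting, so every work request is described by exactly one
+// scatter/gather element. The following is a minimal sketch of how
+// that single element gets filled (hypothetical helper, for
+// illustration only; the real send paths fill the
+// VAPI_sg_lst_entry_t fields inline):
+static inline void kibnal_fill_single_sge(VAPI_sg_lst_entry_t *sg,
+                                          VAPI_lkey_t lkey,
+                                          VAPI_virt_addr_t addr,
+                                          u_int32_t nob)
+{
+        sg->lkey = lkey;        // l_key of the registered memory region
+        sg->addr = addr;        // start of header + payload
+        sg->len  = nob;         // sizeof(ptl_hdr_t) + payload length
+}
+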
+#define NUM_SGE 1 +#define NUM_SG 1 +#define NUM_CQ 1 + +#define ONE_KB 1024 +#define ONE_MB 1024 * ONE_KB +#define ONE_GB 1024 * ONE_MB + + +#define KB_4 1024 * 4 +#define KB_8 1024 * 8 +#define KB_16 1024 * 16 +#define KB_32 1024 * 32 +#define KB_64 1024 * 64 +#define KB_128 1024 * 128 +#define KB_256 1024 * 256 + +// 256 entry in registered buffer list +// small size message +#define Num_4_KB 64 +#define Num_8_KB 64 +#define Num_16_KB 40 +#define Num_32_KB 40 +#define Num_64_KB 40 +#define Num_128_KB 4 +#define Num_256_KB 4 + +#define SMALL_MSG_SIZE KB_32 + +#define MAX_MSG_SIZE ONE_MB * 512 + +// 128's 64KB bufer for send +// 128's 64KB bufer for recv +// used in RDAM operation only + +#define NUM_ENTRY 128 + +#define End_4_kb Num_4_KB +#define End_8_kb End_4_kb + Num_8_KB +#define End_16_kb End_8_kb + Num_16_KB +#define End_32_kb End_16_kb + Num_32_KB +#define End_64_kb End_32_kb + Num_64_KB +#define End_128_kb End_64_kb + Num_128_KB +#define End_256_kb End_128_kb+ Num_256_KB + + +#define SEND_BUF_SIZE KB_32 +#define RECV_BUF_SIZE SEND_BUF_SIZE + +// #define POLL_BASED_CQE_HANDLING 1 +#define EVENT_BASED_CQE_HANDLING 1 +#define IBNAL_SELF_TESTING 1 + +#ifdef IBNAL_SELF_TESTING +#undef IBNAL_SELF_TESTING +#endif + + +#define MSG_SIZE_SMALL 1 +#define MSG_SIZE_LARGE 2 + + + +// some defauly configuration values for early testing +#define DEFAULT_DLID 1 // default destination link ID +#define DEFAULT_QP_NUM 4 // default QP number +#define P_KEY 0xFFFF // do we need default value +#define PKEY_IX 0x0 // do we need default value +#define Q_KEY 0x012 // do we need default value +#define L_KEY 0x12345678 // do we need default value +#define R_KEY 0x87654321 // do we need default value +#define HCA_ID "InfiniHost0" // default +#define START_PSN 0 +#define START_SQ_PSN 0 +#define START_RQ_PSN 0 + + +#define __u_long_long unsigned long long + +#define IBNAL_DEBUG 1 + +#define USE_SHARED_MEMORY_AND_SOCKET 1 + +// operation type +#define TRY_SEND_ONLY 1 + +#define YES 1 +#define NO 0 + +// +// a common data structure for IB QP's operation +// each QP is associated with an QP_info structure +// +typedef struct QP_info +{ + VAPI_hca_hndl_t hca_hndl; // HCA handle + IB_port_t port; // port number + VAPI_qp_hndl_t qp_hndl; // QP's handle list + VAPI_qp_state_t qp_state; // QP's current state + VAPI_pd_hndl_t pd_hndl; // protection domain + VAPI_cq_hndl_t cq_hndl; // send-queue CQ's handle + VAPI_cq_hndl_t sq_cq_hndl; // send-queue CQ's handle + VAPI_cq_hndl_t rq_cq_hndl; // receive-queue CQ's handle + VAPI_ud_av_hndl_t av_hndl; // receive-queue CQ's handle + VAPI_qp_init_attr_t qp_init_attr; // QP's init attribute + VAPI_qp_attr_t qp_attr; // QP's attribute - dlid + VAPI_qp_prop_t qp_prop; // QP's propertities + VAPI_hca_port_t hca_port; + VAPI_qp_num_t qp_num; // QP's number + VAPI_qp_num_t rqp_num; // remote QP's number + IB_lid_t slid; + IB_lid_t dlid; + VAPI_gid_t src_gid; + + u_int32_t buf_size; + VAPI_virt_addr_t buf_addr; + char *bufptr; + VAPI_mrw_t mr; + VAPI_mr_hndl_t mr_hndl; + VAPI_virt_addr_t raddr; + VAPI_rkey_t rkey; + VAPI_lkey_t lkey; + + VAPI_wr_id_t last_posted_send_id; // user defined work request ID + VAPI_wr_id_t last_posted_rcv_id; // user defined work request ID + VAPI_mw_hndl_t mw_hndl; // memory window handle + VAPI_rkey_t mw_rkey; // memory window rkey + VAPI_sg_lst_entry_t sg_lst[256]; // scatter and gather list + int sg_list_sz; // set as NUM_SGE + VAPI_wr_id_t wr_id; // + spinlock_t snd_mutex; + spinlock_t rcv_mutex; + spinlock_t bl_mutex; + spinlock_t cln_mutex; + int 
cur_RDMA_outstanding;
+        int                     cur_send_outstanding;
+        int                     cur_posted_rcv_bufs;
+        int                     snd_rcv_balance;
+} QP_info;
+
+
+// buffer status
+#define BUF_REGISTERED   0x10000000
+#define BUF_INUSE        0x01000000
+#define BUF_UNREGISTERED 0x00100000
+
+// buffer type
+#define REG_BUF          0x10000000
+#define RDMA_BUF         0x01000000
+
+//
+// IMM data
+// (each value is cast to 64 bits before the shift: shifting a plain
+// 32-bit int by 32 is undefined, and a trailing semicolon would break
+// any use of these macros inside an expression)
+//
+#define IMM_000 ((__u_long_long)  0 << 32)
+#define IMM_001 ((__u_long_long)  1 << 32)
+#define IMM_002 ((__u_long_long)  2 << 32)
+#define IMM_003 ((__u_long_long)  3 << 32)
+#define IMM_004 ((__u_long_long)  4 << 32)
+#define IMM_005 ((__u_long_long)  5 << 32)
+#define IMM_006 ((__u_long_long)  6 << 32)
+#define IMM_007 ((__u_long_long)  7 << 32)
+#define IMM_008 ((__u_long_long)  8 << 32)
+#define IMM_009 ((__u_long_long)  9 << 32)
+#define IMM_010 ((__u_long_long) 10 << 32)
+#define IMM_011 ((__u_long_long) 11 << 32)
+#define IMM_012 ((__u_long_long) 12 << 32)
+#define IMM_013 ((__u_long_long) 13 << 32)
+#define IMM_014 ((__u_long_long) 14 << 32)
+#define IMM_015 ((__u_long_long) 15 << 32)
+#define IMM_016 ((__u_long_long) 16 << 32)
+#define IMM_017 ((__u_long_long) 17 << 32)
+#define IMM_018 ((__u_long_long) 18 << 32)
+#define IMM_019 ((__u_long_long) 19 << 32)
+#define IMM_020 ((__u_long_long) 20 << 32)
+#define IMM_021 ((__u_long_long) 21 << 32)
+#define IMM_022 ((__u_long_long) 22 << 32)
+#define IMM_023 ((__u_long_long) 23 << 32)
+#define IMM_024 ((__u_long_long) 24 << 32)
+#define IMM_025 ((__u_long_long) 25 << 32)
+#define IMM_026 ((__u_long_long) 26 << 32)
+#define IMM_027 ((__u_long_long) 27 << 32)
+#define IMM_028 ((__u_long_long) 28 << 32)
+#define IMM_029 ((__u_long_long) 29 << 32)
+#define IMM_030 ((__u_long_long) 30 << 32)
+#define IMM_031 ((__u_long_long) 31 << 32)
+
+
+typedef struct Memory_buffer_info {
+        u_int32_t            buf_size;
+        VAPI_virt_addr_t     buf_addr;
+        char                *bufptr;
+        VAPI_mrw_t           mr;
+        VAPI_mr_hndl_t       mr_hndl;
+        int                  status;
+        int                  ref_count;
+        int                  buf_type;
+        VAPI_virt_addr_t     raddr;
+        VAPI_rkey_t          rkey;
+        VAPI_lkey_t          lkey;
+} Memory_buffer_info;
+
+typedef struct RDMA_Info_Exchange {
+        int                  opcode;
+        int                  buf_length;
+        VAPI_mrw_t           recv_rdma_mr;
+        VAPI_mr_hndl_t       recv_rdma_mr_hndl;
+        VAPI_mrw_t           send_rdma_mr;
+        VAPI_mr_hndl_t       send_rdma_mr_hndl;
+        VAPI_virt_addr_t     raddr;
+        VAPI_rkey_t          rkey;
+        int                  flag;
+} RDMA_Info_Exchange;
+
+// opcode for RDMA info exchange RTS/CTS
+#define Ready_To_send   0x10000000
+#define Clear_To_send   0x01000000
+
+#define RDMA_RTS_ID           5555
+#define RDMA_CTS_ID           7777
+#define RDMA_OP_ID            9999
+#define SEND_RECV_TEST_ID     2222
+#define SEND_RECV_TEST_BUF_ID 0
+
+#define TEST_SEND_MESSAGE     0x00000001
+#define TEST_RECV_MESSAGE     0x00000002
+
+
+#define RTS_CTS_TIMEOUT           50
+#define RECEIVING_THREAD_TIMEOUT  50
+#define WAIT_FOR_SEND_BUF_TIMEOUT 50
+
+#define IBNAL_DEBUG_LEVEL_1  0XFFFFFFFF
+#define IBNAL_DEBUG_LEVEL_2  (D_PORTALS | D_NET | D_WARNING | D_MALLOC | \
+                              D_ERROR | D_OTHER | D_TRACE | D_INFO)
+
+
+// flag for RDMA info exchange
+#define RDMA_BUFFER_RESERVED    0x10000000
+#define RDMA_BUFFER_UNAVAILABLE 0x01000000
+
+
+// receiving data structure
+typedef struct {
+        ptl_hdr_t       *krx_buffer;   // pointer to receiving buffer
+        unsigned long    krx_len;      // length of buffer
+        unsigned int     krx_size;     //
+        unsigned int     krx_priority; // do we need this
+        struct list_head krx_item;
+} kibnal_rx_t;
+
+// transmitting data structure
+typedef struct {
+        nal_cb_t        *ktx_nal;
+        void            *ktx_private;
+        lib_msg_t       *ktx_cookie;
+        char            *ktx_buffer;
+        size_t           ktx_len;
+        unsigned long    ktx_size;
+        int              ktx_ndx;
+        unsigned int     ktx_priority;
+        unsigned int     ktx_tgt_node;
+        unsigned int     ktx_tgt_port_id;
+} kibnal_tx_t;
+
+
+typedef struct {
+        char             kib_init;
+        char             kib_shuttingdown;
+        IB_port_t        port_num;      // IB port information
+        struct list_head kib_list;
+        ptl_nid_t        kib_nid;
+        nal_t           *kib_nal;
+        nal_cb_t        *kib_cb;
+        struct kib_trans *kib_trans;    // do I need this
+        struct tq_struct kib_ready_tq;
+        spinlock_t       kib_dispatch_lock;
+} kibnal_data_t;
+
+
+//
+// A data structure for keeping the HCA information in system
+// information related to HCA and
hca_handle will be kept here +// +typedef struct HCA_Info +{ + VAPI_hca_hndl_t hca_hndl; // HCA handle + VAPI_pd_hndl_t pd_hndl; // protection domain + IB_port_t port; // port number + int num_qp; // number of qp used + QP_info *qp_ptr[NUM_QPS]; // point to QP_list + int num_cq; // number of cq used + VAPI_cq_hndl_t cq_hndl; + VAPI_cq_hndl_t sq_cq_hndl; + VAPI_cq_hndl_t rq_cq_hndl; + IB_lid_t dlid; + IB_lid_t slid; + kibnal_data_t *kib_data; // for PORTALS operations +} HCA_info; + + + + +// Remote HCA Info information +typedef struct Remote_HCA_Info { + unsigned long opcode; + unsigned long length; + IB_lid_t dlid[NUM_QPS]; + VAPI_qp_num_t rqp_num[NUM_QPS]; +} Remote_QP_Info; + +typedef struct Bucket_index{ + int start; + int end; +} Bucket_index; + +// functional prototypes +// infiniband initialization +int kib_init(kibnal_data_t *); + +// receiving thread +void kibnal_recv_thread(HCA_info *); +void recv_thread(HCA_info *); + +// forward data packet +void kibnal_fwd_packet (void *, kpr_fwd_desc_t *); + +// global data structures +extern kibnal_data_t kibnal_data; +extern ptl_handle_ni_t kibnal_ni; +extern nal_t kibnal_api; +extern nal_cb_t kibnal_lib; +extern QP_info QP_list[]; +extern QP_info CQ_list[]; +extern HCA_info Hca_data; +extern VAPI_hca_hndl_t Hca_hndl; +extern VAPI_pd_hndl_t Pd_hndl; +extern VAPI_hca_vendor_t Hca_vendor; +extern VAPI_hca_cap_t Hca_cap; +extern VAPI_hca_port_t Hca_port_1_props; +extern VAPI_hca_port_t Hca_port_2_props; +extern VAPI_hca_attr_t Hca_attr; +extern VAPI_hca_attr_mask_t Hca_attr_mask; +extern VAPI_cq_hndl_t Cq_SQ_hndl; +extern VAPI_cq_hndl_t Cq_RQ_hndl; +extern VAPI_cq_hndl_t Cq_hndl; +extern unsigned long User_Defined_Small_Msg_Size; +extern Remote_QP_Info L_HCA_RDMA_Info; +extern Remote_QP_Info R_HCA_RDMA_Info; +extern unsigned int Num_posted_recv_buf; +extern int R_RDMA_DATA_ARRIVED; +extern Memory_buffer_info MRbuf_list[]; +extern Memory_buffer_info MSbuf_list[]; +extern Bucket_index Bucket[]; +extern RDMA_Info_Exchange Rdma_info; +extern int Cts_Message_arrived; +extern RDMA_Info_Exchange Local_rdma_info; +extern spinlock_t MSB_mutex[]; + + + +// kernel NAL API function prototype +int kibnal_forward(nal_t *,int ,void *,size_t ,void *,size_t ); +void kibnal_lock(nal_t *, unsigned long *); +void kibnal_unlock(nal_t *, unsigned long *); +int kibnal_shutdown(nal_t *, int ); +void kibnal_yield( nal_t * ); +void kibnal_invalidate(nal_cb_t *,void *,size_t ,void *); +int kibnal_validate(nal_cb_t *,void *,size_t ,void **); + + + +nal_t *kibnal_init(int , ptl_pt_index_t , ptl_ac_index_t , ptl_pid_t ); +void __exit kibnal_finalize(void ); +VAPI_ret_t create_qp(QP_info *, int ); +VAPI_ret_t init_qp(QP_info *, int ); +VAPI_ret_t IB_Open_HCA(kibnal_data_t *); +VAPI_ret_t IB_Close_HCA(void ); +VAPI_ret_t createMemRegion(VAPI_hca_hndl_t, VAPI_pd_hndl_t); +VAPI_ret_t deleteMemRegion(QP_info *, int ); + +void ibnal_send_recv_self_testing(int *); + +int __init kibnal_initialize(void); + + + +/* CB NAL functions */ +int kibnal_send(nal_cb_t *, + void *, + lib_msg_t *, + ptl_hdr_t *, + int, + ptl_nid_t, + ptl_pid_t, + unsigned int, + ptl_kiov_t *, + size_t); + +int kibnal_send_pages(nal_cb_t *, + void *, + lib_msg_t *, + ptl_hdr_t *, + int, + ptl_nid_t, + ptl_pid_t, + unsigned int, + ptl_kiov_t *, + size_t); +int kibnal_recv(nal_cb_t *, void *, lib_msg_t *, + unsigned int, struct iovec *, size_t, size_t); +int kibnal_recv_pages(nal_cb_t *, void *, lib_msg_t *, + unsigned int, ptl_kiov_t *, size_t, size_t); +int kibnal_read(nal_cb_t *,void *,void *,user_ptr ,size_t 
); +int kibnal_write(nal_cb_t *,void *,user_ptr ,void *,size_t ); +int kibnal_callback(nal_cb_t * , void *, lib_eq_t *, ptl_event_t *); +void *kibnal_malloc(nal_cb_t *,size_t ); +void kibnal_free(nal_cb_t *,void *,size_t ); +int kibnal_map(nal_cb_t *, unsigned int , struct iovec *, void **); +void kibnal_unmap(nal_cb_t *, unsigned int , struct iovec *, void **); +int kibnal_map_pages(nal_cb_t *, unsigned int , ptl_kiov_t *, void **); +void kibnal_unmap_pages(nal_cb_t * , unsigned int , ptl_kiov_t *, void **); +void kibnal_printf(nal_cb_t *, const char *, ...); +void kibnal_cli(nal_cb_t *,unsigned long *); +void kibnal_sti(nal_cb_t *,unsigned long *); +int kibnal_dist(nal_cb_t *,ptl_nid_t ,unsigned long *); + +void kibnal_fwd_packet (void *, kpr_fwd_desc_t *); +void kibnal_rx(kibnal_data_t *, + VAPI_virt_addr_t , + u_int32_t, + u_int32_t, + unsigned int); + +int kibnal_end(kibnal_data_t *); + +void async_event_handler(VAPI_hca_hndl_t , VAPI_event_record_t *,void *); + +void CQE_event_handler(VAPI_hca_hndl_t ,VAPI_cq_hndl_t , void *); + + +VAPI_ret_t Send_Small_Msg(char *, int ); +VAPI_ret_t Send_Large_Msg(char *, int ); + +VAPI_ret_t repost_recv_buf(QP_info *, VAPI_wr_id_t ); +int post_recv_bufs(VAPI_wr_id_t ); +int server_listen_thread(void *); +VAPI_wr_id_t RTS_handshaking_protocol(int ); +VAPI_wr_id_t CTS_handshaking_protocol(RDMA_Info_Exchange *); + +VAPI_ret_t createMemRegion_RDMA(VAPI_hca_hndl_t , + VAPI_pd_hndl_t , + char *, + int , + VAPI_mr_hndl_t *, + VAPI_mrw_t *); + + +VAPI_ret_t IB_Set_Event_Handler(HCA_info , kibnal_data_t *); + +VAPI_ret_t IB_Set_Async_Event_Handler(HCA_info ,kibnal_data_t *); + +VAPI_wr_id_t find_available_buf(int ); +VAPI_wr_id_t search_send_buf(int ); +VAPI_wr_id_t find_filler_list(int ,int ); +int insert_MRbuf_list(int ); + + +#endif /* _IBNAL_H */ diff --git a/lustre/portals/knals/ibnal/ibnal_cb.c b/lustre/portals/knals/ibnal/ibnal_cb.c new file mode 100644 index 0000000..2c07cc4 --- /dev/null +++ b/lustre/portals/knals/ibnal/ibnal_cb.c @@ -0,0 +1,1288 @@ +/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- + * vim:expandtab:shiftwidth=8:tabstop=8: + * + * Based on ksocknal and qswnal + * + * Author: Hsing-bung Chen + * + * This file is part of Portals, http://www.sf.net/projects/sandiaportals/ + * + * Portals is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * Portals is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Portals; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + + +#include "ibnal.h" + + + + +RDMA_Info_Exchange Rdma_nfo; +int Cts_Msg_Arrived = NO; + + +/* + * LIB functions follow + */ + +// +// read +// copy a block of data from scr_addr to dst_addr +// it all happens in kernel space - dst_addr and src_addr +// +// original definition is to read a block od data from a +// specified user address +// +// cb_read + +int kibnal_read (nal_cb_t *nal, + void *private, + void *dst_addr, + user_ptr src_addr, + size_t len) +{ + CDEBUG(D_NET, "kibnal_read: 0x%Lx: reading %ld bytes from %p -> %p\n", + nal->ni.nid, (long)len, src_addr, dst_addr ); + + memcpy( dst_addr, src_addr, len ); + + return 0; +} + +// +// it seems that read and write are doing the same thing +// because they all happen in kernel space +// why do we need two functions like read and write +// to make PORTALS API compatable +// + +// +// write +// copy a block of data from scr_addr to dst_addr +// it all happens in kernel space - dst_addr and src_addr +// +// original definition is to write a block od data to a +// specified user address +// +// cb_write + +int kibnal_write(nal_cb_t *nal, + void *private, + user_ptr dst_addr, + void *src_addr, + size_t len) +{ + CDEBUG(D_NET, "kibnal_write: 0x%Lx: writing %ld bytes from %p -> %p\n", + nal->ni.nid, (long)len, src_addr, dst_addr ); + + + memcpy( dst_addr, src_addr, len ); + + return 0; +} + +// +// malloc +// +// either vmalloc or kmalloc is used +// dynamically allocate a block of memory based on the size of buffer +// +// cb_malloc + +void * kibnal_malloc(nal_cb_t *nal, size_t length) +{ + void *buffer; + + // PORTAL_ALLOC will do the job + // allocate a buffer with size "length" + PORTAL_ALLOC(buffer, length); + + return buffer; +} + +// +// free +// release a dynamically allocated memory pointed by buffer pointer +// +// cb_free + +void kibnal_free(nal_cb_t *nal, void *buffer, size_t length) +{ + // + // release allocated buffer to system + // + PORTAL_FREE(buffer, length); +} + +// +// invalidate +// because evernthing is in kernel space (LUSTRE) +// there is no need to mark a piece of user memory as no longer in use by +// the system +// +// cb_invalidate + +void kibnal_invalidate(nal_cb_t *nal, + void *base, + size_t extent, + void *addrkey) +{ + // do nothing + CDEBUG(D_NET, "kibnal_invalidate: 0x%Lx: invalidating %p : %d\n", + nal->ni.nid, base, extent); + return; +} + + +// +// validate +// because everything is in kernel space (LUSTRE) +// there is no need to mark a piece of user memory in use by +// the system +// +// cb_validate + +int kibnal_validate(nal_cb_t *nal, + void *base, + size_t extent, + void **addrkey) +{ + // do nothing + CDEBUG(D_NET, "kibnal_validate: 0x%Lx: validating %p : %d\n", + nal->ni.nid, base, extent); + + return 0; +} + + +// +// log messages from kernel space +// printk() is used +// +// cb_printf + +void kibnal_printf(nal_cb_t *nal, const char *fmt, ...) +{ + va_list ap; + char msg[256]; + + if (portal_debug & D_NET) { + va_start( ap, fmt ); + vsnprintf( msg, sizeof(msg), fmt, ap ); + va_end( ap ); + + printk("CPUId: %d %s",smp_processor_id(), msg); + } +} + +// +// clear interrupt +// use spin_lock to lock protected area such as MD, ME... 
+// so a process can enter a protected area and do some works +// this won't physicall disable interrup but use a software +// spin-lock to control some protected areas +// +// cb_cli + +void kibnal_cli(nal_cb_t *nal, unsigned long *flags) +{ + kibnal_data_t *data= nal->nal_data; + + CDEBUG(D_NET, "kibnal_cli \n"); + + spin_lock_irqsave(&data->kib_dispatch_lock,*flags); + +} + +// +// set interrupt +// use spin_lock to unlock protected area such as MD, ME... +// this won't physicall enable interrup but use a software +// spin-lock to control some protected areas +// +// cb_sti + +void kibnal_sti(nal_cb_t *nal, unsigned long *flags) +{ + kibnal_data_t *data= nal->nal_data; + + CDEBUG(D_NET, "kibnal_sti \n"); + + spin_unlock_irqrestore(&data->kib_dispatch_lock,*flags); +} + + + +// +// nic distance +// +// network distance doesn't mean much for this nal +// here we only indicate +// 0 - operation is happened on the same node +// 1 - operation is happened on different nodes +// router will handle the data routing +// +// cb_dist + +int kibnal_dist(nal_cb_t *nal, ptl_nid_t nid, unsigned long *dist) +{ + CDEBUG(D_NET, "kibnal_dist \n"); + + if ( nal->ni.nid == nid ) { + *dist = 0; + } + else { + *dist = 1; + } + + return 0; // always retrun 0 +} + + +// +// This is the cb_send() on IB based interconnect system +// prepare a data package and use VAPI_post_sr() to send it +// down-link out-going message +// + + +int +kibnal_send(nal_cb_t *nal, + void *private, + lib_msg_t *cookie, + ptl_hdr_t *hdr, + int type, + ptl_nid_t nid, + ptl_pid_t pid, + unsigned int niov, + ptl_kiov_t *iov, + size_t len) +{ + + int rc=0; + void *buf = NULL; + unsigned long buf_length = sizeof(ptl_hdr_t) + len; + int expected_buf_size = 0; + VAPI_ret_t vstat; + + PROF_START(kibnal_send); // time stamp send start + + CDEBUG(D_NET,"kibnal_send: sending %d bytes from %p to nid: 0x%Lx pid %d\n", + buf_length, iov, nid, HCA_PORT_1); + + + // do I need to check the gateway information + // do I have problem to send direct + // do I have to forward a data packet to gateway + // + // The current connection is back-to-back + // I always know that data will be send from one-side to + // the other side + // + + // + // check data buffer size + // + // MSG_SIZE_SMALL + // regular post send + // + // MSG_SIZE_LARGE + // rdma write + + if(buf_length <= SMALL_MSG_SIZE) { + expected_buf_size = MSG_SIZE_SMALL; + } + else { + if(buf_length > MAX_MSG_SIZE) { + CERROR("kibnal_send:request exceeds Transmit data size (%d).\n", + MAX_MSG_SIZE); + rc = -1; + return rc; + } + else { + expected_buf_size = MSG_SIZE_LARGE; // this is a large data package + } + } + + // prepare data packet for send operation + // + // allocate a data buffer "buf" with size of buf_len(header + payload) + // --------------- + // buf | hdr | size = sizeof(ptl_hdr_t) + // -------------- + // |payload data | size = len + // --------------- + + // copy header to buf + memcpy(buf, hdr, sizeof(ptl_hdr_t)); + + // copy payload data from iov to buf + // use portals library function lib_copy_iov2buf() + + if (len != 0) + lib_copy_iov2buf(((char *)buf) + sizeof (ptl_hdr_t), + niov, + iov, + len); + + // buf is ready to do a post send + // the send method is base on the buf_size + + CDEBUG(D_NET,"ib_send %d bytes (size %d) from %p to nid: 0x%Lx " + " port %d\n", buf_length, expected_buf_size, iov, nid, HCA_PORT_1); + + switch(expected_buf_size) { + case MSG_SIZE_SMALL: + // send small message + if((vstat = Send_Small_Msg(buf, buf_length)) != VAPI_OK){ + CERROR("Send_Small_Msg() 
is failed\n"); + } + break; + + case MSG_SIZE_LARGE: + // send small message + if((vstat = Send_Large_Msg(buf, buf_length)) != VAPI_OK){ + CERROR("Send_Large_Msg() is failed\n"); + } + break; + + default: + CERROR("Unknown message size %d\n", expected_buf_size); + break; + } + + PROF_FINISH(kibnal_send); // time stapm of send operation + + rc = 1; + + return rc; +} + +// +// kibnal_send_pages +// +// no support +// +// do you need this +// +int kibnal_send_pages(nal_cb_t * nal, + void *private, + lib_msg_t * cookie, + ptl_hdr_t * hdr, + int type, + ptl_nid_t nid, + ptl_pid_t pid, + unsigned int niov, + ptl_kiov_t *iov, + size_t mlen) +{ + int rc = 1; + + CDEBUG(D_NET, "kibnal_send_pages\n"); + + // do nothing now for Infiniband + + return rc; +} + + + + + +// +// kibnal_fwd_packet +// +// no support +// +// do you need this +// +void kibnal_fwd_packet (void *arg, kpr_fwd_desc_t *fwd) +{ + CDEBUG(D_NET, "forwarding not implemented\n"); + return; + +} + +// +// kibnal_callback +// +// no support +// +// do you need this +// +int kibnal_callback(nal_cb_t * nal, + void *private, + lib_eq_t *eq, + ptl_event_t *ev) +{ + CDEBUG(D_NET, "callback not implemented\n"); + return PTL_OK; +} + + +/* Process a received portals packet */ +// +// conver receiving data in to PORTALS header +// + +void kibnal_rx(kibnal_data_t *kib, + VAPI_virt_addr_t buffer_addr, + u_int32_t buffer_len, + u_int32_t buffer_size, + unsigned int priority) +{ + ptl_hdr_t *hdr = (ptl_hdr_t *) buffer_addr; // case to ptl header format + kibnal_rx_t krx; + + CDEBUG(D_NET,"kibnal_rx: buf %p, len %ld\n", buffer_addr, buffer_len); + + if ( buffer_len < sizeof( ptl_hdr_t ) ) { + /* XXX what's this for? */ + if (kib->kib_shuttingdown) + return; + CERROR("kibnal_rx: did not receive complete portal header, " + "len= %ld", buffer_len); + + return; + } + + // typedef struct { + // char *krx_buffer; // pointer to receiving buffer + // unsigned long krx_len; // length of buffer + // unsigned int krx_size; // + // unsigned int krx_priority; // do we need this + // struct list_head krx_item; + // } kibnal_rx_t; + // + krx.krx_buffer = hdr; + krx.krx_len = buffer_len; + krx.krx_size = buffer_size; + krx.krx_priority = priority; + + if ( hdr->dest_nid == kibnal_lib.ni.nid ) { + // this is my data + PROF_START(lib_parse); + + lib_parse(&kibnal_lib, (ptl_hdr_t *)krx.krx_buffer, &krx); + + PROF_FINISH(lib_parse); + } else { + /* forward to gateway */ + // Do we expect this happened ? + // + CERROR("kibnal_rx: forwarding not implemented yet"); + } + + return; +} + + + + +// +// kibnal_recv_pages +// +// no support +// +// do you need this +// +int +kibnal_recv_pages(nal_cb_t * nal, + void *private, + lib_msg_t * cookie, + unsigned int niov, + ptl_kiov_t *iov, + size_t mlen, + size_t rlen) +{ + + CDEBUG(D_NET, "recv_pages not implemented\n"); + return PTL_OK; + +} + + +int +kibnal_recv(nal_cb_t *nal, + void *private, + lib_msg_t *cookie, + unsigned int niov, + struct iovec *iov, + size_t mlen, + size_t rlen) +{ + kibnal_rx_t *krx = private; + + CDEBUG(D_NET,"kibnal_recv: mlen=%d, rlen=%d\n", mlen, rlen); + + /* What was actually received must be >= what sender claims to + * have sent. This is an LASSERT, since lib-move doesn't + * check cb return code yet. 
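+         * (Concretely: krx_len, the number of bytes that actually
+         * arrived, must cover the portals header plus rlen, the payload
+         * length the sender claimed; mlen <= rlen is the portion the
+         * caller wants copied out.)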
*/ + LASSERT (krx->krx_len >= sizeof (ptl_hdr_t) + rlen); + LASSERT (mlen <= rlen); + + PROF_START(kibnal_recv); + + if(mlen != 0) { + PROF_START(memcpy); + lib_copy_buf2iov (niov, iov, krx->krx_buffer + + sizeof (ptl_hdr_t), mlen); + PROF_FINISH(memcpy); + } + + PROF_START(lib_finalize); + + lib_finalize(nal, private, cookie); + + PROF_FINISH(lib_finalize); + PROF_FINISH(kibnal_recv); + + return rlen; +} + +// +// kibnal_map +// no support +// do you need this +// +int kibnal_map(nal_cb_t * nal, + unsigned int niov, + struct iovec *iov, + void **addrkey) +{ + CDEBUG(D_NET, "map not implemented\n"); + return PTL_OK; +} + + + +// +// kibnal_unmap +// +// no support +// +// do you need this +// +void kibnal_unmap(nal_cb_t * nal, + unsigned int niov, + struct iovec *iov, + void **addrkey) +{ + CDEBUG(D_NET, "unmap not implemented\n"); + return; +} + + + +// +// kibnal_map_pages +// no support +// do you need this +/* as (un)map, but with a set of page fragments */ +int kibnal_map_pages(nal_cb_t * nal, + unsigned int niov, + ptl_kiov_t *iov, + void **addrkey) +{ + CDEBUG(D_NET, "map_pages not implemented\n"); + return PTL_OK; +} + + + +// +// kibnal_unmap_pages +// +// no support +// +// do you need this +// +void kibnal_unmap_pages(nal_cb_t * nal, + unsigned int niov, + ptl_kiov_t *iov, + void **addrkey) +{ + CDEBUG(D_NET, "unmap_pages not implemented\n"); + return ; +} + + +int kibnal_end(kibnal_data_t *kib) +{ + + /* wait for sends to finish ? */ + /* remove receive buffers */ + /* shutdown receive thread */ + + CDEBUG(D_NET, "kibnal_end\n"); + IB_Close_HCA(); + + return 0; +} + + +// +// +// asynchronous event handler: response to some unexpetced operation errors +// +// void async_event_handler(VAPI_hca_hndl_t hca_hndl, +// VAPI_event_record_t *event_record_p, +// void* private_data) +// the HCA drive will prepare evetn_record_p +// +// this handler is registered with VAPI_set_async_event_handler() +// VAPI_set_async_event_handler() is issued when an HCA is created +// +// +void async_event_handler(VAPI_hca_hndl_t hca_hndl, + VAPI_event_record_t *event_record_p, + void* private_data) +{ + // + // * event_record_p is prepared by the system when an async + // event happened + // * what to do with private_data + // * do we expect more async events happened if so what are they + // + // only log ERROR message now + + switch (event_record_p->type) { + case VAPI_PORT_ERROR: + printk("Got PORT_ERROR event. port number=%d\n", + event_record_p->modifier.port_num); + break; + case VAPI_PORT_ACTIVE: + printk("Got PORT_ACTIVE event. port number=%d\n", + event_record_p->modifier.port_num); + break; + case VAPI_QP_PATH_MIGRATED: /*QP*/ + printk("Got P_PATH_MIGRATED event. qp_hndl=%lu\n", + event_record_p->modifier.qp_hndl); + break; + case VAPI_EEC_PATH_MIGRATED: /*EEC*/ + printk("Got EEC_PATH_MIGRATED event. eec_hndl=%d\n", + event_record_p->modifier.eec_hndl); + break; + case VAPI_QP_COMM_ESTABLISHED: /*QP*/ + printk("Got QP_COMM_ESTABLISHED event. qp_hndl=%lu\n", + event_record_p->modifier.qp_hndl); + break; + case VAPI_EEC_COMM_ESTABLISHED: /*EEC*/ + printk("Got EEC_COMM_ESTABLISHED event. eec_hndl=%d\n", + event_record_p->modifier.eec_hndl); + break; + case VAPI_SEND_QUEUE_DRAINED: /*QP*/ + printk("Got SEND_QUEUE_DRAINED event. qp_hndl=%lu\n", + event_record_p->modifier.qp_hndl); + break; + case VAPI_CQ_ERROR: /*CQ*/ + printk("Got CQ_ERROR event. 
cq_hndl=%lu\n", + event_record_p->modifier.cq_hndl); + break; + case VAPI_LOCAL_WQ_INV_REQUEST_ERROR: /*QP*/ + printk("Got LOCAL_WQ_INV_REQUEST_ERROR event. qp_hndl=%lu\n", + event_record_p->modifier.qp_hndl); + break; + case VAPI_LOCAL_WQ_ACCESS_VIOL_ERROR: /*QP*/ + printk("Got LOCAL_WQ_ACCESS_VIOL_ERROR event. qp_hndl=%lu\n", + event_record_p->modifier.qp_hndl); + break; + case VAPI_LOCAL_WQ_CATASTROPHIC_ERROR: /*QP*/ + printk("Got LOCAL_WQ_CATASTROPHIC_ERROR event. qp_hndl=%lu\n", + event_record_p->modifier.qp_hndl); + break; + case VAPI_PATH_MIG_REQ_ERROR: /*QP*/ + printk("Got PATH_MIG_REQ_ERROR event. qp_hndl=%lu\n", + event_record_p->modifier.qp_hndl); + break; + case VAPI_LOCAL_CATASTROPHIC_ERROR: /*none*/ + printk("Got LOCAL_CATASTROPHIC_ERROR event. \n"); + break; + default: + printk(":got non-valid event type=%d. IGNORING\n", + event_record_p->type); + } + +} + + + + +VAPI_wr_id_t +search_send_buf(int buf_length) +{ + VAPI_wr_id_t send_id = -1; + u_int32_t i; + int flag = NO; + int loop_count = 0; + + CDEBUG(D_NET, "search_send_buf \n"); + + while((flag == NO) && (loop_count < MAX_LOOP_COUNT)) { + for(i=0; i < NUM_ENTRY; i++) { + // problem about using spinlock + spin_lock(&MSB_mutex[i]); + if(MSbuf_list[i].status == BUF_REGISTERED) { + MSbuf_list[i].status = BUF_INUSE;// make send buf as inuse + flag = YES; + spin_unlock(&MSB_mutex[i]); + break; + } + else + spin_unlock(&MSB_mutex[i]); + } + + loop_count++; + schedule_timeout(200); // wait for a while + } + + if(flag == NO) { + CDEBUG(D_NET, "search_send_buf: could not locate an entry in MSbuf_list\n"); + } + + send_id = (VAPI_wr_id_t ) i; + + return send_id; +} + + + +VAPI_wr_id_t +search_RDMA_recv_buf(int buf_length) +{ + VAPI_wr_id_t recv_id = -1; + u_int32_t i; + int flag = NO; + int loop_count = 0; + + CDEBUG(D_NET, "search_RDMA_recv_buf\n"); + + while((flag == NO) && (loop_count < MAX_LOOP_COUNT)) { + + for(i=NUM_ENTRY; i < NUM_MBUF; i++) { + + spin_lock(&MSB_mutex[i]); + + if((MRbuf_list[i].status == BUF_REGISTERED) && + (MRbuf_list[i].buf_size >= buf_length)) { + MSbuf_list[i].status = BUF_INUSE;// make send buf as inuse + flag = YES; + spin_unlock(&MSB_mutex[i]); + break; + } + else + spin_unlock(&MSB_mutex[i]); + } + + loop_count++; + + schedule_timeout(200); // wait for a while + } + + if(flag == NO) { + CERROR("search_RDMA_recv_buf: could not locate an entry in MBbuf_list\n"); + } + + recv_id = (VAPI_wr_id_t ) i; + + return recv_id; + +} + + + + + + + +VAPI_ret_t Send_Small_Msg(char *buf, int buf_length) +{ + VAPI_ret_t vstat; + VAPI_sr_desc_t sr_desc; + VAPI_sg_lst_entry_t sr_sg; + QP_info *qp; + VAPI_wr_id_t send_id; + + CDEBUG(D_NET, "Send_Small_Msg\n"); + + send_id = search_send_buf(buf_length); + + if(send_id < 0){ + CERROR("Send_Small_Msg: Can not find a QP \n"); + return(~VAPI_OK); + } + + qp = &QP_list[(int) send_id]; + + // find a suitable/registered send_buf from MSbuf_list + CDEBUG(D_NET, "Send_Small_Msg: current send id %d \n", send_id); + + sr_desc.opcode = VAPI_SEND; + sr_desc.comp_type = VAPI_SIGNALED; + sr_desc.id = send_id; + + + // scatter and gather info + sr_sg.len = buf_length; + sr_sg.lkey = MSbuf_list[send_id].mr.l_key; // use send MR + + sr_sg.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MSbuf_list[send_id].buf_addr; + + // copy data to register send buffer + memcpy(&sr_sg.addr, buf, buf_length); + + sr_desc.sg_lst_p = &sr_sg; + sr_desc.sg_lst_len = 1; // only 1 entry is used + sr_desc.fence = TRUE; + sr_desc.set_se = FALSE; + + // call VAPI_post_sr to send out this data + vstat = 
VAPI_post_sr(qp->hca_hndl, qp->qp_hndl, &sr_desc); + + if (vstat != VAPI_OK) { + CERROR("VAPI_post_sr failed (%s).\n",VAPI_strerror(vstat)); + } + + CDEBUG(D_NET, "VAPI_post_sr success.\n"); + + return (vstat); + +} + + + + +VAPI_wr_id_t +RTS_handshaking_protocol(int buf_length) +{ + + VAPI_ret_t vstat; + VAPI_sr_desc_t sr_desc; + VAPI_sg_lst_entry_t sr_sg; + VAPI_wr_id_t send_id; + + RDMA_Info_Exchange rdma_info; + + rdma_info.opcode = Ready_To_send; + rdma_info.buf_length = buf_length; + rdma_info.raddr = (VAPI_virt_addr_t) 0; + rdma_info.rkey = (VAPI_rkey_t) 0 ; + + QP_info *qp; + + CDEBUG(D_NET, "RTS_handshaking_protocol\n"); + + // find a suitable/registered send_buf from MSbuf_list + send_id = search_send_buf(sizeof(RDMA_Info_Exchange)); + + qp = &QP_list[(int) send_id]; + + CDEBUG(D_NET, "RTS_CTS: current send id %d \n", send_id); + sr_desc.opcode = VAPI_SEND; + sr_desc.comp_type = VAPI_SIGNALED; + sr_desc.id = send_id + RDMA_RTS_ID;// this RTS mesage ID + + // scatter and gather info + sr_sg.len = sizeof(RDMA_Info_Exchange); + sr_sg.lkey = MSbuf_list[send_id].mr.l_key; // use send MR + sr_sg.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MSbuf_list[send_id].buf_addr; + + // copy data to register send buffer + memcpy(&sr_sg.addr, &rdma_info, sizeof(RDMA_Info_Exchange)); + + sr_desc.sg_lst_p = &sr_sg; + sr_desc.sg_lst_len = 1; // only 1 entry is used + sr_desc.fence = TRUE; + sr_desc.set_se = FALSE; + + // call VAPI_post_sr to send out this RTS message data + vstat = VAPI_post_sr(qp->hca_hndl, qp->qp_hndl, &sr_desc); + + if (vstat != VAPI_OK) { + CERROR("RTS: VAPI_post_sr failed (%s).\n",VAPI_strerror_sym(vstat)); + } + + return send_id; + +} + + + +// create local receiving Memory Region for a HCA +VAPI_ret_t +createMemRegion_RDMA(VAPI_hca_hndl_t hca_hndl, + VAPI_pd_hndl_t pd_hndl, + char *bufptr, + int buf_length, + VAPI_mr_hndl_t *rep_mr_hndl, + VAPI_mrw_t *rep_mr) +{ + VAPI_ret_t vstat; + VAPI_mrw_t mrw; + + CDEBUG(D_NET, "createMemRegion_RDMA\n"); + + // memory region address and size of memory region + // allocate a block of memory for this HCA + // RDMA data buffer + + + if(bufptr == NULL) { + // need to allcate a local buffer to receive data from a + // remore VAPI_RDMA_WRITE_IMM + PORTAL_ALLOC(bufptr, buf_length); + } + + if(bufptr == NULL) { + CDEBUG(D_MALLOC, "Failed to malloc a block of RDMA receiving memory, size %d\n", + buf_length); + return(VAPI_ENOMEM); + } + + /* Register RDAM data Memory region */ + CDEBUG(D_NET, "Register a RDMA data memory region\n"); + + mrw.type = VAPI_MR; + mrw.pd_hndl= pd_hndl; + mrw.start = (VAPI_virt_addr_t )(MT_virt_addr_t )bufptr; + mrw.size = buf_length; + mrw.acl = VAPI_EN_LOCAL_WRITE | + VAPI_EN_REMOTE_WRITE | + VAPI_EN_REMOTE_READ; + + // register send memory region + vstat = VAPI_register_mr(hca_hndl, + &mrw, + rep_mr_hndl, + rep_mr); + + // this memory region is going to be reused until deregister is called + if (vstat != VAPI_OK) { + CERROR("Failed registering a mem region Addr=%p, Len=%d. 
%s\n", + bufptr, buf_length, VAPI_strerror(vstat)); + } + + return(vstat); + +} + + + +RDMA_Info_Exchange Local_rdma_info; + +int insert_MRbuf_list(int buf_lenght) +{ + int recv_id = NUM_ENTRY; + + CDEBUG(D_NET, "insert_MRbuf_list\n"); + + for(recv_id= NUM_ENTRY; recv_id < NUM_MBUF; recv_id++){ + if(BUF_UNREGISTERED == MRbuf_list[recv_id].status) { + MRbuf_list[recv_id].status = BUF_UNREGISTERED; + MRbuf_list[recv_id].buf_size = buf_lenght; + break; + } + } + + return recv_id; + +} + +VAPI_wr_id_t +CTS_handshaking_protocol(RDMA_Info_Exchange *rdma_info) +{ + + VAPI_ret_t vstat; + VAPI_sr_desc_t sr_desc; + VAPI_sg_lst_entry_t sr_sg; + QP_info *qp; + VAPI_wr_id_t send_id; + VAPI_mr_hndl_t rep_mr_hndl; + VAPI_mrw_t rep_mr; + int recv_id; + char *bufptr = NULL; + + // search MRbuf_list for an available entry that + // has registered data buffer with size equal to rdma_info->buf_lenght + + CDEBUG(D_NET, "CTS_handshaking_protocol\n"); + + // register memory buffer for RDAM operation + + vstat = createMemRegion_RDMA(Hca_hndl, + Pd_hndl, + bufptr, + rdma_info->buf_length, + &rep_mr_hndl, + &rep_mr); + + + Local_rdma_info.opcode = Clear_To_send; + Local_rdma_info.recv_rdma_mr = rep_mr; + Local_rdma_info.recv_rdma_mr_hndl = rep_mr_hndl; + + if (vstat != VAPI_OK) { + CERROR("CST_handshaking_protocol: Failed registering a mem region" + "Len=%d. %s\n", rdma_info->buf_length, VAPI_strerror(vstat)); + Local_rdma_info.flag = RDMA_BUFFER_UNAVAILABLE; + } + else { + // successfully allcate reserved RDAM data buffer + recv_id = insert_MRbuf_list(rdma_info->buf_length); + + if(recv_id >= NUM_ENTRY) { + MRbuf_list[recv_id].buf_addr = rep_mr.start; + MRbuf_list[recv_id].mr = rep_mr; + MRbuf_list[recv_id].mr_hndl = rep_mr_hndl; + MRbuf_list[recv_id].ref_count = 0; + Local_rdma_info.flag = RDMA_BUFFER_RESERVED; + Local_rdma_info.buf_length = rdma_info->buf_length; + Local_rdma_info.raddr = rep_mr.start; + Local_rdma_info.rkey = rep_mr.r_key; + } + else { + CERROR("Can not find an entry in MRbuf_list - how could this happen\n"); + } + } + + // find a suitable/registered send_buf from MSbuf_list + send_id = search_send_buf(sizeof(RDMA_Info_Exchange)); + CDEBUG(D_NET, "CTS: current send id %d \n", send_id); + sr_desc.opcode = VAPI_SEND; + sr_desc.comp_type = VAPI_SIGNALED; + sr_desc.id = send_id + RDMA_CTS_ID; // this CST message ID + + // scatter and gather info + sr_sg.len = sizeof(RDMA_Info_Exchange); + sr_sg.lkey = MSbuf_list[send_id].mr.l_key; // use send MR + sr_sg.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MSbuf_list[send_id].buf_addr; + + // copy data to register send buffer + memcpy(&sr_sg.addr, &Local_rdma_info, sizeof(RDMA_Info_Exchange)); + + sr_desc.sg_lst_p = &sr_sg; + sr_desc.sg_lst_len = 1; // only 1 entry is used + sr_desc.fence = TRUE; + sr_desc.set_se = FALSE; + + // call VAPI_post_sr to send out this RTS message data + vstat = VAPI_post_sr(qp->hca_hndl, qp->qp_hndl, &sr_desc); + + if (vstat != VAPI_OK) { + CERROR("CTS: VAPI_post_sr failed (%s).\n",VAPI_strerror(vstat)); + } + + +} + + + +VAPI_ret_t Send_Large_Msg(char *buf, int buf_length) +{ + VAPI_ret_t vstat; + VAPI_sr_desc_t sr_desc; + VAPI_sg_lst_entry_t sr_sg; + QP_info *qp; + VAPI_mrw_t rep_mr; + VAPI_mr_hndl_t rep_mr_hndl; + int send_id; + VAPI_imm_data_t imm_data = 0XAAAA5555; + + + CDEBUG(D_NET, "Send_Large_Msg: Enter\n"); + + // register this large buf + // don't need to copy this buf to send buffer + vstat = createMemRegion_RDMA(Hca_hndl, + Pd_hndl, + buf, + buf_length, + &rep_mr_hndl, + &rep_mr); + + if (vstat != VAPI_OK) { + 
CERROR("Send_Large_M\sg: createMemRegion_RDMAi() failed (%s).\n", + VAPI_strerror(vstat)); + } + + + Local_rdma_info.send_rdma_mr = rep_mr; + Local_rdma_info.send_rdma_mr_hndl = rep_mr_hndl; + + // + // Prepare descriptor for send queue + // + + // ask for a remote rdma buffer with size buf_lenght + send_id = RTS_handshaking_protocol(buf_length); + + qp = &QP_list[send_id]; + + // wait for CTS message receiving from remote node + while(1){ + if(YES == Cts_Message_arrived) { + // receive CST message from remote node + // Rdma_info is available for use + break; + } + schedule_timeout(RTS_CTS_TIMEOUT); + } + + sr_desc.id = send_id + RDMA_OP_ID; + sr_desc.opcode = VAPI_RDMA_WRITE_WITH_IMM; + sr_desc.comp_type = VAPI_SIGNALED; + + // scatter and gather info + sr_sg.len = buf_length; + + // rdma mr + sr_sg.lkey = rep_mr.l_key; + sr_sg.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) rep_mr.start; + sr_desc.sg_lst_p = &sr_sg; + sr_desc.sg_lst_len = 1; // only 1 entry is used + + // immediate data - not used here + sr_desc.imm_data = imm_data; + sr_desc.fence = TRUE; + sr_desc.set_se = FALSE; + + // RDAM operation only + // raddr and rkey is receiving from remote node + sr_desc.remote_addr = Rdma_info.raddr; + sr_desc.r_key = Rdma_info.rkey; + + // call VAPI_post_sr to send out this data + vstat = VAPI_post_sr(qp->hca_hndl, qp->qp_hndl, &sr_desc); + + if (vstat != VAPI_OK) { + CERROR("VAPI_post_sr failed (%s).\n",VAPI_strerror_sym(vstat)); + } + +} + + + + + + +// +// repost_recv_buf +// post a used recv buffer back to recv WQE list +// wrq_id is used to indicate the starting position of recv-buffer +// +VAPI_ret_t +repost_recv_buf(QP_info *qp, + VAPI_wr_id_t wrq_id) +{ + VAPI_rr_desc_t rr; + VAPI_sg_lst_entry_t sg_entry; + VAPI_ret_t ret; + + CDEBUG(D_NET, "repost_recv_buf\n"); + + sg_entry.lkey = MRbuf_list[wrq_id].mr.l_key; + sg_entry.len = MRbuf_list[wrq_id].buf_size; + sg_entry.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MRbuf_list[wrq_id].buf_addr; + rr.opcode = VAPI_RECEIVE; + rr.comp_type = VAPI_SIGNALED; /* All with CQE (IB compliant) */ + rr.sg_lst_len = 1; /* single buffers */ + rr.sg_lst_p = &sg_entry; + rr.id = wrq_id; /* WQE id used is the index to buffers ptr array */ + + ret= VAPI_post_rr(qp->hca_hndl,qp->qp_hndl,&rr); + + if (ret != VAPI_OK){ + CERROR("failed reposting RQ WQE (%s) buffer \n",VAPI_strerror_sym(ret)); + return ret; + } + + CDEBUG(D_NET, "Successfully reposting an RQ WQE %d recv bufer \n", wrq_id); + + return ret ; +} + +// +// post_recv_bufs +// post "num_o_bufs" for receiving data +// each receiving buf (buffer starting address, size of buffer) +// each buffer is associated with an id +// +int +post_recv_bufs(VAPI_wr_id_t start_id) +{ + int i; + VAPI_rr_desc_t rr; + VAPI_sg_lst_entry_t sg_entry; + VAPI_ret_t ret; + + CDEBUG(D_NET, "post_recv_bufs\n"); + + for(i=0; i< NUM_ENTRY; i++) { + sg_entry.lkey = MRbuf_list[i].mr.l_key; + sg_entry.len = MRbuf_list[i].buf_size; + sg_entry.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MRbuf_list[i].buf_addr; + rr.opcode = VAPI_RECEIVE; + rr.comp_type = VAPI_SIGNALED; /* All with CQE (IB compliant) */ + rr.sg_lst_len = 1; /* single buffers */ + rr.sg_lst_p = &sg_entry; + rr.id = start_id+i; /* WQE id used is the index to buffers ptr array */ + + ret= VAPI_post_rr(QP_list[i].hca_hndl,QP_list[i].qp_hndl, &rr); + if (ret != VAPI_OK) { + CERROR("failed posting RQ WQE (%s)\n",VAPI_strerror_sym(ret)); + return i; + } + } + + return i; /* num of buffers posted */ +} + +int +post_RDMA_bufs(QP_info *qp, + void *buf_array, + unsigned int num_bufs, + 
unsigned int buf_size, + VAPI_wr_id_t start_id) +{ + + CDEBUG(D_NET, "post_RDMA_bufs \n"); + return YES; +} + + + +// +// LIB NAL +// assign function pointers to theirs corresponding entries +// + +nal_cb_t kibnal_lib = { + nal_data: &kibnal_data, /* NAL private data */ + cb_send: kibnal_send, + cb_send_pages: NULL, // not implemented + cb_recv: kibnal_recv, + cb_recv_pages: NULL, // not implemented + cb_read: kibnal_read, + cb_write: kibnal_write, + cb_callback: NULL, // not implemented + cb_malloc: kibnal_malloc, + cb_free: kibnal_free, + cb_map: NULL, // not implemented + cb_unmap: NULL, // not implemented + cb_map_pages: NULL, // not implemented + cb_unmap_pages: NULL, // not implemented + cb_printf: kibnal_printf, + cb_cli: kibnal_cli, + cb_sti: kibnal_sti, + cb_dist: kibnal_dist // no used at this moment +}; diff --git a/lustre/portals/knals/ibnal/ibnal_send_recv_self_testing.c b/lustre/portals/knals/ibnal/ibnal_send_recv_self_testing.c new file mode 100644 index 0000000..82defdb --- /dev/null +++ b/lustre/portals/knals/ibnal/ibnal_send_recv_self_testing.c @@ -0,0 +1,116 @@ +/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- + * vim:expandtab:shiftwidth=8:tabstop=8: + * * + * * Based on ksocknal, qswnal, and gmnal + * * + * * Copyright (C) 2003 LANL + * * Author: HB Chen + * * Los Alamos National Lab + * * + * * Portals is free software; you can redistribute it and/or + * * modify it under the terms of version 2 of the GNU General Public + * * License as published by the Free Software Foundation. + * * + * * Portals is distributed in the hope that it will be useful, + * * but WITHOUT ANY WARRANTY; without even the implied warranty of + * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * * GNU General Public License for more details. + * * + * * You should have received a copy of the GNU General Public License + * * along with Portals; if not, write to the Free Software + * * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * *
+ */
+
+#include "ibnal.h"
+
+
+VAPI_ret_t ibnal_send_recv_self_testing()
+{
+        VAPI_ret_t          vstat;
+        VAPI_sr_desc_t      sr_desc;
+        VAPI_sg_lst_entry_t sr_sg;
+        QP_info            *qp;
+        VAPI_wr_id_t        send_id;
+        int                 buf_id;
+        char                sbuf[KB_32];
+        char                rbuf[KB_32];
+        int                 i;
+        int                 n;
+        int                 buf_length = KB_32;
+        VAPI_wc_desc_t      comp_desc;
+        int                 num_send   = 1;
+        int                 loop_count = 0;
+
+        printk("ibnal_send_recv_self_testing\n");
+
+        memset(&sbuf, 'a', KB_32);
+        memset(&rbuf, ' ', KB_32);
+
+        send_id = SEND_RECV_TEST_ID;
+        buf_id  = SEND_RECV_TEST_BUF_ID;
+
+        qp = &QP_list[buf_id];
+
+        sr_desc.opcode    = VAPI_SEND;
+        sr_desc.comp_type = VAPI_SIGNALED;
+        sr_desc.id        = send_id;
+
+        // scatter and gather info
+        sr_sg.len  = KB_32;
+        sr_sg.lkey = MSbuf_list[buf_id].mr.l_key; // use send MR
+        sr_sg.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MSbuf_list[buf_id].buf_addr;
+
+        // copy the test pattern into the registered send buffer itself
+        // (not into the 8-byte sr_sg.addr descriptor field)
+        memcpy(MSbuf_list[buf_id].bufptr, sbuf, buf_length);
+
+        sr_desc.sg_lst_p   = &sr_sg;
+        sr_desc.sg_lst_len = 1; // only 1 entry is used
+        sr_desc.fence      = TRUE;
+        sr_desc.set_se     = FALSE;
+
+        // call VAPI_post_sr to send out this data
+        vstat = VAPI_post_sr(qp->hca_hndl, qp->qp_hndl, &sr_desc);
+
+        if (vstat != VAPI_OK) {
+                printk("VAPI_post_sr failed (%s).\n", VAPI_strerror(vstat));
+                return vstat;
+        }
+
+        printk("VAPI_post_sr success.\n");
+
+        // poll for completion
+
+        while( loop_count < 100 ) {
+                vstat = VAPI_poll_cq(qp->hca_hndl, qp->cq_hndl, &comp_desc);
+                if( vstat == VAPI_OK ) {
+                        if(comp_desc.opcode == VAPI_CQE_SQ_SEND_DATA ) {
+                                /* SEND completion */
+                                printk("received SQ completion\n");
+                        }
+                        else {
+                                if(comp_desc.opcode == VAPI_CQE_RQ_SEND_DATA ) {
+                                        /* RECEIVE completion */
+                                        printk("received RQ completion\n");
+                                        memcpy(rbuf, MRbuf_list[buf_id].bufptr, KB_32);
+
+                                        n = memcmp(sbuf, rbuf, KB_32);
+                                        printk("compare sbuf and rbuf n = %d\n", n);
+                                }
+                                else {
+                                        printk("unexpected completion opcode %d \n",
+                                               comp_desc.opcode);
+                                }
+                        }
+                }
+
+                loop_count++;
+                schedule_timeout(500);
+        }
+
+        printk("end of ibnal_send_recv_self_testing\n");
+
+        return vstat;
+}
diff --git a/lustre/portals/knals/ibnal/uagent.c b/lustre/portals/knals/ibnal/uagent.c
new file mode 100644
index 0000000..d7e939a
--- /dev/null
+++ b/lustre/portals/knals/ibnal/uagent.c
@@ -0,0 +1,391 @@
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+// Infiniband VAPI/EVAPI header files Mellanox MT23108 VAPI
+#include
+#include
+#include
+#include
+
+// Remote HCA Info information
+typedef struct Remote_HCA_Info {
+        unsigned long   opcode;
+        unsigned long   length;
+        IB_lid_t        dlid[256];
+        VAPI_qp_num_t   rqp_num[256];
+        VAPI_rkey_t     rkey;         // for remote RDMA request
+        unsigned long   vaddr1;       // virtual address first 4 bytes
+        unsigned long   vaddr2;       // virtual address second 4 bytes
+        u_int32_t       size;         // size of RDMA memory buffer
+        char            dest_ip[256]; // destination server IP address
+} Remote_HCA_Info;
+
+#define SHARED_SEGMENT_SIZE 0x10000   // 64KB shared memory between U and K
+
+// some internal opcodes for IB operations used in IBNAL
+#define SEND_QP_INFO        0X00000001
+#define RECV_QP_INFO        0X00000010
+#define DEFAULT_SOCKET_PORT 11211
+#define LISTEN_QUEUE_SIZE   2048
+#define DEST_IP             "10.128.105.26"
+
+// server_thread
+// + wait for an incoming connection from remote node
+// + receive remote HCA's data
+//
+void *server_thread(void *vargp)
+{
+        Remote_HCA_Info *hca_data;
+        Remote_HCA_Info  hca_data_buffer;
+
+        int serverfd;
+        int infd;
+        struct hostent *hp;
+        struct sockaddr_in serveraddr;
+        struct sockaddr_in clientaddr;
+        int sin_size=sizeof(struct
sockaddr_in); + int bytes_recv; + int i; + + + hca_data = (Remote_HCA_Info *) vargp; + + if((serverfd = socket(AF_INET, SOCK_STREAM, 0)) < 0) { + printf("server_thread couldnot create a socket \n"); + pthread_exit((void *) 0); + } + + printf("server_thread create a socket \n"); + + bzero((char *) &serveraddr, sizeof(serveraddr)); + + serveraddr.sin_family = AF_INET; + serveraddr.sin_addr.s_addr = htons(INADDR_ANY); + serveraddr.sin_port = htons((unsigned short) DEFAULT_SOCKET_PORT); + + if(bind(serverfd,(struct sockaddr *)&serveraddr,sizeof(struct sockaddr)) < 0) { + printf("server_thread couldnot bind to a socket \n"); + pthread_exit((void *) 0); + } + + printf("server_thread bind to a socket \n"); + + if(listen(serverfd, LISTEN_QUEUE_SIZE) < 0) { + printf("server_thread couldnot listen to a socket \n"); + pthread_exit((void *) 0); + } + + printf("server_thread listen to a socket \n"); + + // + // I only expect to receive one HCA data from a remote HCA + // + printf("server_thread: Waiting for a connection\n"); + infd= accept(serverfd,(struct sockaddr*)&clientaddr,&sin_size); + printf("server_thread: Got an incoming connection"); + + /* receive data from socket into buffer */ + bytes_recv = recv(infd, + &hca_data_buffer, + sizeof(Remote_HCA_Info), + 0); + + if(bytes_recv > 0) { +/* + printf("server_thread receive data\n"); + printf("opcode is 0x%X\n", hca_data_buffer.opcode); + printf("length is 0x%X\n", hca_data_buffer.length); + + for(i=0; i < 256; i++) { + printf("dlid %d is 0x%X\n", i, hca_data_buffer.dlid[i]); + printf("rqp_num %d is 0x%X\n", hca_data_buffer.rqp_num[i]); + } + + printf("rkey is 0x%X\n", hca_data_buffer.rkey); + printf("vaddr1 is 0x%X\n", hca_data_buffer.vaddr1); + printf("vaddr2 is 0x%X\n", hca_data_buffer.vaddr2); + printf("size is 0x%X\n", hca_data_buffer.size); + printf("After conversion hton \n"); + printf("opcode is 0x%X\n", htonl(hca_data_buffer.opcode)); + printf("length is 0x%X\n", htonl(hca_data_buffer.length)); + + for(i=0; i < 256; i++) { + printf("dlid %d is 0x%X\n", htons(hca_data_buffer.dlid[i])); + printf("rqp_num %d is 0x%X\n", htonl(hca_data_buffer.rqp_num[i])); + } + + printf("rkey is 0x%X\n", htonl(hca_data_buffer.rkey)); + printf("vaddr1 is 0x%X\n", htonl(hca_data_buffer.vaddr1)); + printf("vaddr2 is 0x%X\n", htonl(hca_data_buffer.vaddr2)); + printf("size is 0x%X\n", htonl(hca_data_buffer.size)); +*/ + + hca_data->opcode = ntohl(hca_data_buffer.opcode); // long + hca_data->length = ntohl(hca_data_buffer.length); // long + + for(i=0; i < 256; i++) { + hca_data->dlid[i] = ntohs(hca_data_buffer.dlid[i]); // u_int16 + hca_data->rqp_num[i] = ntohl(hca_data_buffer.rqp_num[i]);// u_int32 + } + + hca_data->rkey = ntohl(hca_data_buffer.rkey); // u_int32 + hca_data->vaddr1 = ntohl(hca_data_buffer.vaddr1); // first word u_int32 + hca_data->vaddr2 = ntohl(hca_data_buffer.vaddr2); // second word u_int32 + hca_data->size = ntohl(hca_data_buffer.size); // u_int32 + } + else { + printf("server_thread receive ERROR bytes_recv = %d\n", bytes_recv); + } + + close(infd); + close(serverfd); + + printf("server_thread EXIT \n"); + + pthread_exit((void *) 0); + +} + +// +// client_thread +// + connect to a remote server_thread +// + send local HCA's data to remote server_thread +// +void *client_thread(void *vargp) +{ + + Remote_HCA_Info *hca_data; + Remote_HCA_Info hca_data_buffer; + + int clientfd; + struct hostent *hp; + struct sockaddr_in clientaddr; + int bytes_send; + int i; + + hca_data = (Remote_HCA_Info *) vargp; + + if((clientfd = socket(AF_INET, SOCK_STREAM, 
0)) < 0) { + printf("client_thread couldnot create a socket \n"); + pthread_exit((void *) 0); + } + + printf("client_thread create a socket \n"); + + bzero((char *) &clientaddr, sizeof(clientaddr)); + + clientaddr.sin_family = AF_INET; + clientaddr.sin_addr.s_addr = inet_addr(hca_data->dest_ip); + printf("client_thread get server Ip address = %s\n", hca_data->dest_ip); + clientaddr.sin_port = htons((unsigned short) DEFAULT_SOCKET_PORT); + memset(&(clientaddr.sin_zero), '\0', 8); + + connect(clientfd, (struct sockaddr *) &clientaddr, sizeof(struct sockaddr)); + + printf("client_thread connect to server Ip address = %s\n", hca_data->dest_ip); + + hca_data_buffer.opcode = htonl(hca_data->opcode); // long + hca_data_buffer.length = htonl(hca_data->length); // long + + for(i=0; i < 256; i++) { + hca_data_buffer.dlid[i] = htons(hca_data->dlid[i]); // u_int16 + hca_data_buffer.rqp_num[i] = htonl(hca_data->rqp_num[i]);// u_int32 + } + + hca_data_buffer.rkey = htonl(hca_data->rkey); // u_int32 + hca_data_buffer.vaddr1 = htonl(hca_data->vaddr1); // first word u_int32 + hca_data_buffer.vaddr2 = htonl(hca_data->vaddr2); // second word u_int32 + hca_data_buffer.size = htonl(hca_data->size); // u_int32 + + bytes_send = send(clientfd, & hca_data_buffer, sizeof(Remote_HCA_Info), 0); + + if(bytes_send == sizeof(Remote_HCA_Info)) { + printf("client_thread: send successfully \n"); + } + else { + printf("client_thread: send failed \n"); + } + + printf("client_thread EXIT \n"); + + pthread_exit((void *) 0); +} + + +// +// main +// + create a shared-memory between this main()/user address and +// a kernel thread/kernel address space associated with inbal +// kernel module +// + access local HCA's data through this shared memory +// +// + create a server_thread for receiving remote HCA's data +// + create a client_thread for sending out local HCA's data +// + after receiving remote HCA's data update this shared memory +// +int main(int argc , char *argv[]) +{ + int segment_id; + struct shmid_ds shmbuffer; + int segment_size; + const int shared_segment_size = sizeof(Remote_HCA_Info); + key_t key = 999; + unsigned long raddr; + Remote_HCA_Info *shared_memory; + Remote_HCA_Info exchange_hca_data; + Remote_HCA_Info remote_hca_data; + int i; + + /* pthread */ + pthread_t sid; + pthread_t cid; + pthread_attr_t attr; + int rc, status; + + char dest_ip[256]; + + if(argc != 2) { + printf("USAGE: uagent server_ip_address\n"); + printf("argc = %d \n", argc); + exit(1); + } + + strcpy(&exchange_hca_data.dest_ip[0], argv[1]); + printf("the destinational server IP address = %s\n", + &exchange_hca_data.dest_ip); + + segment_id = shmget(key, shared_segment_size, IPC_CREAT | 0666); + + printf("sys_shmget is done segment_id = %d\n", segment_id); + + shared_memory = (Remote_HCA_Info *) shmat(segment_id, 0, 0); + + if(shared_memory == (char *) -1) { + printf("Shared memory attach failed shared_memory=%p\n",shared_memory); + exit(0); + } + + printf("shared menory attached at address %p\n", shared_memory); + + while (1) { + if(shared_memory->opcode == SEND_QP_INFO) { + printf("Local HCA data received from kernel thread\n"); + break; + } + usleep(1000); + continue; + } + + printf("Local HCA data received from kernel thread\n"); + + // save local HCA's data in exchange_hca_data + // + exchange_hca_data.opcode = shared_memory->opcode; + exchange_hca_data.length = shared_memory->length; + + for(i=0; i < 256; i++) { + exchange_hca_data.dlid[i] = shared_memory->dlid[i]; + exchange_hca_data.rqp_num[i] = shared_memory->rqp_num[i]; + } + + 
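+        /*
+         * The 64-bit RDMA target address travels as two 32-bit words
+         * (vaddr1/vaddr2) because the wire exchange uses 32-bit
+         * htonl()/ntohl().  Assuming vaddr1 carries the first (high)
+         * word, as the struct comments suggest, a receiver rebuilds
+         * the address as, e.g.:
+         *
+         *     raddr = ((unsigned long long) vaddr1 << 32) | vaddr2;
+         */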
exchange_hca_data.rkey = shared_memory->rkey; + exchange_hca_data.vaddr1 = shared_memory->vaddr1; + exchange_hca_data.vaddr2 = shared_memory->vaddr2; + exchange_hca_data.size = shared_memory->size; + + /* Initialize and set thread detached attribute */ + pthread_attr_init(&attr); + pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); + + /* create a server thread for procsssing incoming remote node socket data */ + // + pthread_create(&sid, + &attr, + server_thread, + (Remote_HCA_Info *) &remote_hca_data); + + printf("Main: created a server thread \n"); + + sleep(10); + + /* create a clint thread to send out local HCA data to remote node */ + pthread_create(&cid, + &attr, + client_thread, + (Remote_HCA_Info *) &exchange_hca_data); + + printf("Main: created a client thread \n"); + + /* synchronization between server_thread and client_thread */ + pthread_attr_destroy(&attr); + + rc = pthread_join(sid, (void **) &status); + if(rc) { + printf("Error: return code from pthread_join() is %d\n", rc); + exit(-1); + } + + printf("completed join with thread %d status = %d\n", sid, status); + + rc = pthread_join(cid, (void **) &status); + if(rc) { + printf("Error: return code from pthread_join() is %d\n", rc); + exit(-1); + } + printf("completed join with thread %d status = %d\n", cid, status); + + // update shared memory with remote HCA's data + + shared_memory->opcode = RECV_QP_INFO; + shared_memory->length = remote_hca_data.length; + for(i=0; i < 256; i++) { + shared_memory->dlid[i] = remote_hca_data.dlid[i]; + shared_memory->rqp_num[i]= remote_hca_data.rqp_num[i]; + } + shared_memory->rkey = remote_hca_data.rkey; + shared_memory->vaddr1 = remote_hca_data.vaddr1; + shared_memory->vaddr2 = remote_hca_data.vaddr2; + shared_memory->size = remote_hca_data.size; + + sleep(5); + + shared_memory->opcode = RECV_QP_INFO; + shared_memory->length = remote_hca_data.length; + for(i=0; i < 256; i++) { + shared_memory->dlid[i] = remote_hca_data.dlid[i]; + shared_memory->rqp_num[i]= remote_hca_data.rqp_num[i]; + } + + shared_memory->rkey = remote_hca_data.rkey; + shared_memory->vaddr1 = remote_hca_data.vaddr1; + shared_memory->vaddr2 = remote_hca_data.vaddr2; + shared_memory->size = remote_hca_data.size; + + sleep(10); + +// shmdt(shared_memory); + + printf("uagent is DONE \n"); + + + + exit(0); + +} + diff --git a/lustre/portals/knals/qswnal/qswnal.c b/lustre/portals/knals/qswnal/qswnal.c index 4472e30..9caf381 100644 --- a/lustre/portals/knals/qswnal/qswnal.c +++ b/lustre/portals/knals/qswnal/qswnal.c @@ -131,7 +131,7 @@ kqswnal_get_tx_desc (struct portals_cfg *pcfg) pcfg->pcfg_pbuf1 = (char *)ktx; pcfg->pcfg_count = NTOH__u32(ktx->ktx_wire_hdr->type); - pcfg->pcfg_size = NTOH__u32(PTL_HDR_LENGTH(ktx->ktx_wire_hdr)); + pcfg->pcfg_size = NTOH__u32(ktx->ktx_wire_hdr->payload_length); pcfg->pcfg_nid = NTOH__u64(ktx->ktx_wire_hdr->dest_nid); pcfg->pcfg_nid2 = ktx->ktx_nid; pcfg->pcfg_misc = ktx->ktx_launcher; diff --git a/lustre/portals/knals/qswnal/qswnal_cb.c b/lustre/portals/knals/qswnal/qswnal_cb.c index 006ea49..43926c9 100644 --- a/lustre/portals/knals/qswnal/qswnal_cb.c +++ b/lustre/portals/knals/qswnal/qswnal_cb.c @@ -542,8 +542,9 @@ kqswnal_cerror_hdr(ptl_hdr_t * hdr) { char *type_str = hdr_type_string (hdr); - CERROR("P3 Header at %p of type %s\n", hdr, type_str); - CERROR(" From nid/pid "LPU64"/%u", NTOH__u64(hdr->src_nid), + CERROR("P3 Header at %p of type %s length %d\n", hdr, type_str, + NTOH__u32(hdr->payload_length)); + CERROR(" From nid/pid "LPU64"/%u\n", NTOH__u64(hdr->src_nid), 
NTOH__u32(hdr->src_pid)); CERROR(" To nid/pid "LPU64"/%u\n", NTOH__u64(hdr->dest_nid), NTOH__u32(hdr->dest_pid)); @@ -556,8 +557,7 @@ kqswnal_cerror_hdr(ptl_hdr_t * hdr) hdr->msg.put.ack_wmd.wh_interface_cookie, hdr->msg.put.ack_wmd.wh_object_cookie, NTOH__u64 (hdr->msg.put.match_bits)); - CERROR(" Length %d, offset %d, hdr data "LPX64"\n", - NTOH__u32(PTL_HDR_LENGTH(hdr)), + CERROR(" offset %d, hdr data "LPX64"\n", NTOH__u32(hdr->msg.put.offset), hdr->msg.put.hdr_data); break; @@ -582,10 +582,9 @@ kqswnal_cerror_hdr(ptl_hdr_t * hdr) break; case PTL_MSG_REPLY: - CERROR(" dst md "LPX64"."LPX64", length %d\n", + CERROR(" dst md "LPX64"."LPX64"\n", hdr->msg.reply.dst_wmd.wh_interface_cookie, - hdr->msg.reply.dst_wmd.wh_object_cookie, - NTOH__u32 (PTL_HDR_LENGTH(hdr))); + hdr->msg.reply.dst_wmd.wh_object_cookie); } } /* end of print_hdr() */ diff --git a/lustre/portals/knals/scimacnal/scimacnal.c b/lustre/portals/knals/scimacnal/scimacnal.c index f3fe617..5ffba31 100644 --- a/lustre/portals/knals/scimacnal/scimacnal.c +++ b/lustre/portals/knals/scimacnal/scimacnal.c @@ -123,7 +123,7 @@ static nal_t *kscimacnal_init(int interface, ptl_pt_index_t ptl_size, { int nnids = 512; /* FIXME: Need ScaMac funktion to get #nodes */ - CDEBUG(D_NET, "calling lib_init with nid 0x%Lx nnids %d\n", kscimacnal_data.ksci_nid, nnids); + CDEBUG(D_NET, "calling lib_init with nid "LPX64" nnids %d\n", kscimacnal_data.ksci_nid, nnids); lib_init(&kscimacnal_lib, kscimacnal_data.ksci_nid, 0, nnids,ptl_size, ac_size); return &kscimacnal_api; } diff --git a/lustre/portals/knals/scimacnal/scimacnal_cb.c b/lustre/portals/knals/scimacnal/scimacnal_cb.c index 7d5796e..b31c2ea 100644 --- a/lustre/portals/knals/scimacnal/scimacnal_cb.c +++ b/lustre/portals/knals/scimacnal/scimacnal_cb.c @@ -208,7 +208,7 @@ kscimacnal_sendmsg(nal_cb_t *nal, unsigned long physaddr; - CDEBUG(D_NET, "sending %d bytes from %p/%p to nid 0x%Lx niov: %d\n", + CDEBUG(D_NET, "sending %d bytes from %p/%p to nid "LPX64" niov: %d\n", payload_len, payload_iov, payload_kiov, nid, payload_niov); /* Basic sanity checks */ diff --git a/lustre/portals/knals/socknal/socknal.c b/lustre/portals/knals/socknal/socknal.c index 3d0c758..da47785 100644 --- a/lustre/portals/knals/socknal/socknal.c +++ b/lustre/portals/knals/socknal/socknal.c @@ -45,6 +45,8 @@ kpr_nal_interface_t ksocknal_router_interface = { #define SOCKNAL_SYSCTL_TIMEOUT 1 #define SOCKNAL_SYSCTL_EAGER_ACK 2 #define SOCKNAL_SYSCTL_ZERO_COPY 3 +#define SOCKNAL_SYSCTL_TYPED 4 +#define SOCKNAL_SYSCTL_MIN_BULK 5 static ctl_table ksocknal_ctl_table[] = { {SOCKNAL_SYSCTL_TIMEOUT, "timeout", @@ -58,6 +60,12 @@ static ctl_table ksocknal_ctl_table[] = { &ksocknal_data.ksnd_zc_min_frag, sizeof (int), 0644, NULL, &proc_dointvec}, #endif + {SOCKNAL_SYSCTL_TYPED, "typed", + &ksocknal_data.ksnd_typed_conns, sizeof (int), + 0644, NULL, &proc_dointvec}, + {SOCKNAL_SYSCTL_MIN_BULK, "min_bulk", + &ksocknal_data.ksnd_min_bulk, sizeof (int), + 0644, NULL, &proc_dointvec}, { 0 } }; @@ -86,7 +94,7 @@ ksocknal_api_shutdown(nal_t *nal, int ni) CDEBUG (D_NET, "closing all connections\n"); ksocknal_del_route (PTL_NID_ANY, 0, 0, 0); - ksocknal_close_conn (PTL_NID_ANY, 0); + ksocknal_close_matching_conns (PTL_NID_ANY, 0); return PTL_OK; } @@ -198,7 +206,7 @@ ksocknal_bind_irq (unsigned int irq) ksock_route_t * ksocknal_create_route (__u32 ipaddr, int port, int buffer_size, - int nonagel, int xchange_nids, int irq_affinity, int eager) + int nonagel, int irq_affinity, int eager) { ksock_route_t *route; @@ -215,13 +223,12 @@ 
ksocknal_create_route (__u32 ipaddr, int port, int buffer_size, route->ksnr_port = port; route->ksnr_buffer_size = buffer_size; route->ksnr_irq_affinity = irq_affinity; - route->ksnr_xchange_nids = xchange_nids; route->ksnr_nonagel = nonagel; route->ksnr_eager = eager; route->ksnr_connecting = 0; + route->ksnr_connected = 0; route->ksnr_deleted = 0; - route->ksnr_generation = 0; - route->ksnr_conn = NULL; + route->ksnr_conn_count = 0; return (route); } @@ -230,7 +237,6 @@ void ksocknal_destroy_route (ksock_route_t *route) { LASSERT (route->ksnr_sharecount == 0); - LASSERT (route->ksnr_conn == NULL); if (route->ksnr_peer != NULL) ksocknal_put_peer (route->ksnr_peer); @@ -397,8 +403,7 @@ ksocknal_get_route_by_idx (int index) int ksocknal_add_route (ptl_nid_t nid, __u32 ipaddr, int port, int bufnob, - int nonagle, int xchange_nids, int bind_irq, - int share, int eager) + int nonagle, int bind_irq, int share, int eager) { unsigned long flags; ksock_peer_t *peer; @@ -415,8 +420,8 @@ ksocknal_add_route (ptl_nid_t nid, __u32 ipaddr, int port, int bufnob, if (peer == NULL) return (-ENOMEM); - route = ksocknal_create_route (ipaddr, port, bufnob, nonagle, - xchange_nids, bind_irq, eager); + route = ksocknal_create_route (ipaddr, port, bufnob, + nonagle, bind_irq, eager); if (route == NULL) { ksocknal_put_peer (peer); return (-ENOMEM); @@ -455,7 +460,7 @@ ksocknal_add_route (ptl_nid_t nid, __u32 ipaddr, int port, int bufnob, route->ksnr_peer = peer; atomic_inc (&peer->ksnp_refcount); /* peer's route list takes existing ref on route */ - list_add (&route->ksnr_list, &peer->ksnp_routes); + list_add_tail (&route->ksnr_list, &peer->ksnp_routes); } route->ksnr_sharecount++; @@ -468,8 +473,10 @@ ksocknal_add_route (ptl_nid_t nid, __u32 ipaddr, int port, int bufnob, void ksocknal_del_route_locked (ksock_route_t *route, int share, int keep_conn) { - ksock_peer_t *peer = route->ksnr_peer; - ksock_conn_t *conn = route->ksnr_conn; + ksock_peer_t *peer = route->ksnr_peer; + ksock_conn_t *conn; + struct list_head *ctmp; + struct list_head *cnxt; if (!share) route->ksnr_sharecount = 0; @@ -479,18 +486,22 @@ ksocknal_del_route_locked (ksock_route_t *route, int share, int keep_conn) return; } - if (conn != NULL) { - if (!keep_conn) + list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) { + conn = list_entry(ctmp, ksock_conn_t, ksnc_list); + + if (conn->ksnc_route != route) + continue; + + if (!keep_conn) { ksocknal_close_conn_locked (conn, 0); - else { - /* keeping the conn; just dissociate it and route... */ - conn->ksnc_route = NULL; - route->ksnr_conn = NULL; - ksocknal_put_route (route); /* drop conn's ref on route */ - ksocknal_put_conn (conn); /* drop route's ref on conn */ + continue; } + + /* keeping the conn; just dissociate it and route... 
*/ + conn->ksnc_route = NULL; + ksocknal_put_route (route); /* drop conn's ref on route */ } - + route->ksnr_deleted = 1; list_del (&route->ksnr_list); ksocknal_put_route (route); /* drop peer's ref */ @@ -669,9 +680,11 @@ ksocknal_choose_scheduler_locked (unsigned int irq) } int -ksocknal_create_conn (ptl_nid_t nid, ksock_route_t *route, - struct socket *sock, int bind_irq) +ksocknal_create_conn (ksock_route_t *route, struct socket *sock, + int bind_irq, int type) { + ptl_nid_t nid; + __u64 incarnation; unsigned long flags; ksock_conn_t *conn; ksock_peer_t *peer; @@ -692,6 +705,19 @@ ksocknal_create_conn (ptl_nid_t nid, ksock_route_t *route, if (rc != 0) return (rc); + if (route == NULL) { + /* acceptor or explicit connect */ + nid = PTL_NID_ANY; + } else { + LASSERT (type != SOCKNAL_CONN_NONE); + /* autoconnect: expect this nid on exchange */ + nid = route->ksnr_peer->ksnp_nid; + } + + rc = ksocknal_hello (sock, &nid, &type, &incarnation); + if (rc != 0) + return (rc); + peer = NULL; if (route == NULL) { /* not autoconnect */ /* Assume this socket connects to a brand new peer */ @@ -711,6 +737,8 @@ ksocknal_create_conn (ptl_nid_t nid, ksock_route_t *route, conn->ksnc_peer = NULL; conn->ksnc_route = NULL; conn->ksnc_sock = sock; + conn->ksnc_type = type; + conn->ksnc_incarnation = incarnation; conn->ksnc_saved_data_ready = sock->sk->sk_data_ready; conn->ksnc_saved_write_space = sock->sk->sk_write_space; atomic_set (&conn->ksnc_refcount, 1); /* 1 ref for me */ @@ -732,7 +760,8 @@ ksocknal_create_conn (ptl_nid_t nid, ksock_route_t *route, if (route != NULL) { /* Autoconnected! */ - LASSERT (route->ksnr_conn == NULL && route->ksnr_connecting); + LASSERT ((route->ksnr_connected & (1 << type)) == 0); + LASSERT ((route->ksnr_connecting & (1 << type)) != 0); if (route->ksnr_deleted) { /* This conn was autoconnected, but the autoconnect @@ -745,14 +774,13 @@ ksocknal_create_conn (ptl_nid_t nid, ksock_route_t *route, } - /* associate conn/route for auto-reconnect */ - route->ksnr_conn = conn; - atomic_inc (&conn->ksnc_refcount); + /* associate conn/route */ conn->ksnc_route = route; atomic_inc (&route->ksnr_refcount); - route->ksnr_connecting = 0; - route->ksnr_generation++; + route->ksnr_connecting &= ~(1 << type); + route->ksnr_connected |= (1 << type); + route->ksnr_conn_count++; route->ksnr_retry_interval = SOCKNAL_MIN_RECONNECT_INTERVAL; peer = route->ksnr_peer; @@ -803,8 +831,13 @@ ksocknal_create_conn (ptl_nid_t nid, ksock_route_t *route, ksocknal_queue_tx_locked (tx, conn); } + rc = ksocknal_close_stale_conns_locked (peer, incarnation); + write_unlock_irqrestore (&ksocknal_data.ksnd_global_lock, flags); + if (rc != 0) + CERROR ("Closed %d stale conns to "LPX64"\n", rc, nid); + if (bind_irq) /* irq binding required */ ksocknal_bind_irq (irq); @@ -836,14 +869,17 @@ ksocknal_close_conn_locked (ksock_conn_t *conn, int error) route = conn->ksnc_route; if (route != NULL) { /* dissociate conn from route... 
*/ - LASSERT (!route->ksnr_connecting && - !route->ksnr_deleted); + LASSERT (!route->ksnr_deleted); + LASSERT ((route->ksnr_connecting & (1 << conn->ksnc_type)) == 0); + LASSERT ((route->ksnr_connected & (1 << conn->ksnc_type)) != 0); - route->ksnr_conn = NULL; + route->ksnr_connected &= ~(1 << conn->ksnc_type); conn->ksnc_route = NULL; + list_del (&route->ksnr_list); /* make route least favourite */ + list_add_tail (&route->ksnr_list, &peer->ksnp_routes); + ksocknal_put_route (route); /* drop conn's ref on route */ - ksocknal_put_conn (conn); /* drop route's ref on conn */ } /* ksnd_deathrow_conns takes over peer's ref */ @@ -869,24 +905,6 @@ ksocknal_close_conn_locked (ksock_conn_t *conn, int error) spin_unlock (&ksocknal_data.ksnd_reaper_lock); } -int -ksocknal_close_conn_unlocked (ksock_conn_t *conn, int why) -{ - unsigned long flags; - int did_it = 0; - - write_lock_irqsave (&ksocknal_data.ksnd_global_lock, flags); - - if (!conn->ksnc_closing) { - did_it = 1; - ksocknal_close_conn_locked (conn, why); - } - - write_unlock_irqrestore (&ksocknal_data.ksnd_global_lock, flags); - - return (did_it); -} - void ksocknal_terminate_conn (ksock_conn_t *conn) { @@ -958,9 +976,10 @@ ksocknal_destroy_conn (ksock_conn_t *conn) ksock_tx_t *tx = list_entry (conn->ksnc_tx_queue.next, ksock_tx_t, tx_list); - CERROR ("Deleting packet type %d len %d ("LPX64"->"LPX64")\n", + CERROR ("Deleting packet %p type %d len %d ("LPX64"->"LPX64")\n", + tx, NTOH__u32 (tx->tx_hdr->type), - NTOH__u32 (PTL_HDR_LENGTH(tx->tx_hdr)), + NTOH__u32 (tx->tx_hdr->payload_length), NTOH__u64 (tx->tx_hdr->src_nid), NTOH__u64 (tx->tx_hdr->dest_nid)); @@ -1012,19 +1031,75 @@ ksocknal_put_conn (ksock_conn_t *conn) } int -ksocknal_close_conn (ptl_nid_t nid, __u32 ipaddr) +ksocknal_close_peer_conns_locked (ksock_peer_t *peer, __u32 ipaddr, int why) +{ + ksock_conn_t *conn; + struct list_head *ctmp; + struct list_head *cnxt; + int count = 0; + + list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) { + conn = list_entry (ctmp, ksock_conn_t, ksnc_list); + + if (ipaddr == 0 || + conn->ksnc_ipaddr == ipaddr) { + count++; + ksocknal_close_conn_locked (conn, why); + } + } + + return (count); +} + +int +ksocknal_close_stale_conns_locked (ksock_peer_t *peer, __u64 incarnation) { - unsigned long flags; ksock_conn_t *conn; struct list_head *ctmp; struct list_head *cnxt; + int count = 0; + + list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) { + conn = list_entry (ctmp, ksock_conn_t, ksnc_list); + + if (conn->ksnc_incarnation == incarnation) + continue; + + count++; + ksocknal_close_conn_locked (conn, -ESTALE); + } + + return (count); +} + +int +ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why) +{ + ksock_peer_t *peer = conn->ksnc_peer; + __u32 ipaddr = conn->ksnc_ipaddr; + unsigned long flags; + int count; + + write_lock_irqsave (&ksocknal_data.ksnd_global_lock, flags); + + count = ksocknal_close_peer_conns_locked (peer, ipaddr, why); + + write_unlock_irqrestore (&ksocknal_data.ksnd_global_lock, flags); + + return (count); +} + +int +ksocknal_close_matching_conns (ptl_nid_t nid, __u32 ipaddr) +{ + unsigned long flags; ksock_peer_t *peer; struct list_head *ptmp; struct list_head *pnxt; int lo; int hi; int i; - int rc = -ENOENT; + int count = 0; write_lock_irqsave (&ksocknal_data.ksnd_global_lock, flags); @@ -1043,24 +1118,17 @@ ksocknal_close_conn (ptl_nid_t nid, __u32 ipaddr) if (!(nid == PTL_NID_ANY || nid == peer->ksnp_nid)) continue; - list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) { - - conn = list_entry (ctmp, ksock_conn_t, - 
ksnc_list); - - if (!(ipaddr == 0 || - conn->ksnc_ipaddr == ipaddr)) - continue; - - rc = 0; - ksocknal_close_conn_locked (conn, 0); - } + count += ksocknal_close_peer_conns_locked (peer, ipaddr, 0); } } write_unlock_irqrestore (&ksocknal_data.ksnd_global_lock, flags); - return (rc); + /* wildcards always succeed */ + if (nid == PTL_NID_ANY || ipaddr == 0) + return (0); + + return (count == 0 ? -ENOENT : 0); } void @@ -1073,7 +1141,7 @@ ksocknal_notify (void *arg, ptl_nid_t gw_nid, int alive) if (!alive) { /* If the gateway crashed, close all open connections... */ - ksocknal_close_conn (gw_nid, 0); + ksocknal_close_matching_conns (gw_nid, 0); return; } @@ -1233,13 +1301,12 @@ ksocknal_cmd(struct portals_cfg *pcfg, void * private) pcfg->pcfg_nid = route->ksnr_peer->ksnp_nid; pcfg->pcfg_id = route->ksnr_ipaddr; pcfg->pcfg_misc = route->ksnr_port; - pcfg->pcfg_count = route->ksnr_generation; + pcfg->pcfg_count = route->ksnr_conn_count; pcfg->pcfg_size = route->ksnr_buffer_size; pcfg->pcfg_wait = route->ksnr_sharecount; pcfg->pcfg_flags = (route->ksnr_nonagel ? 1 : 0) | - (route->ksnr_xchange_nids ? 2 : 0) | - (route->ksnr_irq_affinity ? 4 : 0) | - (route->ksnr_eager ? 8 : 0); + (route->ksnr_irq_affinity ? 2 : 0) | + (route->ksnr_eager ? 4 : 0); ksocknal_put_route (route); } break; @@ -1250,8 +1317,7 @@ ksocknal_cmd(struct portals_cfg *pcfg, void * private) (pcfg->pcfg_flags & 0x01) != 0, (pcfg->pcfg_flags & 0x02) != 0, (pcfg->pcfg_flags & 0x04) != 0, - (pcfg->pcfg_flags & 0x08) != 0, - (pcfg->pcfg_flags & 0x10) != 0); + (pcfg->pcfg_flags & 0x08) != 0); break; } case NAL_CMD_DEL_AUTOCONN: { @@ -1267,26 +1333,38 @@ ksocknal_cmd(struct portals_cfg *pcfg, void * private) rc = -ENOENT; else { rc = 0; - pcfg->pcfg_nid = conn->ksnc_peer->ksnp_nid; - pcfg->pcfg_id = conn->ksnc_ipaddr; - pcfg->pcfg_misc = conn->ksnc_port; + pcfg->pcfg_nid = conn->ksnc_peer->ksnp_nid; + pcfg->pcfg_id = conn->ksnc_ipaddr; + pcfg->pcfg_misc = conn->ksnc_port; + pcfg->pcfg_flags = conn->ksnc_type; ksocknal_put_conn (conn); } break; } case NAL_CMD_REGISTER_PEER_FD: { struct socket *sock = sockfd_lookup (pcfg->pcfg_fd, &rc); + int type = pcfg->pcfg_misc; - if (sock != NULL) { - rc = ksocknal_create_conn (pcfg->pcfg_nid, NULL, - sock, pcfg->pcfg_flags); - if (rc != 0) - fput (sock->file); + if (sock == NULL) + break; + + switch (type) { + case SOCKNAL_CONN_NONE: + case SOCKNAL_CONN_ANY: + case SOCKNAL_CONN_CONTROL: + case SOCKNAL_CONN_BULK_IN: + case SOCKNAL_CONN_BULK_OUT: + rc = ksocknal_create_conn(NULL, sock, pcfg->pcfg_flags, type); + default: + break; } + if (rc != 0) + fput (sock->file); break; } case NAL_CMD_CLOSE_CONNECTION: { - rc = ksocknal_close_conn (pcfg->pcfg_nid, pcfg->pcfg_id); + rc = ksocknal_close_matching_conns (pcfg->pcfg_nid, + pcfg->pcfg_id); break; } case NAL_CMD_REGISTER_MYNID: { @@ -1337,7 +1415,7 @@ ksocknal_free_buffers (void) ksocknal_data.ksnd_peer_hash_size); } -void /*__exit*/ +void ksocknal_module_fini (void) { int i; @@ -1421,6 +1499,22 @@ ksocknal_module_fini (void) } +void __init +ksocknal_init_incarnation (void) +{ + struct timeval tv; + + /* The incarnation number is the time this module loaded and it + * identifies this particular instance of the socknal. 
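The incarnation scheme introduced here stamps each socknal instance with its load time at microsecond resolution, so peers can detect a reboot by comparing stamps. A userspace sketch of the same computation (incarnation_now is a hypothetical helper; gettimeofday(2) stands in for the kernel's do_gettimeofday()):

        #include <sys/time.h>

        static unsigned long long incarnation_now(void)
        {
                struct timeval tv;

                gettimeofday(&tv, NULL);
                /* seconds scaled to microseconds plus the sub-second
                 * remainder: unique across reboots unless the node can
                 * reboot faster than 1MHz */
                return ((unsigned long long)tv.tv_sec) * 1000000ULL +
                        tv.tv_usec;
        }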
Hopefully + * we won't be able to reboot more frequently than 1MHz for the + * foreseeable future :) */ + + do_gettimeofday(&tv); + + ksocknal_data.ksnd_incarnation = + (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec; +} + int __init ksocknal_module_init (void) { @@ -1434,7 +1528,9 @@ ksocknal_module_init (void) /* the following must be sizeof(int) for proc_dointvec() */ LASSERT(sizeof (ksocknal_data.ksnd_io_timeout) == sizeof (int)); LASSERT(sizeof (ksocknal_data.ksnd_eager_ack) == sizeof (int)); - + /* check the ksnr_connected/connecting fields are large enough */ + LASSERT(SOCKNAL_CONN_NTYPES <= 4); + LASSERT (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING); ksocknal_api.forward = ksocknal_api_forward; @@ -1451,10 +1547,13 @@ ksocknal_module_init (void) ksocknal_data.ksnd_io_timeout = SOCKNAL_IO_TIMEOUT; ksocknal_data.ksnd_eager_ack = SOCKNAL_EAGER_ACK; + ksocknal_data.ksnd_typed_conns = SOCKNAL_TYPED_CONNS; + ksocknal_data.ksnd_min_bulk = SOCKNAL_MIN_BULK; #if SOCKNAL_ZC ksocknal_data.ksnd_zc_min_frag = SOCKNAL_ZC_MIN_FRAG; #endif - + ksocknal_init_incarnation(); + ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE; PORTAL_ALLOC (ksocknal_data.ksnd_peers, sizeof (struct list_head) * ksocknal_data.ksnd_peer_hash_size); diff --git a/lustre/portals/knals/socknal/socknal.h b/lustre/portals/knals/socknal/socknal.h index 1c73ae8..8c906e2 100644 --- a/lustre/portals/knals/socknal/socknal.h +++ b/lustre/portals/knals/socknal/socknal.h @@ -25,7 +25,9 @@ */ #define DEBUG_PORTAL_ALLOC -#define EXPORT_SYMTAB +#ifndef EXPORT_SYMTAB +# define EXPORT_SYMTAB +#endif #include #include @@ -58,6 +60,7 @@ #include #include #include +#include #if CONFIG_SMP # define SOCKNAL_N_SCHED num_online_cpus() /* # socknal schedulers */ @@ -71,9 +74,10 @@ /* default vals for runtime tunables */ #define SOCKNAL_IO_TIMEOUT 50 /* default comms timeout (seconds) */ -#define SOCKNAL_EAGER_ACK 1 /* default eager ack (boolean) */ +#define SOCKNAL_EAGER_ACK 0 /* default eager ack (boolean) */ +#define SOCKNAL_TYPED_CONNS 1 /* unidirectional large, bidirectional small? */ #define SOCKNAL_ZC_MIN_FRAG (2<<10) /* default smallest zerocopy fragment */ - +#define SOCKNAL_MIN_BULK (1<<10) /* smallest "large" message */ #define SOCKNAL_USE_KEEPALIVES 0 /* use tcp/ip keepalive? */ #define SOCKNAL_PEER_HASH_SIZE 101 /* # peer lists */ @@ -142,10 +146,13 @@ typedef struct { int ksnd_init; /* initialisation state */ int ksnd_io_timeout; /* "stuck" socket timeout (seconds) */ int ksnd_eager_ack; /* make TCP ack eagerly? */ + int ksnd_typed_conns; /* drive sockets by type? 
*/ + int ksnd_min_bulk; /* smallest "large" message */ #if SOCKNAL_ZC unsigned int ksnd_zc_min_frag; /* minimum zero copy frag size */ #endif struct ctl_table_header *ksnd_sysctl; /* sysctl interface */ + __u64 ksnd_incarnation; /* my epoch */ rwlock_t ksnd_global_lock; /* stabilize peer/conn ops */ struct list_head *ksnd_peers; /* hash table of all my known peers */ @@ -300,8 +307,10 @@ typedef struct ksock_conn __u32 ksnc_ipaddr; /* peer's IP */ int ksnc_port; /* peer's port */ int ksnc_closing; /* being shut down */ + int ksnc_type; /* type of connection */ + __u64 ksnc_incarnation; /* peer's incarnation */ - /* READER */ + /* reader */ struct list_head ksnc_rx_list; /* where I enq waiting input or a forwarding descriptor */ unsigned long ksnc_rx_deadline; /* when (in jiffies) receive times out */ int ksnc_rx_started; /* started receiving a message */ @@ -327,6 +336,10 @@ typedef struct ksock_conn int ksnc_tx_scheduled; /* being progressed */ } ksock_conn_t; +#define KSNR_TYPED_ROUTES ((1 << SOCKNAL_CONN_CONTROL) | \ + (1 << SOCKNAL_CONN_BULK_IN) | \ + (1 << SOCKNAL_CONN_BULK_OUT)) + typedef struct ksock_route { struct list_head ksnr_list; /* chain on peer route list */ @@ -340,13 +353,12 @@ typedef struct ksock_route int ksnr_port; /* port to connect to */ int ksnr_buffer_size; /* size of socket buffers */ unsigned int ksnr_irq_affinity:1; /* set affinity? */ - unsigned int ksnr_xchange_nids:1; /* do hello protocol? */ unsigned int ksnr_nonagel:1; /* disable nagle? */ unsigned int ksnr_eager:1; /* connect eagery? */ - unsigned int ksnr_connecting:1; /* autoconnect in progress? */ + unsigned int ksnr_connecting:4; /* autoconnects in progress by type */ + unsigned int ksnr_connected:4; /* connections established by type */ unsigned int ksnr_deleted:1; /* been removed from peer? 
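The 4-bit ksnr_connecting/ksnr_connected fields above track one bit per SOCKNAL_CONN_* type, which is why module_init asserts SOCKNAL_CONN_NTYPES <= 4. A minimal sketch of the bookkeeping (hypothetical helpers; the patch itself open-codes these tests inline):

        static int conn_type_established(unsigned int connected, int type)
        {
                /* is a connection of this type already up on the route? */
                return (connected & (1 << type)) != 0;
        }

        static unsigned int conn_types_missing(unsigned int connected,
                                               unsigned int wanted)
        {
                /* e.g. wanted == KSNR_TYPED_ROUTES when typed conns are
                 * enabled: the bits still to be autoconnected */
                return wanted & ~connected;
        }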
*/ - int ksnr_generation; /* connection incarnation # */ - ksock_conn_t *ksnr_conn; /* NULL/active connection */ + int ksnr_conn_count; /* # conns established by this route */ } ksock_route_t; typedef struct ksock_peer @@ -401,14 +413,15 @@ extern ksock_peer_t *ksocknal_find_peer_locked (ptl_nid_t nid); extern ksock_peer_t *ksocknal_get_peer (ptl_nid_t nid); extern int ksocknal_del_route (ptl_nid_t nid, __u32 ipaddr, int single, int keep_conn); -extern int ksocknal_create_conn (ptl_nid_t nid, ksock_route_t *route, - struct socket *sock, int bind_irq); +extern int ksocknal_create_conn (ksock_route_t *route, + struct socket *sock, int bind_irq, int type); extern void ksocknal_close_conn_locked (ksock_conn_t *conn, int why); -extern int ksocknal_close_conn_unlocked (ksock_conn_t *conn, int why); extern void ksocknal_terminate_conn (ksock_conn_t *conn); extern void ksocknal_destroy_conn (ksock_conn_t *conn); extern void ksocknal_put_conn (ksock_conn_t *conn); -extern int ksocknal_close_conn (ptl_nid_t nid, __u32 ipaddr); +extern int ksocknal_close_stale_conns_locked (ksock_peer_t *peer, __u64 incarnation); +extern int ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why); +extern int ksocknal_close_matching_conns (ptl_nid_t nid, __u32 ipaddr); extern void ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn); extern void ksocknal_tx_done (ksock_tx_t *tx, int asynch); @@ -423,3 +436,5 @@ extern void ksocknal_write_space(struct sock *sk); extern int ksocknal_autoconnectd (void *arg); extern int ksocknal_reaper (void *arg); extern int ksocknal_setup_sock (struct socket *sock); +extern int ksocknal_hello (struct socket *sock, + ptl_nid_t *nid, int *type, __u64 *incarnation); diff --git a/lustre/portals/knals/socknal/socknal_cb.c b/lustre/portals/knals/socknal/socknal_cb.c index 8ce6777..346d60e 100644 --- a/lustre/portals/knals/socknal/socknal_cb.c +++ b/lustre/portals/knals/socknal/socknal_cb.c @@ -218,9 +218,9 @@ ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx) struct iovec *iov = tx->tx_iov; int fragsize = iov->iov_len; unsigned long vaddr = (unsigned long)iov->iov_base; - int more = (!list_empty (&conn->ksnc_tx_queue)) | - (tx->tx_niov > 1) | - (tx->tx_nkiov > 1); + int more = (tx->tx_niov > 1) || + (tx->tx_nkiov > 0) || + (!list_empty (&conn->ksnc_tx_queue)); #if SOCKNAL_ZC int offset = vaddr & (PAGE_SIZE - 1); int zcsize = MIN (fragsize, PAGE_SIZE - offset); @@ -266,7 +266,7 @@ ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx) .msg_flags = more ? (MSG_DONTWAIT | MSG_MORE) : MSG_DONTWAIT }; mm_segment_t oldmm = get_fs(); - + set_fs (KERNEL_DS); rc = sock_sendmsg(sock, &msg, fragsize); set_fs (oldmm); @@ -298,8 +298,8 @@ ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx) int fragsize = kiov->kiov_len; struct page *page = kiov->kiov_page; int offset = kiov->kiov_offset; - int more = (!list_empty (&conn->ksnc_tx_queue)) | - (tx->tx_nkiov > 1); + int more = (tx->tx_nkiov > 1) || + (!list_empty (&conn->ksnc_tx_queue)); int rc; /* NB we can't trust socket ops to either consume our iovs @@ -464,7 +464,7 @@ ksocknal_recv_iov (ksock_conn_t *conn) * or leave them alone, so we only receive 1 frag at a time. 
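In the send paths above, more is true whenever further iovs, kiovs, or queued transmits remain, so the socket layer is told to expect more data. The flag computation in isolation (tx_msg_flags is a hypothetical helper; assumes <sys/socket.h> for MSG_MORE/MSG_DONTWAIT):

        #include <sys/socket.h>

        static int tx_msg_flags(int niov, int nkiov, int queue_nonempty)
        {
                int more = (niov > 1) || (nkiov > 0) || queue_nonempty;

                /* MSG_MORE lets TCP coalesce the remaining fragments
                 * into fewer segments */
                return more ? (MSG_DONTWAIT | MSG_MORE) : MSG_DONTWAIT;
        }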
*/ LASSERT (conn->ksnc_rx_niov > 0); LASSERT (fragsize <= conn->ksnc_rx_nob_wanted); - + set_fs (KERNEL_DS); rc = sock_recvmsg (conn->ksnc_sock, &msg, fragsize, MSG_DONTWAIT); /* NB this is just a boolean............................^ */ @@ -521,7 +521,7 @@ ksocknal_recv_kiov (ksock_conn_t *conn) LASSERT (fragsize <= conn->ksnc_rx_nob_wanted); LASSERT (conn->ksnc_rx_nkiov > 0); LASSERT (offset + fragsize <= PAGE_SIZE); - + set_fs (KERNEL_DS); rc = sock_recvmsg (conn->ksnc_sock, &msg, fragsize, MSG_DONTWAIT); /* NB this is just a boolean............................^ */ @@ -597,7 +597,7 @@ ksocknal_recvmsg (ksock_conn_t *conn) if (conn->ksnc_rx_nob_wanted == 0) { /* Completed a message segment (header or payload) */ - if (ksocknal_data.ksnd_eager_ack && + if ((ksocknal_data.ksnd_eager_ack & conn->ksnc_type) != 0 && (conn->ksnc_rx_state == SOCKNAL_RX_BODY || conn->ksnc_rx_state == SOCKNAL_RX_BODY_FWD)) { /* Remind the socket to ack eagerly... */ @@ -721,12 +721,12 @@ ksocknal_process_transmit (ksock_sched_t *sched, unsigned long *irq_flags) CDEBUG (D_NET, "send(%d) %d\n", tx->tx_resid, rc); if (rc != 0) { - if (ksocknal_close_conn_unlocked (conn, rc)) { - /* I'm the first to close */ + if (!conn->ksnc_closing) CERROR ("[%p] Error %d on write to "LPX64" ip %08x:%d\n", conn, rc, conn->ksnc_peer->ksnp_nid, conn->ksnc_ipaddr, conn->ksnc_port); - } + ksocknal_close_conn_and_siblings (conn, rc); + ksocknal_tx_launched (tx); spin_lock_irqsave (&sched->kss_lock, *irq_flags); @@ -764,10 +764,17 @@ ksocknal_launch_autoconnect_locked (ksock_route_t *route) /* called holding write lock on ksnd_global_lock */ - LASSERT (route->ksnr_conn == NULL); - LASSERT (!route->ksnr_deleted && !route->ksnr_connecting); + LASSERT (!route->ksnr_deleted); + LASSERT ((route->ksnr_connected & (1 << SOCKNAL_CONN_ANY)) == 0); + LASSERT ((route->ksnr_connected & KSNR_TYPED_ROUTES) != KSNR_TYPED_ROUTES); + LASSERT (!route->ksnr_connecting); - route->ksnr_connecting = 1; + if (ksocknal_data.ksnd_typed_conns) + route->ksnr_connecting = + KSNR_TYPED_ROUTES & ~route->ksnr_connected; + else + route->ksnr_connecting = (1 << SOCKNAL_CONN_ANY); + atomic_inc (&route->ksnr_refcount); /* extra ref for asynchd */ spin_lock_irqsave (&ksocknal_data.ksnd_autoconnectd_lock, flags); @@ -814,21 +821,51 @@ ksock_conn_t * ksocknal_find_conn_locked (ksock_tx_t *tx, ksock_peer_t *peer) { struct list_head *tmp; - ksock_conn_t *conn = NULL; - + ksock_conn_t *typed = NULL; + int tnob = 0; + ksock_conn_t *fallback = NULL; + int fnob = 0; + /* Find the conn with the shortest tx queue */ list_for_each (tmp, &peer->ksnp_conns) { - ksock_conn_t *c = list_entry (tmp, ksock_conn_t, ksnc_list); + ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list); + int nob = atomic_read(&c->ksnc_tx_nob); LASSERT (!c->ksnc_closing); - - if (conn == NULL || - atomic_read (&conn->ksnc_tx_nob) > - atomic_read (&c->ksnc_tx_nob)) - conn = c; + + if (fallback == NULL || nob < fnob) { + fallback = c; + fnob = nob; + } + + if (!ksocknal_data.ksnd_typed_conns) + continue; + + switch (c->ksnc_type) { + default: + LBUG(); + case SOCKNAL_CONN_ANY: + break; + case SOCKNAL_CONN_BULK_IN: + continue; + case SOCKNAL_CONN_BULK_OUT: + if (tx->tx_nob < ksocknal_data.ksnd_min_bulk) + continue; + break; + case SOCKNAL_CONN_CONTROL: + if (tx->tx_nob >= ksocknal_data.ksnd_min_bulk) + continue; + break; + } + + if (typed == NULL || nob < tnob) { + typed = c; + tnob = nob; + } } - return (conn); + /* prefer the typed selection */ + return ((typed != NULL) ? 
typed : fallback); } void @@ -870,22 +907,46 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn) } ksock_route_t * -ksocknal_find_connectable_route_locked (ksock_peer_t *peer, int eager_only) +ksocknal_find_connectable_route_locked (ksock_peer_t *peer) { struct list_head *tmp; ksock_route_t *route; + ksock_route_t *candidate = NULL; + int found = 0; + int bits; list_for_each (tmp, &peer->ksnp_routes) { route = list_entry (tmp, ksock_route_t, ksnr_list); + bits = route->ksnr_connected; + + if ((bits & KSNR_TYPED_ROUTES) == KSNR_TYPED_ROUTES || + (bits & (1 << SOCKNAL_CONN_ANY)) != 0 || + route->ksnr_connecting != 0) { + /* All typed connections have been established, or + * an untyped connection has been established, or + * connections are currently being established */ + found = 1; + continue; + } + + /* too soon to retry this guy? */ + if (!time_after_eq (jiffies, route->ksnr_timeout)) + continue; - if (route->ksnr_conn == NULL && /* not connected */ - !route->ksnr_connecting && /* not connecting */ - (!eager_only || route->ksnr_eager) && /* wants to connect */ - time_after_eq (jiffies, route->ksnr_timeout)) /* OK to retry */ + /* always do eager routes */ + if (route->ksnr_eager) return (route); + + if (candidate == NULL) { + /* If we don't find any other route that is fully + * connected or connecting, the first connectable + * route is returned. If it fails to connect, it + * will get placed at the end of the list */ + candidate = route; + } } - - return (NULL); + + return (found ? NULL : candidate); } ksock_route_t * @@ -897,7 +958,7 @@ ksocknal_find_connecting_route_locked (ksock_peer_t *peer) list_for_each (tmp, &peer->ksnp_routes) { route = list_entry (tmp, ksock_route_t, ksnr_list); - if (route->ksnr_connecting) + if (route->ksnr_connecting != 0) return (route); } @@ -912,7 +973,7 @@ ksocknal_launch_packet (ksock_tx_t *tx, ptl_nid_t nid) ksock_conn_t *conn; ksock_route_t *route; rwlock_t *g_lock; - + /* Ensure the frags we've been given EXACTLY match the number of * bytes we want to send. Many TCP/IP stacks disregard any total * size parameters passed to them and just look at the frags. @@ -936,18 +997,17 @@ ksocknal_launch_packet (ksock_tx_t *tx, ptl_nid_t nid) peer = ksocknal_find_target_peer_locked (tx, nid); if (peer == NULL) { read_unlock (g_lock); - return (PTL_FAIL); + return (-EHOSTUNREACH); } - if (ksocknal_find_connectable_route_locked(peer, 1) == NULL) { + if (ksocknal_find_connectable_route_locked(peer) == NULL) { conn = ksocknal_find_conn_locked (tx, peer); if (conn != NULL) { - /* I've got no unconnected autoconnect routes that - * need to be connected, and I do have an actual - * connection... */ + /* I've got no autoconnect routes that need to be + * connecting and I do have an actual connection... */ ksocknal_queue_tx_locked (tx, conn); read_unlock (g_lock); - return (PTL_OK); + return (0); } } @@ -960,14 +1020,13 @@ ksocknal_launch_packet (ksock_tx_t *tx, ptl_nid_t nid) if (peer->ksnp_closing) { /* peer deleted as I blocked! 
*/ write_unlock_irqrestore (g_lock, flags); ksocknal_put_peer (peer); - return (PTL_FAIL); + return (-EHOSTUNREACH); } ksocknal_put_peer (peer); /* drop ref I got above */ - for (;;) { - /* launch all eager autoconnections */ - route = ksocknal_find_connectable_route_locked (peer, 1); + /* launch any/all autoconnections that need it */ + route = ksocknal_find_connectable_route_locked (peer); if (route == NULL) break; @@ -979,26 +1038,20 @@ ksocknal_launch_packet (ksock_tx_t *tx, ptl_nid_t nid) /* Connection exists; queue message on it */ ksocknal_queue_tx_locked (tx, conn); write_unlock_irqrestore (g_lock, flags); - return (PTL_OK); + return (0); } - if (ksocknal_find_connecting_route_locked (peer) == NULL) { - /* no autoconnect routes actually connecting now. Scrape - * the barrel for non-eager autoconnects */ - route = ksocknal_find_connectable_route_locked (peer, 0); - if (route != NULL) { - ksocknal_launch_autoconnect_locked (route); - } else { - write_unlock_irqrestore (g_lock, flags); - return (PTL_FAIL); - } + route = ksocknal_find_connecting_route_locked (peer); + if (route != NULL) { + /* At least 1 connection is being established; queue the + * message... */ + list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue); + write_unlock_irqrestore (g_lock, flags); + return (0); } - - /* At least 1 connection is being established; queue the message... */ - list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue); - + write_unlock_irqrestore (g_lock, flags); - return (PTL_OK); + return (-EHOSTUNREACH); } ksock_ltx_t * @@ -1069,10 +1122,11 @@ ksocknal_send (nal_cb_t *nal, void *private, lib_msg_t *cookie, ltx->ltx_tx.tx_nob = sizeof (*hdr) + payload_len; rc = ksocknal_launch_packet (<x->ltx_tx, nid); - if (rc != PTL_OK) - ksocknal_put_ltx (ltx); + if (rc == 0) + return (PTL_OK); - return (rc); + ksocknal_put_ltx (ltx); + return (PTL_FAIL); } int @@ -1104,10 +1158,11 @@ ksocknal_send_pages (nal_cb_t *nal, void *private, lib_msg_t *cookie, ltx->ltx_tx.tx_nob = sizeof (*hdr) + payload_len; rc = ksocknal_launch_packet (<x->ltx_tx, nid); - if (rc != PTL_OK) - ksocknal_put_ltx (ltx); - - return (rc); + if (rc == 0) + return (PTL_OK); + + ksocknal_put_ltx (ltx); + return (PTL_FAIL); } void @@ -1133,10 +1188,8 @@ ksocknal_fwd_packet (void *arg, kpr_fwd_desc_t *fwd) tx->tx_hdr = (ptl_hdr_t *)fwd->kprfd_iov[0].iov_base; rc = ksocknal_launch_packet (tx, nid); - if (rc != 0) { - /* FIXME, could pass a better completion error */ - kpr_fwd_done (&ksocknal_data.ksnd_router, fwd, -EHOSTUNREACH); - } + if (rc != 0) + kpr_fwd_done (&ksocknal_data.ksnd_router, fwd, rc); } int @@ -1353,7 +1406,7 @@ ksocknal_fwd_parse (ksock_conn_t *conn) { ksock_peer_t *peer; ptl_nid_t dest_nid = NTOH__u64 (conn->ksnc_hdr.dest_nid); - int body_len = NTOH__u32 (PTL_HDR_LENGTH(&conn->ksnc_hdr)); + int body_len = NTOH__u32 (conn->ksnc_hdr.payload_length); CDEBUG (D_NET, "%p "LPX64"->"LPX64" %d parsing header\n", conn, NTOH__u64 (conn->ksnc_hdr.src_nid), @@ -1368,7 +1421,6 @@ ksocknal_fwd_parse (ksock_conn_t *conn) dest_nid, body_len); ksocknal_new_packet (conn, 0); /* on to new packet */ - ksocknal_close_conn_unlocked (conn, -EINVAL); /* give up on conn */ return; } @@ -1512,17 +1564,15 @@ ksocknal_process_receive (ksock_sched_t *sched, unsigned long *irq_flags) rc = ksocknal_recvmsg(conn); if (rc <= 0) { - if (ksocknal_close_conn_unlocked (conn, rc)) { - /* I'm the first to close */ - if (rc < 0) - CERROR ("[%p] Error %d on read from "LPX64" ip %08x:%d\n", - conn, rc, conn->ksnc_peer->ksnp_nid, - conn->ksnc_ipaddr, conn->ksnc_port); - 
else - CWARN ("[%p] EOF from "LPX64" ip %08x:%d\n", - conn, conn->ksnc_peer->ksnp_nid, - conn->ksnc_ipaddr, conn->ksnc_port); - } + if (rc == 0) + CWARN ("[%p] EOF from "LPX64" ip %08x:%d\n", + conn, conn->ksnc_peer->ksnp_nid, + conn->ksnc_ipaddr, conn->ksnc_port); + else if (!conn->ksnc_closing) + CERROR ("[%p] Error %d on read from "LPX64" ip %08x:%d\n", + conn, rc, conn->ksnc_peer->ksnp_nid, + conn->ksnc_ipaddr, conn->ksnc_port); + ksocknal_close_conn_and_siblings (conn, rc); goto out; } @@ -1945,7 +1995,7 @@ ksocknal_sock_read (struct socket *sock, void *buffer, int nob) } int -ksocknal_exchange_nids (struct socket *sock, ptl_nid_t nid) +ksocknal_hello (struct socket *sock, ptl_nid_t *nid, int *type, __u64 *incarnation) { int rc; ptl_hdr_t hdr; @@ -1960,24 +2010,28 @@ ksocknal_exchange_nids (struct socket *sock, ptl_nid_t nid) hdr.src_nid = __cpu_to_le64 (ksocknal_lib.ni.nid); hdr.type = __cpu_to_le32 (PTL_MSG_HELLO); - + + hdr.msg.hello.type = __cpu_to_le32 (*type); + hdr.msg.hello.incarnation = + __cpu_to_le64 (ksocknal_data.ksnd_incarnation); + /* Assume sufficient socket buffering for this message */ rc = ksocknal_sock_write (sock, &hdr, sizeof (hdr)); if (rc != 0) { - CERROR ("Error %d sending HELLO to "LPX64"\n", rc, nid); + CERROR ("Error %d sending HELLO to "LPX64"\n", rc, *nid); return (rc); } rc = ksocknal_sock_read (sock, hmv, sizeof (*hmv)); if (rc != 0) { - CERROR ("Error %d reading HELLO from "LPX64"\n", rc, nid); + CERROR ("Error %d reading HELLO from "LPX64"\n", rc, *nid); return (rc); } if (hmv->magic != __le32_to_cpu (PORTALS_PROTO_MAGIC)) { CERROR ("Bad magic %#08x (%#08x expected) from "LPX64"\n", - __cpu_to_le32 (hmv->magic), PORTALS_PROTO_MAGIC, nid); - return (-EINVAL); + __cpu_to_le32 (hmv->magic), PORTALS_PROTO_MAGIC, *nid); + return (-EPROTO); } if (hmv->version_major != __cpu_to_le16 (PORTALS_PROTO_VERSION_MAJOR) || @@ -1988,37 +2042,71 @@ ksocknal_exchange_nids (struct socket *sock, ptl_nid_t nid) __le16_to_cpu (hmv->version_minor), PORTALS_PROTO_VERSION_MAJOR, PORTALS_PROTO_VERSION_MINOR, - nid); - return (-EINVAL); + *nid); + return (-EPROTO); } - LASSERT (PORTALS_PROTO_VERSION_MAJOR == 0); +#if (PORTALS_PROTO_VERSION_MAJOR != 0) +# error "This code only understands protocol version 0.x" +#endif /* version 0 sends magic/version as the dest_nid of a 'hello' header, * so read the rest of it in now... 
*/ rc = ksocknal_sock_read (sock, hmv + 1, sizeof (hdr) - sizeof (*hmv)); if (rc != 0) { CERROR ("Error %d reading rest of HELLO hdr from "LPX64"\n", - rc, nid); + rc, *nid); return (rc); } /* ...and check we got what we expected */ if (hdr.type != __cpu_to_le32 (PTL_MSG_HELLO) || - PTL_HDR_LENGTH (&hdr) != __cpu_to_le32 (0)) { + hdr.payload_length != __cpu_to_le32 (0)) { CERROR ("Expecting a HELLO hdr with 0 payload," " but got type %d with %d payload from "LPX64"\n", __le32_to_cpu (hdr.type), - __le32_to_cpu (PTL_HDR_LENGTH (&hdr)), nid); - return (-EINVAL); + __le32_to_cpu (hdr.payload_length), *nid); + return (-EPROTO); } - - if (__le64_to_cpu (hdr.src_nid) != nid) { + + if (__le64_to_cpu(hdr.src_nid) == PTL_NID_ANY) { + CERROR("Expecting a HELLO hdr with a NID, but got PTL_NID_ANY\n"); + return (-EPROTO); + } + + if (*nid == PTL_NID_ANY) { /* don't know peer's nid yet */ + *nid = __le64_to_cpu(hdr.src_nid); + } else if (*nid != __le64_to_cpu (hdr.src_nid)) { CERROR ("Connected to nid "LPX64", but expecting "LPX64"\n", - __le64_to_cpu (hdr.src_nid), nid); - return (-EINVAL); + __le64_to_cpu (hdr.src_nid), *nid); + return (-EPROTO); + } + + if (*type == SOCKNAL_CONN_NONE) { + /* I've accepted this connection; peer determines type */ + *type = __le32_to_cpu(hdr.msg.hello.type); + switch (*type) { + case SOCKNAL_CONN_ANY: + case SOCKNAL_CONN_CONTROL: + break; + case SOCKNAL_CONN_BULK_IN: + *type = SOCKNAL_CONN_BULK_OUT; + break; + case SOCKNAL_CONN_BULK_OUT: + *type = SOCKNAL_CONN_BULK_IN; + break; + default: + CERROR ("Unexpected type %d from "LPX64"\n", *type, *nid); + return (-EPROTO); + } + } else if (__le32_to_cpu(hdr.msg.hello.type) != SOCKNAL_CONN_NONE) { + CERROR ("Mismatched types: me %d "LPX64" %d\n", + *type, *nid, __le32_to_cpu(hdr.msg.hello.type)); + return (-EPROTO); } + *incarnation = __le64_to_cpu(hdr.msg.hello.incarnation); + return (0); } @@ -2103,7 +2191,7 @@ ksocknal_setup_sock (struct socket *sock) } int -ksocknal_connect_peer (ksock_route_t *route) +ksocknal_connect_peer (ksock_route_t *route, int type) { struct sockaddr_in peer_addr; mm_segment_t oldmm = get_fs(); @@ -2208,14 +2296,7 @@ ksocknal_connect_peer (ksock_route_t *route) goto out; } - if (route->ksnr_xchange_nids) { - rc = ksocknal_exchange_nids (sock, route->ksnr_peer->ksnp_nid); - if (rc != 0) - goto out; - } - - rc = ksocknal_create_conn (route->ksnr_peer->ksnp_nid, - route, sock, route->ksnr_irq_affinity); + rc = ksocknal_create_conn (route, sock, route->ksnr_irq_affinity, type); if (rc == 0) { /* Take an extra ref on sock->file to compensate for the * upcoming close which will lose fd's ref on it. */ @@ -2235,20 +2316,36 @@ ksocknal_autoconnect (ksock_route_t *route) ksock_peer_t *peer; unsigned long flags; int rc; + int type; - rc = ksocknal_connect_peer (route); - if (rc == 0) { + for (;;) { + for (type = 0; type < SOCKNAL_CONN_NTYPES; type++) + if ((route->ksnr_connecting & (1 << type)) != 0) + break; + LASSERT (type < SOCKNAL_CONN_NTYPES); + + rc = ksocknal_connect_peer (route, type); + + if (rc != 0) + break; + /* successfully autoconnected: create_conn did the - * route/conn binding and scheduled any blocked packets, - * so there's nothing left to do now. 
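The HELLO exchange above lets the passive side learn which connection type the peer requested; the bulk types are mirrored so the two endpoints agree on who sends bulk data over this socket. The negotiation in isolation (a sketch only, assuming the SOCKNAL_CONN_* constants from this patch and <errno.h>; -EPROTO mirrors the error returns above):

        #include <errno.h>

        static int mirror_conn_type(int peer_type)
        {
                switch (peer_type) {
                case SOCKNAL_CONN_ANY:
                case SOCKNAL_CONN_CONTROL:
                        return peer_type;             /* symmetric types */
                case SOCKNAL_CONN_BULK_IN:
                        return SOCKNAL_CONN_BULK_OUT; /* peer reads bulk, I write */
                case SOCKNAL_CONN_BULK_OUT:
                        return SOCKNAL_CONN_BULK_IN;  /* peer writes bulk, I read */
                default:
                        return -EPROTO;               /* protocol error */
                }
        }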
*/ - return; + * route/conn binding and scheduled any blocked packets */ + + if (route->ksnr_connecting == 0) { + /* No more connections required */ + return; + } } + /* Connection attempt failed */ + write_lock_irqsave (&ksocknal_data.ksnd_global_lock, flags); peer = route->ksnr_peer; route->ksnr_connecting = 0; + /* This is a retry rather than a new connection */ LASSERT (route->ksnr_retry_interval != 0); route->ksnr_timeout = jiffies + route->ksnr_retry_interval; route->ksnr_retry_interval = MIN (route->ksnr_retry_interval * 2, @@ -2268,6 +2365,12 @@ ksocknal_autoconnect (ksock_route_t *route) } while (!list_empty (&peer->ksnp_tx_queue)); } + /* make this route least-favourite for re-selection */ + if (!route->ksnr_deleted) { + list_del(&route->ksnr_list); + list_add_tail(&route->ksnr_list, &peer->ksnp_routes); + } + write_unlock_irqrestore (&ksocknal_data.ksnd_global_lock, flags); while (!list_empty (&zombies)) { @@ -2275,7 +2378,7 @@ ksocknal_autoconnect (ksock_route_t *route) CERROR ("Deleting packet type %d len %d ("LPX64"->"LPX64")\n", NTOH__u32 (tx->tx_hdr->type), - NTOH__u32 (PTL_HDR_LENGTH(tx->tx_hdr)), + NTOH__u32 (tx->tx_hdr->payload_length), NTOH__u64 (tx->tx_hdr->src_nid), NTOH__u64 (tx->tx_hdr->dest_nid)); @@ -2393,13 -2496,11 @@ ksocknal_check_peer_timeouts (int idx) if (conn != NULL) { read_unlock (&ksocknal_data.ksnd_global_lock); - if (ksocknal_close_conn_unlocked (conn, -ETIMEDOUT)) { - /* I actually closed... */ - CERROR ("Timeout out conn->"LPX64" ip %x:%d\n", - peer->ksnp_nid, conn->ksnc_ipaddr, - conn->ksnc_port); - } - + CERROR ("Timed out conn->"LPX64" ip %x:%d\n", + peer->ksnp_nid, conn->ksnc_ipaddr, + conn->ksnc_port); + ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT); + /* NB we won't find this one again, but we can't * just proceed with the next peer, since we dropped * ksnd_global_lock and it might be dead already! */ diff --git a/lustre/portals/knals/toenal/toenal_cb.c b/lustre/portals/knals/toenal/toenal_cb.c index 983fa71..37e3f1e 100644 --- a/lustre/portals/knals/toenal/toenal_cb.c +++ b/lustre/portals/knals/toenal/toenal_cb.c @@ -708,26 +708,7 @@ ktoenal_fwd_parse (ksock_conn_t *conn) LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_HEADER); LASSERT (conn->ksnc_rx_scheduled); - switch (conn->ksnc_hdr.type) - { - case PTL_MSG_GET: - case PTL_MSG_ACK: - body_len = 0; - break; - case PTL_MSG_PUT: - body_len = conn->ksnc_hdr.msg.put.length; - break; - case PTL_MSG_REPLY: - body_len = conn->ksnc_hdr.msg.reply.length; - break; - default: - /* Unrecognised packet type */ - CERROR ("Unrecognised packet type %d from "LPX64" for "LPX64"\n", - conn->ksnc_hdr.type, conn->ksnc_hdr.src_nid, conn->ksnc_hdr.dest_nid); - /* Ignore this header and go back to reading a new packet. 
*/ - ktoenal_new_packet (conn, 0); - return; - } + body_len = conn->ksnc_hdr.payload_length; if (body_len < 0) /* length corrupt */ { diff --git a/lustre/portals/libcfs/debug.c b/lustre/portals/libcfs/debug.c index a42d422..092baae 100644 --- a/lustre/portals/libcfs/debug.c +++ b/lustre/portals/libcfs/debug.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include #include @@ -920,8 +919,116 @@ void portals_run_lbug_upcall(char *file, const char *fn, const int line) portals_run_upcall (argv); } +char *portals_nid2str(int nal, ptl_nid_t nid, char *str) +{ + switch(nal){ + case TCPNAL: + /* userspace NAL */ + case SOCKNAL: + sprintf(str, "%u:%d.%d.%d.%d", (__u32)(nid >> 32), + HIPQUAD(nid)); + break; + case QSWNAL: + case GMNAL: + case IBNAL: + case TOENAL: + case SCIMACNAL: + sprintf(str, "%u:%u", (__u32)(nid >> 32), (__u32)nid); + break; + default: + return NULL; + } + return str; +} + +char stack_backtrace[LUSTRE_TRACE_SIZE]; +spinlock_t stack_backtrace_lock = SPIN_LOCK_UNLOCKED; + +#if defined(__arch_um__) +# warning in arch_um + +extern int is_kernel_text_address(unsigned long addr); + +char *portals_debug_dumpstack(void) +{ + int size; + unsigned long addr; + char *buf = stack_backtrace; + char *pbuf = buf; + unsigned long *stack = (unsigned long *)&buf; + + size = sprintf(pbuf, " Call Trace: "); + pbuf += size; + while (((long) stack & (THREAD_SIZE-1)) != 0) { + addr = *stack++; + if (is_kernel_text_address(addr)) { + size = sprintf(pbuf, "[<%08lx>] ", addr); + pbuf += size; + if (buf + LUSTRE_TRACE_SIZE <= pbuf + 12) + break; + } + } + + return buf; +} + +#elif defined(CONFIG_X86) +# warning in __i386__ + +extern int is_kernel_text_address(unsigned long addr); +extern int lookup_symbol(unsigned long address, char *buf, int buflen); + +char *portals_debug_dumpstack(void) +{ + unsigned long esp = current->thread.esp; + unsigned long *stack = (unsigned long *)&esp; + int size; + unsigned long addr; + char *buf = stack_backtrace; + char *pbuf = buf; + static char buffer[512]; + + /* User space on another CPU? 
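portals_nid2str() above renders a NID according to its NAL: for SOCKNAL/TCPNAL the low 32 bits are an IPv4 address and the high 32 bits a qualifier. A usage sketch (the nid value and buffer size are chosen for illustration; the patch uses PTL_NALFMT_SIZE for this buffer elsewhere):

        char str[64];
        ptl_nid_t nid = 0x0000000101020304ULL;

        /* For SOCKNAL this nid prints as "1:1.2.3.4", assuming HIPQUAD
         * expands the low word to a dotted quad as used above. */
        if (portals_nid2str(SOCKNAL, nid, str) != NULL)
                CDEBUG(D_NET, "peer %s\n", str);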
*/ + if ((esp ^ (unsigned long)current) & (PAGE_MASK<<1)){ + memset(buf, 0x0, LUSTRE_TRACE_SIZE); + goto out; + } + + size = sprintf(pbuf, " Call Trace: "); + pbuf += size; + while (((long) stack & (THREAD_SIZE-1)) != 0) { + addr = *stack++; + if (is_kernel_text_address(addr)) { + lookup_symbol(addr, buffer, 512); + if (buf + LUSTRE_TRACE_SIZE + /* fix length + sizeof('\0') */ + <= pbuf + strlen(buffer) + 28 + 1) + break; + size = sprintf(pbuf, "([<%08lx>] %s (0x%x)) ", + addr, buffer, stack-1); + pbuf += size; + } + } +out: + return buf; +} + +#else /* !__arch_um__ && !__i386__ */ + +char *portals_debug_dumpstack(void) +{ + char *buf = stack_backtrace; + memset(buf, 0x0, LUSTRE_TRACE_SIZE); + return buf; +} + +#endif /* __arch_um__ */ + EXPORT_SYMBOL(portals_debug_dumplog); EXPORT_SYMBOL(portals_debug_msg); EXPORT_SYMBOL(portals_debug_set_level); EXPORT_SYMBOL(portals_run_upcall); EXPORT_SYMBOL(portals_run_lbug_upcall); +EXPORT_SYMBOL(portals_nid2str); +EXPORT_SYMBOL(portals_debug_dumpstack); +EXPORT_SYMBOL(stack_backtrace_lock); diff --git a/lustre/portals/libcfs/module.c b/lustre/portals/libcfs/module.c index c51a506..a15ce6a 100644 --- a/lustre/portals/libcfs/module.c +++ b/lustre/portals/libcfs/module.c @@ -210,6 +210,84 @@ kportal_get_route(int index, __u32 *gateway_nalidp, ptl_nid_t *gateway_nidp, return (rc); } +static int +kportal_router_cmd(struct portals_cfg *pcfg, void * private) +{ + int err; + ENTRY; + + switch(pcfg->pcfg_command) { + case IOC_PORTAL_ADD_ROUTE: + CDEBUG(D_IOCTL, "Adding route: [%d] "LPU64" : "LPU64" - "LPU64"\n", + pcfg->pcfg_nal, pcfg->pcfg_nid, + pcfg->pcfg_nid2, pcfg->pcfg_nid3); + err = kportal_add_route(pcfg->pcfg_nal, pcfg->pcfg_nid, + pcfg->pcfg_nid2, pcfg->pcfg_nid3); + break; + + case IOC_PORTAL_DEL_ROUTE: + CDEBUG (D_IOCTL, "Removing routes via [%d] "LPU64" : "LPU64" - "LPU64"\n", + pcfg->pcfg_nal, pcfg->pcfg_nid, + pcfg->pcfg_nid2, pcfg->pcfg_nid3); + err = kportal_del_route (pcfg->pcfg_nal, pcfg->pcfg_nid, + pcfg->pcfg_nid2, pcfg->pcfg_nid3); + break; + + case IOC_PORTAL_NOTIFY_ROUTER: { + CDEBUG (D_IOCTL, "Notifying peer [%d] "LPU64" %s @ %ld\n", + pcfg->pcfg_nal, pcfg->pcfg_nid, + pcfg->pcfg_flags ? 
"Enabling" : "Disabling", + (time_t)pcfg->pcfg_nid3); + + err = kportal_notify_router (pcfg->pcfg_nal, pcfg->pcfg_nid, + pcfg->pcfg_flags, + (time_t)pcfg->pcfg_nid3); + break; + } + + case IOC_PORTAL_GET_ROUTE: + CDEBUG (D_IOCTL, "Getting route [%d]\n", pcfg->pcfg_count); + err = kportal_get_route(pcfg->pcfg_count, &pcfg->pcfg_nal, + &pcfg->pcfg_nid, + &pcfg->pcfg_nid2, &pcfg->pcfg_nid3, + &pcfg->pcfg_flags); + break; + } + RETURN(err); +} + +static int +kportal_register_router (void) +{ + int rc; + kpr_control_interface_t *ci; + + ci = (kpr_control_interface_t *)PORTAL_SYMBOL_GET(kpr_control_interface); + if (ci == NULL) + return (0); + + rc = kportal_nal_register(ROUTER, kportal_router_cmd, NULL); + + PORTAL_SYMBOL_PUT(kpr_control_interface); + return (rc); +} + +static int +kportal_unregister_router (void) +{ + int rc; + kpr_control_interface_t *ci; + + ci = (kpr_control_interface_t *)PORTAL_SYMBOL_GET(kpr_control_interface); + if (ci == NULL) + return (0); + + rc = kportal_nal_unregister(ROUTER); + + PORTAL_SYMBOL_PUT(kpr_control_interface); + return (rc); +} + int kportal_nal_cmd(struct portals_cfg *pcfg) { @@ -242,6 +320,8 @@ kportal_get_ni (int nal) return (PORTAL_SYMBOL_GET(ktoenal_ni)); case GMNAL: return (PORTAL_SYMBOL_GET(kgmnal_ni)); + case IBNAL: + return (PORTAL_SYMBOL_GET(kibnal_ni)); case TCPNAL: /* userspace NAL */ return (NULL); @@ -272,6 +352,9 @@ kportal_put_ni (int nal) case GMNAL: PORTAL_SYMBOL_PUT(kgmnal_ni); break; + case IBNAL: + PORTAL_SYMBOL_PUT(kibnal_ni); + break; case TCPNAL: /* A lesson to a malicious caller */ LBUG (); @@ -326,6 +409,7 @@ static int kportal_ioctl(struct inode *inode, struct file *file, int err = 0; char buf[1024]; struct portal_ioctl_data *data; + char str[PTL_NALFMT_SIZE]; ENTRY; @@ -379,8 +463,9 @@ static int kportal_ioctl(struct inode *inode, struct file *file, case IOC_PORTAL_PING: { void (*ping)(struct portal_ioctl_data *); - CDEBUG(D_IOCTL, "doing %d pings to nid "LPU64"\n", - data->ioc_count, data->ioc_nid); + CDEBUG(D_IOCTL, "doing %d pings to nid "LPX64" (%s)\n", + data->ioc_count, data->ioc_nid, + portals_nid2str(data->ioc_nal, data->ioc_nid, str)); ping = PORTAL_SYMBOL_GET(kping_client); if (!ping) CERROR("PORTAL_SYMBOL_GET failed\n"); @@ -391,50 +476,11 @@ static int kportal_ioctl(struct inode *inode, struct file *file, RETURN(0); } - case IOC_PORTAL_ADD_ROUTE: - CDEBUG(D_IOCTL, "Adding route: [%d] "LPU64" : "LPU64" - "LPU64"\n", - data->ioc_nal, data->ioc_nid, - data->ioc_nid2, data->ioc_nid3); - err = kportal_add_route(data->ioc_nal, data->ioc_nid, - data->ioc_nid2, data->ioc_nid3); - break; - - case IOC_PORTAL_DEL_ROUTE: - CDEBUG (D_IOCTL, "Removing routes via [%d] "LPU64" : "LPU64" - "LPU64"\n", - data->ioc_nal, data->ioc_nid, - data->ioc_nid2, data->ioc_nid3); - err = kportal_del_route (data->ioc_nal, data->ioc_nid, - data->ioc_nid2, data->ioc_nid3); - break; - - case IOC_PORTAL_NOTIFY_ROUTER: { - CDEBUG (D_IOCTL, "Notifying peer [%d] "LPU64" %s @ %ld\n", - data->ioc_nal, data->ioc_nid, - data->ioc_flags ? 
"Enabling" : "Disabling", - (time_t)data->ioc_nid3); - - err = kportal_notify_router (data->ioc_nal, data->ioc_nid, - data->ioc_flags, - (time_t)data->ioc_nid3); - break; - } - - case IOC_PORTAL_GET_ROUTE: - CDEBUG (D_IOCTL, "Getting route [%d]\n", data->ioc_count); - err = kportal_get_route(data->ioc_count, &data->ioc_nal, - &data->ioc_nid, - &data->ioc_nid2, &data->ioc_nid3, - &data->ioc_flags); - if (err == 0) - if (copy_to_user((char *)arg, data, sizeof (*data))) - err = -EFAULT; - break; - case IOC_PORTAL_GET_NID: { const ptl_handle_ni_t *nip; ptl_process_id_t pid; - CDEBUG (D_IOCTL, "Getting nid [%d]\n", data->ioc_nal); + CDEBUG (D_IOCTL, "Getting nid for nal [%d]\n", data->ioc_nal); nip = kportal_get_ni (data->ioc_nal); if (nip == NULL) @@ -573,9 +619,17 @@ static int init_kportals_module(void) goto cleanup_fini; } + rc = kportal_register_router(); + if (rc) { + CERROR("kportals_register_router: error %d\n", rc); + goto cleanup_proc; + } + CDEBUG (D_OTHER, "portals setup OK\n"); return (0); + cleanup_proc: + remove_proc(); cleanup_fini: PtlFini(); cleanup_deregister: @@ -593,6 +647,7 @@ static void exit_kportals_module(void) { int rc; + kportal_unregister_router(); remove_proc(); PtlFini(); diff --git a/lustre/portals/portals/api-init.c b/lustre/portals/portals/api-init.c index f77a439..020a2a9 100644 --- a/lustre/portals/portals/api-init.c +++ b/lustre/portals/portals/api-init.c @@ -26,7 +26,7 @@ #include int ptl_init; -unsigned int portal_subsystem_debug = ~0 - (S_PORTALS | S_QSWNAL | S_SOCKNAL | S_GMNAL); +unsigned int portal_subsystem_debug = ~0 - (S_PORTALS | S_QSWNAL | S_SOCKNAL | S_GMNAL | S_IBNAL); unsigned int portal_debug = ~0; unsigned int portal_cerror = 1; unsigned int portal_printk; diff --git a/lustre/portals/portals/lib-init.c b/lustre/portals/portals/lib-init.c index 99c4d32..ab223d6 100644 --- a/lustre/portals/portals/lib-init.c +++ b/lustre/portals/portals/lib-init.c @@ -127,6 +127,8 @@ kportal_descriptor_setup (nal_cb_t *nal) void kportal_descriptor_cleanup (nal_cb_t *nal) { + int rc; + if (--ptl_slab_users != 0) return; @@ -135,14 +137,26 @@ kportal_descriptor_cleanup (nal_cb_t *nal) LASSERT (atomic_read (&eq_in_use_count) == 0); LASSERT (atomic_read (&msg_in_use_count) == 0); - if (ptl_md_slab != NULL) - kmem_cache_destroy(ptl_md_slab); - if (ptl_msg_slab != NULL) - kmem_cache_destroy(ptl_msg_slab); - if (ptl_me_slab != NULL) - kmem_cache_destroy(ptl_me_slab); - if (ptl_eq_slab != NULL) - kmem_cache_destroy(ptl_eq_slab); + if (ptl_md_slab != NULL) { + rc = kmem_cache_destroy(ptl_md_slab); + if (rc != 0) + CERROR("unable to free MD slab\n"); + } + if (ptl_msg_slab != NULL) { + rc = kmem_cache_destroy(ptl_msg_slab); + if (rc != 0) + CERROR("unable to free MSG slab\n"); + } + if (ptl_me_slab != NULL) { + rc = kmem_cache_destroy(ptl_me_slab); + if (rc != 0) + CERROR("unable to free ME slab\n"); + } + if (ptl_eq_slab != NULL) { + rc = kmem_cache_destroy(ptl_eq_slab); + if (rc != 0) + CERROR("unable to free EQ slab\n"); + } } #else diff --git a/lustre/portals/portals/lib-move.c b/lustre/portals/portals/lib-move.c index e73cbb8..6e904ba 100644 --- a/lustre/portals/portals/lib-move.c +++ b/lustre/portals/portals/lib-move.c @@ -583,7 +583,7 @@ static int parse_put(nal_cb_t * nal, ptl_hdr_t * hdr, void *private) me = lib_find_me(nal, hdr->msg.put.ptl_index, PTL_MD_OP_PUT, hdr->src_nid, hdr->src_pid, - PTL_HDR_LENGTH (hdr), hdr->msg.put.offset, + hdr->payload_length, hdr->msg.put.offset, hdr->msg.put.match_bits, &mlength, &offset, &unlink); if (me == NULL) @@ -592,7 
+592,7 @@ static int parse_put(nal_cb_t * nal, ptl_hdr_t * hdr, void *private) md = me->md; CDEBUG(D_NET, "Incoming put index %x from "LPU64"/%u of length %d/%d " "into md "LPX64" [%d] + %d\n", hdr->msg.put.ptl_index, - hdr->src_nid, hdr->src_pid, mlength, PTL_HDR_LENGTH(hdr), + hdr->src_nid, hdr->src_pid, mlength, hdr->payload_length, md->md_lh.lh_cookie, md->md_niov, offset); msg = get_new_msg (nal, md); @@ -617,7 +617,7 @@ static int parse_put(nal_cb_t * nal, ptl_hdr_t * hdr, void *private) msg->ev.initiator.pid = hdr->src_pid; msg->ev.portal = hdr->msg.put.ptl_index; msg->ev.match_bits = hdr->msg.put.match_bits; - msg->ev.rlength = PTL_HDR_LENGTH(hdr); + msg->ev.rlength = hdr->payload_length; msg->ev.mlength = mlength; msg->ev.offset = offset; msg->ev.hdr_data = hdr->msg.put.hdr_data; @@ -646,14 +646,14 @@ static int parse_put(nal_cb_t * nal, ptl_hdr_t * hdr, void *private) state_unlock(nal, &flags); - lib_recv (nal, private, msg, md, offset, mlength, PTL_HDR_LENGTH (hdr)); + lib_recv (nal, private, msg, md, offset, mlength, hdr->payload_length); return 0; drop: nal->ni.counters.drop_count++; - nal->ni.counters.drop_length += PTL_HDR_LENGTH(hdr); + nal->ni.counters.drop_length += hdr->payload_length; state_unlock (nal, &flags); - lib_recv (nal, private, NULL, NULL, 0, 0, PTL_HDR_LENGTH (hdr)); + lib_recv (nal, private, NULL, NULL, 0, 0, hdr->payload_length); return -1; } @@ -676,11 +676,6 @@ static int parse_get(nal_cb_t * nal, ptl_hdr_t * hdr, void *private) hdr->msg.get.sink_length = NTOH__u32 (hdr->msg.get.sink_length); hdr->msg.get.src_offset = NTOH__u32 (hdr->msg.get.src_offset); - /* compatibility check until field is deleted */ - if (hdr->msg.get.return_offset != 0) - CERROR("Unexpected non-zero get.return_offset %x from " - LPU64"\n", hdr->msg.get.return_offset, hdr->src_nid); - state_lock(nal, &flags); me = lib_find_me(nal, hdr->msg.get.ptl_index, PTL_MD_OP_GET, @@ -694,7 +689,7 @@ static int parse_get(nal_cb_t * nal, ptl_hdr_t * hdr, void *private) md = me->md; CDEBUG(D_NET, "Incoming get index %d from "LPU64".%u of length %d/%d " "from md "LPX64" [%d] + %d\n", hdr->msg.get.ptl_index, - hdr->src_nid, hdr->src_pid, mlength, PTL_HDR_LENGTH(hdr), + hdr->src_nid, hdr->src_pid, mlength, hdr->payload_length, md->md_lh.lh_cookie, md->md_niov, offset); msg = get_new_msg (nal, md); @@ -710,7 +705,7 @@ static int parse_get(nal_cb_t * nal, ptl_hdr_t * hdr, void *private) msg->ev.initiator.pid = hdr->src_pid; msg->ev.portal = hdr->msg.get.ptl_index; msg->ev.match_bits = hdr->msg.get.match_bits; - msg->ev.rlength = PTL_HDR_LENGTH(hdr); + msg->ev.rlength = hdr->payload_length; msg->ev.mlength = mlength; msg->ev.offset = offset; msg->ev.hdr_data = 0; @@ -745,7 +740,7 @@ static int parse_get(nal_cb_t * nal, ptl_hdr_t * hdr, void *private) reply.src_nid = HTON__u64 (ni->nid); reply.dest_pid = HTON__u32 (hdr->src_pid); reply.src_pid = HTON__u32 (ni->pid); - PTL_HDR_LENGTH(&reply) = HTON__u32 (mlength); + reply.payload_length = HTON__u32 (mlength); reply.msg.reply.dst_wmd = hdr->msg.get.return_wmd; @@ -763,13 +758,13 @@ static int parse_get(nal_cb_t * nal, ptl_hdr_t * hdr, void *private) } /* Complete the incoming message */ - lib_recv (nal, private, NULL, NULL, 0, 0, PTL_HDR_LENGTH (hdr)); + lib_recv (nal, private, NULL, NULL, 0, 0, hdr->payload_length); return (rc); drop: ni->counters.drop_count++; ni->counters.drop_length += hdr->msg.get.sink_length; state_unlock(nal, &flags); - lib_recv (nal, private, NULL, NULL, 0, 0, PTL_HDR_LENGTH (hdr)); + lib_recv (nal, private, NULL, NULL, 0, 0, 
hdr->payload_length);
         return -1;
 }
 
@@ -782,11 +777,6 @@ static int parse_reply(nal_cb_t * nal, ptl_hdr_t * hdr, void *private)
         lib_msg_t *msg;
         unsigned long flags;
 
-        /* compatibility check until field is deleted */
-        if (hdr->msg.reply.dst_offset != 0)
-                CERROR("Unexpected non-zero reply.dst_offset %x from "LPU64"\n",
-                       hdr->msg.reply.dst_offset, hdr->src_nid);
-
         state_lock(nal, &flags);
 
         /* NB handles only looked up by creator (no flips) */
@@ -802,7 +792,7 @@ static int parse_reply(nal_cb_t * nal, ptl_hdr_t * hdr, void *private)
 
         LASSERT (md->offset == 0);
 
-        length = rlength = PTL_HDR_LENGTH(hdr);
+        length = rlength = hdr->payload_length;
 
         if (length > md->length) {
                 if ((md->options & PTL_MD_TRUNCATE) == 0) {
@@ -848,9 +838,9 @@ static int parse_reply(nal_cb_t * nal, ptl_hdr_t * hdr, void *private)
 
  drop:
         nal->ni.counters.drop_count++;
-        nal->ni.counters.drop_length += PTL_HDR_LENGTH(hdr);
+        nal->ni.counters.drop_length += hdr->payload_length;
         state_unlock (nal, &flags);
-        lib_recv (nal, private, NULL, NULL, 0, 0, PTL_HDR_LENGTH (hdr));
+        lib_recv (nal, private, NULL, NULL, 0, 0, hdr->payload_length);
         return -1;
 }
 
@@ -901,13 +891,13 @@ static int parse_ack(nal_cb_t * nal, ptl_hdr_t * hdr, void *private)
         ni->counters.recv_count++;
         state_unlock(nal, &flags);
 
-        lib_recv (nal, private, msg, NULL, 0, 0, PTL_HDR_LENGTH (hdr));
+        lib_recv (nal, private, msg, NULL, 0, 0, hdr->payload_length);
         return 0;
 
  drop:
         nal->ni.counters.drop_count++;
         state_unlock (nal, &flags);
-        lib_recv (nal, private, NULL, NULL, 0, 0, PTL_HDR_LENGTH (hdr));
+        lib_recv (nal, private, NULL, NULL, 0, 0, hdr->payload_length);
         return -1;
 }
 
@@ -954,7 +944,7 @@ void print_hdr(nal_cb_t * nal, ptl_hdr_t * hdr)
                        hdr->msg.put.match_bits);
                 nal->cb_printf(nal, "    Length %d, offset %d, hdr data "LPX64"\n",
-                               PTL_HDR_LENGTH(hdr), hdr->msg.put.offset,
+                               hdr->payload_length, hdr->msg.put.offset,
                                hdr->msg.put.hdr_data);
                 break;
 
@@ -984,7 +974,7 @@ void print_hdr(nal_cb_t * nal, ptl_hdr_t * hdr)
                        "length %d\n",
                        hdr->msg.reply.dst_wmd.wh_interface_cookie,
                        hdr->msg.reply.dst_wmd.wh_object_cookie,
-                       PTL_HDR_LENGTH(hdr));
+                       hdr->payload_length);
         }
 
 }                               /* end of print_hdr() */
@@ -994,21 +984,13 @@ int lib_parse(nal_cb_t * nal, ptl_hdr_t * hdr, void *private)
 {
         unsigned long flags;
 
-        /* NB static check; optimizer will elide this if it's right */
-        LASSERT (offsetof (ptl_hdr_t, msg.ack.length) ==
-                 offsetof (ptl_hdr_t, msg.put.length));
-        LASSERT (offsetof (ptl_hdr_t, msg.ack.length) ==
-                 offsetof (ptl_hdr_t, msg.get.length));
-        LASSERT (offsetof (ptl_hdr_t, msg.ack.length) ==
-                 offsetof (ptl_hdr_t, msg.reply.length));
-
         /* convert common fields to host byte order */
         hdr->dest_nid = NTOH__u64 (hdr->dest_nid);
         hdr->src_nid = NTOH__u64 (hdr->src_nid);
         hdr->dest_pid = NTOH__u32 (hdr->dest_pid);
         hdr->src_pid = NTOH__u32 (hdr->src_pid);
         hdr->type = NTOH__u32 (hdr->type);
-        PTL_HDR_LENGTH(hdr) = NTOH__u32 (PTL_HDR_LENGTH(hdr));
+        hdr->payload_length = NTOH__u32(hdr->payload_length);
 #if 0
         nal->cb_printf(nal, "%d: lib_parse: nal=%p hdr=%p type=%d\n",
                        nal->ni.nid, nal, hdr, hdr->type);
@@ -1023,7 +1005,7 @@ int lib_parse(nal_cb_t * nal, ptl_hdr_t * hdr, void *private)
                        nal->ni.nid, mv->magic, mv->version_major,
                        mv->version_minor, hdr->src_nid);
 
-                lib_recv (nal, private, NULL, NULL, 0, 0, PTL_HDR_LENGTH (hdr));
+                lib_recv (nal, private, NULL, NULL, 0, 0, hdr->payload_length);
                 return (-1);
         }
 
@@ -1034,10 +1016,10 @@ int lib_parse(nal_cb_t * nal, ptl_hdr_t * hdr, void *private)
 
                 state_lock (nal, &flags);
                 nal->ni.counters.drop_count++;
-                nal->ni.counters.drop_length += PTL_HDR_LENGTH(hdr);
+                nal->ni.counters.drop_length += hdr->payload_length;
                 state_unlock (nal, &flags);
 
-                lib_recv (nal, private, NULL, NULL, 0, 0, PTL_HDR_LENGTH (hdr));
+                lib_recv (nal, private, NULL, NULL, 0, 0, hdr->payload_length);
                 return (-1);
         }
 
@@ -1048,7 +1030,7 @@ int lib_parse(nal_cb_t * nal, ptl_hdr_t * hdr, void *private)
                        ": simulated failure\n",
                        nal->ni.nid, hdr_type_string (hdr),
                        hdr->src_nid);
-                lib_recv (nal, private, NULL, NULL, 0, 0, PTL_HDR_LENGTH (hdr));
+                lib_recv (nal, private, NULL, NULL, 0, 0, hdr->payload_length);
                 return (-1);
         }
 
@@ -1069,7 +1051,7 @@ int lib_parse(nal_cb_t * nal, ptl_hdr_t * hdr, void *private)
                        ": Bad type=0x%x\n", nal->ni.nid, hdr->src_nid,
                        hdr->type);
 
-                lib_recv (nal, private, NULL, NULL, 0, 0, PTL_HDR_LENGTH (hdr));
+                lib_recv (nal, private, NULL, NULL, 0, 0, hdr->payload_length);
                 return (-1);
         }
 }
 
@@ -1126,7 +1108,7 @@ int do_PtlPut(nal_cb_t * nal, void *private, void *v_args, void *v_ret)
         hdr.src_nid = HTON__u64 (ni->nid);
         hdr.dest_pid = HTON__u32 (id->pid);
         hdr.src_pid = HTON__u32 (ni->pid);
-        PTL_HDR_LENGTH(&hdr) = HTON__u32 (md->length);
+        hdr.payload_length = HTON__u32 (md->length);
 
         /* NB handles only looked up by creator (no flips) */
         if (args->ack_req_in == PTL_ACK_REQ) {
@@ -1303,7 +1285,7 @@ int do_PtlGet(nal_cb_t * nal, void *private, void *v_args, void *v_ret)
         hdr.src_nid = HTON__u64 (ni->nid);
         hdr.dest_pid = HTON__u32 (id->pid);
         hdr.src_pid = HTON__u32 (ni->pid);
-        PTL_HDR_LENGTH(&hdr) = 0;
+        hdr.payload_length = 0;
 
         /* NB handles only looked up by creator (no flips) */
         hdr.msg.get.return_wmd.wh_interface_cookie = ni->ni_interface_cookie;
@@ -1364,12 +1346,15 @@ int do_PtlGet(nal_cb_t * nal, void *private, void *v_args, void *v_ret)
 
 void lib_assert_wire_constants (void)
 {
-        /* Wire protocol assertions generated by 'wirecheck' */
+        /* Wire protocol assertions generated by 'wirecheck'
+         * running on Linux robert.bartonsoftware.com 2.4.20-18.9 #1 Thu May 29 06:54:41 EDT 2003 i68
+         * with gcc version 3.2.2 20030222 (Red Hat Linux 3.2.2-5) */
+
+        /* Constants... */
         LASSERT (PORTALS_PROTO_MAGIC == 0xeebc0ded);
         LASSERT (PORTALS_PROTO_VERSION_MAJOR == 0);
-        LASSERT (PORTALS_PROTO_VERSION_MINOR == 1);
+        LASSERT (PORTALS_PROTO_VERSION_MINOR == 3);
         LASSERT (PTL_MSG_ACK == 0);
         LASSERT (PTL_MSG_PUT == 1);
         LASSERT (PTL_MSG_GET == 2);
@@ -1377,79 +1362,77 @@ void lib_assert_wire_constants (void)
         LASSERT (PTL_MSG_HELLO == 4);
 
         /* Checks for struct ptl_handle_wire_t */
-        LASSERT (sizeof (ptl_handle_wire_t) == 16);
-        LASSERT (offsetof (ptl_handle_wire_t, wh_interface_cookie) == 0);
-        LASSERT (sizeof (((ptl_handle_wire_t *)0)->wh_interface_cookie) == 8);
-        LASSERT (offsetof (ptl_handle_wire_t, wh_object_cookie) == 8);
-        LASSERT (sizeof (((ptl_handle_wire_t *)0)->wh_object_cookie) == 8);
+        LASSERT ((int)sizeof(ptl_handle_wire_t) == 16);
+        LASSERT (offsetof(ptl_handle_wire_t, wh_interface_cookie) == 0);
+        LASSERT ((int)sizeof(((ptl_handle_wire_t *)0)->wh_interface_cookie) == 8);
+        LASSERT (offsetof(ptl_handle_wire_t, wh_object_cookie) == 8);
+        LASSERT ((int)sizeof(((ptl_handle_wire_t *)0)->wh_object_cookie) == 8);
 
         /* Checks for struct ptl_magicversion_t */
-        LASSERT (sizeof (ptl_magicversion_t) == 8);
-        LASSERT (offsetof (ptl_magicversion_t, magic) == 0);
-        LASSERT (sizeof (((ptl_magicversion_t *)0)->magic) == 4);
-        LASSERT (offsetof (ptl_magicversion_t, version_major) == 4);
-        LASSERT (sizeof (((ptl_magicversion_t *)0)->version_major) == 2);
-        LASSERT (offsetof (ptl_magicversion_t, version_minor) == 6);
-        LASSERT (sizeof (((ptl_magicversion_t *)0)->version_minor) == 2);
+        LASSERT ((int)sizeof(ptl_magicversion_t) == 8);
+        LASSERT (offsetof(ptl_magicversion_t, magic) == 0);
+        LASSERT ((int)sizeof(((ptl_magicversion_t *)0)->magic) == 4);
+        LASSERT (offsetof(ptl_magicversion_t, version_major) == 4);
+        LASSERT ((int)sizeof(((ptl_magicversion_t *)0)->version_major) == 2);
+        LASSERT (offsetof(ptl_magicversion_t, version_minor) == 6);
+        LASSERT ((int)sizeof(((ptl_magicversion_t *)0)->version_minor) == 2);
 
         /* Checks for struct ptl_hdr_t */
-        LASSERT (sizeof (ptl_hdr_t) == 72);
-        LASSERT (offsetof (ptl_hdr_t, dest_nid) == 0);
-        LASSERT (sizeof (((ptl_hdr_t *)0)->dest_nid) == 8);
-        LASSERT (offsetof (ptl_hdr_t, src_nid) == 8);
-        LASSERT (sizeof (((ptl_hdr_t *)0)->src_nid) == 8);
-        LASSERT (offsetof (ptl_hdr_t, dest_pid) == 16);
-        LASSERT (sizeof (((ptl_hdr_t *)0)->dest_pid) == 4);
-        LASSERT (offsetof (ptl_hdr_t, src_pid) == 20);
-        LASSERT (sizeof (((ptl_hdr_t *)0)->src_pid) == 4);
-        LASSERT (offsetof (ptl_hdr_t, type) == 24);
-        LASSERT (sizeof (((ptl_hdr_t *)0)->type) == 4);
+        LASSERT ((int)sizeof(ptl_hdr_t) == 72);
+        LASSERT (offsetof(ptl_hdr_t, dest_nid) == 0);
+        LASSERT ((int)sizeof(((ptl_hdr_t *)0)->dest_nid) == 8);
+        LASSERT (offsetof(ptl_hdr_t, src_nid) == 8);
+        LASSERT ((int)sizeof(((ptl_hdr_t *)0)->src_nid) == 8);
+        LASSERT (offsetof(ptl_hdr_t, dest_pid) == 16);
+        LASSERT ((int)sizeof(((ptl_hdr_t *)0)->dest_pid) == 4);
+        LASSERT (offsetof(ptl_hdr_t, src_pid) == 20);
+        LASSERT ((int)sizeof(((ptl_hdr_t *)0)->src_pid) == 4);
+        LASSERT (offsetof(ptl_hdr_t, type) == 24);
+        LASSERT ((int)sizeof(((ptl_hdr_t *)0)->type) == 4);
+        LASSERT (offsetof(ptl_hdr_t, payload_length) == 28);
+        LASSERT ((int)sizeof(((ptl_hdr_t *)0)->payload_length) == 4);
+        LASSERT (offsetof(ptl_hdr_t, msg) == 32);
+        LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg) == 40);
 
         /* Ack */
-        LASSERT (offsetof (ptl_hdr_t, msg.ack.mlength) == 28);
-        LASSERT (sizeof (((ptl_hdr_t *)0)->msg.ack.mlength) == 4);
-        LASSERT (offsetof (ptl_hdr_t, msg.ack.dst_wmd) == 32);
-        LASSERT (sizeof (((ptl_hdr_t *)0)->msg.ack.dst_wmd) == 16);
-        LASSERT (offsetof (ptl_hdr_t, msg.ack.match_bits) == 48);
-        LASSERT (sizeof (((ptl_hdr_t *)0)->msg.ack.match_bits) == 8);
-        LASSERT (offsetof (ptl_hdr_t, msg.ack.length) == 56);
-        LASSERT (sizeof (((ptl_hdr_t *)0)->msg.ack.length) == 4);
+        LASSERT (offsetof(ptl_hdr_t, msg.ack.dst_wmd) == 32);
+        LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.ack.dst_wmd) == 16);
+        LASSERT (offsetof(ptl_hdr_t, msg.ack.match_bits) == 48);
+        LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.ack.match_bits) == 8);
+        LASSERT (offsetof(ptl_hdr_t, msg.ack.mlength) == 56);
+        LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.ack.mlength) == 4);
 
         /* Put */
-        LASSERT (offsetof (ptl_hdr_t, msg.put.ptl_index) == 28);
-        LASSERT (sizeof (((ptl_hdr_t *)0)->msg.put.ptl_index) == 4);
-        LASSERT (offsetof (ptl_hdr_t, msg.put.ack_wmd) == 32);
-        LASSERT (sizeof (((ptl_hdr_t *)0)->msg.put.ack_wmd) == 16);
-        LASSERT (offsetof (ptl_hdr_t, msg.put.match_bits) == 48);
-        LASSERT (sizeof (((ptl_hdr_t *)0)->msg.put.match_bits) == 8);
-        LASSERT (offsetof (ptl_hdr_t, msg.put.length) == 56);
-        LASSERT (sizeof (((ptl_hdr_t *)0)->msg.put.length) == 4);
-        LASSERT (offsetof (ptl_hdr_t, msg.put.offset) == 60);
-        LASSERT (sizeof (((ptl_hdr_t *)0)->msg.put.offset) == 4);
-        LASSERT (offsetof (ptl_hdr_t, msg.put.hdr_data) == 64);
-        LASSERT (sizeof (((ptl_hdr_t *)0)->msg.put.hdr_data) == 8);
+        LASSERT (offsetof(ptl_hdr_t, msg.put.ack_wmd) == 32);
+        LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.put.ack_wmd) == 16);
+        LASSERT (offsetof(ptl_hdr_t, msg.put.match_bits) == 48);
+        LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.put.match_bits) == 8);
+        LASSERT (offsetof(ptl_hdr_t, msg.put.hdr_data) == 56);
+        LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.put.hdr_data) == 8);
+        LASSERT (offsetof(ptl_hdr_t, msg.put.ptl_index) == 64);
+        LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.put.ptl_index) == 4);
+        LASSERT (offsetof(ptl_hdr_t, msg.put.offset) == 68);
+        LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.put.offset) == 4);
 
         /* Get */
-        LASSERT (offsetof (ptl_hdr_t, msg.get.ptl_index) == 28);
-        LASSERT (sizeof (((ptl_hdr_t *)0)->msg.get.ptl_index) == 4);
-        LASSERT (offsetof (ptl_hdr_t, msg.get.return_wmd) == 32);
-        LASSERT (sizeof (((ptl_hdr_t *)0)->msg.get.return_wmd) == 16);
-        LASSERT (offsetof (ptl_hdr_t, msg.get.match_bits) == 48);
-        LASSERT (sizeof (((ptl_hdr_t *)0)->msg.get.match_bits) == 8);
-        LASSERT (offsetof (ptl_hdr_t, msg.get.length) == 56);
-        LASSERT (sizeof (((ptl_hdr_t *)0)->msg.get.length) == 4);
-        LASSERT (offsetof (ptl_hdr_t, msg.get.src_offset) == 60);
-        LASSERT (sizeof (((ptl_hdr_t *)0)->msg.get.src_offset) == 4);
-        LASSERT (offsetof (ptl_hdr_t, msg.get.return_offset) == 64);
-        LASSERT (sizeof (((ptl_hdr_t *)0)->msg.get.return_offset) == 4);
-        LASSERT (offsetof (ptl_hdr_t, msg.get.sink_length) == 68);
-        LASSERT (sizeof (((ptl_hdr_t *)0)->msg.get.sink_length) == 4);
+        LASSERT (offsetof(ptl_hdr_t, msg.get.return_wmd) == 32);
+        LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.get.return_wmd) == 16);
+        LASSERT (offsetof(ptl_hdr_t, msg.get.match_bits) == 48);
+        LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.get.match_bits) == 8);
+        LASSERT (offsetof(ptl_hdr_t, msg.get.ptl_index) == 56);
+        LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.get.ptl_index) == 4);
+        LASSERT (offsetof(ptl_hdr_t, msg.get.src_offset) == 60);
+        LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.get.src_offset) == 4);
+        LASSERT (offsetof(ptl_hdr_t, msg.get.sink_length) == 64);
+        LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.get.sink_length) == 4);
 
         /* Reply */
-        LASSERT (offsetof (ptl_hdr_t, msg.reply.dst_wmd) == 32);
-        LASSERT (sizeof (((ptl_hdr_t *)0)->msg.reply.dst_wmd) == 16);
-        LASSERT (offsetof (ptl_hdr_t, msg.reply.dst_offset) == 48);
-        LASSERT (sizeof (((ptl_hdr_t *)0)->msg.reply.dst_offset) == 4);
-        LASSERT (offsetof (ptl_hdr_t, msg.reply.length) == 56);
-        LASSERT (sizeof (((ptl_hdr_t *)0)->msg.reply.length) == 4);
+        LASSERT (offsetof(ptl_hdr_t, msg.reply.dst_wmd) == 32);
+        LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.reply.dst_wmd) == 16);
+
+        /* Hello */
+        LASSERT (offsetof(ptl_hdr_t, msg.hello.incarnation) == 32);
+        LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.hello.incarnation) == 8);
+        LASSERT (offsetof(ptl_hdr_t, msg.hello.type) == 40);
+        LASSERT ((int)sizeof(((ptl_hdr_t *)0)->msg.hello.type) == 4);
 }
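[Editor's note, not part of the patch: the wirecheck assertions above pin down the new wire header. payload_length is now a common field at offset 28, where the old per-message ack.length/put.length/get.length/reply.length union members used to live, which is what lets PTL_HDR_LENGTH() be retired everywhere above. A minimal, hedged sketch of the layout those assertions imply; the field names come from the assertions, while the typedefs and the self-check main() are illustrative scaffolding only:]

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed stand-ins for the kernel's fixed-size types. */
    typedef uint64_t __u64;
    typedef uint32_t __u32;

    typedef struct {                          /* sizeof == 16 */
            __u64 wh_interface_cookie;        /* offset  0 */
            __u64 wh_object_cookie;           /* offset  8 */
    } ptl_handle_wire_t;

    typedef struct {                          /* sizeof == 72 */
            __u64 dest_nid;                   /* offset  0 */
            __u64 src_nid;                    /* offset  8 */
            __u32 dest_pid;                   /* offset 16 */
            __u32 src_pid;                    /* offset 20 */
            __u32 type;                       /* offset 24 */
            __u32 payload_length;             /* offset 28: common to all types */
            union {                           /* offset 32, 40 bytes */
                    struct {                  /* ACK */
                            ptl_handle_wire_t dst_wmd;     /* 32 */
                            __u64             match_bits;  /* 48 */
                            __u32             mlength;     /* 56 */
                    } ack;
                    struct {                  /* PUT */
                            ptl_handle_wire_t ack_wmd;     /* 32 */
                            __u64             match_bits;  /* 48 */
                            __u64             hdr_data;    /* 56 */
                            __u32             ptl_index;   /* 64 */
                            __u32             offset;      /* 68 */
                    } put;
                    struct {                  /* GET */
                            ptl_handle_wire_t return_wmd;  /* 32 */
                            __u64             match_bits;  /* 48 */
                            __u32             ptl_index;   /* 56 */
                            __u32             src_offset;  /* 60 */
                            __u32             sink_length; /* 64 */
                    } get;
                    struct {                  /* REPLY */
                            ptl_handle_wire_t dst_wmd;     /* 32 */
                    } reply;
                    struct {                  /* HELLO */
                            __u64             incarnation; /* 32 */
                            __u32             type;        /* 40 */
                    } hello;
            } msg;
    } ptl_hdr_t;

    int main(void)
    {
            /* Runtime equivalents of a few of the wirecheck assertions. */
            assert(sizeof(ptl_hdr_t) == 72);
            assert(offsetof(ptl_hdr_t, payload_length) == 28);
            assert(offsetof(ptl_hdr_t, msg) == 32);
            printf("ptl_hdr_t is %zu bytes\n", sizeof(ptl_hdr_t));
            return 0;
    }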
"alive" : "dead"); kpr_upcall (gateway_nalid, gateway_nid, alive, when); } else { diff --git a/lustre/portals/tests/ping_cli.c b/lustre/portals/tests/ping_cli.c index 22bdb45..85c0d71 100644 --- a/lustre/portals/tests/ping_cli.c +++ b/lustre/portals/tests/ping_cli.c @@ -111,11 +111,15 @@ pingcli_start(struct portal_ioctl_data *args) unsigned ping_bulk_magic = PING_BULK_MAGIC; int rc; struct timeval tv1, tv2; + char str[PTL_NALFMT_SIZE]; + client->tsk = current; client->args = args; - CDEBUG (D_OTHER, "pingcli_setup args: nid "LPX64", \ + CDEBUG (D_OTHER, "pingcli_setup args: nid "LPX64" (%s), \ nal %d, size %u, count: %u, timeout: %u\n", - args->ioc_nid, args->ioc_nal, args->ioc_size, + args->ioc_nid, + portals_nid2str(args->ioc_nal, args->ioc_nid, str), + args->ioc_nal, args->ioc_size, args->ioc_count, args->ioc_timeout); diff --git a/lustre/portals/tests/sping_cli.c b/lustre/portals/tests/sping_cli.c index c37db4c..64a1dd2 100644 --- a/lustre/portals/tests/sping_cli.c +++ b/lustre/portals/tests/sping_cli.c @@ -104,14 +104,17 @@ pingcli_start(struct portal_ioctl_data *args) { const ptl_handle_ni_t *nip; unsigned ping_head_magic = PING_HEADER_MAGIC; + char str[PTL_NALFMT_SIZE]; int rc; client->tsk = current; client->args = args; - CDEBUG (D_OTHER, "pingcli_setup args: nid "LPX64", \ + CDEBUG (D_OTHER, "pingcli_setup args: nid "LPX64" (%s), \ nal %d, size %u, count: %u, timeout: %u\n", - args->ioc_nid, args->ioc_nal, args->ioc_size, + args->ioc_nid, + portals_nid2str(args->ioc_nid, args->ioc_nal, str), + args->ioc_nal, args->ioc_size, args->ioc_count, args->ioc_timeout); diff --git a/lustre/portals/utils/acceptor.c b/lustre/portals/utils/acceptor.c index 9fb2759..29b8d1e 100644 --- a/lustre/portals/utils/acceptor.c +++ b/lustre/portals/utils/acceptor.c @@ -19,6 +19,7 @@ #include #include #include +#include /* should get this from autoconf somehow */ #ifndef PIDFILE_DIR @@ -100,7 +101,7 @@ parse_size (int *sizep, char *str) } void -show_connection (int fd, __u32 net_ip, ptl_nid_t nid) +show_connection (int fd, __u32 net_ip) { struct hostent *h = gethostbyaddr ((char *)&net_ip, sizeof net_ip, AF_INET); __u32 host_ip = ntohl (net_ip); @@ -128,136 +129,8 @@ show_connection (int fd, __u32 net_ip, ptl_nid_t nid) else snprintf (host, sizeof(host), "%s", h->h_name); - syslog (LOG_INFO, "Accepted host: %s NID: "LPX64" snd: %d rcv %d nagle: %s\n", - host, nid, txmem, rxmem, nonagle ? 
"disabled" : "enabled"); -} - -int -sock_write (int cfd, void *buffer, int nob) -{ - while (nob > 0) - { - int rc = write (cfd, buffer, nob); - - if (rc < 0) - { - if (errno == EINTR) - continue; - - return (rc); - } - - if (rc == 0) - { - fprintf (stderr, "Unexpected zero sock_write\n"); - abort(); - } - - nob -= rc; - buffer = (char *)buffer + nob; - } - - return (0); -} - -int -sock_read (int cfd, void *buffer, int nob) -{ - while (nob > 0) - { - int rc = read (cfd, buffer, nob); - - if (rc < 0) - { - if (errno == EINTR) - continue; - - return (rc); - } - - if (rc == 0) /* EOF */ - { - errno = ECONNABORTED; - return (-1); - } - - nob -= rc; - buffer = (char *)buffer + nob; - } - - return (0); -} - -int -exchange_nids (int cfd, ptl_nid_t my_nid, ptl_nid_t *peer_nid) -{ - int rc; - ptl_hdr_t hdr; - ptl_magicversion_t *hmv = (ptl_magicversion_t *)&hdr.dest_nid; - - LASSERT (sizeof (*hmv) == sizeof (hdr.dest_nid)); - - memset (&hdr, 0, sizeof (hdr)); - - hmv->magic = __cpu_to_le32 (PORTALS_PROTO_MAGIC); - hmv->version_major = __cpu_to_le16 (PORTALS_PROTO_VERSION_MAJOR); - hmv->version_minor = __cpu_to_le16 (PORTALS_PROTO_VERSION_MINOR); - - hdr.src_nid = __cpu_to_le64 (my_nid); - hdr.type = __cpu_to_le32 (PTL_MSG_HELLO); - - /* Assume there's sufficient socket buffering for a portals HELLO header */ - rc = sock_write (cfd, &hdr, sizeof (hdr)); - if (rc != 0) { - perror ("Can't send initial HELLO"); - return (-1); - } - - /* First few bytes down the wire are the portals protocol magic and - * version, no matter what protocol version we're running. */ - - rc = sock_read (cfd, hmv, sizeof (*hmv)); - if (rc != 0) { - perror ("Can't read from peer"); - return (-1); - } - - if (__cpu_to_le32 (hmv->magic) != PORTALS_PROTO_MAGIC) { - fprintf (stderr, "Bad magic %#08x (%#08x expected)\n", - __cpu_to_le32 (hmv->magic), PORTALS_PROTO_MAGIC); - return (-1); - } - - if (__cpu_to_le16 (hmv->version_major) != PORTALS_PROTO_VERSION_MAJOR || - __cpu_to_le16 (hmv->version_minor) != PORTALS_PROTO_VERSION_MINOR) { - fprintf (stderr, "Incompatible protocol version %d.%d (%d.%d expected)\n", - __cpu_to_le16 (hmv->version_major), - __cpu_to_le16 (hmv->version_minor), - PORTALS_PROTO_VERSION_MAJOR, - PORTALS_PROTO_VERSION_MINOR); - } - - /* version 0 sends magic/version as the dest_nid of a 'hello' header, - * so read the rest of it in now... */ - LASSERT (PORTALS_PROTO_VERSION_MAJOR == 0); - rc = sock_read (cfd, hmv + 1, sizeof (hdr) - sizeof (*hmv)); - if (rc != 0) { - perror ("Can't read rest of HELLO hdr"); - return (-1); - } - - /* ...and check we got what we expected */ - if (__cpu_to_le32 (hdr.type) != PTL_MSG_HELLO || - __cpu_to_le32 (PTL_HDR_LENGTH (&hdr)) != 0) { - fprintf (stderr, "Expecting a HELLO hdr with 0 payload," - " but got type %d with %d payload\n", - __cpu_to_le32 (hdr.type), - __cpu_to_le32 (PTL_HDR_LENGTH (&hdr))); - return (-1); - } - - *peer_nid = __le64_to_cpu (hdr.src_nid); - return (0); + syslog (LOG_INFO, "Accepted host: %s snd: %d rcv %d nagle: %s\n", + host, txmem, rxmem, nonagle ? 
"disabled" : "enabled"); } void @@ -277,10 +150,9 @@ int main(int argc, char **argv) int noclose = 0; int nonagle = 1; int nal = SOCKNAL; - int xchg_nids = 0; int bind_irq = 0; - while ((c = getopt (argc, argv, "N:r:s:nlxi")) != -1) + while ((c = getopt (argc, argv, "N:r:s:nli")) != -1) switch (c) { case 'r': @@ -301,10 +173,6 @@ int main(int argc, char **argv) noclose = 1; break; - case 'x': - xchg_nids = 1; - break; - case 'i': bind_irq = 1; break; @@ -410,7 +278,6 @@ int main(int argc, char **argv) int cfd; struct portal_ioctl_data data; struct portals_cfg pcfg; - ptl_nid_t peer_nid; cfd = accept(fd, (struct sockaddr *)&clntaddr, &len); if ( cfd < 0 ) { @@ -419,43 +286,20 @@ int main(int argc, char **argv) continue; } - if (!xchg_nids) - peer_nid = ntohl (clntaddr.sin_addr.s_addr); /* HOST byte order */ - else - { - PORTAL_IOC_INIT (data); - data.ioc_nal = nal; - rc = ioctl (pfd, IOC_PORTAL_GET_NID, &data); - if (rc < 0) - { - perror ("Can't get my NID"); - close (cfd); - continue; - } - - rc = exchange_nids (cfd, data.ioc_nid, &peer_nid); - if (rc != 0) - { - close (cfd); - continue; - } - } - - show_connection (cfd, clntaddr.sin_addr.s_addr, peer_nid); + show_connection (cfd, clntaddr.sin_addr.s_addr); PCFG_INIT(pcfg, NAL_CMD_REGISTER_PEER_FD); pcfg.pcfg_nal = nal; pcfg.pcfg_fd = cfd; - pcfg.pcfg_nid = peer_nid; pcfg.pcfg_flags = bind_irq; - + pcfg.pcfg_misc = SOCKNAL_CONN_NONE; /* == incoming connection */ + PORTAL_IOC_INIT(data); data.ioc_pbuf1 = (char*)&pcfg; data.ioc_plen1 = sizeof(pcfg); if (ioctl(pfd, IOC_PORTAL_NAL_CMD, &data) < 0) { perror("ioctl failed"); - } else { printf("client registered\n"); } diff --git a/lustre/portals/utils/portals.c b/lustre/portals/utils/portals.c index 4beac34..147d132 100644 --- a/lustre/portals/utils/portals.c +++ b/lustre/portals/utils/portals.c @@ -55,6 +55,7 @@ #include #include #include +#include #include "parser.h" unsigned int portal_debug; @@ -80,6 +81,7 @@ static name2num_t nalnames[] = { {"toe", TOENAL}, {"elan", QSWNAL}, {"gm", GMNAL}, + {"ib", IBNAL}, {"scimac", SCIMACNAL}, {NULL, -1} }; @@ -453,14 +455,13 @@ jt_ptl_print_autoconnects (int argc, char **argv) if (rc != 0) break; - printf (LPX64"@%s:%d #%d buffer %d nonagle %s xchg %s " - "affinity %s eager %s share %d\n", + printf (LPX64"@%s:%d #%d buffer %d " + "nonagle %s affinity %s eager %s share %d\n", pcfg.pcfg_nid, ptl_ipaddr_2_str (pcfg.pcfg_id, buffer), pcfg.pcfg_misc, pcfg.pcfg_count, pcfg.pcfg_size, (pcfg.pcfg_flags & 1) ? "on" : "off", (pcfg.pcfg_flags & 2) ? "on" : "off", (pcfg.pcfg_flags & 4) ? "on" : "off", - (pcfg.pcfg_flags & 8) ? "on" : "off", pcfg.pcfg_wait); } @@ -476,14 +477,13 @@ jt_ptl_add_autoconnect (int argc, char **argv) ptl_nid_t nid; __u32 ip; int port; - int xchange_nids = 0; int irq_affinity = 0; int share = 0; int eager = 0; int rc; if (argc < 4 || argc > 5) { - fprintf (stderr, "usage: %s nid ipaddr port [ixse]\n", argv[0]); + fprintf (stderr, "usage: %s nid ipaddr port [ise]\n", argv[0]); return 0; } @@ -511,9 +511,6 @@ jt_ptl_add_autoconnect (int argc, char **argv) while (*opts != 0) switch (*opts++) { - case 'x': - xchange_nids = 1; - break; case 'i': irq_affinity = 1; break; @@ -537,10 +534,9 @@ jt_ptl_add_autoconnect (int argc, char **argv) /* only passing one buffer size! */ pcfg.pcfg_size = MAX (g_socket_rxmem, g_socket_txmem); pcfg.pcfg_flags = (g_socket_nonagle ? 0x01 : 0) | - (xchange_nids ? 0x02 : 0) | - (irq_affinity ? 0x04 : 0) | - (share ? 0x08 : 0) | - (eager ? 0x10 : 0); + (irq_affinity ? 0x02 : 0) | + (share ? 0x04 : 0) | + (eager ? 
diff --git a/lustre/portals/utils/portals.c b/lustre/portals/utils/portals.c
index 4beac34..147d132 100644
--- a/lustre/portals/utils/portals.c
+++ b/lustre/portals/utils/portals.c
@@ -55,6 +55,7 @@
 #include
 #include
 #include
+#include
 
 #include "parser.h"
 
 unsigned int portal_debug;
@@ -80,6 +81,7 @@ static name2num_t nalnames[] = {
         {"toe",         TOENAL},
         {"elan",        QSWNAL},
         {"gm",          GMNAL},
+        {"ib",          IBNAL},
         {"scimac",      SCIMACNAL},
         {NULL,          -1}
 };
@@ -453,14 +455,13 @@ jt_ptl_print_autoconnects (int argc, char **argv)
                 if (rc != 0)
                         break;
 
-                printf (LPX64"@%s:%d #%d buffer %d nonagle %s xchg %s "
-                        "affinity %s eager %s share %d\n",
+                printf (LPX64"@%s:%d #%d buffer %d "
+                        "nonagle %s affinity %s eager %s share %d\n",
                         pcfg.pcfg_nid, ptl_ipaddr_2_str (pcfg.pcfg_id, buffer),
                         pcfg.pcfg_misc, pcfg.pcfg_count, pcfg.pcfg_size,
                         (pcfg.pcfg_flags & 1) ? "on" : "off",
                         (pcfg.pcfg_flags & 2) ? "on" : "off",
                         (pcfg.pcfg_flags & 4) ? "on" : "off",
-                        (pcfg.pcfg_flags & 8) ? "on" : "off",
                         pcfg.pcfg_wait);
         }
@@ -476,14 +477,13 @@ jt_ptl_add_autoconnect (int argc, char **argv)
         ptl_nid_t    nid;
         __u32        ip;
         int          port;
-        int          xchange_nids = 0;
         int          irq_affinity = 0;
         int          share = 0;
         int          eager = 0;
         int          rc;
 
         if (argc < 4 || argc > 5) {
-                fprintf (stderr, "usage: %s nid ipaddr port [ixse]\n", argv[0]);
+                fprintf (stderr, "usage: %s nid ipaddr port [ise]\n", argv[0]);
                 return 0;
         }
@@ -511,9 +511,6 @@ jt_ptl_add_autoconnect (int argc, char **argv)
                 while (*opts != 0)
                         switch (*opts++) {
-                        case 'x':
-                                xchange_nids = 1;
-                                break;
                         case 'i':
                                 irq_affinity = 1;
                                 break;
@@ -537,10 +534,9 @@ jt_ptl_add_autoconnect (int argc, char **argv)
         /* only passing one buffer size! */
         pcfg.pcfg_size = MAX (g_socket_rxmem, g_socket_txmem);
         pcfg.pcfg_flags = (g_socket_nonagle ? 0x01 : 0) |
-                          (xchange_nids     ? 0x02 : 0) |
-                          (irq_affinity     ? 0x04 : 0) |
-                          (share            ? 0x08 : 0) |
-                          (eager            ? 0x10 : 0);
+                          (irq_affinity     ? 0x02 : 0) |
+                          (share            ? 0x04 : 0) |
+                          (eager            ? 0x08 : 0);
 
         rc = pcfg_ioctl (&pcfg);
         if (rc != 0) {
@@ -636,10 +632,14 @@ jt_ptl_print_connections (int argc, char **argv)
                 if (rc != 0)
                         break;
 
-                printf (LPX64"@%s:%d\n",
+                printf (LPX64"@%s:%d:%s\n",
                         pcfg.pcfg_nid,
                         ptl_ipaddr_2_str (pcfg.pcfg_id, buffer),
-                        pcfg.pcfg_misc);
+                        pcfg.pcfg_misc,
+                        (pcfg.pcfg_flags == SOCKNAL_CONN_ANY)      ? "A" :
+                        (pcfg.pcfg_flags == SOCKNAL_CONN_CONTROL)  ? "C" :
+                        (pcfg.pcfg_flags == SOCKNAL_CONN_BULK_IN)  ? "I" :
+                        (pcfg.pcfg_flags == SOCKNAL_CONN_BULK_OUT) ? "O" : "?");
         }
 
         if (index == 0)
@@ -647,82 +647,8 @@ jt_ptl_print_connections (int argc, char **argv)
         return 0;
 }
 
-int
-exchange_nids (int cfd, ptl_nid_t my_nid, ptl_nid_t *peer_nid)
-{
-        int                  rc;
-        ptl_hdr_t            hdr;
-        ptl_magicversion_t  *hmv = (ptl_magicversion_t *)&hdr.dest_nid;
-
-        LASSERT (sizeof (*hmv) == sizeof (hdr.dest_nid));
-
-        memset (&hdr, 0, sizeof (hdr));
-
-        hmv->magic         = __cpu_to_le32 (PORTALS_PROTO_MAGIC);
-        hmv->version_major = __cpu_to_le16 (PORTALS_PROTO_VERSION_MAJOR);
-        hmv->version_minor = __cpu_to_le16 (PORTALS_PROTO_VERSION_MINOR);
-
-        hdr.src_nid = __cpu_to_le64 (my_nid);
-        hdr.type    = __cpu_to_le32 (PTL_MSG_HELLO);
-
-        /* Assume there's sufficient socket buffering for a portals HELLO header */
-        rc = sock_write (cfd, &hdr, sizeof (hdr));
-        if (rc != 0) {
-                perror ("Can't send initial HELLO");
-                return (-1);
-        }
-
-        /* First few bytes down the wire are the portals protocol magic and
-         * version, no matter what protocol version we're running. */
-
-        rc = sock_read (cfd, hmv, sizeof (*hmv));
-        if (rc != 0) {
-                perror ("Can't read from peer");
-                return (-1);
-        }
-
-        if (hmv->magic != __cpu_to_le32 (PORTALS_PROTO_MAGIC)) {
-                fprintf (stderr, "Bad magic %#08x (%#08x expected)\n",
-                         __le32_to_cpu (hmv->magic), PORTALS_PROTO_MAGIC);
-                return (-1);
-        }
-
-        if (hmv->version_major != __cpu_to_le16 (PORTALS_PROTO_VERSION_MAJOR) ||
-            hmv->version_minor != __cpu_to_le16 (PORTALS_PROTO_VERSION_MINOR)) {
-                fprintf (stderr, "Incompatible protocol version %d.%d (%d.%d expected)\n",
-                         __le16_to_cpu (hmv->version_major),
-                         __le16_to_cpu (hmv->version_minor),
-                         PORTALS_PROTO_VERSION_MAJOR,
-                         PORTALS_PROTO_VERSION_MINOR);
-        }
-
-        /* version 0 sends magic/version as the dest_nid of a 'hello' header,
-         * so read the rest of it in now... */
-        LASSERT (PORTALS_PROTO_VERSION_MAJOR == 0);
-        rc = sock_read (cfd, hmv + 1, sizeof (hdr) - sizeof (*hmv));
-        if (rc != 0) {
-                perror ("Can't read rest of HELLO hdr");
-                return (-1);
-        }
-
-        /* ...and check we got what we expected */
-        if (hdr.type != __cpu_to_le32 (PTL_MSG_HELLO) ||
-            PTL_HDR_LENGTH (&hdr) != __cpu_to_le32 (0)) {
-                fprintf (stderr, "Expecting a HELLO hdr with 0 payload,"
-                         " but got type %d with %d payload\n",
-                         __le32_to_cpu (hdr.type),
-                         __le32_to_cpu (PTL_HDR_LENGTH (&hdr)));
-                return (-1);
-        }
-
-        *peer_nid = __le64_to_cpu (hdr.src_nid);
-        return (0);
-}
-
 int jt_ptl_connect(int argc, char **argv)
 {
-        ptl_nid_t peer_nid;
-        struct portal_ioctl_data data;
         struct portals_cfg pcfg;
         struct sockaddr_in srvaddr;
         __u32 ipaddr;
@@ -732,13 +658,13 @@ int jt_ptl_connect(int argc, char **argv)
         int rxmem = 0;
         int txmem = 0;
         int bind_irq = 0;
-        int xchange_nids = 0;
+        int type = SOCKNAL_CONN_ANY;
         int port;
         int o;
         int olen;
 
         if (argc < 3) {
-                fprintf(stderr, "usage: %s ip port [xi]\n", argv[0]);
+                fprintf(stderr, "usage: %s ip port [iIOC]\n", argv[0]);
                 return 0;
         }
@@ -764,8 +690,28 @@ int jt_ptl_connect(int argc, char **argv)
                         bind_irq = 1;
                         break;
 
-                case 'x':
-                        xchange_nids = 1;
+                case 'I':
+                        if (type != SOCKNAL_CONN_ANY) {
+                                fprintf(stderr, "Can't flag type twice\n");
+                                return -1;
+                        }
+                        type = SOCKNAL_CONN_BULK_IN;
+                        break;
+
+                case 'O':
+                        if (type != SOCKNAL_CONN_ANY) {
+                                fprintf(stderr, "Can't flag type twice\n");
+                                return -1;
+                        }
+                        type = SOCKNAL_CONN_BULK_OUT;
+                        break;
+
+                case 'C':
+                        if (type != SOCKNAL_CONN_ANY) {
+                                fprintf(stderr, "Can't flag type twice\n");
+                                return -1;
+                        }
+                        type = SOCKNAL_CONN_CONTROL;
                         break;
 
                 default:
@@ -826,33 +772,19 @@ int jt_ptl_connect(int argc, char **argv)
         if (getsockopt (fd, IPPROTO_TCP, TCP_NODELAY, &nonagle, &olen) != 0)
                 fprintf (stderr, "Can't get nagle: %s\n", strerror (errno));
 
-        if (!xchange_nids)
-                peer_nid = ipaddr;
-        else {
-                PORTAL_IOC_INIT (data);
-                data.ioc_nal = g_nal;
-                rc = l_ioctl(PORTALS_DEV_ID, IOC_PORTAL_GET_NID, &data);
-                if (rc != 0) {
-                        fprintf (stderr, "failed to get my nid: %s\n",
-                                 strerror (errno));
-                        close (fd);
-                        return (-1);
-                }
-
-                rc = exchange_nids (fd, data.ioc_nid, &peer_nid);
-                if (rc != 0) {
-                        close (fd);
-                        return (-1);
-                }
-        }
-
-        printf("Connected host: %s NID "LPX64" snd: %d rcv: %d nagle: %s\n", argv[1],
-               peer_nid, txmem, rxmem, nonagle ? "Disabled" : "Enabled");
+        printf("Connected host: %s snd: %d rcv: %d nagle: %s type: %s\n",
+               argv[1], txmem, rxmem, nonagle ? "Disabled" : "Enabled",
+               (type == SOCKNAL_CONN_ANY)      ? "A" :
+               (type == SOCKNAL_CONN_CONTROL)  ? "C" :
+               (type == SOCKNAL_CONN_BULK_IN)  ? "I" :
+               (type == SOCKNAL_CONN_BULK_OUT) ? "O" : "?");
 
         PCFG_INIT(pcfg, NAL_CMD_REGISTER_PEER_FD);
+        pcfg.pcfg_nal = g_nal;
         pcfg.pcfg_fd = fd;
-        pcfg.pcfg_nid = peer_nid;
         pcfg.pcfg_flags = bind_irq;
-
+        pcfg.pcfg_misc = type;
+
         rc = pcfg_ioctl(&pcfg);
         if (rc) {
                 fprintf(stderr, "failed to register fd with portals: %s\n",
@@ -861,7 +793,7 @@ int jt_ptl_connect(int argc, char **argv)
                 return -1;
         }
 
-        printf("Connection to "LPX64" registered with socknal\n", peer_nid);
+        printf("Connection to %s registered with socknal\n", argv[1]);
 
         rc = close(fd);
         if (rc)
@@ -1219,7 +1151,7 @@ jt_ptl_nagle (int argc, char **argv)
 int
 jt_ptl_add_route (int argc, char **argv)
 {
-        struct portal_ioctl_data data;
+        struct portals_cfg pcfg;
         ptl_nid_t nid1;
         ptl_nid_t nid2;
         ptl_nid_t gateway_nid;
@@ -1254,13 +1186,13 @@ jt_ptl_add_route (int argc, char **argv)
                 return (-1);
         }
 
-        PORTAL_IOC_INIT(data);
-        data.ioc_nid = gateway_nid;
-        data.ioc_nal = g_nal;
-        data.ioc_nid2 = MIN (nid1, nid2);
-        data.ioc_nid3 = MAX (nid1, nid2);
+        PCFG_INIT(pcfg, IOC_PORTAL_ADD_ROUTE);
+        pcfg.pcfg_nid = gateway_nid;
+        pcfg.pcfg_nal = g_nal;
+        pcfg.pcfg_nid2 = MIN (nid1, nid2);
+        pcfg.pcfg_nid3 = MAX (nid1, nid2);
 
-        rc = l_ioctl(PORTALS_DEV_ID, IOC_PORTAL_ADD_ROUTE, &data);
+        rc = pcfg_ioctl(&pcfg);
         if (rc != 0) {
                 fprintf (stderr, "IOC_PORTAL_ADD_ROUTE failed: %s\n",
                          strerror (errno));
@@ -1273,7 +1205,7 @@
 int
 jt_ptl_del_route (int argc, char **argv)
 {
-        struct portal_ioctl_data data;
+        struct portals_cfg pcfg;
         ptl_nid_t nid;
         ptl_nid_t nid1 = PTL_NID_ANY;
         ptl_nid_t nid2 = PTL_NID_ANY;
@@ -1317,13 +1249,13 @@ jt_ptl_del_route (int argc, char **argv)
                 }
         }
 
-        PORTAL_IOC_INIT(data);
-        data.ioc_nal = g_nal;
-        data.ioc_nid = nid;
-        data.ioc_nid2 = nid1;
-        data.ioc_nid3 = nid2;
+        PCFG_INIT(pcfg, IOC_PORTAL_DEL_ROUTE);
+        pcfg.pcfg_nal = g_nal;
+        pcfg.pcfg_nid = nid;
+        pcfg.pcfg_nid2 = nid1;
+        pcfg.pcfg_nid3 = nid2;
 
-        rc = l_ioctl(PORTALS_DEV_ID, IOC_PORTAL_DEL_ROUTE, &data);
+        rc = pcfg_ioctl(&pcfg);
         if (rc != 0) {
                 fprintf (stderr, "IOC_PORTAL_DEL_ROUTE ("LPX64") failed: %s\n",
                          nid, strerror (errno));
@@ -1336,7 +1268,7 @@
 int
 jt_ptl_notify_router (int argc, char **argv)
 {
-        struct portal_ioctl_data data;
+        struct portals_cfg pcfg;
         int enable;
         ptl_nid_t nid;
         int rc;
@@ -1376,14 +1308,14 @@ jt_ptl_notify_router (int argc, char **argv)
                 return (-1);
         }
 
-        PORTAL_IOC_INIT(data);
-        data.ioc_nal = g_nal;
-        data.ioc_nid = nid;
-        data.ioc_flags = enable;
+        PCFG_INIT(pcfg, IOC_PORTAL_NOTIFY_ROUTER);
+        pcfg.pcfg_nal = g_nal;
+        pcfg.pcfg_nid = nid;
+        pcfg.pcfg_flags = enable;
         /* Yeuch; 'cept I need a __u64 on 64 bit machines... */
-        data.ioc_nid3 = (__u64)when;
+        pcfg.pcfg_nid3 = (__u64)when;
 
-        rc = l_ioctl(PORTALS_DEV_ID, IOC_PORTAL_NOTIFY_ROUTER, &data);
+        rc = pcfg_ioctl(&pcfg);
         if (rc != 0) {
                 fprintf (stderr, "IOC_PORTAL_NOTIFY_ROUTER ("LPX64") failed: %s\n",
@@ -1398,7 +1330,7 @@
 int
 jt_ptl_print_routes (int argc, char **argv)
 {
         char buffer[3][128];
-        struct portal_ioctl_data data;
+        struct portals_cfg pcfg;
         int rc;
         int index;
         int gateway_nal;
@@ -1409,18 +1341,18 @@ jt_ptl_print_routes (int argc, char **argv)
 
         for (index = 0;;index++)
         {
-                PORTAL_IOC_INIT(data);
-                data.ioc_count = index;
+                PCFG_INIT(pcfg, IOC_PORTAL_GET_ROUTE);
+                pcfg.pcfg_count = index;
 
-                rc = l_ioctl(PORTALS_DEV_ID, IOC_PORTAL_GET_ROUTE, &data);
+                rc = pcfg_ioctl(&pcfg);
                 if (rc != 0)
                         break;
 
-                gateway_nal = data.ioc_nal;
-                gateway_nid = data.ioc_nid;
-                nid1 = data.ioc_nid2;
-                nid2 = data.ioc_nid3;
-                alive = data.ioc_flags;
+                gateway_nal = pcfg.pcfg_nal;
+                gateway_nid = pcfg.pcfg_nid;
+                nid1 = pcfg.pcfg_nid2;
+                nid2 = pcfg.pcfg_nid3;
+                alive = pcfg.pcfg_flags;
 
                 printf ("%8s %18s : %s - %s, %s\n",
                         nal2name (gateway_nal),
diff --git a/lustre/portals/utils/ptlctl.c b/lustre/portals/utils/ptlctl.c
index 1a8e637..c65ecb2 100644
--- a/lustre/portals/utils/ptlctl.c
+++ b/lustre/portals/utils/ptlctl.c
@@ -31,10 +31,10 @@ command_t list[] = {
         {"network", jt_ptl_network, 0,"setup the NAL (args: nal name)"},
         {"print_autoconns", jt_ptl_print_autoconnects, 0, "print autoconnect entries (no args)"},
-        {"add_autoconn", jt_ptl_add_autoconnect, 0, "add autoconnect entry (args: nid host [ixse])"},
+        {"add_autoconn", jt_ptl_add_autoconnect, 0, "add autoconnect entry (args: nid host [ise])"},
         {"del_autoconn", jt_ptl_del_autoconnect, 0, "delete autoconnect entry (args: [nid] [host] [ks])"},
         {"print_conns", jt_ptl_print_connections, 0, "print connections (no args)"},
-        {"connect", jt_ptl_connect, 0, "connect to a remote nid (args: host port [xi])"},
+        {"connect", jt_ptl_connect, 0, "connect to a remote nid (args: host port [iIOC])"},
         {"disconnect", jt_ptl_disconnect, 0, "disconnect from a remote nid (args: [nid] [host]"},
         {"push", jt_ptl_push_connection, 0, "flush connection to a remote nid (args: [nid]"},
         {"active_tx", jt_ptl_print_active_txs, 0, "print active transmits (no args)"},
-- 
1.8.3.1