X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lnet%2Flnet%2Fapi-ni.c;h=d8922ccbde6dba11e9511ddb2482e16a79725b69;hb=3b760208109b249fd9051d97dbc98664ca4b5769;hp=fb2fbdf177709402a45923eced3770accc33abb5;hpb=6c5561a1e1eeab18e8226d410f4aa9922fbab0d8;p=fs%2Flustre-release.git diff --git a/lnet/lnet/api-ni.c b/lnet/lnet/api-ni.c index fb2fbdf..d8922cc 100644 --- a/lnet/lnet/api-ni.c +++ b/lnet/lnet/api-ni.c @@ -128,6 +128,27 @@ module_param(lnet_recovery_limit, uint, 0644); MODULE_PARM_DESC(lnet_recovery_limit, "How long to attempt recovery of unhealthy peer interfaces in seconds. Set to 0 to allow indefinite recovery"); +unsigned int lnet_max_recovery_ping_interval = 900; +unsigned int lnet_max_recovery_ping_count = 9; +static int max_recovery_ping_interval_set(const char *val, + cfs_kernel_param_arg_t *kp); + +#define param_check_max_recovery_ping_interval(name, p) \ + __param_check(name, p, int) + +#ifdef HAVE_KERNEL_PARAM_OPS +static struct kernel_param_ops param_ops_max_recovery_ping_interval = { + .set = max_recovery_ping_interval_set, + .get = param_get_int, +}; +module_param(lnet_max_recovery_ping_interval, max_recovery_ping_interval, 0644); +#else +module_param_call(lnet_max_recovery_ping_interval, max_recovery_ping_interval, + param_get_int, &lnet_max_recovery_ping_interval, 0644); +#endif +MODULE_PARM_DESC(lnet_max_recovery_ping_interval, + "The max interval between LNet recovery pings, in seconds"); + static int lnet_interfaces_max = LNET_INTERFACES_MAX_DEFAULT; static int intf_max_set(const char *val, cfs_kernel_param_arg_t *kp); @@ -261,8 +282,9 @@ static void lnet_set_lnd_timeout(void) */ static atomic_t lnet_dlc_seq_no = ATOMIC_INIT(0); -static int lnet_ping(struct lnet_process_id id, signed long timeout, - struct lnet_process_id __user *ids, int n_ids); +static int lnet_ping(struct lnet_process_id id4, struct lnet_nid *src_nid, + signed long timeout, struct lnet_process_id __user *ids, + int n_ids); static int lnet_discover(struct lnet_process_id id, __u32 force, struct lnet_process_id __user *ids, int n_ids); @@ -314,6 +336,39 @@ recovery_interval_set(const char *val, cfs_kernel_param_arg_t *kp) } static int +max_recovery_ping_interval_set(const char *val, cfs_kernel_param_arg_t *kp) +{ + int rc; + unsigned long value; + + rc = kstrtoul(val, 0, &value); + if (rc) { + CERROR("Invalid module parameter value for 'lnet_max_recovery_ping_interval'\n"); + return rc; + } + + if (!value) { + CERROR("Invalid max ping timeout. Must be strictly positive\n"); + return -EINVAL; + } + + /* The purpose of locking the api_mutex here is to ensure that + * the correct value ends up stored properly. 
+ */ + mutex_lock(&the_lnet.ln_api_mutex); + lnet_max_recovery_ping_interval = value; + lnet_max_recovery_ping_count = 0; + value >>= 1; + while (value) { + lnet_max_recovery_ping_count++; + value >>= 1; + } + mutex_unlock(&the_lnet.ln_api_mutex); + + return 0; +} + +static int discovery_set(const char *val, cfs_kernel_param_arg_t *kp) { int rc; @@ -775,64 +830,64 @@ static void lnet_assert_wire_constants(void) version_minor) != 6); BUILD_BUG_ON((int)sizeof(((struct lnet_magicversion *)0)->version_minor) != 2); - /* Checks for struct struct lnet_hdr */ - BUILD_BUG_ON((int)sizeof(struct lnet_hdr) != 72); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, dest_nid) != 0); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->dest_nid) != 8); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, src_nid) != 8); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->src_nid) != 8); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, dest_pid) != 16); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->dest_pid) != 4); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, src_pid) != 20); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->src_pid) != 4); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, type) != 24); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->type) != 4); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, payload_length) != 28); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->payload_length) != 4); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg) != 32); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg) != 40); + /* Checks for struct _lnet_hdr_nid4 */ + BUILD_BUG_ON((int)sizeof(struct _lnet_hdr_nid4) != 72); + BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, dest_nid) != 0); + BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->dest_nid) != 8); + BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, src_nid) != 8); + BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->src_nid) != 8); + BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, dest_pid) != 16); + BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->dest_pid) != 4); + BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, src_pid) != 20); + BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->src_pid) != 4); + BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, type) != 24); + BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->type) != 4); + BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, payload_length) != 28); + BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->payload_length) != 4); + BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg) != 32); + BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg) != 40); /* Ack */ - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.dst_wmd) != 32); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.dst_wmd) != 16); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.match_bits) != 48); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.match_bits) != 8); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.ack.mlength) != 56); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.ack.mlength) != 4); + BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.ack.dst_wmd) != 32); + BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.ack.dst_wmd) != 16); + BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.ack.match_bits) != 48); + BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.ack.match_bits) != 8); + BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.ack.mlength) != 56); + BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.ack.mlength) != 
4); /* Put */ - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.ack_wmd) != 32); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.ack_wmd) != 16); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.match_bits) != 48); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.match_bits) != 8); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.hdr_data) != 56); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.hdr_data) != 8); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.ptl_index) != 64); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.ptl_index) != 4); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.put.offset) != 68); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.put.offset) != 4); + BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.put.ack_wmd) != 32); + BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.put.ack_wmd) != 16); + BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.put.match_bits) != 48); + BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.put.match_bits) != 8); + BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.put.hdr_data) != 56); + BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.put.hdr_data) != 8); + BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.put.ptl_index) != 64); + BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.put.ptl_index) != 4); + BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.put.offset) != 68); + BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.put.offset) != 4); /* Get */ - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.return_wmd) != 32); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.return_wmd) != 16); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.match_bits) != 48); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.match_bits) != 8); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.ptl_index) != 56); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.ptl_index) != 4); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.src_offset) != 60); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.src_offset) != 4); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.get.sink_length) != 64); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.get.sink_length) != 4); + BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.get.return_wmd) != 32); + BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.get.return_wmd) != 16); + BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.get.match_bits) != 48); + BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.get.match_bits) != 8); + BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.get.ptl_index) != 56); + BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.get.ptl_index) != 4); + BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.get.src_offset) != 60); + BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.get.src_offset) != 4); + BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.get.sink_length) != 64); + BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.get.sink_length) != 4); /* Reply */ - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.reply.dst_wmd) != 32); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.reply.dst_wmd) != 16); + BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.reply.dst_wmd) != 32); + BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.reply.dst_wmd) != 16); /* Hello */ - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.hello.incarnation) 
!= 32); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.hello.incarnation) != 8); - BUILD_BUG_ON((int)offsetof(struct lnet_hdr, msg.hello.type) != 40); - BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.hello.type) != 4); + BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.hello.incarnation) != 32); + BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.hello.incarnation) != 8); + BUILD_BUG_ON((int)offsetof(struct _lnet_hdr_nid4, msg.hello.type) != 40); + BUILD_BUG_ON((int)sizeof(((struct _lnet_hdr_nid4 *)0)->msg.hello.type) != 4); /* Checks for struct lnet_ni_status and related constants */ BUILD_BUG_ON(LNET_NI_STATUS_INVALID != 0x00000000); @@ -883,6 +938,15 @@ static void lnet_assert_wire_constants(void) BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq, acr_nid) != 8); BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq *)0)->acr_nid) != 8); + /* Checks for struct lnet_acceptor_connreq_v2 */ + BUILD_BUG_ON((int)sizeof(struct lnet_acceptor_connreq_v2) != 28); + BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq_v2, acr_magic) != 0); + BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq_v2 *)0)->acr_magic) != 4); + BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq_v2, acr_version) != 4); + BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq_v2 *)0)->acr_version) != 4); + BUILD_BUG_ON((int)offsetof(struct lnet_acceptor_connreq_v2, acr_nid) != 8); + BUILD_BUG_ON((int)sizeof(((struct lnet_acceptor_connreq_v2 *)0)->acr_nid) != 20); + /* Checks for struct lnet_counters_common */ BUILD_BUG_ON((int)sizeof(struct lnet_counters_common) != 60); BUILD_BUG_ON((int)offsetof(struct lnet_counters_common, lcc_msgs_alloc) != 0); @@ -1340,7 +1404,7 @@ lnet_prepare(lnet_pid_t requested_pid) } static int -lnet_unprepare (void) +lnet_unprepare(void) { /* NB no LNET_LOCK since this is the last reference. All LND instances * have shut down already, so it is safe to unlink and free all @@ -1395,8 +1459,8 @@ lnet_net2ni_locked(__u32 net_id, int cpt) list_for_each_entry(net, &the_lnet.ln_nets, net_list) { if (net->net_id == net_id) { - ni = list_entry(net->net_ni_list.next, struct lnet_ni, - ni_netlist); + ni = list_first_entry(&net->net_ni_list, struct lnet_ni, + ni_netlist); return ni; } } @@ -1453,7 +1517,7 @@ lnet_net_clr_pref_rtrs(struct lnet_net *net) int lnet_net_add_pref_rtr(struct lnet_net *net, - lnet_nid_t gw_nid) + struct lnet_nid *gw_nid) __must_hold(&the_lnet.ln_api_mutex) { struct lnet_nid_list *ne; @@ -1464,7 +1528,7 @@ __must_hold(&the_lnet.ln_api_mutex) * lock. 
*/ list_for_each_entry(ne, &net->net_rtr_pref_nids, nl_list) { - if (ne->nl_nid == gw_nid) + if (nid_same(&ne->nl_nid, gw_nid)) return -EEXIST; } @@ -1472,7 +1536,7 @@ __must_hold(&the_lnet.ln_api_mutex) if (!ne) return -ENOMEM; - ne->nl_nid = gw_nid; + ne->nl_nid = *gw_nid; /* Lock the cpt to protect against addition and checks in the * selection algorithm @@ -1485,11 +1549,11 @@ __must_hold(&the_lnet.ln_api_mutex) } bool -lnet_net_is_pref_rtr_locked(struct lnet_net *net, lnet_nid_t rtr_nid) +lnet_net_is_pref_rtr_locked(struct lnet_net *net, struct lnet_nid *rtr_nid) { struct lnet_nid_list *ne; - CDEBUG(D_NET, "%s: rtr pref emtpy: %d\n", + CDEBUG(D_NET, "%s: rtr pref empty: %d\n", libcfs_net2str(net->net_id), list_empty(&net->net_rtr_pref_nids)); @@ -1498,40 +1562,62 @@ lnet_net_is_pref_rtr_locked(struct lnet_net *net, lnet_nid_t rtr_nid) list_for_each_entry(ne, &net->net_rtr_pref_nids, nl_list) { CDEBUG(D_NET, "Comparing pref %s with gw %s\n", - libcfs_nid2str(ne->nl_nid), - libcfs_nid2str(rtr_nid)); - if (rtr_nid == ne->nl_nid) + libcfs_nidstr(&ne->nl_nid), + libcfs_nidstr(rtr_nid)); + if (nid_same(rtr_nid, &ne->nl_nid)) return true; } return false; } +static unsigned int +lnet_nid4_cpt_hash(lnet_nid_t nid, unsigned int number) +{ + __u64 key = nid; + __u64 pair_bits = 0x0001000100010001LLU; + __u64 mask = pair_bits * 0xFF; + __u64 pair_sum; + + /* Use (sum-by-multiplication of nid bytes) mod (number of CPTs) + * to match nid to a CPT. + */ + pair_sum = (key & mask) + ((key >> 8) & mask); + pair_sum = (pair_sum * pair_bits) >> 48; + + CDEBUG(D_NET, "Match nid %s to cpt %u\n", + libcfs_nid2str(nid), (unsigned int)(pair_sum) % number); + + return (unsigned int)(pair_sum) % number; +} + unsigned int -lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number) +lnet_nid_cpt_hash(struct lnet_nid *nid, unsigned int number) { - __u64 key = nid; - unsigned int val; + unsigned int val; + u32 h = 0; + int i; LASSERT(number >= 1 && number <= LNET_CPT_NUMBER); if (number == 1) return 0; - val = hash_long(key, LNET_CPT_BITS); - /* NB: LNET_CP_NUMBER doesn't have to be PO2 */ + if (nid_is_nid4(nid)) + return lnet_nid4_cpt_hash(lnet_nid_to_nid4(nid), number); + + for (i = 0; i < 4; i++) + h = hash_32(nid->nid_addr[i]^h, 32); + val = hash_32(LNET_NID_NET(nid) ^ h, LNET_CPT_BITS); if (val < number) return val; - - return (unsigned int)(key + val + (val >> 1)) % number; + return (unsigned int)(h + val + (val >> 1)) % number; } int lnet_cpt_of_nid_locked(struct lnet_nid *nid, struct lnet_ni *ni) { struct lnet_net *net; - /* FIXME handle long-addr nid */ - lnet_nid_t nid4 = lnet_nid_to_nid4(nid); /* must called with hold of lnet_net_lock */ if (LNET_CPT_NUMBER == 1) @@ -1546,41 +1632,52 @@ lnet_cpt_of_nid_locked(struct lnet_nid *nid, struct lnet_ni *ni) */ if (ni != NULL) { if (ni->ni_cpts != NULL) - return ni->ni_cpts[lnet_nid_cpt_hash(nid4, + return ni->ni_cpts[lnet_nid_cpt_hash(nid, ni->ni_ncpts)]; else - return lnet_nid_cpt_hash(nid4, LNET_CPT_NUMBER); + return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER); } /* no NI provided so look at the net */ net = lnet_get_net_locked(LNET_NID_NET(nid)); if (net != NULL && net->net_cpts != NULL) { - return net->net_cpts[lnet_nid_cpt_hash(nid4, net->net_ncpts)]; + return net->net_cpts[lnet_nid_cpt_hash(nid, net->net_ncpts)]; } - return lnet_nid_cpt_hash(nid4, LNET_CPT_NUMBER); + return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER); } int -lnet_cpt_of_nid(lnet_nid_t nid4, struct lnet_ni *ni) +lnet_nid2cpt(struct lnet_nid *nid, struct lnet_ni *ni) { int cpt; int cpt2; - struct 
lnet_nid nid; if (LNET_CPT_NUMBER == 1) return 0; /* the only one */ - lnet_nid4_to_nid(nid4, &nid); cpt = lnet_net_lock_current(); - cpt2 = lnet_cpt_of_nid_locked(&nid, ni); + cpt2 = lnet_cpt_of_nid_locked(nid, ni); lnet_net_unlock(cpt); return cpt2; } +EXPORT_SYMBOL(lnet_nid2cpt); + +int +lnet_cpt_of_nid(lnet_nid_t nid4, struct lnet_ni *ni) +{ + struct lnet_nid nid; + + if (LNET_CPT_NUMBER == 1) + return 0; /* the only one */ + + lnet_nid4_to_nid(nid4, &nid); + return lnet_nid2cpt(&nid, ni); +} EXPORT_SYMBOL(lnet_cpt_of_nid); int @@ -1672,13 +1769,13 @@ lnet_nid_to_ni_addref(struct lnet_nid *nid) EXPORT_SYMBOL(lnet_nid_to_ni_addref); int -lnet_islocalnid(lnet_nid_t nid) +lnet_islocalnid(struct lnet_nid *nid) { struct lnet_ni *ni; int cpt; cpt = lnet_net_lock_current(); - ni = lnet_nid2ni_locked(nid, cpt); + ni = lnet_nid_to_ni_locked(nid, cpt); lnet_net_unlock(cpt); return ni != NULL; @@ -1864,8 +1961,8 @@ lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf, struct lnet_handle_md *ping_mdh, int ni_count, bool set_eq) { - struct lnet_process_id id = { - .nid = LNET_NID_ANY, + struct lnet_processid id = { + .nid = LNET_ANY_NID, .pid = LNET_PID_ANY }; struct lnet_me *me; @@ -1883,7 +1980,7 @@ lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf, } /* Ping target ME/MD */ - me = LNetMEAttach(LNET_RESERVED_PORTAL, id, + me = LNetMEAttach(LNET_RESERVED_PORTAL, &id, LNET_PROTO_PING_MATCHBITS, 0, LNET_UNLINK, LNET_INS_AFTER); if (IS_ERR(me)) { @@ -2081,12 +2178,12 @@ again: int lnet_push_target_post(struct lnet_ping_buffer *pbuf, struct lnet_handle_md *mdhp) { - struct lnet_process_id id = { LNET_NID_ANY, LNET_PID_ANY }; + struct lnet_processid id = { LNET_ANY_NID, LNET_PID_ANY }; struct lnet_md md = { NULL }; struct lnet_me *me; int rc; - me = LNetMEAttach(LNET_RESERVED_PORTAL, id, + me = LNetMEAttach(LNET_RESERVED_PORTAL, &id, LNET_PROTO_PING_MATCHBITS, 0, LNET_UNLINK, LNET_INS_AFTER); if (IS_ERR(me)) { @@ -2235,12 +2332,12 @@ lnet_clear_zombies_nis_locked(struct lnet_net *net) * list and shut them down in guaranteed thread context */ i = 2; - while (!list_empty(zombie_list)) { - int *ref; - int j; + while ((ni = list_first_entry_or_null(zombie_list, + struct lnet_ni, + ni_netlist)) != NULL) { + int *ref; + int j; - ni = list_entry(zombie_list->next, - struct lnet_ni, ni_netlist); list_del_init(&ni->ni_netlist); /* the ni should be in deleting state. If it's not it's * a bug */ @@ -2279,14 +2376,16 @@ lnet_clear_zombies_nis_locked(struct lnet_net *net) islo = ni->ni_net->net_lnd->lnd_type == LOLND; LASSERT(!in_interrupt()); - /* Holding the mutex makes it safe for lnd_shutdown + /* Holding the LND mutex makes it safe for lnd_shutdown * to call module_put(). Module unload cannot finish * until lnet_unregister_lnd() completes, and that - * requires the mutex. + * requires the LND mutex. 
*/ + mutex_unlock(&the_lnet.ln_api_mutex); mutex_lock(&the_lnet.ln_lnd_mutex); (net->net_lnd->lnd_shutdown)(ni); mutex_unlock(&the_lnet.ln_lnd_mutex); + mutex_lock(&the_lnet.ln_api_mutex); if (!islo) CDEBUG(D_LNI, "Removed LNI %s\n", @@ -2331,9 +2430,9 @@ lnet_shutdown_lndnet(struct lnet_net *net) list_del_init(&net->net_list); - while (!list_empty(&net->net_ni_list)) { - ni = list_entry(net->net_ni_list.next, - struct lnet_ni, ni_netlist); + while ((ni = list_first_entry_or_null(&net->net_ni_list, + struct lnet_ni, + ni_netlist)) != NULL) { lnet_net_unlock(LNET_LOCK_EX); lnet_shutdown_lndni(ni); lnet_net_lock(LNET_LOCK_EX); @@ -2357,7 +2456,8 @@ lnet_shutdown_lndnets(void) /* NB called holding the global mutex */ /* All quiet on the API front */ - LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING); + LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING || + the_lnet.ln_state == LNET_STATE_STOPPING); LASSERT(the_lnet.ln_refcount == 0); lnet_net_lock(LNET_LOCK_EX); @@ -2378,11 +2478,10 @@ lnet_shutdown_lndnets(void) lnet_net_unlock(LNET_LOCK_EX); /* iterate through the net zombie list and delete each net */ - while (!list_empty(&the_lnet.ln_net_zombie)) { - net = list_entry(the_lnet.ln_net_zombie.next, - struct lnet_net, net_list); + while ((net = list_first_entry_or_null(&the_lnet.ln_net_zombie, + struct lnet_net, + net_list)) != NULL) lnet_shutdown_lndnet(net); - } spin_lock(&the_lnet.ln_msg_resend_lock); list_splice(&the_lnet.ln_msg_resend, &resend); @@ -2463,6 +2562,11 @@ lnet_startup_lndni(struct lnet_ni *ni, struct lnet_lnd_tunables *tun) lnet_ni_tq_credits(ni) * ni->ni_ncpts); atomic_set(&ni->ni_healthv, LNET_MAX_HEALTH_VALUE); + /* Nodes with small feet have little entropy. The NID for this + * node gives the most entropy in the low bits. + */ + add_device_randomness(&ni->ni_nid, sizeof(ni->ni_nid)); + CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n", libcfs_nidstr(&ni->ni_nid), ni->ni_net->net_tunables.lct_peer_tx_credits, @@ -2555,9 +2659,9 @@ lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun) * After than we want to delete the network being added, * to avoid a memory leak. 
*/ - while (!list_empty(&net->net_ni_added)) { - ni = list_entry(net->net_ni_added.next, struct lnet_ni, - ni_netlist); + while ((ni = list_first_entry_or_null(&net->net_ni_added, + struct lnet_ni, + ni_netlist)) != NULL) { list_del_init(&ni->ni_netlist); /* make sure that the the NI we're about to start @@ -2621,12 +2725,10 @@ failed1: * shutdown the new NIs that are being started up * free the NET being started */ - while (!list_empty(&local_ni_list)) { - ni = list_entry(local_ni_list.next, struct lnet_ni, - ni_netlist); - + while ((ni = list_first_entry_or_null(&local_ni_list, + struct lnet_ni, + ni_netlist)) != NULL) lnet_shutdown_lndni(ni); - } failed0: lnet_net_free(net); @@ -2650,8 +2752,9 @@ lnet_startup_lndnets(struct list_head *netlist) the_lnet.ln_state = LNET_STATE_RUNNING; lnet_net_unlock(LNET_LOCK_EX); - while (!list_empty(netlist)) { - net = list_entry(netlist->next, struct lnet_net, net_list); + while ((net = list_first_entry_or_null(netlist, + struct lnet_net, + net_list)) != NULL) { list_del_init(&net->net_list); rc = lnet_startup_lndnet(net, NULL); @@ -2699,9 +2802,9 @@ static int lnet_genl_parse_list(struct sk_buff *msg, list->lkl_maxattr); nla_put_u16(msg, LN_SCALAR_ATTR_INDEX, count); - if (props[count].lkp_values) + if (props[count].lkp_value) nla_put_string(msg, LN_SCALAR_ATTR_VALUE, - props[count].lkp_values); + props[count].lkp_value); if (props[count].lkp_key_format) nla_put_u16(msg, LN_SCALAR_ATTR_KEY_FORMAT, props[count].lkp_key_format); @@ -2713,13 +2816,14 @@ static int lnet_genl_parse_list(struct sk_buff *msg, rc = lnet_genl_parse_list(msg, data, ++idx); if (rc < 0) return rc; + idx = rc; } nla_nest_end(msg, key); } nla_nest_end(msg, node); - return 0; + return idx; } int lnet_genl_send_scalar_list(struct sk_buff *msg, u32 portid, u32 seq, @@ -2744,7 +2848,7 @@ int lnet_genl_send_scalar_list(struct sk_buff *msg, u32 portid, u32 seq, canceled: if (rc < 0) genlmsg_cancel(msg, hdr); - return rc; + return rc > 0 ? 0 : rc; } EXPORT_SYMBOL(lnet_genl_send_scalar_list); @@ -2857,6 +2961,11 @@ LNetNIInit(lnet_pid_t requested_pid) CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount); + if (the_lnet.ln_state == LNET_STATE_STOPPING) { + mutex_unlock(&the_lnet.ln_api_mutex); + return -ESHUTDOWN; + } + if (the_lnet.ln_refcount > 0) { rc = the_lnet.ln_refcount++; mutex_unlock(&the_lnet.ln_api_mutex); @@ -2970,10 +3079,9 @@ err_empty_list: lnet_unprepare(); LASSERT(rc < 0); mutex_unlock(&the_lnet.ln_api_mutex); - while (!list_empty(&net_head)) { - struct lnet_net *net; - - net = list_entry(net_head.next, struct lnet_net, net_list); + while ((net = list_first_entry_or_null(&net_head, + struct lnet_net, + net_list)) != NULL) { list_del_init(&net->net_list); lnet_net_free(net); } @@ -3002,6 +3110,10 @@ LNetNIFini(void) } else { LASSERT(!the_lnet.ln_niinit_self); + lnet_net_lock(LNET_LOCK_EX); + the_lnet.ln_state = LNET_STATE_STOPPING; + lnet_net_unlock(LNET_LOCK_EX); + lnet_fault_fini(); lnet_router_debugfs_fini(); @@ -3213,14 +3325,15 @@ lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev) * a message being sent. 
This function accessed the net without * checking if the list is empty */ - if (prev == NULL) { - if (net == NULL) - net = list_entry(the_lnet.ln_nets.next, struct lnet_net, - net_list); + if (!prev) { + if (!net) + net = list_first_entry(&the_lnet.ln_nets, + struct lnet_net, + net_list); if (list_empty(&net->net_ni_list)) return NULL; - ni = list_entry(net->net_ni_list.next, struct lnet_ni, - ni_netlist); + ni = list_first_entry(&net->net_ni_list, struct lnet_ni, + ni_netlist); return ni; } @@ -3238,13 +3351,13 @@ lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev) return NULL; /* get the next net */ - net = list_entry(prev->ni_net->net_list.next, struct lnet_net, - net_list); + net = list_first_entry(&prev->ni_net->net_list, struct lnet_net, + net_list); if (list_empty(&net->net_ni_list)) return NULL; /* get the ni on it */ - ni = list_entry(net->net_ni_list.next, struct lnet_ni, - ni_netlist); + ni = list_first_entry(&net->net_ni_list, struct lnet_ni, + ni_netlist); return ni; } @@ -3253,7 +3366,7 @@ lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev) return NULL; /* there are more nis left */ - ni = list_entry(prev->ni_netlist.next, struct lnet_ni, ni_netlist); + ni = list_first_entry(&prev->ni_netlist, struct lnet_ni, ni_netlist); return ni; } @@ -3470,8 +3583,14 @@ static int lnet_handle_legacy_ip2nets(char *ip2nets, lnet_set_tune_defaults(tun); mutex_lock(&the_lnet.ln_api_mutex); - while (!list_empty(&net_head)) { - net = list_entry(net_head.next, struct lnet_net, net_list); + if (the_lnet.ln_state != LNET_STATE_RUNNING) { + rc = -ESHUTDOWN; + goto out; + } + + while ((net = list_first_entry_or_null(&net_head, + struct lnet_net, + net_list)) != NULL) { list_del_init(&net->net_list); rc = lnet_add_net_common(net, tun); if (rc < 0) @@ -3481,8 +3600,9 @@ static int lnet_handle_legacy_ip2nets(char *ip2nets, out: mutex_unlock(&the_lnet.ln_api_mutex); - while (!list_empty(&net_head)) { - net = list_entry(net_head.next, struct lnet_net, net_list); + while ((net = list_first_entry_or_null(&net_head, + struct lnet_net, + net_list)) != NULL) { list_del_init(&net->net_list); lnet_net_free(net); } @@ -3533,8 +3653,10 @@ int lnet_dyn_add_ni(struct lnet_ioctl_config_ni *conf) lnet_set_tune_defaults(tun); mutex_lock(&the_lnet.ln_api_mutex); - - rc = lnet_add_net_common(net, tun); + if (the_lnet.ln_state != LNET_STATE_RUNNING) + rc = -ESHUTDOWN; + else + rc = lnet_add_net_common(net, tun); mutex_unlock(&the_lnet.ln_api_mutex); @@ -3557,6 +3679,10 @@ int lnet_dyn_del_ni(struct lnet_ioctl_config_ni *conf) return -EINVAL; mutex_lock(&the_lnet.ln_api_mutex); + if (the_lnet.ln_state != LNET_STATE_RUNNING) { + rc = -ESHUTDOWN; + goto unlock_api_mutex; + } lnet_net_lock(0); @@ -3650,13 +3776,17 @@ lnet_dyn_add_net(struct lnet_ioctl_config_data *conf) return rc == 0 ? 
-EINVAL : rc; mutex_lock(&the_lnet.ln_api_mutex); + if (the_lnet.ln_state != LNET_STATE_RUNNING) { + rc = -ESHUTDOWN; + goto out_unlock_clean; + } if (rc > 1) { rc = -EINVAL; /* only add one network per call */ goto out_unlock_clean; } - net = list_entry(net_head.next, struct lnet_net, net_list); + net = list_first_entry(&net_head, struct lnet_net, net_list); list_del_init(&net->net_list); LASSERT(lnet_net_unique(net->net_id, &the_lnet.ln_nets, NULL)); @@ -3679,9 +3809,10 @@ lnet_dyn_add_net(struct lnet_ioctl_config_data *conf) out_unlock_clean: mutex_unlock(&the_lnet.ln_api_mutex); - while (!list_empty(&net_head)) { - /* net_head list is empty in success case */ - net = list_entry(net_head.next, struct lnet_net, net_list); + /* net_head list is empty in success case */ + while ((net = list_first_entry_or_null(&net_head, + struct lnet_net, + net_list)) != NULL) { list_del_init(&net->net_list); lnet_net_free(net); } @@ -3702,6 +3833,10 @@ lnet_dyn_del_net(__u32 net_id) return -EINVAL; mutex_lock(&the_lnet.ln_api_mutex); + if (the_lnet.ln_state != LNET_STATE_RUNNING) { + rc = -ESHUTDOWN; + goto out; + } lnet_net_lock(0); @@ -3879,8 +4014,10 @@ LNetCtl(unsigned int cmd, void *arg) { struct libcfs_ioctl_data *data = arg; struct lnet_ioctl_config_data *config; - struct lnet_process_id id = {0}; + struct lnet_process_id id4 = {}; + struct lnet_processid id = {}; struct lnet_ni *ni; + struct lnet_nid nid; int rc; BUILD_BUG_ON(sizeof(struct lnet_ioctl_net_config) + @@ -3889,7 +4026,7 @@ LNetCtl(unsigned int cmd, void *arg) switch (cmd) { case IOC_LIBCFS_GET_NI: rc = LNetGetId(data->ioc_count, &id); - data->ioc_nid = id.nid; + data->ioc_nid = lnet_nid_to_nid4(&id.nid); return rc; case IOC_LIBCFS_FAIL_NID: @@ -3908,10 +4045,11 @@ LNetCtl(unsigned int cmd, void *arg) config->cfg_config_u.cfg_route.rtr_sensitivity; } + lnet_nid4_to_nid(config->cfg_nid, &nid); mutex_lock(&the_lnet.ln_api_mutex); rc = lnet_add_route(config->cfg_net, config->cfg_config_u.cfg_route.rtr_hop, - config->cfg_nid, + &nid, config->cfg_config_u.cfg_route. 
rtr_priority, sensitivity); mutex_unlock(&the_lnet.ln_api_mutex); @@ -3924,8 +4062,9 @@ LNetCtl(unsigned int cmd, void *arg) if (config->cfg_hdr.ioc_len < sizeof(*config)) return -EINVAL; + lnet_nid4_to_nid(config->cfg_nid, &nid); mutex_lock(&the_lnet.ln_api_mutex); - rc = lnet_del_route(config->cfg_net, config->cfg_nid); + rc = lnet_del_route(config->cfg_net, &nid); mutex_unlock(&the_lnet.ln_api_mutex); return rc; @@ -4121,27 +4260,31 @@ LNetCtl(unsigned int cmd, void *arg) case IOC_LIBCFS_ADD_PEER_NI: { struct lnet_ioctl_peer_cfg *cfg = arg; + struct lnet_nid prim_nid; if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg)) return -EINVAL; mutex_lock(&the_lnet.ln_api_mutex); - rc = lnet_add_peer_ni(cfg->prcfg_prim_nid, - cfg->prcfg_cfg_nid, - cfg->prcfg_mr, false); + lnet_nid4_to_nid(cfg->prcfg_prim_nid, &prim_nid); + lnet_nid4_to_nid(cfg->prcfg_cfg_nid, &nid); + rc = lnet_add_peer_ni(&prim_nid, &nid, cfg->prcfg_mr, false); mutex_unlock(&the_lnet.ln_api_mutex); return rc; } case IOC_LIBCFS_DEL_PEER_NI: { struct lnet_ioctl_peer_cfg *cfg = arg; + struct lnet_nid prim_nid; if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg)) return -EINVAL; mutex_lock(&the_lnet.ln_api_mutex); - rc = lnet_del_peer_ni(cfg->prcfg_prim_nid, - cfg->prcfg_cfg_nid); + lnet_nid4_to_nid(cfg->prcfg_prim_nid, &prim_nid); + lnet_nid4_to_nid(cfg->prcfg_cfg_nid, &nid); + rc = lnet_del_peer_ni(&prim_nid, + &nid); mutex_unlock(&the_lnet.ln_api_mutex); return rc; } @@ -4250,10 +4393,12 @@ LNetCtl(unsigned int cmd, void *arg) } case IOC_LIBCFS_LNET_DIST: - rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]); + lnet_nid4_to_nid(data->ioc_nid, &nid); + rc = LNetDist(&nid, &nid, &data->ioc_u32[1]); if (rc < 0 && rc != -EHOSTUNREACH) return rc; + data->ioc_nid = lnet_nid_to_nid4(&nid); data->ioc_u32[0] = rc; return 0; @@ -4267,8 +4412,8 @@ LNetCtl(unsigned int cmd, void *arg) case IOC_LIBCFS_PING: { signed long timeout; - id.nid = data->ioc_nid; - id.pid = data->ioc_u32[0]; + id4.nid = data->ioc_nid; + id4.pid = data->ioc_u32[0]; /* If timeout is negative then set default of 3 minutes */ if (((s32)data->ioc_u32[1] <= 0) || @@ -4277,7 +4422,7 @@ LNetCtl(unsigned int cmd, void *arg) else timeout = nsecs_to_jiffies(data->ioc_u32[1] * NSEC_PER_MSEC); - rc = lnet_ping(id, timeout, data->ioc_pbuf1, + rc = lnet_ping(id4, &LNET_ANY_NID, timeout, data->ioc_pbuf1, data->ioc_plen1 / sizeof(struct lnet_process_id)); if (rc < 0) @@ -4289,9 +4434,21 @@ LNetCtl(unsigned int cmd, void *arg) case IOC_LIBCFS_PING_PEER: { struct lnet_ioctl_ping_data *ping = arg; + struct lnet_nid src_nid = LNET_ANY_NID; struct lnet_peer *lp; signed long timeout; + /* Check if the supplied ping data supports source nid + * NB: This check is sufficient if lnet_ioctl_ping_data has + * additional fields added, but if they are re-ordered or + * fields removed then this will break. It is expected that + * these ioctls will be replaced with netlink implementation, so + * it is probably not worth coming up with a more robust version + * compatibility scheme. 
+ */ + if (ping->ping_hdr.ioc_len >= sizeof(struct lnet_ioctl_ping_data)) + lnet_nid4_to_nid(ping->ping_src, &src_nid); + /* If timeout is negative then set default of 3 minutes */ if (((s32)ping->op_param) <= 0 || ping->op_param > (DEFAULT_PEER_TIMEOUT * MSEC_PER_SEC)) @@ -4299,16 +4456,18 @@ LNetCtl(unsigned int cmd, void *arg) else timeout = nsecs_to_jiffies(ping->op_param * NSEC_PER_MSEC); - rc = lnet_ping(ping->ping_id, timeout, + rc = lnet_ping(ping->ping_id, &src_nid, timeout, ping->ping_buf, ping->ping_count); if (rc < 0) return rc; mutex_lock(&the_lnet.ln_api_mutex); - lp = lnet_find_peer(ping->ping_id.nid); + lnet_nid4_to_nid(ping->ping_id.nid, &nid); + lp = lnet_find_peer(&nid); if (lp) { - ping->ping_id.nid = lp->lp_primary_nid; + ping->ping_id.nid = + lnet_nid_to_nid4(&lp->lp_primary_nid); ping->mr_info = lnet_peer_is_multi_rail(lp); lnet_peer_decref_locked(lp); } @@ -4329,9 +4488,11 @@ LNetCtl(unsigned int cmd, void *arg) return rc; mutex_lock(&the_lnet.ln_api_mutex); - lp = lnet_find_peer(discover->ping_id.nid); + lnet_nid4_to_nid(discover->ping_id.nid, &nid); + lp = lnet_find_peer(&nid); if (lp) { - discover->ping_id.nid = lp->lp_primary_nid; + discover->ping_id.nid = + lnet_nid_to_nid4(&lp->lp_primary_nid); discover->mr_info = lnet_peer_is_multi_rail(lp); lnet_peer_decref_locked(lp); } @@ -4458,9 +4619,9 @@ LNetCtl(unsigned int cmd, void *arg) } EXPORT_SYMBOL(LNetCtl); -void LNetDebugPeer(struct lnet_process_id id) +void LNetDebugPeer(struct lnet_processid *id) { - lnet_debug_peer(id.nid); + lnet_debug_peer(lnet_nid_to_nid4(&id->nid)); } EXPORT_SYMBOL(LNetDebugPeer); @@ -4472,7 +4633,7 @@ EXPORT_SYMBOL(LNetDebugPeer); * \retval true If peer NID is on the local node. * \retval false If peer NID is not on the local node. */ -bool LNetIsPeerLocal(lnet_nid_t nid) +bool LNetIsPeerLocal(struct lnet_nid *nid) { struct lnet_net *net; struct lnet_ni *ni; @@ -4481,7 +4642,7 @@ bool LNetIsPeerLocal(lnet_nid_t nid) cpt = lnet_net_lock_current(); list_for_each_entry(net, &the_lnet.ln_nets, net_list) { list_for_each_entry(ni, &net->net_ni_list, ni_netlist) { - if (lnet_nid_to_nid4(&ni->ni_nid) == nid) { + if (nid_same(&ni->ni_nid, nid)) { lnet_net_unlock(cpt); return true; } @@ -4505,7 +4666,7 @@ EXPORT_SYMBOL(LNetIsPeerLocal); * \retval -ENOENT If no interface has been found. 
*/ int -LNetGetId(unsigned int index, struct lnet_process_id *id) +LNetGetId(unsigned int index, struct lnet_processid *id) { struct lnet_ni *ni; struct lnet_net *net; @@ -4524,7 +4685,7 @@ LNetGetId(unsigned int index, struct lnet_process_id *id) if (index-- != 0) continue; - id->nid = lnet_nid_to_nid4(&ni->ni_nid); + id->nid = ni->ni_nid; id->pid = the_lnet.ln_pid; rc = 0; break; @@ -4563,20 +4724,22 @@ lnet_ping_event_handler(struct lnet_event *event) complete(&pd->completion); } -static int lnet_ping(struct lnet_process_id id, signed long timeout, - struct lnet_process_id __user *ids, int n_ids) +static int lnet_ping(struct lnet_process_id id4, struct lnet_nid *src_nid, + signed long timeout, struct lnet_process_id __user *ids, + int n_ids) { struct lnet_md md = { NULL }; struct ping_data pd = { 0 }; struct lnet_ping_buffer *pbuf; struct lnet_process_id tmpid; + struct lnet_processid id; int i; int nob; int rc; int rc2; /* n_ids limit is arbitrary */ - if (n_ids <= 0 || id.nid == LNET_NID_ANY) + if (n_ids <= 0 || id4.nid == LNET_NID_ANY) return -EINVAL; /* @@ -4586,8 +4749,8 @@ static int lnet_ping(struct lnet_process_id id, signed long timeout, if (n_ids > lnet_interfaces_max) n_ids = lnet_interfaces_max; - if (id.pid == LNET_PID_ANY) - id.pid = LNET_PID_LUSTRE; + if (id4.pid == LNET_PID_ANY) + id4.pid = LNET_PID_LUSTRE; pbuf = lnet_ping_buffer_alloc(n_ids, GFP_NOFS); if (!pbuf) @@ -4610,8 +4773,8 @@ static int lnet_ping(struct lnet_process_id id, signed long timeout, goto fail_ping_buffer_decref; } - rc = LNetGet(LNET_NID_ANY, pd.mdh, id, - LNET_RESERVED_PORTAL, + lnet_pid4_to_pid(id4, &id); + rc = LNetGet(src_nid, pd.mdh, &id, LNET_RESERVED_PORTAL, LNET_PROTO_PING_MATCHBITS, 0, false); if (rc != 0) { @@ -4639,7 +4802,7 @@ static int lnet_ping(struct lnet_process_id id, signed long timeout, if (nob < 8) { CERROR("%s: ping info too short %d\n", - libcfs_id2str(id), nob); + libcfs_idstr(&id), nob); goto fail_ping_buffer_decref; } @@ -4647,19 +4810,19 @@ static int lnet_ping(struct lnet_process_id id, signed long timeout, lnet_swap_pinginfo(pbuf); } else if (pbuf->pb_info.pi_magic != LNET_PROTO_PING_MAGIC) { CERROR("%s: Unexpected magic %08x\n", - libcfs_id2str(id), pbuf->pb_info.pi_magic); + libcfs_idstr(&id), pbuf->pb_info.pi_magic); goto fail_ping_buffer_decref; } if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_NI_STATUS) == 0) { CERROR("%s: ping w/o NI status: 0x%x\n", - libcfs_id2str(id), pbuf->pb_info.pi_features); + libcfs_idstr(&id), pbuf->pb_info.pi_features); goto fail_ping_buffer_decref; } if (nob < LNET_PING_INFO_SIZE(0)) { CERROR("%s: Short reply %d(%d min)\n", - libcfs_id2str(id), + libcfs_idstr(&id), nob, (int)LNET_PING_INFO_SIZE(0)); goto fail_ping_buffer_decref; } @@ -4669,7 +4832,7 @@ static int lnet_ping(struct lnet_process_id id, signed long timeout, if (nob < LNET_PING_INFO_SIZE(n_ids)) { CERROR("%s: Short reply %d(%d expected)\n", - libcfs_id2str(id), + libcfs_idstr(&id), nob, (int)LNET_PING_INFO_SIZE(n_ids)); goto fail_ping_buffer_decref; } @@ -4691,21 +4854,23 @@ static int lnet_ping(struct lnet_process_id id, signed long timeout, } static int -lnet_discover(struct lnet_process_id id, __u32 force, +lnet_discover(struct lnet_process_id id4, __u32 force, struct lnet_process_id __user *ids, int n_ids) { struct lnet_peer_ni *lpni; struct lnet_peer_ni *p; struct lnet_peer *lp; struct lnet_process_id *buf; + struct lnet_processid id; int cpt; int i; int rc; if (n_ids <= 0 || - id.nid == LNET_NID_ANY) + id4.nid == LNET_NID_ANY) return -EINVAL; + lnet_pid4_to_pid(id4, 
&id); if (id.pid == LNET_PID_ANY) id.pid = LNET_PID_LUSTRE; @@ -4721,7 +4886,7 @@ lnet_discover(struct lnet_process_id id, __u32 force, return -ENOMEM; cpt = lnet_net_lock_current(); - lpni = lnet_nid2peerni_locked(id.nid, LNET_NID_ANY, cpt); + lpni = lnet_peerni_by_nid_locked(&id.nid, NULL, cpt); if (IS_ERR(lpni)) { rc = PTR_ERR(lpni); goto out; @@ -4747,7 +4912,7 @@ lnet_discover(struct lnet_process_id id, __u32 force, * and lookup the lpni again */ lnet_peer_ni_decref_locked(lpni); - lpni = lnet_find_peer_ni_locked(id.nid); + lpni = lnet_peer_ni_find_locked(&id.nid); if (!lpni) { rc = -ENOENT; goto out;
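
Note on max_recovery_ping_interval_set() above: after storing the new interval, the shift loop derives lnet_max_recovery_ping_count as floor(log2(interval)), i.e. how many times a 1-second recovery ping interval can be doubled before it reaches the cap. The defaults added at the top of the file are consistent with this (900s -> 9). The user-space sketch below is not part of the patch; it only mirrors that arithmetic so the relationship between the two module parameters is easy to verify.

/* Standalone sketch (not from the patch): reproduces the loop in
 * max_recovery_ping_interval_set() to show that the derived ping
 * count is floor(log2(interval)).
 */
#include <stdio.h>

static unsigned int recovery_ping_count(unsigned long interval)
{
	unsigned int count = 0;

	/* same loop as the patch: count how many times the interval
	 * can be halved before it reaches zero */
	interval >>= 1;
	while (interval) {
		count++;
		interval >>= 1;
	}
	return count;
}

int main(void)
{
	unsigned long intervals[] = { 1, 2, 60, 900, 1024 };
	unsigned int i;

	for (i = 0; i < sizeof(intervals) / sizeof(intervals[0]); i++)
		printf("interval=%lus -> max recovery ping count=%u\n",
		       intervals[i], recovery_ping_count(intervals[i]));
	return 0;
}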
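
Note on lnet_nid4_cpt_hash() above: the old hash_long() call is replaced by a small SWAR computation. pair_bits and mask carve the 64-bit NID into four 16-bit lanes, the two masked adds gather the even and odd bytes into those lanes, and the multiply-and-shift folds the lanes together, so pair_sum ends up being the plain sum of the eight NID bytes, which is then taken modulo the CPT count. The sketch below is not LNet code (helper names are local to the demo); it only checks that equivalence against a naive per-byte loop.

/* Standalone sketch (not from the patch): verifies that the SWAR
 * arithmetic used by lnet_nid4_cpt_hash() computes the byte sum of a
 * 64-bit NID. "number" stands in for LNET_CPT_NUMBER.
 */
#include <stdio.h>
#include <stdint.h>

static unsigned int byte_sum_swar(uint64_t key)
{
	uint64_t pair_bits = 0x0001000100010001ULL;
	uint64_t mask = pair_bits * 0xFF;	/* 0x00FF00FF00FF00FF */
	uint64_t pair_sum;

	/* even and odd bytes are added into four 16-bit lanes */
	pair_sum = (key & mask) + ((key >> 8) & mask);
	/* multiplying by pair_bits accumulates all four lanes into the
	 * top 16 bits; the shift extracts that total */
	pair_sum = (pair_sum * pair_bits) >> 48;
	return (unsigned int)pair_sum;
}

static unsigned int byte_sum_naive(uint64_t key)
{
	unsigned int sum = 0;
	int i;

	for (i = 0; i < 8; i++)
		sum += (key >> (8 * i)) & 0xFF;
	return sum;
}

int main(void)
{
	uint64_t samples[] = {
		0x0ULL, 0x0123456789abcdefULL, 0xffffffffffffffffULL,
		0x00000a0b0c0d0e0fULL
	};
	unsigned int number = 4;	/* stand-in for LNET_CPT_NUMBER */
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("nid=%#018llx swar=%u naive=%u cpt=%u\n",
		       (unsigned long long)samples[i],
		       byte_sum_swar(samples[i]),
		       byte_sum_naive(samples[i]),
		       byte_sum_swar(samples[i]) % number);
	return 0;
}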