gnilnd updates for SUSE 15 SP2
Use -Werror when checking for -Wno-stringop-truncation to
ensure the compile test is valid
Adjust for kernel dropping time_t, timeval, timespec
Adjust for kernel switching to timer_setup
Adjust for kernel dropping global_page_state(), use
nr_free_pages instead
Cleanup string format for stricter checking
Test-Parameters: trivial
HPE-bug-id: LUS-9453
Signed-off-by: Shaun Tancheff <shaun.tancheff@hpe.com>
Change-Id: I5dcda1497fa5b1f2cf4a215517700f07374fcf7f
Reviewed-on: https://review.whamcloud.com/40426
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Reviewed-by: Petros Koutoupis <petros.koutoupis@hpe.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
AC_MSG_CHECKING([for -Wno-stringop-truncation support])
saved_flags="$CFLAGS"
AC_MSG_CHECKING([for -Wno-stringop-truncation support])
saved_flags="$CFLAGS"
- CFLAGS="$CFLAGS -Wno-stringop-truncation"
+ CFLAGS="$CFLAGS -Werror -Wno-stringop-truncation"
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [])], [
EXTRA_KCFLAGS="$EXTRA_KCFLAGS -Wno-stringop-truncation"
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [])], [
EXTRA_KCFLAGS="$EXTRA_KCFLAGS -Wno-stringop-truncation"
LASSERTF(atomic_read(&dev->gnd_n_mdd) == 0 &&
atomic_read(&dev->gnd_n_mdd_held) == 0 &&
atomic64_read(&dev->gnd_nbytes_map) == 0,
LASSERTF(atomic_read(&dev->gnd_n_mdd) == 0 &&
atomic_read(&dev->gnd_n_mdd_held) == 0 &&
atomic64_read(&dev->gnd_nbytes_map) == 0,
- "%d SMSG mappings of %ld bytes still mapped or held %d\n",
+ "%d SMSG mappings of %lld bytes still mapped or held %d\n",
atomic_read(&dev->gnd_n_mdd),
atomic_read(&dev->gnd_n_mdd),
- atomic64_read(&dev->gnd_nbytes_map), atomic_read(&dev->gnd_n_mdd_held));
+ (u64)atomic64_read(&dev->gnd_nbytes_map),
+ atomic_read(&dev->gnd_n_mdd_held));
LASSERT(list_empty(&dev->gnd_map_list));
LASSERT(list_empty(&dev->gnd_map_list));
int kgnilnd_base_startup(void)
{
int kgnilnd_base_startup(void)
{
long long pkmem = libcfs_kmem_read();
int rc;
int i;
long long pkmem = libcfs_kmem_read();
int rc;
int i;
* initialised with seconds + microseconds at startup time. So we
* rely on NOT creating connections more frequently on average than
* 1MHz to ensure we don't use old connstamps when we reboot. */
* initialised with seconds + microseconds at startup time. So we
* rely on NOT creating connections more frequently on average than
* 1MHz to ensure we don't use old connstamps when we reboot. */
kgnilnd_data.kgn_connstamp =
kgnilnd_data.kgn_peerstamp =
kgnilnd_data.kgn_connstamp =
kgnilnd_data.kgn_peerstamp =
- (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
+ (ts.tv_sec * 1000000) + (ts.tv_nsec / 100);
init_rwsem(&kgnilnd_data.kgn_net_rw_sem);
init_rwsem(&kgnilnd_data.kgn_net_rw_sem);
atomic_set(&dev->gnd_ndgrams, 0);
atomic_set(&dev->gnd_nwcdgrams, 0);
/* setup timer for RDMAQ processing */
atomic_set(&dev->gnd_ndgrams, 0);
atomic_set(&dev->gnd_nwcdgrams, 0);
/* setup timer for RDMAQ processing */
- setup_timer(&dev->gnd_rdmaq_timer, kgnilnd_schedule_device_timer,
- (unsigned long)dev);
+ cfs_timer_setup(&dev->gnd_rdmaq_timer,
+ kgnilnd_schedule_device_timer,
+ (unsigned long)dev, 0);
/* setup timer for mapping processing */
/* setup timer for mapping processing */
- setup_timer(&dev->gnd_map_timer, kgnilnd_schedule_device_timer,
- (unsigned long)dev);
+ cfs_timer_setup(&dev->gnd_map_timer,
+ kgnilnd_schedule_device_timer,
+ (unsigned long)dev, 0);
#ifndef _GNILND_GNILND_H_
#define _GNILND_GNILND_H_
#ifndef _GNILND_GNILND_H_
#define _GNILND_GNILND_H_
+#define DEBUG_SUBSYSTEM S_LND
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/in.h>
#include <linux/nmi.h>
#include <linux/in.h>
#include <linux/nmi.h>
-#define DEBUG_SUBSYSTEM S_LND
-
#include <lnet/lib-lnet.h>
#include <gni_pub.h>
#include <lnet/lib-lnet.h>
#include <gni_pub.h>
kgn_msg_t *grx_msg; /* message */
struct lnet_msg *grx_lntmsg; /* lnet msg for this rx (eager only) */
int grx_eager; /* if eager, we copied msg to somewhere */
kgn_msg_t *grx_msg; /* message */
struct lnet_msg *grx_lntmsg; /* lnet msg for this rx (eager only) */
int grx_eager; /* if eager, we copied msg to somewhere */
- struct timespec grx_received; /* time this msg received */
+ struct timespec64 grx_received; /* time this msg received */
} kgn_rx_t;
typedef struct kgn_data {
} kgn_rx_t;
typedef struct kgn_data {
void kgnilnd_schedule_device(kgn_device_t *dev);
void kgnilnd_device_callback(__u32 devid, __u64 arg);
void kgnilnd_schedule_device(kgn_device_t *dev);
void kgnilnd_device_callback(__u32 devid, __u64 arg);
-void kgnilnd_schedule_device_timer(unsigned long arg);
+void kgnilnd_schedule_device_timer(cfs_timer_cb_arg_t data);
+void kgnilnd_schedule_device_timer_rd(cfs_timer_cb_arg_t data);
int kgnilnd_reaper(void *arg);
int kgnilnd_scheduler(void *arg);
int kgnilnd_reaper(void *arg);
int kgnilnd_scheduler(void *arg);
-void kgnilnd_schedule_device_timer(unsigned long arg)
+void kgnilnd_schedule_device_timer(cfs_timer_cb_arg_t data)
- kgn_device_t *dev = (kgn_device_t *) arg;
+ kgn_device_t *dev = cfs_from_timer(dev, data, gnd_map_timer);
+
+ kgnilnd_schedule_device(dev);
+}
+
+void kgnilnd_schedule_device_timer_rd(cfs_timer_cb_arg_t data)
+{
+ kgn_device_t *dev = cfs_from_timer(dev, data, gnd_rdmaq_timer);
kgnilnd_schedule_device(dev);
}
kgnilnd_schedule_device(dev);
}
kgnilnd_dump_blob(D_BUFFS, "RDMA payload",
tx->tx_buffer, nob);
}
kgnilnd_dump_blob(D_BUFFS, "RDMA payload",
tx->tx_buffer, nob);
}
- /* fall through to dump log */
case 1:
libcfs_debug_dumplog();
break;
case 1:
libcfs_debug_dumplog();
break;
tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
atomic64_sub(bytes, &dev->gnd_rdmaq_bytes_out);
LASSERTF(atomic64_read(&dev->gnd_rdmaq_bytes_out) >= 0,
tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
atomic64_sub(bytes, &dev->gnd_rdmaq_bytes_out);
LASSERTF(atomic64_read(&dev->gnd_rdmaq_bytes_out) >= 0,
- "bytes_out negative! %ld\n", atomic64_read(&dev->gnd_rdmaq_bytes_out));
+ "bytes_out negative! %lld\n",
+ (s64)atomic64_read(&dev->gnd_rdmaq_bytes_out));
GNIDBG_TX(D_NETTRACE, tx, "rdma -- %d to %lld",
GNIDBG_TX(D_NETTRACE, tx, "rdma -- %d to %lld",
- bytes, atomic64_read(&dev->gnd_rdmaq_bytes_out));
+ bytes, (s64)atomic64_read(&dev->gnd_rdmaq_bytes_out));
}
atomic_dec(&dev->gnd_n_mdd);
}
atomic_dec(&dev->gnd_n_mdd);
/* it was sent, break out of switch to avoid default case of queueing */
break;
}
/* it was sent, break out of switch to avoid default case of queueing */
break;
}
- /* needs to queue to try again, so fall through to default case */
+ /* needs to queue to try again, so... */
+ /* fall through... */
case GNILND_MSG_NOOP:
/* Just make sure this goes out first for this conn */
add_tail = 0;
case GNILND_MSG_NOOP:
/* Just make sure this goes out first for this conn */
add_tail = 0;
case 2:
kgnilnd_dump_blob(D_BUFFS, "bad payload checksum",
&rxmsg[1], rxmsg->gnm_payload_len);
case 2:
kgnilnd_dump_blob(D_BUFFS, "bad payload checksum",
&rxmsg[1], rxmsg->gnm_payload_len);
- /* fall through to dump */
case 1:
libcfs_debug_dumplog();
break;
case 1:
libcfs_debug_dumplog();
break;
-kgnilnd_reaper_poke_with_stick(unsigned long arg)
+kgnilnd_reaper_poke_with_stick(cfs_timer_cb_arg_t arg)
{
wake_up(&kgnilnd_data.kgn_reaper_waitq);
}
{
wake_up(&kgnilnd_data.kgn_reaper_waitq);
}
prepare_to_wait(&kgnilnd_data.kgn_reaper_waitq, &wait,
TASK_INTERRUPTIBLE);
spin_unlock(&kgnilnd_data.kgn_reaper_lock);
prepare_to_wait(&kgnilnd_data.kgn_reaper_waitq, &wait,
TASK_INTERRUPTIBLE);
spin_unlock(&kgnilnd_data.kgn_reaper_lock);
- setup_timer(&timer, kgnilnd_reaper_poke_with_stick,
- next_check_time);
+ cfs_timer_setup(&timer, kgnilnd_reaper_poke_with_stick,
+ next_check_time, 0);
mod_timer(&timer, (long) jiffies + timeout);
/* check flag variables before committing */
mod_timer(&timer, (long) jiffies + timeout);
/* check flag variables before committing */
case GNILND_MSG_PUT_REQ:
case GNILND_MSG_GET_REQ_REV:
tx->tx_msg.gnm_u.putreq.gnprm_cookie = tx->tx_id.txe_cookie;
case GNILND_MSG_PUT_REQ:
case GNILND_MSG_GET_REQ_REV:
tx->tx_msg.gnm_u.putreq.gnprm_cookie = tx->tx_id.txe_cookie;
case GNILND_MSG_PUT_ACK:
case GNILND_MSG_PUT_REQ_REV:
case GNILND_MSG_GET_ACK_REV:
case GNILND_MSG_PUT_ACK:
case GNILND_MSG_PUT_REQ_REV:
case GNILND_MSG_GET_ACK_REV:
new_ok -= atomic64_read(&dev->gnd_rdmaq_bytes_out);
atomic64_set(&dev->gnd_rdmaq_bytes_ok, new_ok);
new_ok -= atomic64_read(&dev->gnd_rdmaq_bytes_out);
atomic64_set(&dev->gnd_rdmaq_bytes_ok, new_ok);
- CDEBUG(D_NET, "resetting rdmaq bytes to %ld, deadline +%lu -> %lu, "
- "current out %ld\n",
- atomic64_read(&dev->gnd_rdmaq_bytes_ok), dead_bump, dev->gnd_rdmaq_deadline,
- atomic64_read(&dev->gnd_rdmaq_bytes_out));
+ CDEBUG(D_NET, "resetting rdmaq bytes to %lld, deadline +%lu -> %lu, current out %lld\n",
+ (s64)atomic64_read(&dev->gnd_rdmaq_bytes_ok), dead_bump, dev->gnd_rdmaq_deadline,
+ (s64)atomic64_read(&dev->gnd_rdmaq_bytes_out));
}
spin_unlock(&dev->gnd_rdmaq_lock);
}
}
spin_unlock(&dev->gnd_rdmaq_lock);
}
rx->grx_msg = msg;
rx->grx_conn = conn;
rx->grx_eager = 0;
rx->grx_msg = msg;
rx->grx_conn = conn;
rx->grx_eager = 0;
- rx->grx_received = current_kernel_time();
+ ktime_get_ts64(&rx->grx_received);
if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NET_LOOKUP)) {
rc = -ENONET;
if (CFS_FAIL_CHECK(CFS_FAIL_GNI_NET_LOOKUP)) {
rc = -ENONET;
void
kgnilnd_setup_smsg_attr(gni_smsg_attr_t *smsg_attr)
void
kgnilnd_setup_smsg_attr(gni_smsg_attr_t *smsg_attr)
* to memory exhaustion during massive reconnects during a network
* outage. Limit the amount of fma blocks to use by always keeping
* a percent of pages free initially set to 25% of total memory. */
* to memory exhaustion during massive reconnects during a network
* outage. Limit the amount of fma blocks to use by always keeping
* a percent of pages free initially set to 25% of total memory. */
- if (global_page_state(NR_FREE_PAGES) < kgnilnd_data.free_pages_limit) {
+ if (nr_free_pages() < kgnilnd_data.free_pages_limit) {
LCONSOLE_INFO("Exceeding free page limit of %ld. "
"Free pages available %ld\n",
kgnilnd_data.free_pages_limit,
LCONSOLE_INFO("Exceeding free page limit of %ld. "
"Free pages available %ld\n",
kgnilnd_data.free_pages_limit,
-		      global_page_state(NR_FREE_PAGES));
+		      nr_free_pages());
+struct kgnilnd_dgram_timer {
+ struct timer_list timer;
+ kgn_device_t *dev;
+};
+
-kgnilnd_dgram_poke_with_stick(unsigned long arg)
+kgnilnd_dgram_poke_with_stick(cfs_timer_cb_arg_t arg)
- int dev_id = arg;
- kgn_device_t *dev = &kgnilnd_data.kgn_devices[dev_id];
+ struct kgnilnd_dgram_timer *t = cfs_from_timer(t, arg, timer);
- wake_up(&dev->gnd_dgram_waitq);
+ wake_up(&t->dev->gnd_dgram_waitq);
}
/* use single thread for dgrams - should be sufficient for performance */
}
/* use single thread for dgrams - should be sufficient for performance */
int rc, did_something;
unsigned long next_purge_check = jiffies - 1;
unsigned long timeout;
int rc, did_something;
unsigned long next_purge_check = jiffies - 1;
unsigned long timeout;
- struct timer_list timer;
- unsigned long deadline = 0;
+ struct kgnilnd_dgram_timer timer;
+ unsigned long deadline = 0;
DEFINE_WAIT(wait);
snprintf(name, sizeof(name), "kgnilnd_dg_%02d", dev->gnd_id);
DEFINE_WAIT(wait);
snprintf(name, sizeof(name), "kgnilnd_dg_%02d", dev->gnd_id);
prepare_to_wait(&dev->gnd_dgram_waitq, &wait, TASK_INTERRUPTIBLE);
prepare_to_wait(&dev->gnd_dgram_waitq, &wait, TASK_INTERRUPTIBLE);
- setup_timer(&timer, kgnilnd_dgram_poke_with_stick, dev->gnd_id);
- mod_timer(&timer, (long) jiffies + timeout);
+ cfs_timer_setup(&timer.timer,
+ kgnilnd_dgram_poke_with_stick,
+ dev, 0);
+ timer.dev = dev;
+ mod_timer(&timer.timer, (long) jiffies + timeout);
/* last second chance for others to poke us */
did_something += xchg(&dev->gnd_dgram_ready, GNILND_DGRAM_IDLE);
/* last second chance for others to poke us */
did_something += xchg(&dev->gnd_dgram_ready, GNILND_DGRAM_IDLE);
deadline = jiffies + cfs_time_seconds(*kgnilnd_tunables.kgn_dgram_timeout);
}
deadline = jiffies + cfs_time_seconds(*kgnilnd_tunables.kgn_dgram_timeout);
}
- del_singleshot_timer_sync(&timer);
+ del_singleshot_timer_sync(&timer.timer);
finish_wait(&dev->gnd_dgram_waitq, &wait);
}
finish_wait(&dev->gnd_dgram_waitq, &wait);
}
kgnilnd_stats_seq_show(struct seq_file *sf, void *v)
{
kgn_device_t *dev;
kgnilnd_stats_seq_show(struct seq_file *sf, void *v)
{
kgn_device_t *dev;
if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
seq_printf(sf, "kgnilnd is not initialized yet\n");
if (kgnilnd_data.kgn_init < GNILND_INIT_ALL) {
seq_printf(sf, "kgnilnd is not initialized yet\n");
/* sampling is racy, but so is reading this file! */
smp_rmb();
/* sampling is racy, but so is reading this file! */
smp_rmb();
- seq_printf(sf, "time: %lu.%lu\n"
+ seq_printf(sf, "time: %llu.%lu\n"
"ntx: %d\n"
"npeers: %d\n"
"nconns: %d\n"
"ntx: %d\n"
"npeers: %d\n"
"nconns: %d\n"
"n_mdd: %d\n"
"n_mdd_held: %d\n"
"n_eager_allocs: %d\n"
"n_mdd: %d\n"
"n_mdd_held: %d\n"
"n_eager_allocs: %d\n"
- "GART map bytes: %ld\n"
+ "GART map bytes: %lld\n"
"TX queued maps: %d\n"
"TX phys nmaps: %d\n"
"TX phys bytes: %lu\n"
"TX queued maps: %d\n"
"TX phys nmaps: %d\n"
"TX phys bytes: %lu\n"
- "RDMAQ bytes_auth: %ld\n"
- "RDMAQ bytes_left: %ld\n"
+ "RDMAQ bytes_auth: %lld\n"
+ "RDMAQ bytes_left: %lld\n"
"RDMAQ nstalls: %d\n"
"dev mutex delay: %ld\n"
"dev n_yield: %d\n"
"RDMAQ nstalls: %d\n"
"dev mutex delay: %ld\n"
"dev n_yield: %d\n"
"SMSG fast_ok: %d\n"
"SMSG fast_block: %d\n"
"SMSG ntx: %u\n"
"SMSG fast_ok: %d\n"
"SMSG fast_block: %d\n"
"SMSG ntx: %u\n"
+ "SMSG tx_bytes: %llu\n"
+ "SMSG rx_bytes: %llu\n"
+ "RDMA tx_bytes: %llu\n"
+ "RDMA rx_bytes: %llu\n"
"VMAP short: %d\n"
"VMAP cksum: %d\n"
"KMAP short: %d\n"
"RDMA REV length: %d\n"
"RDMA REV offset: %d\n"
"RDMA REV copy: %d\n",
"VMAP short: %d\n"
"VMAP cksum: %d\n"
"KMAP short: %d\n"
"RDMA REV length: %d\n"
"RDMA REV offset: %d\n"
"RDMA REV copy: %d\n",
- now.tv_sec, now.tv_usec,
+ (s64)now.tv_sec, now.tv_nsec,
atomic_read(&kgnilnd_data.kgn_ntx),
atomic_read(&kgnilnd_data.kgn_npeers),
atomic_read(&kgnilnd_data.kgn_nconns),
atomic_read(&kgnilnd_data.kgn_ntx),
atomic_read(&kgnilnd_data.kgn_npeers),
atomic_read(&kgnilnd_data.kgn_nconns),
atomic_read(&dev->gnd_nfmablk),
atomic_read(&dev->gnd_n_mdd), atomic_read(&dev->gnd_n_mdd_held),
atomic_read(&kgnilnd_data.kgn_neager_allocs),
atomic_read(&dev->gnd_nfmablk),
atomic_read(&dev->gnd_n_mdd), atomic_read(&dev->gnd_n_mdd_held),
atomic_read(&kgnilnd_data.kgn_neager_allocs),
- atomic64_read(&dev->gnd_nbytes_map),
+ (s64)atomic64_read(&dev->gnd_nbytes_map),
atomic_read(&dev->gnd_nq_map),
dev->gnd_map_nphys, dev->gnd_map_physnop * PAGE_SIZE,
atomic_read(&dev->gnd_nq_map),
dev->gnd_map_nphys, dev->gnd_map_physnop * PAGE_SIZE,
- atomic64_read(&dev->gnd_rdmaq_bytes_out),
- atomic64_read(&dev->gnd_rdmaq_bytes_ok),
+ (s64)atomic64_read(&dev->gnd_rdmaq_bytes_out),
+ (s64)atomic64_read(&dev->gnd_rdmaq_bytes_ok),
atomic_read(&dev->gnd_rdmaq_nstalls),
dev->gnd_mutex_delay,
atomic_read(&dev->gnd_n_yield),
atomic_read(&dev->gnd_rdmaq_nstalls),
dev->gnd_mutex_delay,
atomic_read(&dev->gnd_n_yield),
atomic_read(&dev->gnd_fast_ok),
atomic_read(&dev->gnd_fast_block),
atomic_read(&dev->gnd_short_ntx),
atomic_read(&dev->gnd_fast_ok),
atomic_read(&dev->gnd_fast_block),
atomic_read(&dev->gnd_short_ntx),
- atomic64_read(&dev->gnd_short_txbytes),
+ (s64)atomic64_read(&dev->gnd_short_txbytes),
atomic_read(&dev->gnd_short_nrx),
atomic_read(&dev->gnd_short_nrx),
- atomic64_read(&dev->gnd_short_rxbytes),
+ (s64)atomic64_read(&dev->gnd_short_rxbytes),
atomic_read(&dev->gnd_rdma_ntx),
atomic_read(&dev->gnd_rdma_ntx),
- atomic64_read(&dev->gnd_rdma_txbytes),
+ (s64)atomic64_read(&dev->gnd_rdma_txbytes),
atomic_read(&dev->gnd_rdma_nrx),
atomic_read(&dev->gnd_rdma_nrx),
- atomic64_read(&dev->gnd_rdma_rxbytes),
+ (s64)atomic64_read(&dev->gnd_rdma_rxbytes),
atomic_read(&kgnilnd_data.kgn_nvmap_short),
atomic_read(&kgnilnd_data.kgn_nvmap_cksum),
atomic_read(&kgnilnd_data.kgn_nkmap_short),
atomic_read(&kgnilnd_data.kgn_nvmap_short),
atomic_read(&kgnilnd_data.kgn_nvmap_cksum),
atomic_read(&kgnilnd_data.kgn_nkmap_short),
* 2012-12-11T16:06:16.966751 123@gni ...
*/
getnstimeofday(&now);
* 2012-12-11T16:06:16.966751 123@gni ...
*/
getnstimeofday(&now);
- time_to_tm(now.tv_sec, 0, &ctm);
+ time64_to_tm(now.tv_sec, 0, &ctm);
jifs = jiffies;
write_lock(&kgnilnd_data.kgn_peer_conn_lock);
jifs = jiffies;
write_lock(&kgnilnd_data.kgn_peer_conn_lock);
.mode = 0644,
.proc_handler = &proc_peer_state,
},
.mode = 0644,
.proc_handler = &proc_peer_state,
},
};
static struct ctl_table kgnilnd_top_table[2] = {
};
static struct ctl_table kgnilnd_top_table[2] = {
.mode = 0555,
.child = kgnilnd_table
},
.mode = 0555,
.child = kgnilnd_table
},
};
void kgnilnd_insert_sysctl(void)
};
void kgnilnd_insert_sysctl(void)