- replace #if with #ifdef (or "#if defined(...)" in compound conditions) when checking config macros, since they are defined/undefined rather than valued.
- srpc_lnet_ev_handler expects LNET_LOCK to be held; take LNET_LOCK/LNET_UNLOCK around the call in the selftest event-poll path.
int *gm_nrx_small;
int *gm_nrx_large;
-#if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM
+#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
cfs_sysctl_table_header_t *gm_sysctl; /* sysctl interface */
#endif
} gmnal_tunables_t;
.gm_nrx_large = &nrx_large,
};
-#if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM
+#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
static cfs_sysctl_table_t gmnal_ctl_table[] = {
{
.ctl_name = 1,
int status;
CDEBUG(D_TRACE, "This is the gmnal module initialisation routine\n");
-#if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM
+#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
gmnal_tunables.gm_sysctl =
cfs_register_sysctl_table(gmnal_top_ctl_table, 0);
gmnal_unload(void)
{
gmnal_fini();
-#if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM
+#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
if (gmnal_tunables.gm_sysctl != NULL)
cfs_unregister_sysctl_table(gmnal_tunables.gm_sysctl);
#endif
int *kib_peercredits; /* # concurrent sends to 1 peer */
int *kib_sd_retries; /* # concurrent sends to 1 peer */
int *kib_concurrent_sends; /* send work queue sizing */
-#if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM
+#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
cfs_sysctl_table_header_t *kib_sysctl; /* sysctl interface */
#endif
} kib_tunables_t;
LASSERT (page != NULL);
return page;
}
-#if CONFIG_HIGHMEM
+#ifdef CONFIG_HIGHMEM
if (vaddr >= PKMAP_BASE &&
vaddr < (PKMAP_BASE + LAST_PKMAP * PAGE_SIZE)) {
/* No highmem pages only used for bulk (kiov) I/O */
.kib_concurrent_sends = &concurrent_sends,
};
-#if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM
+#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
/* NB max_size specified for proc_dostring entries only needs to be big enough
* not to truncate the printout; it only needs to be the actual size of the
int *kib_fmr_flush_trigger; /* When to trigger FMR flush */
int *kib_fmr_cache; /* enable FMR pool cache? */
#endif
-#if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM
+#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
cfs_sysctl_table_header_t *kib_sysctl; /* sysctl interface */
#endif
} kib_tunables_t;
#endif
};
-#if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM
+#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
static char ipif_basename_space[32];
if (vaddr >= VMALLOC_START &&
vaddr < VMALLOC_END)
page = vmalloc_to_page ((void *)vaddr);
-#if CONFIG_HIGHMEM
+#ifdef CONFIG_HIGHMEM
else if (vaddr >= PKMAP_BASE &&
vaddr < (PKMAP_BASE + LAST_PKMAP * PAGE_SIZE))
page = vmalloc_to_page ((void *)vaddr);
.kib_keepalive = &keepalive,
};
-#if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM
+#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
static cfs_sysctl_table_t kibnal_ctl_table[] = {
{
int *kptl_simulation_bitmap;/* simulation bitmap */
#endif
-#if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM
+#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
cfs_sysctl_table_header_t *kptl_sysctl; /* sysctl interface */
#endif
} kptl_tunables_t;
};
-#if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM
+#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
#ifdef CRAY_XT3
static char ptltrace_basename_space[1024];
int *kqn_inject_csum_error; /* # csum errors to inject */
#endif
-#if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM
+#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
cfs_sysctl_table_header_t *kqn_sysctl; /* sysctl interface */
#endif
} kqswnal_tunables_t;
#endif
};
-#if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM
+#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
static cfs_sysctl_table_t kqswnal_ctl_table[] = {
{
.ctl_name = 1,
#include "socklnd.h"
-# if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM
+# if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
static cfs_sysctl_table_t ksocknal_ctl_table[21];
cfs_sysctl_table_t ksocknal_top_ctl_table[] = {
.mode = 0644,
.proc_handler = &proc_dointvec
};
-#if CPU_AFFINITY
+#ifdef CPU_AFFINITY
ksocknal_ctl_table[i++] = (cfs_sysctl_table_t) {
.ctl_name = j++,
.procname = "irq_affinity",
void
ksocknal_lib_bind_irq (unsigned int irq)
{
-#if (defined(CONFIG_SMP) && CPU_AFFINITY)
+#if (defined(CONFIG_SMP) && defined(CPU_AFFINITY))
int bind;
int cpu;
char cmdline[64];
ksocknal_lib_sock_irq (struct socket *sock)
{
int irq = 0;
-#if CPU_AFFINITY
+#ifdef CPU_AFFINITY
struct dst_entry *dst;
if (!*ksocknal_tunables.ksnd_irq_affinity)
ksocknal_ctl_table[i++] = (ctl_table)
{j++, "nagle", ksocknal_tunables.ksnd_nagle,
sizeof(int), 0644, NULL, &proc_dointvec};
-#if CPU_AFFINITY
+#ifdef CPU_AFFINITY
ksocknal_ctl_table[i++] = (ctl_table)
{j++, "irq_affinity", ksocknal_tunables.ksnd_irq_affinity,
sizeof(int), 0644, NULL, &proc_dointvec};
if (vaddr >= VMALLOC_START &&
vaddr < VMALLOC_END)
page = vmalloc_to_page ((void *)vaddr);
-#if CONFIG_HIGHMEM
+#ifdef CONFIG_HIGHMEM
else if (vaddr >= PKMAP_BASE &&
vaddr < (PKMAP_BASE + LAST_PKMAP * PAGE_SIZE))
page = vmalloc_to_page ((void *)vaddr);
#if IBNAL_USE_FMR
int *kib_fmr_remaps; /* # FMR maps before unmap required */
#endif
-#if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM
+#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
cfs_sysctl_table_header_t *kib_sysctl; /* sysctl interface */
#endif
} kib_tunables_t;
#endif
#if IBNAL_VOIDSTAR_SGADDR
-# if CONFIG_HIGHMEM
-# if CONFIG_X86 && CONFIG_HIGHMEM4G
+# if defined(CONFIG_HIGHMEM)
+# if defined(CONFIG_X86) && defined(CONFIG_HIGHMEM4G)
/* truncation to void* doesn't matter if 0 <= physmem < 4G
* so allow x86 with 32 bit phys addrs */
-# elif CONFIG_IA64
+# elif defined(CONFIG_IA64)
/* OK anyway on 64-bit arch */
# else
# error "Can't support HIGHMEM when vv_scatgat_t::v_address is void *"
LASSERT (page != NULL);
return page;
}
-#if CONFIG_HIGHMEM
+#ifdef CONFIG_HIGHMEM
if (vaddr >= PKMAP_BASE &&
vaddr < (PKMAP_BASE + LAST_PKMAP * PAGE_SIZE)) {
/* No highmem pages only used for bulk (kiov) I/O */
#endif
};
-#if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM
+#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
static char hca_basename_space[32];
static char ipif_basename_space[32];
void
srpc_service_recycle_buffer (srpc_service_t *sv, srpc_buffer_t *buf)
{
- if (sv->sv_shuttingdown)
- goto free;
+ if (sv->sv_shuttingdown) goto free;
if (sv->sv_nprune == 0) {
if (srpc_service_post_buffer(sv, buf) != 0)
rc = LNetEQPoll(&srpc_data.rpc_lnet_eq, 1,
timeout * 1000, &ev, &i);
- if (rc == 0)
- return 0;
+ if (rc == 0) return 0;
LASSERT (rc == -EOVERFLOW || rc == 1);
abort();
}
+ LNET_LOCK();
srpc_lnet_ev_handler(&ev);
+ LNET_UNLOCK();
return 1;
}