static char debug_file_name[1024];
unsigned int libcfs_subsystem_debug = ~0;
-CFS_MODULE_PARM(libcfs_subsystem_debug, "i", int, 0644,
- "Lustre kernel debug subsystem mask");
+module_param(libcfs_subsystem_debug, uint, 0644);
+MODULE_PARM_DESC(libcfs_subsystem_debug, "Lustre kernel debug subsystem mask");
EXPORT_SYMBOL(libcfs_subsystem_debug);
unsigned int libcfs_debug = (D_CANTMASK |
D_NETERROR | D_HA | D_CONFIG | D_IOCTL | D_LFSCK);
-CFS_MODULE_PARM(libcfs_debug, "i", int, 0644,
- "Lustre kernel debug mask");
+module_param(libcfs_debug, uint, 0644);
+MODULE_PARM_DESC(libcfs_debug, "Lustre kernel debug mask");
EXPORT_SYMBOL(libcfs_debug);
-unsigned int libcfs_debug_mb;
-CFS_MODULE_PARM(libcfs_debug_mb, "i", uint, 0644,
- "Total debug buffer size.");
+static unsigned int libcfs_debug_mb;
+module_param(libcfs_debug_mb, uint, 0644);
+MODULE_PARM_DESC(libcfs_debug_mb, "Total debug buffer size.");
unsigned int libcfs_printk = D_CANTMASK;
-CFS_MODULE_PARM(libcfs_printk, "i", uint, 0644,
- "Lustre kernel debug console mask");
+module_param(libcfs_printk, uint, 0644);
+MODULE_PARM_DESC(libcfs_printk, "Lustre kernel debug console mask");
unsigned int libcfs_console_ratelimit = 1;
-CFS_MODULE_PARM(libcfs_console_ratelimit, "i", uint, 0644,
- "Lustre kernel debug console ratelimit (0 to disable)");
+module_param(libcfs_console_ratelimit, uint, 0644);
+MODULE_PARM_DESC(libcfs_console_ratelimit, "Lustre kernel debug console ratelimit (0 to disable)");
unsigned int libcfs_console_max_delay;
-CFS_MODULE_PARM(libcfs_console_max_delay, "l", uint, 0644,
- "Lustre kernel debug console max delay (jiffies)");
+module_param(libcfs_console_max_delay, uint, 0644);
+MODULE_PARM_DESC(libcfs_console_max_delay, "Lustre kernel debug console max delay (jiffies)");
unsigned int libcfs_console_min_delay;
-CFS_MODULE_PARM(libcfs_console_min_delay, "l", uint, 0644,
- "Lustre kernel debug console min delay (jiffies)");
+module_param(libcfs_console_min_delay, uint, 0644);
+MODULE_PARM_DESC(libcfs_console_min_delay, "Lustre kernel debug console min delay (jiffies)");
unsigned int libcfs_console_backoff = CDEBUG_DEFAULT_BACKOFF;
-CFS_MODULE_PARM(libcfs_console_backoff, "i", uint, 0644,
- "Lustre kernel debug console backoff factor");
+module_param(libcfs_console_backoff, uint, 0644);
+MODULE_PARM_DESC(libcfs_console_backoff, "Lustre kernel debug console backoff factor");
unsigned int libcfs_debug_binary = 1;
unsigned int libcfs_watchdog_ratelimit = 300;
unsigned int libcfs_panic_on_lbug = 1;
-CFS_MODULE_PARM(libcfs_panic_on_lbug, "i", uint, 0644,
- "Lustre kernel panic on LBUG");
+module_param(libcfs_panic_on_lbug, uint, 0644);
+MODULE_PARM_DESC(libcfs_panic_on_lbug, "Lustre kernel panic on LBUG");
atomic_t libcfs_kmemory = ATOMIC_INIT(0);
EXPORT_SYMBOL(libcfs_kmemory);
/* We need to pass a pointer here, but elsewhere this must be a const */
static char *libcfs_debug_file_path;
-CFS_MODULE_PARM(libcfs_debug_file_path, "s", charp, 0644,
- "Path for dumping debug logs, "
- "set 'NONE' to prevent log dumping");
+module_param(libcfs_debug_file_path, charp, 0644);
+MODULE_PARM_DESC(libcfs_debug_file_path,
+ "Path for dumping debug logs, set 'NONE' to prevent log dumping");
int libcfs_panic_in_progress;
* >1 : specify number of partitions
*/
static int cpu_npartitions;
-CFS_MODULE_PARM(cpu_npartitions, "i", int, 0444, "# of CPU partitions");
+module_param(cpu_npartitions, int, 0444);
+MODULE_PARM_DESC(cpu_npartitions, "# of CPU partitions");
/**
* modparam for setting CPU partitions patterns:
* NB: If user specified cpu_pattern, cpu_npartitions will be ignored
*/
static char *cpu_pattern = "";
-CFS_MODULE_PARM(cpu_pattern, "s", charp, 0444, "CPU partitions pattern");
+module_param(cpu_pattern, charp, 0444);
+MODULE_PARM_DESC(cpu_pattern, "CPU partitions pattern");
struct cfs_cpt_data {
/* serialize hotplug etc */
#include "gnilnd.h"
static int credits = 256;
-CFS_MODULE_PARM(credits, "i", int, 0444,
- "# concurrent sends");
+module_param(credits, int, 0444);
+MODULE_PARM_DESC(credits, "# concurrent sends");
static int eager_credits = 256 * 1024;
-CFS_MODULE_PARM(eager_credits, "i", int, 0644,
- "# eager buffers");
+module_param(eager_credits, int, 0644);
+MODULE_PARM_DESC(eager_credits, "# eager buffers");
static int peer_credits = 16;
-CFS_MODULE_PARM(peer_credits, "i", int, 0444,
- "# LNet peer credits");
+module_param(peer_credits, int, 0444);
+MODULE_PARM_DESC(peer_credits, "# LNet peer credits");
/* NB - we'll not actually limit sends to this, we just size the mailbox buffer
* such that at most we'll have concurrent_sends * max_immediate messages
* in the mailbox */
static int concurrent_sends = 0;
-CFS_MODULE_PARM(concurrent_sends, "i", int, 0444,
- "# concurrent HW sends to 1 peer");
+module_param(concurrent_sends, int, 0444);
+MODULE_PARM_DESC(concurrent_sends, "# concurrent HW sends to 1 peer");
/* default for 2k nodes @ 16 peer credits */
static int fma_cq_size = 32768;
-CFS_MODULE_PARM(fma_cq_size, "i", int, 0444,
- "size of the completion queue");
+module_param(fma_cq_size, int, 0444);
+MODULE_PARM_DESC(fma_cq_size, "size of the completion queue");
static int timeout = GNILND_BASE_TIMEOUT;
/* can't change @ runtime because LNet gets NI data at startup from
* this value */
-CFS_MODULE_PARM(timeout, "i", int, 0444,
- "communications timeout (seconds)");
+module_param(timeout, int, 0444);
+MODULE_PARM_DESC(timeout, "communications timeout (seconds)");
/* time to wait between datagram timeout and sending of next dgram */
static int min_reconnect_interval = GNILND_MIN_RECONNECT_TO;
-CFS_MODULE_PARM(min_reconnect_interval, "i", int, 0644,
- "minimum connection retry interval (seconds)");
+module_param(min_reconnect_interval, int, 0644);
+MODULE_PARM_DESC(min_reconnect_interval, "minimum connection retry interval (seconds)");
/* if this goes longer than timeout, we'll timeout the TX before
* the dgram */
static int max_reconnect_interval = GNILND_MAX_RECONNECT_TO;
-CFS_MODULE_PARM(max_reconnect_interval, "i", int, 0644,
- "maximum connection retry interval (seconds)");
+module_param(max_reconnect_interval, int, 0644);
+MODULE_PARM_DESC(max_reconnect_interval, "maximum connection retry interval (seconds)");
static int max_immediate = 2048;
-CFS_MODULE_PARM(max_immediate, "i", int, 0444,
- "immediate/RDMA breakpoint");
+module_param(max_immediate, int, 0444);
+MODULE_PARM_DESC(max_immediate, "immediate/RDMA breakpoint");
static int checksum = GNILND_CHECKSUM_DEFAULT;
-CFS_MODULE_PARM(checksum, "i", int, 0644,
- "0: None, 1: headers, 2: short msg, 3: all traffic");
+module_param(checksum, int, 0644);
+MODULE_PARM_DESC(checksum, "0: None, 1: headers, 2: short msg, 3: all traffic");
static int checksum_dump = 0;
-CFS_MODULE_PARM(checksum_dump, "i", int, 0644,
- "0: None, 1: dump log on failure, 2: payload data to D_INFO log");
+module_param(checksum_dump, int, 0644);
+MODULE_PARM_DESC(checksum_dump, "0: None, 1: dump log on failure, 2: payload data to D_INFO log");
static int bte_dlvr_mode = GNILND_RDMA_DLVR_OPTION;
-CFS_MODULE_PARM(bte_dlvr_mode, "i", int, 0644,
- "enable hashing for BTE (RDMA) transfers");
+module_param(bte_dlvr_mode, int, 0644);
+MODULE_PARM_DESC(bte_dlvr_mode, "enable hashing for BTE (RDMA) transfers");
static int bte_relaxed_ordering = 1;
-CFS_MODULE_PARM(bte_relaxed_ordering, "i", int, 0644,
- "enable relaxed ordering (PASSPW) for BTE (RDMA) transfers");
+module_param(bte_relaxed_ordering, int, 0644);
+MODULE_PARM_DESC(bte_relaxed_ordering, "enable relaxed ordering (PASSPW) for BTE (RDMA) transfers");
#ifdef CONFIG_MK1OM
static int ptag = GNI_PTAG_LND_KNC;
#else
static int ptag = GNI_PTAG_LND;
#endif
-CFS_MODULE_PARM(ptag, "i", int, 0444,
- "ptag for Gemini CDM");
+module_param(ptag, int, 0444);
+MODULE_PARM_DESC(ptag, "ptag for Gemini CDM");
static int pkey = GNI_JOB_CREATE_COOKIE(GNI_PKEY_LND, 0);
-CFS_MODULE_PARM(pkey, "i", int, 0444, "pkey for CDM");
+module_param(pkey, int, 0444);
+MODULE_PARM_DESC(pkey, "pkey for CDM");
static int max_retransmits = 1024;
-CFS_MODULE_PARM(max_retransmits, "i", int, 0444,
- "max retransmits for FMA");
+module_param(max_retransmits, int, 0444);
+MODULE_PARM_DESC(max_retransmits, "max retransmits for FMA");
static int nwildcard = 4;
-CFS_MODULE_PARM(nwildcard, "i", int, 0444,
- "# wildcard datagrams to post per net (interface)");
+module_param(nwildcard, int, 0444);
+MODULE_PARM_DESC(nwildcard, "# wildcard datagrams to post per net (interface)");
static int nice = -20;
-CFS_MODULE_PARM(nice, "i", int, 0444,
- "nice value for kgnilnd threads, default -20");
+module_param(nice, int, 0444);
+MODULE_PARM_DESC(nice, "nice value for kgnilnd threads, default -20");
static int rdmaq_intervals = 4;
-CFS_MODULE_PARM(rdmaq_intervals, "i", int, 0644,
- "# intervals per second for rdmaq throttling, default 4, 0 to disable");
+module_param(rdmaq_intervals, int, 0644);
+MODULE_PARM_DESC(rdmaq_intervals, "# intervals per second for rdmaq throttling, default 4, 0 to disable");
static int loops = 100;
-CFS_MODULE_PARM(loops, "i", int, 0644,
- "# of loops before scheduler is friendly, default 100");
+module_param(loops, int, 0644);
+MODULE_PARM_DESC(loops, "# of loops before scheduler is friendly, default 100");
static int hash_size = 503;
-CFS_MODULE_PARM(hash_size, "i", int, 0444,
- "prime number for peer/conn hash sizing, default 503");
+module_param(hash_size, int, 0444);
+MODULE_PARM_DESC(hash_size, "prime number for peer/conn hash sizing, default 503");
static int peer_health = 0;
-CFS_MODULE_PARM(peer_health, "i", int, 0444,
- "Disable peer timeout for LNet peer health, default off, > 0 to enable");
+module_param(peer_health, int, 0444);
+MODULE_PARM_DESC(peer_health, "Disable peer timeout for LNet peer health, default off, > 0 to enable");
static int peer_timeout = -1;
-CFS_MODULE_PARM(peer_timeout, "i", int, 0444,
- "Peer timeout used for peer_health, default based on gnilnd timeout, > -1 to manually set");
+module_param(peer_timeout, int, 0444);
+MODULE_PARM_DESC(peer_timeout, "Peer timeout used for peer_health, default based on gnilnd timeout, > -1 to manually set");
static int vmap_cksum = 0;
-CFS_MODULE_PARM(vmap_cksum, "i", int, 0644,
- "use vmap for all kiov checksumming, default off");
+module_param(vmap_cksum, int, 0644);
+MODULE_PARM_DESC(vmap_cksum, "use vmap for all kiov checksumming, default off");
static int mbox_per_block = GNILND_FMABLK;
-CFS_MODULE_PARM(mbox_per_block, "i", int, 0644,
- "mailboxes per block");
+module_param(mbox_per_block, int, 0644);
+MODULE_PARM_DESC(mbox_per_block, "mailboxes per block");
static int nphys_mbox = 0;
-CFS_MODULE_PARM(nphys_mbox, "i", int, 0444,
- "# mbox to preallocate from physical memory, default 0");
+module_param(nphys_mbox, int, 0444);
+MODULE_PARM_DESC(nphys_mbox, "# mbox to preallocate from physical memory, default 0");
static int mbox_credits = GNILND_MBOX_CREDITS;
-CFS_MODULE_PARM(mbox_credits, "i", int, 0644,
- "number of credits per mailbox");
+module_param(mbox_credits, int, 0644);
+MODULE_PARM_DESC(mbox_credits, "number of credits per mailbox");
static int sched_threads = GNILND_SCHED_THREADS;
-CFS_MODULE_PARM(sched_threads, "i", int, 0444,
- "number of threads for moving data");
+module_param(sched_threads, int, 0444);
+MODULE_PARM_DESC(sched_threads, "number of threads for moving data");
static int net_hash_size = 11;
-CFS_MODULE_PARM(net_hash_size, "i", int, 0444,
- "prime number for net hash sizing, default 11");
+module_param(net_hash_size, int, 0444);
+MODULE_PARM_DESC(net_hash_size, "prime number for net hash sizing, default 11");
static int hardware_timeout = GNILND_HARDWARE_TIMEOUT;
-CFS_MODULE_PARM(hardware_timeout, "i", int, 0444,
- "maximum time for traffic to get from one node to another");
+module_param(hardware_timeout, int, 0444);
+MODULE_PARM_DESC(hardware_timeout, "maximum time for traffic to get from one node to another");
static int mdd_timeout = GNILND_MDD_TIMEOUT;
-CFS_MODULE_PARM(mdd_timeout, "i", int, 0644,
- "maximum time (in minutes) for mdd to be held");
+module_param(mdd_timeout, int, 0644);
+MODULE_PARM_DESC(mdd_timeout, "maximum time (in minutes) for mdd to be held");
static int sched_timeout = GNILND_SCHED_TIMEOUT;
-CFS_MODULE_PARM(sched_timeout, "i", int, 0644,
- "scheduler aliveness in seconds max time");
+module_param(sched_timeout, int, 0644);
+MODULE_PARM_DESC(sched_timeout, "scheduler aliveness in seconds max time");
static int sched_nice = GNILND_SCHED_NICE;
-CFS_MODULE_PARM(sched_nice, "i", int, 0444,
- "scheduler's nice setting, default compute 0 service -20");
+module_param(sched_nice, int, 0444);
+MODULE_PARM_DESC(sched_nice, "scheduler's nice setting, default compute 0 service -20");
static int reverse_rdma = GNILND_REVERSE_RDMA;
-CFS_MODULE_PARM(reverse_rdma, "i", int, 0644,
- "Normal 0: Reverse GET: 1 Reverse Put: 2 Reverse Both: 3");
+module_param(reverse_rdma, int, 0644);
+MODULE_PARM_DESC(reverse_rdma, "Normal 0: Reverse GET: 1 Reverse Put: 2 Reverse Both: 3");
static int dgram_timeout = GNILND_DGRAM_TIMEOUT;
-CFS_MODULE_PARM(dgram_timeout, "i", int, 0644,
- "dgram thread aliveness seconds max time");
+module_param(dgram_timeout, int, 0644);
+MODULE_PARM_DESC(dgram_timeout, "dgram thread aliveness seconds max time");
static int efault_lbug = 0;
-CFS_MODULE_PARM(efault_lbug, "i", int, 0644,
- "If a compute receives an EFAULT in"
- " a message should it LBUG. 0 off 1 on");
+module_param(efault_lbug, int, 0644);
+MODULE_PARM_DESC(efault_lbug, "If a compute receives an EFAULT in a message should it LBUG. 0 off 1 on");
static int fast_reconn = GNILND_FAST_RECONNECT;
-CFS_MODULE_PARM(fast_reconn, "i", int, 0644,
- "fast reconnect on connection timeout");
+module_param(fast_reconn, int, 0644);
+MODULE_PARM_DESC(fast_reconn, "fast reconnect on connection timeout");
static int max_conn_purg = GNILND_PURGATORY_MAX;
-CFS_MODULE_PARM(max_conn_purg, "i", int, 0644,
- "Max number of connections per peer in purgatory");
+module_param(max_conn_purg, int, 0644);
+MODULE_PARM_DESC(max_conn_purg, "Max number of connections per peer in purgatory");
static int thread_affinity = 0;
-CFS_MODULE_PARM(thread_affinity, "i", int, 0444,
- "scheduler thread affinity default 0 (disabled)");
+module_param(thread_affinity, int, 0444);
+MODULE_PARM_DESC(thread_affinity, "scheduler thread affinity default 0 (disabled)");
static int thread_safe = GNILND_TS_ENABLE;
-CFS_MODULE_PARM(thread_safe, "i", int, 0444,
- "Use kgni thread safe API if available");
+module_param(thread_safe, int, 0444);
+MODULE_PARM_DESC(thread_safe, "Use kgni thread safe API if available");
static int reg_fail_timeout = GNILND_REGFAILTO_DISABLE;
-CFS_MODULE_PARM(reg_fail_timeout, "i", int, 0644,
- "fmablk registration timeout LBUG");
+module_param(reg_fail_timeout, int, 0644);
+MODULE_PARM_DESC(reg_fail_timeout, "fmablk registration timeout LBUG");
kgn_tunables_t kgnilnd_tunables = {
.kgn_min_reconnect_interval = &min_reconnect_interval,
#include "o2iblnd.h"
static int service = 987;
-CFS_MODULE_PARM(service, "i", int, 0444,
- "service number (within RDMA_PS_TCP)");
+module_param(service, int, 0444);
+MODULE_PARM_DESC(service, "service number (within RDMA_PS_TCP)");
static int cksum = 0;
-CFS_MODULE_PARM(cksum, "i", int, 0644,
- "set non-zero to enable message (not RDMA) checksums");
+module_param(cksum, int, 0644);
+MODULE_PARM_DESC(cksum, "set non-zero to enable message (not RDMA) checksums");
static int timeout = 50;
-CFS_MODULE_PARM(timeout, "i", int, 0644,
- "timeout (seconds)");
+module_param(timeout, int, 0644);
+MODULE_PARM_DESC(timeout, "timeout (seconds)");
/* Number of threads in each scheduler pool which is percpt,
* we will estimate reasonable value based on CPUs if it's set to zero. */
static int nscheds;
-CFS_MODULE_PARM(nscheds, "i", int, 0444,
- "number of threads in each scheduler pool");
+module_param(nscheds, int, 0444);
+MODULE_PARM_DESC(nscheds, "number of threads in each scheduler pool");
/* NB: this value is shared by all CPTs, it can grow at runtime */
static int ntx = 512;
-CFS_MODULE_PARM(ntx, "i", int, 0444,
- "# of message descriptors allocated for each pool");
+module_param(ntx, int, 0444);
+MODULE_PARM_DESC(ntx, "# of message descriptors allocated for each pool");
/* NB: this value is shared by all CPTs */
static int credits = 256;
-CFS_MODULE_PARM(credits, "i", int, 0444,
- "# concurrent sends");
+module_param(credits, int, 0444);
+MODULE_PARM_DESC(credits, "# concurrent sends");
static int peer_credits = 8;
-CFS_MODULE_PARM(peer_credits, "i", int, 0444,
- "# concurrent sends to 1 peer");
+module_param(peer_credits, int, 0444);
+MODULE_PARM_DESC(peer_credits, "# concurrent sends to 1 peer");
static int peer_credits_hiw = 0;
-CFS_MODULE_PARM(peer_credits_hiw, "i", int, 0444,
- "when eagerly to return credits");
+module_param(peer_credits_hiw, int, 0444);
+MODULE_PARM_DESC(peer_credits_hiw, "when eagerly to return credits");
static int peer_buffer_credits = 0;
-CFS_MODULE_PARM(peer_buffer_credits, "i", int, 0444,
- "# per-peer router buffer credits");
+module_param(peer_buffer_credits, int, 0444);
+MODULE_PARM_DESC(peer_buffer_credits, "# per-peer router buffer credits");
static int peer_timeout = 180;
-CFS_MODULE_PARM(peer_timeout, "i", int, 0444,
- "Seconds without aliveness news to declare peer dead (<=0 to disable)");
+module_param(peer_timeout, int, 0444);
+MODULE_PARM_DESC(peer_timeout, "Seconds without aliveness news to declare peer dead (<=0 to disable)");
static char *ipif_name = "ib0";
-CFS_MODULE_PARM(ipif_name, "s", charp, 0444,
- "IPoIB interface name");
+module_param(ipif_name, charp, 0444);
+MODULE_PARM_DESC(ipif_name, "IPoIB interface name");
static int retry_count = 5;
-CFS_MODULE_PARM(retry_count, "i", int, 0644,
- "Retransmissions when no ACK received");
+module_param(retry_count, int, 0644);
+MODULE_PARM_DESC(retry_count, "Retransmissions when no ACK received");
static int rnr_retry_count = 6;
-CFS_MODULE_PARM(rnr_retry_count, "i", int, 0644,
- "RNR retransmissions");
+module_param(rnr_retry_count, int, 0644);
+MODULE_PARM_DESC(rnr_retry_count, "RNR retransmissions");
static int keepalive = 100;
-CFS_MODULE_PARM(keepalive, "i", int, 0644,
- "Idle time in seconds before sending a keepalive");
+module_param(keepalive, int, 0644);
+MODULE_PARM_DESC(keepalive, "Idle time in seconds before sending a keepalive");
-static int ib_mtu = 0;
-CFS_MODULE_PARM(ib_mtu, "i", int, 0444,
- "IB MTU 256/512/1024/2048/4096");
+static int ib_mtu;
+module_param(ib_mtu, int, 0444);
+MODULE_PARM_DESC(ib_mtu, "IB MTU 256/512/1024/2048/4096");
-static int concurrent_sends = 0;
-CFS_MODULE_PARM(concurrent_sends, "i", int, 0444,
- "send work-queue sizing");
+static int concurrent_sends;
+module_param(concurrent_sends, int, 0444);
+MODULE_PARM_DESC(concurrent_sends, "send work-queue sizing");
-static int map_on_demand = 0;
-CFS_MODULE_PARM(map_on_demand, "i", int, 0444,
- "map on demand");
+static int map_on_demand;
+module_param(map_on_demand, int, 0444);
+MODULE_PARM_DESC(map_on_demand, "map on demand");
/* NB: this value is shared by all CPTs, it can grow at runtime */
static int fmr_pool_size = 512;
-CFS_MODULE_PARM(fmr_pool_size, "i", int, 0444,
- "size of fmr pool on each CPT (>= ntx / 4)");
+module_param(fmr_pool_size, int, 0444);
+MODULE_PARM_DESC(fmr_pool_size, "size of fmr pool on each CPT (>= ntx / 4)");
/* NB: this value is shared by all CPTs, it can grow at runtime */
static int fmr_flush_trigger = 384;
-CFS_MODULE_PARM(fmr_flush_trigger, "i", int, 0444,
- "# dirty FMRs that triggers pool flush");
+module_param(fmr_flush_trigger, int, 0444);
+MODULE_PARM_DESC(fmr_flush_trigger, "# dirty FMRs that triggers pool flush");
static int fmr_cache = 1;
-CFS_MODULE_PARM(fmr_cache, "i", int, 0444,
- "non-zero to enable FMR caching");
+module_param(fmr_cache, int, 0444);
+MODULE_PARM_DESC(fmr_cache, "non-zero to enable FMR caching");
/*
* 0: disable failover
* 2: force to failover (for debug)
*/
static int dev_failover = 0;
-CFS_MODULE_PARM(dev_failover, "i", int, 0444,
- "HCA failover for bonding (0 off, 1 on, other values reserved)");
+module_param(dev_failover, int, 0444);
+MODULE_PARM_DESC(dev_failover, "HCA failover for bonding (0 off, 1 on, other values reserved)");
-static int require_privileged_port = 0;
-CFS_MODULE_PARM(require_privileged_port, "i", int, 0644,
- "require privileged port when accepting connection");
+static int require_privileged_port;
+module_param(require_privileged_port, int, 0644);
+MODULE_PARM_DESC(require_privileged_port, "require privileged port when accepting connection");
static int use_privileged_port = 1;
-CFS_MODULE_PARM(use_privileged_port, "i", int, 0644,
- "use privileged port when initiating connection");
+module_param(use_privileged_port, int, 0644);
+MODULE_PARM_DESC(use_privileged_port, "use privileged port when initiating connection");
kib_tunables_t kiblnd_tunables = {
.kib_dev_failover = &dev_failover,
#include "socklnd.h"
static int sock_timeout = 50;
-CFS_MODULE_PARM(sock_timeout, "i", int, 0644,
- "dead socket timeout (seconds)");
+module_param(sock_timeout, int, 0644);
+MODULE_PARM_DESC(sock_timeout, "dead socket timeout (seconds)");
static int credits = 256;
-CFS_MODULE_PARM(credits, "i", int, 0444,
- "# concurrent sends");
+module_param(credits, int, 0444);
+MODULE_PARM_DESC(credits, "# concurrent sends");
static int peer_credits = 8;
-CFS_MODULE_PARM(peer_credits, "i", int, 0444,
- "# concurrent sends to 1 peer");
+module_param(peer_credits, int, 0444);
+MODULE_PARM_DESC(peer_credits, "# concurrent sends to 1 peer");
-static int peer_buffer_credits = 0;
-CFS_MODULE_PARM(peer_buffer_credits, "i", int, 0444,
- "# per-peer router buffer credits");
+static int peer_buffer_credits;
+module_param(peer_buffer_credits, int, 0444);
+MODULE_PARM_DESC(peer_buffer_credits, "# per-peer router buffer credits");
static int peer_timeout = 180;
-CFS_MODULE_PARM(peer_timeout, "i", int, 0444,
- "Seconds without aliveness news to declare peer dead (<=0 to disable)");
+module_param(peer_timeout, int, 0444);
+MODULE_PARM_DESC(peer_timeout, "Seconds without aliveness news to declare peer dead (<=0 to disable)");
/* Number of daemons in each thread pool which is percpt,
* we will estimate reasonable value based on CPUs if it's not set. */
static unsigned int nscheds;
-CFS_MODULE_PARM(nscheds, "i", int, 0444,
- "# scheduler daemons in each pool while starting");
+module_param(nscheds, uint, 0444);
+MODULE_PARM_DESC(nscheds, "# scheduler daemons in each pool while starting");
static int nconnds = 4;
-CFS_MODULE_PARM(nconnds, "i", int, 0444,
- "# connection daemons while starting");
+module_param(nconnds, int, 0444);
+MODULE_PARM_DESC(nconnds, "# connection daemons while starting");
static int nconnds_max = 64;
-CFS_MODULE_PARM(nconnds_max, "i", int, 0444,
- "max # connection daemons");
+module_param(nconnds_max, int, 0444);
+MODULE_PARM_DESC(nconnds_max, "max # connection daemons");
static int min_reconnectms = 1000;
-CFS_MODULE_PARM(min_reconnectms, "i", int, 0644,
- "min connection retry interval (mS)");
+module_param(min_reconnectms, int, 0644);
+MODULE_PARM_DESC(min_reconnectms, "min connection retry interval (mS)");
static int max_reconnectms = 60000;
-CFS_MODULE_PARM(max_reconnectms, "i", int, 0644,
- "max connection retry interval (mS)");
+module_param(max_reconnectms, int, 0644);
+MODULE_PARM_DESC(max_reconnectms, "max connection retry interval (mS)");
static int eager_ack;
-CFS_MODULE_PARM(eager_ack, "i", int, 0644,
- "send tcp ack packets eagerly");
+module_param(eager_ack, int, 0644);
+MODULE_PARM_DESC(eager_ack, "send tcp ack packets eagerly");
static int typed_conns = 1;
-CFS_MODULE_PARM(typed_conns, "i", int, 0444,
- "use different sockets for bulk");
+module_param(typed_conns, int, 0444);
+MODULE_PARM_DESC(typed_conns, "use different sockets for bulk");
static int min_bulk = (1<<10);
-CFS_MODULE_PARM(min_bulk, "i", int, 0644,
- "smallest 'large' message");
+module_param(min_bulk, int, 0644);
+MODULE_PARM_DESC(min_bulk, "smallest 'large' message");
# define DEFAULT_BUFFER_SIZE 0
static int tx_buffer_size = DEFAULT_BUFFER_SIZE;
-CFS_MODULE_PARM(tx_buffer_size, "i", int, 0644,
- "socket tx buffer size (0 for system default)");
+module_param(tx_buffer_size, int, 0644);
+MODULE_PARM_DESC(tx_buffer_size, "socket tx buffer size (0 for system default)");
static int rx_buffer_size = DEFAULT_BUFFER_SIZE;
-CFS_MODULE_PARM(rx_buffer_size, "i", int, 0644,
- "socket rx buffer size (0 for system default)");
+module_param(rx_buffer_size, int, 0644);
+MODULE_PARM_DESC(rx_buffer_size, "socket rx buffer size (0 for system default)");
static int nagle = 0;
-CFS_MODULE_PARM(nagle, "i", int, 0644,
- "enable NAGLE?");
+module_param(nagle, int, 0644);
+MODULE_PARM_DESC(nagle, "enable NAGLE?");
static int round_robin = 1;
-CFS_MODULE_PARM(round_robin, "i", int, 0644,
- "Round robin for multiple interfaces");
+module_param(round_robin, int, 0644);
+MODULE_PARM_DESC(round_robin, "Round robin for multiple interfaces");
static int keepalive = 30;
-CFS_MODULE_PARM(keepalive, "i", int, 0644,
- "# seconds before send keepalive");
+module_param(keepalive, int, 0644);
+MODULE_PARM_DESC(keepalive, "# seconds before send keepalive");
static int keepalive_idle = 30;
-CFS_MODULE_PARM(keepalive_idle, "i", int, 0644,
- "# idle seconds before probe");
+module_param(keepalive_idle, int, 0644);
+MODULE_PARM_DESC(keepalive_idle, "# idle seconds before probe");
#define DEFAULT_KEEPALIVE_COUNT 5
static int keepalive_count = DEFAULT_KEEPALIVE_COUNT;
-CFS_MODULE_PARM(keepalive_count, "i", int, 0644,
- "# missed probes == dead");
+module_param(keepalive_count, int, 0644);
+MODULE_PARM_DESC(keepalive_count, "# missed probes == dead");
static int keepalive_intvl = 5;
-CFS_MODULE_PARM(keepalive_intvl, "i", int, 0644,
- "seconds between probes");
+module_param(keepalive_intvl, int, 0644);
+MODULE_PARM_DESC(keepalive_intvl, "seconds between probes");
static int enable_csum = 0;
-CFS_MODULE_PARM(enable_csum, "i", int, 0644,
- "enable check sum");
+module_param(enable_csum, int, 0644);
+MODULE_PARM_DESC(enable_csum, "enable check sum");
static int inject_csum_error = 0;
-CFS_MODULE_PARM(inject_csum_error, "i", int, 0644,
- "set non-zero to inject a checksum error");
+module_param(inject_csum_error, int, 0644);
+MODULE_PARM_DESC(inject_csum_error, "set non-zero to inject a checksum error");
+
#ifdef CPU_AFFINITY
static int enable_irq_affinity = 0;
-CFS_MODULE_PARM(enable_irq_affinity, "i", int, 0644,
- "enable IRQ affinity");
+module_param(enable_irq_affinity, int, 0644);
+MODULE_PARM_DESC(enable_irq_affinity, "enable IRQ affinity");
#endif
static int nonblk_zcack = 1;
-CFS_MODULE_PARM(nonblk_zcack, "i", int, 0644,
- "always send ZC-ACK on non-blocking connection");
+module_param(nonblk_zcack, int, 0644);
+MODULE_PARM_DESC(nonblk_zcack, "always send ZC-ACK on non-blocking connection");
static unsigned int zc_min_payload = (16 << 10);
-CFS_MODULE_PARM(zc_min_payload, "i", int, 0644,
- "minimum payload size to zero copy");
+module_param(zc_min_payload, uint, 0644);
+MODULE_PARM_DESC(zc_min_payload, "minimum payload size to zero copy");
static unsigned int zc_recv = 0;
-CFS_MODULE_PARM(zc_recv, "i", int, 0644,
- "enable ZC recv for Chelsio driver");
+module_param(zc_recv, uint, 0644);
+MODULE_PARM_DESC(zc_recv, "enable ZC recv for Chelsio driver");
static unsigned int zc_recv_min_nfrags = 16;
-CFS_MODULE_PARM(zc_recv_min_nfrags, "i", int, 0644,
- "minimum # of fragments to enable ZC recv");
+module_param(zc_recv_min_nfrags, uint, 0644);
+MODULE_PARM_DESC(zc_recv_min_nfrags, "minimum # of fragments to enable ZC recv");
#ifdef SOCKNAL_BACKOFF
static int backoff_init = 3;
-CFS_MODULE_PARM(backoff_init, "i", int, 0644,
- "seconds for initial tcp backoff");
+module_param(backoff_init, int, 0644);
+MODULE_PARM_DESC(backoff_init, "seconds for initial tcp backoff");
static int backoff_max = 3;
-CFS_MODULE_PARM(backoff_max, "i", int, 0644,
- "seconds for maximum tcp backoff");
+module_param(backoff_max, int, 0644);
+MODULE_PARM_DESC(backoff_max, "seconds for maximum tcp backoff");
#endif
#if SOCKNAL_VERSION_DEBUG
static int protocol = 3;
-CFS_MODULE_PARM(protocol, "i", int, 0644,
- "protocol version");
+module_param(protocol, int, 0644);
+MODULE_PARM_DESC(protocol, "protocol version");
#endif
ksock_tunables_t ksocknal_tunables;
static char *accept = "secure";
-CFS_MODULE_PARM(accept, "s", charp, 0444,
- "Accept connections (secure|all|none)");
-CFS_MODULE_PARM(accept_port, "i", int, 0444,
- "Acceptor's port (same on all nodes)");
-CFS_MODULE_PARM(accept_backlog, "i", int, 0444,
- "Acceptor's listen backlog");
-CFS_MODULE_PARM(accept_timeout, "i", int, 0644,
- "Acceptor's timeout (seconds)");
+module_param(accept, charp, 0444);
+MODULE_PARM_DESC(accept, "Accept connections (secure|all|none)");
+module_param(accept_port, int, 0444);
+MODULE_PARM_DESC(accept_port, "Acceptor's port (same on all nodes)");
+module_param(accept_backlog, int, 0444);
+MODULE_PARM_DESC(accept_backlog, "Acceptor's listen backlog");
+module_param(accept_timeout, int, 0644);
+MODULE_PARM_DESC(accept_timeout, "Acceptor's timeout (seconds)");
static char *accept_type = NULL;
EXPORT_SYMBOL(the_lnet);
static char *ip2nets = "";
-CFS_MODULE_PARM(ip2nets, "s", charp, 0444,
- "LNET network <- IP table");
+module_param(ip2nets, charp, 0444);
+MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");
static char *networks = "";
-CFS_MODULE_PARM(networks, "s", charp, 0444,
- "local networks");
+module_param(networks, charp, 0444);
+MODULE_PARM_DESC(networks, "local networks");
static char *routes = "";
-CFS_MODULE_PARM(routes, "s", charp, 0444,
- "routes to non-local networks");
+module_param(routes, charp, 0444);
+MODULE_PARM_DESC(routes, "routes to non-local networks");
static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
-CFS_MODULE_PARM(rnet_htable_size, "i", int, 0444,
- "size of remote network hash table");
+module_param(rnet_htable_size, int, 0444);
+MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");
static int lnet_ping(lnet_process_id_t id, int timeout_ms,
lnet_process_id_t __user *ids, int n_ids);
#include <lnet/lib-lnet.h>
static int local_nid_dist_zero = 1;
-CFS_MODULE_PARM(local_nid_dist_zero, "i", int, 0444,
- "Reserved");
+module_param(local_nid_dist_zero, int, 0444);
+MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");
int
lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
#include <lnet/lib-lnet.h>
/* NB: add /proc interfaces in upcoming patches */
-int portal_rotor = LNET_PTL_ROTOR_HASH_RT;
-CFS_MODULE_PARM(portal_rotor, "i", int, 0644,
- "redirect PUTs to different cpu-partitions");
+int portal_rotor = LNET_PTL_ROTOR_HASH_RT;
+module_param(portal_rotor, int, 0644);
+MODULE_PARM_DESC(portal_rotor, "redirect PUTs to different cpu-partitions");
static int
lnet_ptl_match_type(unsigned int index, lnet_process_id_t match_id,
#include <lnet/lib-dlc.h>
static int config_on_load = 0;
-CFS_MODULE_PARM(config_on_load, "i", int, 0444,
- "configure network at module load");
+module_param(config_on_load, int, 0444);
+MODULE_PARM_DESC(config_on_load, "configure network at module load");
static struct mutex lnet_config_mutex;
PAGE_CACHE_SHIFT)
static char *forwarding = "";
-CFS_MODULE_PARM(forwarding, "s", charp, 0444,
- "Explicitly enable/disable forwarding between networks");
+module_param(forwarding, charp, 0444);
+MODULE_PARM_DESC(forwarding, "Explicitly enable/disable forwarding between networks");
static int tiny_router_buffers;
-CFS_MODULE_PARM(tiny_router_buffers, "i", int, 0444,
- "# of 0 payload messages to buffer in the router");
+module_param(tiny_router_buffers, int, 0444);
+MODULE_PARM_DESC(tiny_router_buffers, "# of 0 payload messages to buffer in the router");
static int small_router_buffers;
-CFS_MODULE_PARM(small_router_buffers, "i", int, 0444,
- "# of small (1 page) messages to buffer in the router");
+module_param(small_router_buffers, int, 0444);
+MODULE_PARM_DESC(small_router_buffers, "# of small (1 page) messages to buffer in the router");
static int large_router_buffers;
-CFS_MODULE_PARM(large_router_buffers, "i", int, 0444,
- "# of large messages to buffer in the router");
-static int peer_buffer_credits = 0;
-CFS_MODULE_PARM(peer_buffer_credits, "i", int, 0444,
- "# router buffer credits per peer");
+module_param(large_router_buffers, int, 0444);
+MODULE_PARM_DESC(large_router_buffers, "# of large messages to buffer in the router");
+static int peer_buffer_credits;
+module_param(peer_buffer_credits, int, 0444);
+MODULE_PARM_DESC(peer_buffer_credits, "# router buffer credits per peer");
static int auto_down = 1;
-CFS_MODULE_PARM(auto_down, "i", int, 0444,
- "Automatically mark peers down on comms error");
+module_param(auto_down, int, 0444);
+MODULE_PARM_DESC(auto_down, "Automatically mark peers down on comms error");
int
lnet_peer_buffer_credits(lnet_ni_t *ni)
/* forward ref's */
static int lnet_router_checker(void *);
-static int check_routers_before_use = 0;
-CFS_MODULE_PARM(check_routers_before_use, "i", int, 0444,
- "Assume routers are down and ping them before use");
+static int check_routers_before_use;
+module_param(check_routers_before_use, int, 0444);
+MODULE_PARM_DESC(check_routers_before_use, "Assume routers are down and ping them before use");
int avoid_asym_router_failure = 1;
-CFS_MODULE_PARM(avoid_asym_router_failure, "i", int, 0644,
- "Avoid asymmetrical router failures (0 to disable)");
+module_param(avoid_asym_router_failure, int, 0644);
+MODULE_PARM_DESC(avoid_asym_router_failure, "Avoid asymmetrical router failures (0 to disable)");
static int dead_router_check_interval = 60;
-CFS_MODULE_PARM(dead_router_check_interval, "i", int, 0644,
- "Seconds between dead router health checks (<= 0 to disable)");
+module_param(dead_router_check_interval, int, 0644);
+MODULE_PARM_DESC(dead_router_check_interval, "Seconds between dead router health checks (<= 0 to disable)");
static int live_router_check_interval = 60;
-CFS_MODULE_PARM(live_router_check_interval, "i", int, 0644,
- "Seconds between live router health checks (<= 0 to disable)");
+module_param(live_router_check_interval, int, 0644);
+MODULE_PARM_DESC(live_router_check_interval, "Seconds between live router health checks (<= 0 to disable)");
static int router_ping_timeout = 50;
-CFS_MODULE_PARM(router_ping_timeout, "i", int, 0644,
- "Seconds to wait for the reply to a router health query");
+module_param(router_ping_timeout, int, 0644);
+MODULE_PARM_DESC(router_ping_timeout, "Seconds to wait for the reply to a router health query");
int
lnet_peers_start_down(void)
#include "selftest.h"
static int brw_srv_workitems = SFW_TEST_WI_MAX;
-CFS_MODULE_PARM(brw_srv_workitems, "i", int, 0644, "# BRW server workitems");
+module_param(brw_srv_workitems, int, 0644);
+MODULE_PARM_DESC(brw_srv_workitems, "# BRW server workitems");
static int brw_inject_errors;
-CFS_MODULE_PARM(brw_inject_errors, "i", int, 0644,
- "# data errors to inject randomly, zero by default");
+module_param(brw_inject_errors, int, 0644);
+MODULE_PARM_DESC(brw_inject_errors, "# data errors to inject randomly, zero by default");
static void
brw_client_fini (sfw_test_instance_t *tsi)
lst_sid_t LST_INVALID_SID = {LNET_NID_ANY, -1};
static int session_timeout = 100;
-CFS_MODULE_PARM(session_timeout, "i", int, 0444,
- "test session timeout in seconds (100 by default, 0 == never)");
+module_param(session_timeout, int, 0444);
+MODULE_PARM_DESC(session_timeout, "test session timeout in seconds (100 by default, 0 == never)");
static int rpc_timeout = 64;
-CFS_MODULE_PARM(rpc_timeout, "i", int, 0644,
- "rpc timeout in seconds (64 by default, 0 == never)");
+module_param(rpc_timeout, int, 0644);
+MODULE_PARM_DESC(rpc_timeout, "rpc timeout in seconds (64 by default, 0 == never)");
#define sfw_unpack_id(id) \
do { \
#define LST_PING_TEST_MAGIC 0xbabeface
static int ping_srv_workitems = SFW_TEST_WI_MAX;
-CFS_MODULE_PARM(ping_srv_workitems, "i", int, 0644, "# PING server workitems");
+module_param(ping_srv_workitems, int, 0644);
+MODULE_PARM_DESC(ping_srv_workitems, "# PING server workitems");
typedef struct {
spinlock_t pnd_lock; /* serialize */
#include "ldlm_internal.h"
static int ldlm_num_threads;
-CFS_MODULE_PARM(ldlm_num_threads, "i", int, 0444,
- "number of DLM service threads to start");
+module_param(ldlm_num_threads, int, 0444);
+MODULE_PARM_DESC(ldlm_num_threads, "number of DLM service threads to start");
static char *ldlm_cpts;
-CFS_MODULE_PARM(ldlm_cpts, "s", charp, 0444,
- "CPU partitions ldlm threads should run on");
+module_param(ldlm_cpts, charp, 0444);
+MODULE_PARM_DESC(ldlm_cpts, "CPU partitions ldlm threads should run on");
static struct mutex ldlm_ref_mutex;
static int ldlm_refcount;
#include "ldlm_internal.h"
unsigned int ldlm_enqueue_min = OBD_TIMEOUT_DEFAULT;
-CFS_MODULE_PARM(ldlm_enqueue_min, "i", uint, 0644,
- "lock enqueue timeout minimum");
+module_param(ldlm_enqueue_min, uint, 0644);
+MODULE_PARM_DESC(ldlm_enqueue_min, "lock enqueue timeout minimum");
/* in client side, whether the cached locks will be canceled before replay */
unsigned int ldlm_cancel_unused_locks_before_replay = 1;
OBD_FREE(loop_dev, max_loop * sizeof(*loop_dev));
}
-CFS_MODULE_PARM(max_loop, "i", int, 0444, "maximum of lloop_device");
+module_param(max_loop, int, 0444);
+MODULE_PARM_DESC(max_loop, "maximum of lloop_device");
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre virtual block device");
MODULE_VERSION(LUSTRE_VERSION_STRING);
static unsigned int max_mod_rpcs_per_client = 8;
-CFS_MODULE_PARM(max_mod_rpcs_per_client, "i", uint, 0644,
- "maximum number of modify RPCs in flight allowed per client");
+module_param(max_mod_rpcs_per_client, uint, 0644);
+MODULE_PARM_DESC(max_mod_rpcs_per_client, "maximum number of modify RPCs in flight allowed per client");
mdl_mode_t mdt_mdl_lock_modes[] = {
[LCK_MINMODE] = MDL_MINMODE,
* * Initialized in mdt_mod_init().
* */
static unsigned long mdt_num_threads;
-CFS_MODULE_PARM(mdt_num_threads, "ul", ulong, 0444,
- "number of MDS service threads to start "
- "(deprecated in favor of mds_num_threads)");
+module_param(mdt_num_threads, ulong, 0444);
+MODULE_PARM_DESC(mdt_num_threads, "number of MDS service threads to start (deprecated in favor of mds_num_threads)");
static unsigned long mds_num_threads;
-CFS_MODULE_PARM(mds_num_threads, "ul", ulong, 0444,
- "number of MDS service threads to start");
+module_param(mds_num_threads, ulong, 0444);
+MODULE_PARM_DESC(mds_num_threads, "number of MDS service threads to start");
static char *mds_num_cpts;
-CFS_MODULE_PARM(mds_num_cpts, "c", charp, 0444,
- "CPU partitions MDS threads should run on");
+module_param(mds_num_cpts, charp, 0444);
+MODULE_PARM_DESC(mds_num_cpts, "CPU partitions MDS threads should run on");
static unsigned long mds_rdpg_num_threads;
-CFS_MODULE_PARM(mds_rdpg_num_threads, "ul", ulong, 0444,
- "number of MDS readpage service threads to start");
+module_param(mds_rdpg_num_threads, ulong, 0444);
+MODULE_PARM_DESC(mds_rdpg_num_threads, "number of MDS readpage service threads to start");
static char *mds_rdpg_num_cpts;
-CFS_MODULE_PARM(mds_rdpg_num_cpts, "c", charp, 0444,
- "CPU partitions MDS readpage threads should run on");
+module_param(mds_rdpg_num_cpts, charp, 0444);
+MODULE_PARM_DESC(mds_rdpg_num_cpts, "CPU partitions MDS readpage threads should run on");
/* NB: these two should be removed along with setattr service in the future */
static unsigned long mds_attr_num_threads;
-CFS_MODULE_PARM(mds_attr_num_threads, "ul", ulong, 0444,
- "number of MDS setattr service threads to start");
+module_param(mds_attr_num_threads, ulong, 0444);
+MODULE_PARM_DESC(mds_attr_num_threads, "number of MDS setattr service threads to start");
static char *mds_attr_num_cpts;
-CFS_MODULE_PARM(mds_attr_num_cpts, "c", charp, 0444,
- "CPU partitions MDS setattr threads should run on");
+module_param(mds_attr_num_cpts, charp, 0444);
+MODULE_PARM_DESC(mds_attr_num_cpts, "CPU partitions MDS setattr threads should run on");
/* device init/fini methods */
static void mds_stop_ptlrpc_service(struct mds_device *m)
#ifdef CONFIG_PROC_FS
static int lprocfs_no_percpu_stats = 0;
-CFS_MODULE_PARM(lprocfs_no_percpu_stats, "i", int, 0644,
- "Do not alloc percpu data for lprocfs stats");
+module_param(lprocfs_no_percpu_stats, int, 0644);
+MODULE_PARM_DESC(lprocfs_no_percpu_stats, "Do not alloc percpu data for lprocfs stats");
#define MAX_STRING_SIZE 128
static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
-CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644,
- "Percentage of memory to be used as lu_object cache");
+module_param(lu_cache_percent, uint, 0644);
+MODULE_PARM_DESC(lu_cache_percent, "Percentage of memory to be used as lu_object cache");
static long lu_cache_nr = LU_CACHE_NR_DEFAULT;
-CFS_MODULE_PARM(lu_cache_nr, "l", long, 0644,
- "Maximum number of objects in lu_object cache");
+module_param(lu_cache_nr, long, 0644);
+MODULE_PARM_DESC(lu_cache_nr, "Maximum number of objects in lu_object cache");
static void lu_object_free(const struct lu_env *env, struct lu_object *o);
static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx);
#include <lustre_linkea.h>
int ldiskfs_pdo = 1;
-CFS_MODULE_PARM(ldiskfs_pdo, "i", int, 0644,
- "ldiskfs with parallel directory operations");
+module_param(ldiskfs_pdo, int, 0644);
+MODULE_PARM_DESC(ldiskfs_pdo, "ldiskfs with parallel directory operations");
int ldiskfs_track_declares_assert;
-CFS_MODULE_PARM(ldiskfs_track_declares_assert, "i", int, 0644,
- "LBUG during tracking of declares");
+module_param(ldiskfs_track_declares_assert, int, 0644);
+MODULE_PARM_DESC(ldiskfs_track_declares_assert, "LBUG during tracking of declares");
/* Slab to allocate dynlocks */
struct kmem_cache *dynlock_cachep;
#include "osd_scrub.h"
static unsigned int osd_oi_count = OSD_OI_FID_NR;
-CFS_MODULE_PARM(osd_oi_count, "i", int, 0444,
- "Number of Object Index containers to be created, "
- "it's only valid for new filesystem.");
+module_param(osd_oi_count, uint, 0444);
+MODULE_PARM_DESC(osd_oi_count, "Number of Object Index containers to be created, it's only valid for new filesystem.");
/** to serialize concurrent OI index initialization */
static struct mutex oi_init_lock;
}
extern unsigned int osd_oi_count;
-CFS_MODULE_PARM(osd_oi_count, "i", int, 0444,
- "Number of Object Index containers to be created, "
- "it's only valid for new filesystem.");
+module_param(osd_oi_count, uint, 0444);
+MODULE_PARM_DESC(osd_oi_count, "Number of Object Index containers to be created, it's only valid for new filesystem.");
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Object Storage Device ("LUSTRE_OSD_ZFS_NAME")");
/* Default to max data size covered by a level-1 indirect block */
static unsigned long osd_sync_destroy_max_size =
1UL << (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT + SPA_MAXBLOCKSHIFT);
-CFS_MODULE_PARM(osd_sync_destroy_max_size, "ul", ulong, 0444,
- "Maximum object size to use synchronous destroy.");
+module_param(osd_sync_destroy_max_size, ulong, 0444);
+MODULE_PARM_DESC(osd_sync_destroy_max_size, "Maximum object size to use synchronous destroy.");
static inline void
osd_object_set_destroy_type(struct osd_object *obj)
#include "ost_internal.h"
int oss_max_threads = 512;
-CFS_MODULE_PARM(oss_max_threads, "i", int, 0444,
- "maximum number of OSS service threads");
+module_param(oss_max_threads, int, 0444);
+MODULE_PARM_DESC(oss_max_threads, "maximum number of OSS service threads");
static int oss_num_threads;
-CFS_MODULE_PARM(oss_num_threads, "i", int, 0444,
- "number of OSS service threads to start");
+module_param(oss_num_threads, int, 0444);
+MODULE_PARM_DESC(oss_num_threads, "number of OSS service threads to start");
static int ost_num_threads;
-CFS_MODULE_PARM(ost_num_threads, "i", int, 0444,
- "number of OST service threads to start (deprecated)");
+module_param(ost_num_threads, int, 0444);
+MODULE_PARM_DESC(ost_num_threads, "number of OST service threads to start (deprecated)");
static int oss_num_create_threads;
-CFS_MODULE_PARM(oss_num_create_threads, "i", int, 0444,
- "number of OSS create threads to start");
+module_param(oss_num_create_threads, int, 0444);
+MODULE_PARM_DESC(oss_num_create_threads, "number of OSS create threads to start");
static char *oss_cpts;
-CFS_MODULE_PARM(oss_cpts, "s", charp, 0444,
- "CPU partitions OSS threads should run on");
+module_param(oss_cpts, charp, 0444);
+MODULE_PARM_DESC(oss_cpts, "CPU partitions OSS threads should run on");
static char *oss_io_cpts;
-CFS_MODULE_PARM(oss_io_cpts, "s", charp, 0444,
- "CPU partitions OSS IO threads should run on");
+module_param(oss_io_cpts, charp, 0444);
+MODULE_PARM_DESC(oss_io_cpts, "CPU partitions OSS IO threads should run on");
#define OST_WATCHDOG_TIMEOUT (obd_timeout * 1000)
#define NRS_POL_NAME_TBF "tbf"
static int tbf_jobid_cache_size = 8192;
-CFS_MODULE_PARM(tbf_jobid_cache_size, "i", int, 0644,
- "The size of jobid cache");
+module_param(tbf_jobid_cache_size, int, 0644);
+MODULE_PARM_DESC(tbf_jobid_cache_size, "The size of jobid cache");
static int tbf_rate = 10000;
-CFS_MODULE_PARM(tbf_rate, "i", int, 0644,
- "Default rate limit in RPCs/s");
+module_param(tbf_rate, int, 0644);
+MODULE_PARM_DESC(tbf_rate, "Default rate limit in RPCs/s");
static int tbf_depth = 3;
-CFS_MODULE_PARM(tbf_depth, "i", int, 0644,
- "How many tokens that a client can save up");
+module_param(tbf_depth, int, 0644);
+MODULE_PARM_DESC(tbf_depth, "How many tokens that a client can save up");
static enum hrtimer_restart nrs_tbf_timer_cb(struct hrtimer *timer)
{
#include "ptlrpc_internal.h"
static int suppress_pings;
-CFS_MODULE_PARM(suppress_pings, "i", int, 0644, "Suppress pings");
+module_param(suppress_pings, int, 0644);
+MODULE_PARM_DESC(suppress_pings, "Suppress pings");
struct mutex pinger_mutex;
static struct list_head pinger_imports =
* is used to derive a setting for ptlrpcd_per_cpt_max.
*/
static int max_ptlrpcds;
-CFS_MODULE_PARM(max_ptlrpcds, "i", int, 0644,
- "Max ptlrpcd thread count to be started.");
+module_param(max_ptlrpcds, int, 0644);
+MODULE_PARM_DESC(max_ptlrpcds, "Max ptlrpcd thread count to be started.");
/*
* ptlrpcd_bind_policy is obsolete, but retained to ensure that
* is used to derive a setting for ptlrpcd_partner_group_size.
*/
static int ptlrpcd_bind_policy;
-CFS_MODULE_PARM(ptlrpcd_bind_policy, "i", int, 0644,
- "Ptlrpcd threads binding mode (obsolete).");
+module_param(ptlrpcd_bind_policy, int, 0644);
+MODULE_PARM_DESC(ptlrpcd_bind_policy,
+ "Ptlrpcd threads binding mode (obsolete).");
/*
* ptlrpcd_per_cpt_max: The maximum number of ptlrpcd threads to run
* in a CPT.
*/
static int ptlrpcd_per_cpt_max;
-CFS_MODULE_PARM(ptlrpcd_per_cpt_max, "i", int, 0644,
- "Max ptlrpcd thread count to be started per cpt.");
+module_param(ptlrpcd_per_cpt_max, int, 0644);
+MODULE_PARM_DESC(ptlrpcd_per_cpt_max,
+ "Max ptlrpcd thread count to be started per cpt.");
/*
* ptlrpcd_partner_group_size: The desired number of threads in each
* a CPT partners of each other.
*/
static int ptlrpcd_partner_group_size;
-CFS_MODULE_PARM(ptlrpcd_partner_group_size, "i", int, 0644,
- "Number of ptlrpcd threads in a partner group.");
+module_param(ptlrpcd_partner_group_size, int, 0644);
+MODULE_PARM_DESC(ptlrpcd_partner_group_size,
+ "Number of ptlrpcd threads in a partner group.");
/*
* ptlrpcd_cpts: A CPT string describing the CPU partitions that
* run ptlrpcd threads on CPTS 0, 1, 2, 3, 5, and 7.
*/
static char *ptlrpcd_cpts;
-CFS_MODULE_PARM(ptlrpcd_cpts, "s", charp, 0644,
- "CPU partitions ptlrpcd threads should run in");
+module_param(ptlrpcd_cpts, charp, 0644);
+MODULE_PARM_DESC(ptlrpcd_cpts,
+ "CPU partitions ptlrpcd threads should run in");
/* ptlrpcds_cpt_idx maps cpt numbers to an index in the ptlrpcds array. */
static int *ptlrpcds_cpt_idx;
static int mult = 20 - PAGE_CACHE_SHIFT;
static int enc_pool_max_memory_mb;
-CFS_MODULE_PARM(enc_pool_max_memory_mb, "i", int, 0644,
- "Encoding pool max memory (MB), 1/8 of total physical memory by default");
-
+module_param(enc_pool_max_memory_mb, int, 0644);
+MODULE_PARM_DESC(enc_pool_max_memory_mb,
+ "Encoding pool max memory (MB), 1/8 of total physical memory by default");
/****************************************
* bulk encryption page pools *
/* The following are visible and mutable through /sys/module/ptlrpc */
int test_req_buffer_pressure = 0;
-CFS_MODULE_PARM(test_req_buffer_pressure, "i", int, 0444,
- "set non-zero to put pressure on request buffer pools");
-CFS_MODULE_PARM(at_min, "i", int, 0644,
- "Adaptive timeout minimum (sec)");
-CFS_MODULE_PARM(at_max, "i", int, 0644,
- "Adaptive timeout maximum (sec)");
-CFS_MODULE_PARM(at_history, "i", int, 0644,
- "Adaptive timeouts remember the slowest event that took place "
- "within this period (sec)");
-CFS_MODULE_PARM(at_early_margin, "i", int, 0644,
- "How soon before an RPC deadline to send an early reply");
-CFS_MODULE_PARM(at_extra, "i", int, 0644,
- "How much extra time to give with each early reply");
-
+module_param(test_req_buffer_pressure, int, 0444);
+MODULE_PARM_DESC(test_req_buffer_pressure, "set non-zero to put pressure on request buffer pools");
+module_param(at_min, int, 0644);
+MODULE_PARM_DESC(at_min, "Adaptive timeout minimum (sec)");
+module_param(at_max, int, 0644);
+MODULE_PARM_DESC(at_max, "Adaptive timeout maximum (sec)");
+module_param(at_history, int, 0644);
+MODULE_PARM_DESC(at_history,
+ "Adaptive timeouts remember the slowest event that took place within this period (sec)");
+module_param(at_early_margin, int, 0644);
+MODULE_PARM_DESC(at_early_margin, "How soon before an RPC deadline to send an early reply");
+module_param(at_extra, int, 0644);
+MODULE_PARM_DESC(at_extra, "How much extra time to give with each early reply");
/* forward ref */
static int ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt);
#include "lquota_internal.h"
static int hash_lqs_cur_bits = HASH_LQE_CUR_BITS;
-CFS_MODULE_PARM(hash_lqs_cur_bits, "i", int, 0444,
- "the current bits of lqe hash");
+module_param(hash_lqs_cur_bits, int, 0444);
+MODULE_PARM_DESC(hash_lqs_cur_bits, "the current bits of lqe hash");
static unsigned
lqe64_hash_hash(struct cfs_hash *hs, const void *key, unsigned mask)