CFS_PAGE_SIZE||PAGE_SIZE
cfs_proc_dir_entry_t||struct proc_dir_entry
cfs_rcu_head_t||struct rcu_head
+cfs_rand||prandom_u32
+cfs_srand||add_device_randomness
cfs_trimwhite||strim
cfs_time_add_64||ktime_add
cfs_time_after||time_after
* Lustre is a trademark of Oracle Corporation, Inc.
*/
+#include <linux/random.h>
#include <libcfs/libcfs.h>
unsigned long cfs_fail_loc = 0;
/* Fail 1/cfs_fail_val times */
if (cfs_fail_loc & CFS_FAIL_RAND) {
- if (cfs_fail_val < 2 || cfs_rand() % cfs_fail_val > 0)
+ if (cfs_fail_val < 2 || prandom_u32_max(cfs_fail_val) > 0)
return 0;
}
#define DEBUG_SUBSYSTEM S_LNET
+#include <linux/random.h>
#include <lnet/lib-lnet.h>
#include <uapi/linux/lnet/lnetctl.h>
if (attr->u.drop.da_interval != 0) {
rule->dr_time_base = ktime_get_seconds() + attr->u.drop.da_interval;
rule->dr_drop_time = ktime_get_seconds() +
- cfs_rand() % attr->u.drop.da_interval;
+ prandom_u32_max(attr->u.drop.da_interval);
} else {
- rule->dr_drop_at = cfs_rand() % attr->u.drop.da_rate;
+ rule->dr_drop_at = prandom_u32_max(attr->u.drop.da_rate);
}
lnet_net_lock(LNET_LOCK_EX);
memset(&rule->dr_stat, 0, sizeof(rule->dr_stat));
if (attr->u.drop.da_rate != 0) {
- rule->dr_drop_at = cfs_rand() % attr->u.drop.da_rate;
+ rule->dr_drop_at = prandom_u32_max(attr->u.drop.da_rate);
} else {
rule->dr_drop_time = ktime_get_seconds() +
- cfs_rand() % attr->u.drop.da_interval;
+ prandom_u32_max(attr->u.drop.da_interval);
rule->dr_time_base = ktime_get_seconds() + attr->u.drop.da_interval;
}
spin_unlock(&rule->dr_lock);
static void
lnet_fault_match_health(enum lnet_msg_hstatus *hstatus, __u32 mask)
{
- unsigned int random;
int choice;
int delta;
int best_delta;
int i;
/* assign a random failure */
- random = cfs_rand();
- choice = random % (LNET_MSG_STATUS_END - LNET_MSG_STATUS_OK);
+ choice = prandom_u32_max(LNET_MSG_STATUS_END - LNET_MSG_STATUS_OK);
if (choice == 0)
choice++;
/* match this rule, check drop rate now */
spin_lock(&rule->dr_lock);
if (attr->u.drop.da_random) {
- int value = cfs_rand() % attr->u.drop.da_interval;
+ int value = prandom_u32_max(attr->u.drop.da_interval);
if (value >= (attr->u.drop.da_interval / 2))
drop = true;
else
rule->dr_time_base = now;
rule->dr_drop_time = rule->dr_time_base +
- cfs_rand() % attr->u.drop.da_interval;
+ prandom_u32_max(attr->u.drop.da_interval);
rule->dr_time_base += attr->u.drop.da_interval;
CDEBUG(D_NET, "Drop Rule %s->%s: next drop : %lld\n",
count = rule->dr_stat.fs_count;
if (do_div(count, attr->u.drop.da_rate) == 0) {
rule->dr_drop_at = rule->dr_stat.fs_count +
- cfs_rand() % attr->u.drop.da_rate;
+ prandom_u32_max(attr->u.drop.da_rate);
CDEBUG(D_NET, "Drop Rule %s->%s: next drop: %lu\n",
libcfs_nid2str(attr->fa_src),
libcfs_nid2str(attr->fa_dst), rule->dr_drop_at);
rule->dl_time_base = now;
rule->dl_delay_time = rule->dl_time_base +
- cfs_rand() % attr->u.delay.la_interval;
+ prandom_u32_max(attr->u.delay.la_interval);
rule->dl_time_base += attr->u.delay.la_interval;
CDEBUG(D_NET, "Delay Rule %s->%s: next delay : %lld\n",
count = rule->dl_stat.fs_count;
if (do_div(count, attr->u.delay.la_rate) == 0) {
rule->dl_delay_at = rule->dl_stat.fs_count +
- cfs_rand() % attr->u.delay.la_rate;
+ prandom_u32_max(attr->u.delay.la_rate);
CDEBUG(D_NET, "Delay Rule %s->%s: next delay: %lu\n",
libcfs_nid2str(attr->fa_src),
libcfs_nid2str(attr->fa_dst), rule->dl_delay_at);
rule->dl_time_base = ktime_get_seconds() +
attr->u.delay.la_interval;
rule->dl_delay_time = ktime_get_seconds() +
- cfs_rand() % attr->u.delay.la_interval;
+ prandom_u32_max(attr->u.delay.la_interval);
} else {
- rule->dl_delay_at = cfs_rand() % attr->u.delay.la_rate;
+ rule->dl_delay_at = prandom_u32_max(attr->u.delay.la_rate);
}
rule->dl_msg_send = -1;
memset(&rule->dl_stat, 0, sizeof(rule->dl_stat));
if (attr->u.delay.la_rate != 0) {
- rule->dl_delay_at = cfs_rand() % attr->u.delay.la_rate;
+ rule->dl_delay_at = prandom_u32_max(attr->u.delay.la_rate);
} else {
rule->dl_delay_time = ktime_get_seconds() +
- cfs_rand() % attr->u.delay.la_interval;
+ prandom_u32_max(attr->u.delay.la_interval);
rule->dl_time_base = ktime_get_seconds() +
attr->u.delay.la_interval;
}
* different nodes are using the same list of routers, they end up
* preferring different routers.
*/
- offset = cfs_rand() % (len + 1);
+ offset = prandom_u32_max(len + 1);
list_for_each(e, &rnet->lrn_routes) {
if (offset == 0)
break;
#define DEBUG_SUBSYSTEM S_LMV
#include <asm/div64.h>
+#include <linux/random.h>
+
#include <libcfs/libcfs.h>
#include <uapi/linux/lustre/lustre_idl.h>
#include <lustre_swab.h>
if (total_weight) {
#if BITS_PER_LONG == 32
- rand = cfs_rand() % (unsigned int)total_weight;
+ rand = prandom_u32_max((u32)total_weight);
/*
* If total_weight > 32-bit, first generate the high
* 32 bits of the random number, then add in the low
* 32 bits (truncated to the upper limit, if needed)
*/
if (total_weight > 0xffffffffULL)
- rand = (__u64)(cfs_rand() %
- (unsigned int)(total_weight >> 32)) << 32;
+ rand = (u64)prandom_u32_max((u32)(total_weight >> 32)) << 32;
else
rand = 0;
if (rand == (total_weight & 0xffffffff00000000ULL))
- rand |= cfs_rand() % (unsigned int)total_weight;
+ rand |= prandom_u32_max((u32)total_weight);
else
- rand |= cfs_rand();
+ rand |= prandom_u32();
#else
- rand = ((__u64)cfs_rand() << 32 | cfs_rand()) % total_weight;
+ rand = ((u64)prandom_u32() << 32 | prandom_u32()) % total_weight;
#endif
} else {
rand = 0;
#define DEBUG_SUBSYSTEM S_LOV
#include <asm/div64.h>
+#include <linux/random.h>
+
#include <libcfs/libcfs.h>
#include <uapi/linux/lustre/lustre_idl.h>
#include <lustre_swab.h>
down_read(&m->lod_qos.lq_rw_sem);
spin_lock(&lqr->lqr_alloc);
if (--lqr->lqr_start_count <= 0) {
- lqr->lqr_start_idx = cfs_rand() % osts->op_count;
+ lqr->lqr_start_idx = prandom_u32_max(osts->op_count);
lqr->lqr_start_count =
(LOV_CREATE_RESEED_MIN / max(osts->op_count, 1U) +
LOV_CREATE_RESEED_MULT) * max(osts->op_count, 1U);
/* Find enough OSTs with weighted random allocation. */
nfound = 0;
while (nfound < stripe_count) {
- __u64 rand, cur_weight;
+ u64 rand, cur_weight;
cur_weight = 0;
rc = -ENOSPC;
if (total_weight) {
#if BITS_PER_LONG == 32
- rand = cfs_rand() % (unsigned)total_weight;
+ rand = prandom_u32_max((u32)total_weight);
/* If total_weight > 32-bit, first generate the high
* 32 bits of the random number, then add in the low
- * 32 bits (truncated to the upper limit, if needed) */
+ * 32 bits (truncated to the upper limit, if needed)
+ */
if (total_weight > 0xffffffffULL)
- rand = (__u64)(cfs_rand() %
- (unsigned)(total_weight >> 32)) << 32;
+ rand = (u64)prandom_u32_max((u32)(total_weight >> 32)) << 32;
else
rand = 0;
if (rand == (total_weight & 0xffffffff00000000ULL))
- rand |= cfs_rand() % (unsigned)total_weight;
+ rand |= prandom_u32_max((u32)total_weight);
else
- rand |= cfs_rand();
+ rand |= prandom_u32();
#else
- rand = ((__u64)cfs_rand() << 32 | cfs_rand()) %
- total_weight;
+ rand = ((u64)prandom_u32() << 32 | prandom_u32()) % total_weight;
#endif
} else {
rand = 0;
#include <linux/module.h>
#include <linux/kthread.h>
+#include <linux/random.h>
#include <dt_object.h>
#include <lprocfs_status.h>
* in order to not flood the MGS.
*/
#define MGC_TIMEOUT_MIN_SECONDS 5
-#define MGC_TIMEOUT_RAND_CENTISEC 0x1ff /* ~500 */
+#define MGC_TIMEOUT_RAND_CENTISEC 500
static int mgc_requeue_thread(void *data)
{
while (!(rq_state & RQ_STOP)) {
struct l_wait_info lwi;
struct config_llog_data *cld, *cld_prev;
- int rand = cfs_rand() & MGC_TIMEOUT_RAND_CENTISEC;
+ int rand = prandom_u32_max(MGC_TIMEOUT_RAND_CENTISEC);
int to;
/* Any new or requeued lostlocks will change the state */
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/delay.h>
+#include <linux/random.h>
#include <obd_class.h>
#include <lustre_fid.h>
ctxt->loc_dir = o;
llog_ctxt_put(ctxt);
- llog_test_rand = cfs_rand();
+ llog_test_rand = prandom_u32();
rc = llog_run_tests(&env, tgt);
if (rc)
*/
#define DEBUG_SUBSYSTEM S_RPC
+
+#include <linux/random.h>
+
#include <obd_support.h>
#include <obd_class.h>
#include "ptlrpc_internal.h"
if (delay_data->delay_pct == 0 || /* Not delaying anything */
(delay_data->delay_pct != 100 &&
- delay_data->delay_pct < cfs_rand() % 100))
+ delay_data->delay_pct < prandom_u32_max(100)))
return 1;
- nrq->nr_u.delay.req_start_time = ktime_get_real_seconds() + cfs_rand() %
- (delay_data->max_delay -
- delay_data->min_delay + 1) +
+ nrq->nr_u.delay.req_start_time = ktime_get_real_seconds() +
+ prandom_u32_max(delay_data->max_delay - delay_data->min_delay + 1) +
delay_data->min_delay;
return cfs_binheap_insert(delay_data->delay_binheap, &nrq->nr_node);