PAGE_CACHE_SHIFT||PAGE_SHIFT
PAGE_CACHE_SIZE||PAGE_SIZE
prandom_u32||get_random_u32
+prandom_u32_max||get_random_u32_below
return seq_printf||seq_printf
setup_timer||cfs_timer_setup
= seq_printf||seq_printf
/* Fail 1/cfs_fail_val times */
if (cfs_fail_loc & CFS_FAIL_RAND) {
- if (cfs_fail_val < 2 || prandom_u32_max(cfs_fail_val) > 0)
+ if (cfs_fail_val < 2 || get_random_u32_below(cfs_fail_val) > 0)
return 0;
}
if (attr->u.drop.da_interval != 0) {
rule->dr_time_base = ktime_get_seconds() + attr->u.drop.da_interval;
rule->dr_drop_time = ktime_get_seconds() +
- prandom_u32_max(attr->u.drop.da_interval);
+ get_random_u32_below(attr->u.drop.da_interval);
} else {
- rule->dr_drop_at = prandom_u32_max(attr->u.drop.da_rate);
+ rule->dr_drop_at = get_random_u32_below(attr->u.drop.da_rate);
}
lnet_net_lock(LNET_LOCK_EX);
memset(&rule->dr_stat, 0, sizeof(rule->dr_stat));
if (attr->u.drop.da_rate != 0) {
- rule->dr_drop_at = prandom_u32_max(attr->u.drop.da_rate);
+ rule->dr_drop_at = get_random_u32_below(attr->u.drop.da_rate);
} else {
rule->dr_drop_time = ktime_get_seconds() +
- prandom_u32_max(attr->u.drop.da_interval);
+ get_random_u32_below(attr->u.drop.da_interval);
rule->dr_time_base = ktime_get_seconds() + attr->u.drop.da_interval;
}
spin_unlock(&rule->dr_lock);
int i;
/* assign a random failure */
- choice = prandom_u32_max(LNET_MSG_STATUS_END - LNET_MSG_STATUS_OK);
+ choice = get_random_u32_below(LNET_MSG_STATUS_END - LNET_MSG_STATUS_OK);
if (choice == 0)
choice++;
/* match this rule, check drop rate now */
spin_lock(&rule->dr_lock);
if (attr->u.drop.da_random) {
- int value = prandom_u32_max(attr->u.drop.da_interval);
+ int value = get_random_u32_below(attr->u.drop.da_interval);
if (value >= (attr->u.drop.da_interval / 2))
drop = true;
else
rule->dr_time_base = now;
rule->dr_drop_time = rule->dr_time_base +
- prandom_u32_max(attr->u.drop.da_interval);
+ get_random_u32_below(attr->u.drop.da_interval);
rule->dr_time_base += attr->u.drop.da_interval;
CDEBUG(D_NET, "Drop Rule %s->%s: next drop : %lld\n",
count = rule->dr_stat.fs_count;
if (do_div(count, attr->u.drop.da_rate) == 0) {
rule->dr_drop_at = rule->dr_stat.fs_count +
- prandom_u32_max(attr->u.drop.da_rate);
+ get_random_u32_below(attr->u.drop.da_rate);
CDEBUG(D_NET, "Drop Rule %s->%s: next drop: %lu\n",
libcfs_nid2str(attr->fa_src),
libcfs_nid2str(attr->fa_dst), rule->dr_drop_at);
rule->dl_time_base = now;
rule->dl_delay_time = rule->dl_time_base +
- prandom_u32_max(attr->u.delay.la_interval);
+ get_random_u32_below(attr->u.delay.la_interval);
rule->dl_time_base += attr->u.delay.la_interval;
CDEBUG(D_NET, "Delay Rule %s->%s: next delay : %lld\n",
count = rule->dl_stat.fs_count;
if (do_div(count, attr->u.delay.la_rate) == 0) {
rule->dl_delay_at = rule->dl_stat.fs_count +
- prandom_u32_max(attr->u.delay.la_rate);
+ get_random_u32_below(attr->u.delay.la_rate);
CDEBUG(D_NET, "Delay Rule %s->%s: next delay: %lu\n",
libcfs_nid2str(attr->fa_src),
libcfs_nid2str(attr->fa_dst), rule->dl_delay_at);
rule->dl_time_base = ktime_get_seconds() +
attr->u.delay.la_interval;
rule->dl_delay_time = ktime_get_seconds() +
- prandom_u32_max(attr->u.delay.la_interval);
+ get_random_u32_below(attr->u.delay.la_interval);
} else {
- rule->dl_delay_at = prandom_u32_max(attr->u.delay.la_rate);
+ rule->dl_delay_at = get_random_u32_below(attr->u.delay.la_rate);
}
rule->dl_msg_send = -1;
memset(&rule->dl_stat, 0, sizeof(rule->dl_stat));
if (attr->u.delay.la_rate != 0) {
- rule->dl_delay_at = prandom_u32_max(attr->u.delay.la_rate);
+ rule->dl_delay_at = get_random_u32_below(attr->u.delay.la_rate);
} else {
rule->dl_delay_time = ktime_get_seconds() +
- prandom_u32_max(attr->u.delay.la_interval);
+ get_random_u32_below(attr->u.delay.la_interval);
rule->dl_time_base = ktime_get_seconds() +
attr->u.delay.la_interval;
}
* different nodes are using the same list of routers, they end up
* preferring different routers.
*/
- offset = prandom_u32_max(len + 1);
+ offset = get_random_u32_below(len + 1);
list_for_each(e, &rnet->lrn_routes) {
if (offset == 0)
break;
]) # LC_HAVE_FILEMAP_GET_FOLIOS_CONTIG
#
+# LC_HAVE_GET_RANDOM_U32_BELOW
+#
+# Linux commit v6.1-13825-g3c202d14a9d7
+# prandom: remove prandom_u32_max()
+#
+AC_DEFUN([LC_SRC_HAVE_GET_RANDOM_U32_BELOW], [
+ LB2_LINUX_TEST_SRC([get_random_u32_below], [
+ #include <linux/random.h>
+ ],[
+ u32 rand32 = get_random_u32_below(99);
+ (void)rand32;
+ ],[-Werror])
+])
+AC_DEFUN([LC_HAVE_GET_RANDOM_U32_BELOW], [
+	AC_MSG_CHECKING([if get_random_u32_below() is available])
+ LB2_LINUX_TEST_RESULT([get_random_u32_below], [
+ AC_DEFINE(HAVE_GET_RANDOM_U32_BELOW, 1,
+ [get_random_u32_below() is available])
+ ],[
+ AC_DEFINE([get_random_u32_below(v)], [prandom_u32_max(v)],
+ [get_random_u32_below() is not available])
+ ])
+]) # LC_HAVE_GET_RANDOM_U32_BELOW
+
+#
+# LC_HAVE_ACL_WITH_DENTRY
+#
+# Linux commit v6.1-rc1-2-g138060ba92b3
+# fs: pass dentry to set acl method
+# Linux commit v6.1-rc1-4-g7420332a6ff4
+# fs: add new get acl method
+#
+AC_DEFUN([LC_SRC_HAVE_ACL_WITH_DENTRY], [
+ LB2_LINUX_TEST_SRC([acl_with_dentry], [
+ #include <linux/fs.h>
+ ],[
+ struct user_namespace *ns = NULL;
+ struct dentry *dentry = NULL;
+
+ ((struct inode_operations *)1)->get_acl(ns, dentry, 0);
+ (void)ns; (void)dentry;
+ ],[-Werror])
+])
+AC_DEFUN([LC_HAVE_ACL_WITH_DENTRY], [
+ AC_MSG_CHECKING([if 'get_acl' and 'set_acl' use dentry argument])
+ LB2_LINUX_TEST_RESULT([acl_with_dentry], [
+ AC_DEFINE(HAVE_ACL_WITH_DENTRY, 1,
+ ['get_acl' and 'set_acl' use dentry argument])
+ ])
+]) # LC_HAVE_ACL_WITH_DENTRY
+
+#
# LC_PROG_LINUX
#
# Lustre linux kernel checks
LC_SRC_NFS_FILLDIR_USE_CTX_RETURN_BOOL
LC_SRC_HAVE_FILEMAP_GET_FOLIOS_CONTIG
+ # 6.2
+ LC_SRC_HAVE_GET_RANDOM_U32_BELOW
+ LC_SRC_HAVE_ACL_WITH_DENTRY
+
# kernel patch to extend integrity interface
LC_SRC_BIO_INTEGRITY_PREP_FN
])
LC_NFS_FILLDIR_USE_CTX_RETURN_BOOL
LC_HAVE_FILEMAP_GET_FOLIOS_CONTIG
+ # 6.2
+ LC_HAVE_GET_RANDOM_U32_BELOW
+ LC_HAVE_ACL_WITH_DENTRY
+
# kernel patch to extend integrity interface
LC_BIO_INTEGRITY_PREP_FN
])
#include "llite_internal.h"
-struct posix_acl *ll_get_acl(struct inode *inode, int type
-#ifdef HAVE_GET_ACL_RCU_ARG
- , bool rcu
-#endif /* HAVE_GET_ACL_RCU_ARG */
- )
+struct posix_acl *ll_get_acl(
+ #ifdef HAVE_ACL_WITH_DENTRY
+ struct user_namespace *ns, struct dentry *dentry, int type)
+ #elif defined HAVE_GET_ACL_RCU_ARG
+ struct inode *inode, int type, bool rcu)
+ #else
+ struct inode *inode, int type)
+ #endif /* HAVE_GET_ACL_RCU_ARG */
{
+#ifdef HAVE_ACL_WITH_DENTRY
+ struct inode *inode = dentry->d_inode;
+#endif
struct ll_inode_info *lli = ll_i2info(inode);
struct posix_acl *acl = NULL;
ENTRY;
}
#ifdef HAVE_IOP_SET_ACL
-int ll_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+int ll_set_acl(struct user_namespace *mnt_userns,
+#ifdef HAVE_ACL_WITH_DENTRY
+ struct dentry *dentry,
+#else
+ struct inode *inode,
+#endif
struct posix_acl *acl, int type)
{
+#ifdef HAVE_ACL_WITH_DENTRY
+ struct inode *inode = dentry->d_inode;
+#endif
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ptlrpc_request *req = NULL;
const char *name = NULL;
int ll_getattr_dentry(struct dentry *de, struct kstat *stat, u32 request_mask,
unsigned int flags, bool foreign);
#ifdef CONFIG_LUSTRE_FS_POSIX_ACL
-struct posix_acl *ll_get_acl(struct inode *inode, int type
-#ifdef HAVE_GET_ACL_RCU_ARG
- , bool rcu
-#endif /* HAVE_GET_ACL_RCU_ARG */
- );
-int ll_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+struct posix_acl *ll_get_acl(
+ #ifdef HAVE_ACL_WITH_DENTRY
+	struct user_namespace *mnt_userns, struct dentry *dentry, int type);
+ #elif defined HAVE_GET_ACL_RCU_ARG
+ struct inode *inode, int type, bool rcu);
+ #else
+ struct inode *inode, int type);
+ #endif /* HAVE_GET_ACL_RCU_ARG */
+
+int ll_set_acl(struct user_namespace *mnt_userns,
+ #ifdef HAVE_ACL_WITH_DENTRY
+ struct dentry *dentry,
+ #else
+ struct inode *inode,
+ #endif
struct posix_acl *acl, int type);
#else /* !CONFIG_LUSTRE_FS_POSIX_ACL */
#define ll_get_acl NULL
spin_lock(&lqr->lqr_alloc);
if (--lqr->lqr_start_count <= 0) {
atomic_set(&lqr->lqr_start_idx,
- prandom_u32_max(osts->op_count));
+ get_random_u32_below(osts->op_count));
lqr->lqr_start_count =
(LOV_CREATE_RESEED_MIN / max(osts->op_count, 1U) +
LOV_CREATE_RESEED_MULT) * max(osts->op_count, 1U);
spin_lock(&lqr->lqr_alloc);
if (--lqr->lqr_start_count <= 0) {
atomic_set(&lqr->lqr_start_idx,
- prandom_u32_max(pool->op_count));
+ get_random_u32_below(pool->op_count));
lqr->lqr_start_count =
(LOV_CREATE_RESEED_MIN / max(pool->op_count, 1U) +
LOV_CREATE_RESEED_MULT) * max(pool->op_count, 1U);
*/
to = mgc_requeue_timeout_min == 0 ? 1 : mgc_requeue_timeout_min;
to = cfs_time_seconds(mgc_requeue_timeout_min) +
- prandom_u32_max(cfs_time_seconds(to));
+ get_random_u32_below(cfs_time_seconds(to));
wait_event_idle_timeout(rq_waitq,
rq_state & (RQ_STOP | RQ_PRECLEANUP), to);
* 32 bits (truncated to the upper limit, if needed)
*/
if (ep_ro > 0xffffffffULL)
- rand = (u64)prandom_u32_max((u32)(ep_ro >> 32)) << 32;
+ rand = (u64)get_random_u32_below((u32)(ep_ro >> 32)) << 32;
if (rand == (ep_ro & 0xffffffff00000000ULL))
- rand |= prandom_u32_max((u32)ep_ro);
+ rand |= get_random_u32_below((u32)ep_ro);
else
rand |= get_random_u32();
#else
if (delay_data->delay_pct == 0 || /* Not delaying anything */
(delay_data->delay_pct != 100 &&
- delay_data->delay_pct < prandom_u32_max(100)))
+ delay_data->delay_pct < get_random_u32_below(100)))
return 1;
nrq->nr_u.delay.req_start_time = ktime_get_real_seconds() +
- prandom_u32_max(delay_data->max_delay - delay_data->min_delay + 1) +
+ get_random_u32_below(delay_data->max_delay - delay_data->min_delay + 1) +
delay_data->min_delay;
return binheap_insert(delay_data->delay_binheap, &nrq->nr_node);