(parent: 74d0cf9)
LU-5620 ptlrpc: Add QoS for opcode in NRS-TBF
18/11918/37
author     Qian Yingjin <qian@ddn.com>           Mon, 6 Jun 2016 08:40:35 +0000 (16:40 +0800)
committer  Oleg Drokin <oleg.drokin@intel.com>   Fri, 10 Feb 2017 03:51:07 +0000 (03:51 +0000)
This patch adds a new QoS feature to the TBF policy that can
limit the RPC rate based on opcode.
The syntax is:
lctl set_param x.x.x.nrs_tbf_rule=
"[reg|hp] start <rule_name> <arguments>..."
Start the TBF opcode QoS policy:
lctl set_param ost.OSS.ost_io.nrs_policies="tbf opcode"
Limit ost_read and ost_write with separate rules:
lctl set_param ost.OSS.ost_io.nrs_tbf_rule=
"start ost_r opcode={ost_read} rate=100"
lctl set_param ost.OSS.ost_io.nrs_tbf_rule=
"start ost_w opcode={ost_write} rate=200"
Limit both ost_read and ost_write:
lctl set_param ost.OSS.ost_io.nrs_tbf_rule=
"start ost_rw opcode={ost_read ost_write} rate=200"
The rate values such as 100 and 200 above are the maximum
number of requests per second.
Note that the opcode-based policy cannot currently be combined
with the NID-based or JobID-based policies.
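For illustration only (a usage sketch reusing the rule names from
the examples above), active rules can be listed and a rule removed
again, as the new test_77j below also does:
lctl get_param ost.OSS.ost_io.nrs_tbf_rule
lctl set_param ost.OSS.ost_io.nrs_tbf_rule="stop ost_rw"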
Test-Parameters: alwaysuploadlogs
Signed-off-by: Wu Libin <lwu@ddn.com>
Signed-off-by: Wang Shilong <wshilong@ddn.com>
Signed-off-by: Qian Yingjin <qian@ddn.com>
Change-Id: I4ff93972df560ad1ebc8e38e942d503518a835c7
Reviewed-on: https://review.whamcloud.com/11918
Tested-by: Jenkins
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Lai Siyao <lai.siyao@intel.com>
Reviewed-by: Emoly Liu <emoly.liu@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
lustre/include/lustre_net.h
lustre/include/lustre_nrs_tbf.h
lustre/ptlrpc/lproc_ptlrpc.c
lustre/ptlrpc/nrs_tbf.c
lustre/tests/sanityn.sh
diff --git a/lustre/include/lustre_net.h b/lustre/include/lustre_net.h
index 08b2251..3c1f595 100644
--- a/lustre/include/lustre_net.h
+++ b/lustre/include/lustre_net.h
@@ -2657,6 +2657,7 @@ void ptlrpcd_decref(void);
* @{
*/
const char* ll_opcode2str(__u32 opcode);
+const int ll_str2opcode(const char *ops);
#ifdef CONFIG_PROC_FS
void ptlrpc_lprocfs_register_obd(struct obd_device *obd);
void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd);
diff --git a/lustre/include/lustre_nrs_tbf.h b/lustre/include/lustre_nrs_tbf.h
index 0e5c929..bac1ccf 100644
--- a/lustre/include/lustre_nrs_tbf.h
+++ b/lustre/include/lustre_nrs_tbf.h
@@ -56,6 +56,8 @@ struct nrs_tbf_client {
lnet_nid_t tc_nid;
/** Jobid of the client. */
char tc_jobid[LUSTRE_JOBID_SIZE];
+ /** opcode of the client. */
+ __u32 tc_opcode;
/** Reference number of the client. */
atomic_t tc_ref;
/** Lock to protect rule and linkage. */
@@ -111,6 +113,10 @@ struct nrs_tbf_rule {
struct list_head tr_jobids;
/** Jobid list string of the rule.*/
char *tr_jobids_str;
+ /** Opcode bitmap of the rule. */
+ struct cfs_bitmap *tr_opcodes;
+ /** Opcode list string of the rule.*/
+ char *tr_opcodes_str;
/** RPC/s limit. */
__u64 tr_rpc_rate;
/** Time to wait for next token. */
@@ -149,10 +155,21 @@ struct nrs_tbf_ops {
#define NRS_TBF_TYPE_JOBID "jobid"
#define NRS_TBF_TYPE_NID "nid"
+#define NRS_TBF_TYPE_OPCODE "opcode"
#define NRS_TBF_TYPE_MAX_LEN 20
-#define NRS_TBF_FLAG_INVALID 0
-#define NRS_TBF_FLAG_JOBID 0x0000001
-#define NRS_TBF_FLAG_NID 0x0000002
+
+enum nrs_tbf_flag {
+ NRS_TBF_FLAG_INVALID = 0x0000000,
+ NRS_TBF_FLAG_JOBID = 0x0000001,
+ NRS_TBF_FLAG_NID = 0x0000002,
+ NRS_TBF_FLAG_OPCODE = 0x0000004,
+};
+
+struct nrs_tbf_type {
+ const char *ntt_name;
+ enum nrs_tbf_flag ntt_flag;
+ struct nrs_tbf_ops *ntt_ops;
+};
struct nrs_tbf_bucket {
/**
@@ -240,6 +257,8 @@ struct nrs_tbf_cmd {
char *ts_nids_str;
struct list_head ts_jobids;
char *ts_jobids_str;
+ struct cfs_bitmap *ts_opcodes;
+ char *ts_opcodes_str;
__u32 ts_valid_type;
__u32 ts_rule_flags;
char *ts_next_name;
diff --git a/lustre/ptlrpc/lproc_ptlrpc.c b/lustre/ptlrpc/lproc_ptlrpc.c
index 4eb84d0..897f5ea 100644
--- a/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/lustre/ptlrpc/lproc_ptlrpc.c
@@ -175,6 +175,19 @@ const char *ll_opcode2str(__u32 opcode)
return ll_rpc_opcode_table[offset].opname;
}
+const int ll_str2opcode(const char *ops)
+{
+ int i;
+
+ for (i = 0; i < LUSTRE_MAX_OPCODES; i++) {
+ if (ll_rpc_opcode_table[i].opname != NULL &&
+ strcmp(ll_rpc_opcode_table[i].opname, ops) == 0)
+ return ll_rpc_opcode_table[i].opcode;
+ }
+
+ return -EINVAL;
+}
+
static const char *ll_eopcode2str(__u32 opcode)
{
LASSERT(ll_eopcode_table[opcode].opcode == opcode);
diff --git a/lustre/ptlrpc/nrs_tbf.c b/lustre/ptlrpc/nrs_tbf.c
index 64e5f97..f3ba8f0 100644
--- a/lustre/ptlrpc/nrs_tbf.c
+++ b/lustre/ptlrpc/nrs_tbf.c
@@ -254,6 +254,7 @@ nrs_tbf_cli_init(struct nrs_tbf_head *head,
{
struct nrs_tbf_rule *rule;
+ memset(cli, 0, sizeof(*cli));
cli->tc_in_heap = false;
head->th_ops->o_cli_init(cli, req);
INIT_LIST_HEAD(&cli->tc_list);
@@ -992,8 +993,8 @@ static struct nrs_tbf_ops nrs_tbf_jobid_ops = {
* This uses ptlrpc_request::rq_peer.nid as its key, in order to hash
* nrs_tbf_client objects.
*/
-#define NRS_TBF_NID_BKT_BITS    8
-#define NRS_TBF_NID_BITS        16
+#define NRS_TBF_NID_BKT_BITS 8
+#define NRS_TBF_NID_BITS 16
static unsigned nrs_tbf_nid_hop_hash(struct cfs_hash *hs, const void *key,
unsigned mask)
@@ -1229,6 +1230,312 @@ static struct nrs_tbf_ops nrs_tbf_nid_ops = {
.o_rule_fini = nrs_tbf_nid_rule_fini,
};
+static void nrs_tbf_opcode_rule_fini(struct nrs_tbf_rule *rule)
+{
+ if (rule->tr_opcodes != NULL)
+ CFS_FREE_BITMAP(rule->tr_opcodes);
+
+ LASSERT(rule->tr_opcodes_str != NULL);
+ OBD_FREE(rule->tr_opcodes_str, strlen(rule->tr_opcodes_str) + 1);
+}
+
+static unsigned nrs_tbf_opcode_hop_hash(struct cfs_hash *hs, const void *key,
+ unsigned mask)
+{
+ return cfs_hash_djb2_hash(key, sizeof(__u32), mask);
+}
+
+static int nrs_tbf_opcode_hop_keycmp(const void *key, struct hlist_node *hnode)
+{
+ const __u32 *opc = key;
+ struct nrs_tbf_client *cli = hlist_entry(hnode,
+ struct nrs_tbf_client,
+ tc_hnode);
+
+ return *opc == cli->tc_opcode;
+}
+
+static void *nrs_tbf_opcode_hop_key(struct hlist_node *hnode)
+{
+ struct nrs_tbf_client *cli = hlist_entry(hnode,
+ struct nrs_tbf_client,
+ tc_hnode);
+
+ return &cli->tc_opcode;
+}
+
+static void *nrs_tbf_opcode_hop_object(struct hlist_node *hnode)
+{
+ return hlist_entry(hnode, struct nrs_tbf_client, tc_hnode);
+}
+
+static void nrs_tbf_opcode_hop_get(struct cfs_hash *hs,
+ struct hlist_node *hnode)
+{
+ struct nrs_tbf_client *cli = hlist_entry(hnode,
+ struct nrs_tbf_client,
+ tc_hnode);
+
+ atomic_inc(&cli->tc_ref);
+}
+
+static void nrs_tbf_opcode_hop_put(struct cfs_hash *hs,
+ struct hlist_node *hnode)
+{
+ struct nrs_tbf_client *cli = hlist_entry(hnode,
+ struct nrs_tbf_client,
+ tc_hnode);
+
+ atomic_dec(&cli->tc_ref);
+}
+
+static void nrs_tbf_opcode_hop_exit(struct cfs_hash *hs,
+ struct hlist_node *hnode)
+{
+ struct nrs_tbf_client *cli = hlist_entry(hnode,
+ struct nrs_tbf_client,
+ tc_hnode);
+
+ LASSERTF(atomic_read(&cli->tc_ref) == 0,
+ "Busy TBF object from client with opcode %s, with %d refs\n",
+ ll_opcode2str(cli->tc_opcode),
+ atomic_read(&cli->tc_ref));
+
+ nrs_tbf_cli_fini(cli);
+}
+static struct cfs_hash_ops nrs_tbf_opcode_hash_ops = {
+ .hs_hash = nrs_tbf_opcode_hop_hash,
+ .hs_keycmp = nrs_tbf_opcode_hop_keycmp,
+ .hs_key = nrs_tbf_opcode_hop_key,
+ .hs_object = nrs_tbf_opcode_hop_object,
+ .hs_get = nrs_tbf_opcode_hop_get,
+ .hs_put = nrs_tbf_opcode_hop_put,
+ .hs_put_locked = nrs_tbf_opcode_hop_put,
+ .hs_exit = nrs_tbf_opcode_hop_exit,
+};
+
+static int
+nrs_tbf_opcode_startup(struct ptlrpc_nrs_policy *policy,
+ struct nrs_tbf_head *head)
+{
+ struct nrs_tbf_cmd start = { 0 };
+ int rc;
+
+ head->th_cli_hash = cfs_hash_create("nrs_tbf_hash",
+ NRS_TBF_NID_BITS,
+ NRS_TBF_NID_BITS,
+ NRS_TBF_NID_BKT_BITS, 0,
+ CFS_HASH_MIN_THETA,
+ CFS_HASH_MAX_THETA,
+ &nrs_tbf_opcode_hash_ops,
+ CFS_HASH_RW_BKTLOCK);
+ if (head->th_cli_hash == NULL)
+ return -ENOMEM;
+
+ start.u.tc_start.ts_opcodes = NULL;
+ start.u.tc_start.ts_opcodes_str = "*";
+
+ start.u.tc_start.ts_rpc_rate = tbf_rate;
+ start.u.tc_start.ts_rule_flags = NTRS_DEFAULT;
+ start.tc_name = NRS_TBF_DEFAULT_RULE;
+ rc = nrs_tbf_rule_start(policy, head, &start);
+
+ return rc;
+}
+
+static struct nrs_tbf_client *
+nrs_tbf_opcode_cli_find(struct nrs_tbf_head *head,
+ struct ptlrpc_request *req)
+{
+ __u32 opc;
+
+ opc = lustre_msg_get_opc(req->rq_reqmsg);
+ return cfs_hash_lookup(head->th_cli_hash, &opc);
+}
+
+static struct nrs_tbf_client *
+nrs_tbf_opcode_cli_findadd(struct nrs_tbf_head *head,
+ struct nrs_tbf_client *cli)
+{
+ return cfs_hash_findadd_unique(head->th_cli_hash, &cli->tc_opcode,
+ &cli->tc_hnode);
+}
+
+static void
+nrs_tbf_opcode_cli_init(struct nrs_tbf_client *cli,
+ struct ptlrpc_request *req)
+{
+ cli->tc_opcode = lustre_msg_get_opc(req->rq_reqmsg);
+}
+
+#define MAX_OPCODE_LEN 32
+static int
+nrs_tbf_opcode_set_bit(const struct cfs_lstr *id, struct cfs_bitmap *opcodes)
+{
+ int op = 0;
+ char opcode_str[MAX_OPCODE_LEN];
+
+ if (id->ls_len + 1 > MAX_OPCODE_LEN)
+ return -EINVAL;
+
+ memcpy(opcode_str, id->ls_str, id->ls_len);
+ opcode_str[id->ls_len] = '\0';
+
+ op = ll_str2opcode(opcode_str);
+ if (op < 0)
+ return -EINVAL;
+
+ cfs_bitmap_set(opcodes, op);
+ return 0;
+}
+
+static int
+nrs_tbf_opcode_list_parse(char *str, int len, struct cfs_bitmap *opcodes)
+{
+ struct cfs_lstr src;
+ struct cfs_lstr res;
+ int rc = 0;
+
+ ENTRY;
+
+ src.ls_str = str;
+ src.ls_len = len;
+ while (src.ls_str) {
+ rc = cfs_gettok(&src, ' ', &res);
+ if (rc == 0) {
+ rc = -EINVAL;
+ break;
+ }
+ rc = nrs_tbf_opcode_set_bit(&res, opcodes);
+ if (rc)
+ break;
+ }
+
+ RETURN(rc);
+}
+
+static void nrs_tbf_opcode_cmd_fini(struct nrs_tbf_cmd *cmd)
+{
+ if (cmd->u.tc_start.ts_opcodes)
+ CFS_FREE_BITMAP(cmd->u.tc_start.ts_opcodes);
+
+ if (cmd->u.tc_start.ts_opcodes_str)
+ OBD_FREE(cmd->u.tc_start.ts_opcodes_str,
+ strlen(cmd->u.tc_start.ts_opcodes_str) + 1);
+
+}
+
+static int nrs_tbf_opcode_parse(struct nrs_tbf_cmd *cmd, char *id)
+{
+ struct cfs_lstr src;
+ int rc;
+
+ cmd->u.tc_start.ts_opcodes = CFS_ALLOCATE_BITMAP(LUSTRE_MAX_OPCODES);
+ if (cmd->u.tc_start.ts_opcodes == NULL)
+ return -ENOMEM;
+
+ src.ls_str = id;
+ src.ls_len = strlen(id);
+ rc = nrs_tbf_check_id_value(&src, "opcode");
+ if (rc)
+ GOTO(out, rc);
+
+ OBD_ALLOC(cmd->u.tc_start.ts_opcodes_str, src.ls_len + 1);
+ if (cmd->u.tc_start.ts_opcodes_str == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ memcpy(cmd->u.tc_start.ts_opcodes_str, src.ls_str, src.ls_len);
+
+ /* parse opcode list */
+ rc = nrs_tbf_opcode_list_parse(cmd->u.tc_start.ts_opcodes_str,
+ strlen(cmd->u.tc_start.ts_opcodes_str),
+ cmd->u.tc_start.ts_opcodes);
+out:
+ if (rc != 0)
+ nrs_tbf_opcode_cmd_fini(cmd);
+
+ return rc;
+}
+
+static int
+nrs_tbf_opcode_rule_match(struct nrs_tbf_rule *rule,
+ struct nrs_tbf_client *cli)
+{
+ if (rule->tr_opcodes == NULL)
+ return 0;
+
+ return cfs_bitmap_check(rule->tr_opcodes, cli->tc_opcode);
+}
+
+static int nrs_tbf_opcode_rule_init(struct ptlrpc_nrs_policy *policy,
+ struct nrs_tbf_rule *rule,
+ struct nrs_tbf_cmd *start)
+{
+ LASSERT(start->u.tc_start.ts_opcodes_str != NULL);
+ OBD_ALLOC(rule->tr_opcodes_str,
+ strlen(start->u.tc_start.ts_opcodes_str) + 1);
+ if (rule->tr_opcodes_str == NULL)
+ return -ENOMEM;
+
+ strncpy(rule->tr_opcodes_str, start->u.tc_start.ts_opcodes_str,
+ strlen(start->u.tc_start.ts_opcodes_str) + 1);
+
+ if (start->u.tc_start.ts_opcodes == NULL)
+ return 0;
+
+ rule->tr_opcodes = CFS_ALLOCATE_BITMAP(LUSTRE_MAX_OPCODES);
+ if (rule->tr_opcodes == NULL) {
+ OBD_FREE(rule->tr_opcodes_str,
+ strlen(start->u.tc_start.ts_opcodes_str) + 1);
+ return -ENOMEM;
+ }
+
+ cfs_bitmap_copy(rule->tr_opcodes, start->u.tc_start.ts_opcodes);
+
+ return 0;
+}
+
+static int
+nrs_tbf_opcode_rule_dump(struct nrs_tbf_rule *rule, struct seq_file *m)
+{
+ seq_printf(m, "%s {%s} %llu, ref %d\n", rule->tr_name,
+ rule->tr_opcodes_str, rule->tr_rpc_rate,
+ atomic_read(&rule->tr_ref) - 1);
+ return 0;
+}
+
+
+struct nrs_tbf_ops nrs_tbf_opcode_ops = {
+ .o_name = NRS_TBF_TYPE_OPCODE,
+ .o_startup = nrs_tbf_opcode_startup,
+ .o_cli_find = nrs_tbf_opcode_cli_find,
+ .o_cli_findadd = nrs_tbf_opcode_cli_findadd,
+ .o_cli_put = nrs_tbf_nid_cli_put,
+ .o_cli_init = nrs_tbf_opcode_cli_init,
+ .o_rule_init = nrs_tbf_opcode_rule_init,
+ .o_rule_dump = nrs_tbf_opcode_rule_dump,
+ .o_rule_match = nrs_tbf_opcode_rule_match,
+ .o_rule_fini = nrs_tbf_opcode_rule_fini,
+};
+
+static struct nrs_tbf_type nrs_tbf_types[] = {
+ {
+ .ntt_name = NRS_TBF_TYPE_JOBID,
+ .ntt_flag = NRS_TBF_FLAG_JOBID,
+ .ntt_ops = &nrs_tbf_jobid_ops,
+ },
+ {
+ .ntt_name = NRS_TBF_TYPE_NID,
+ .ntt_flag = NRS_TBF_FLAG_NID,
+ .ntt_ops = &nrs_tbf_nid_ops,
+ },
+ {
+ .ntt_name = NRS_TBF_TYPE_OPCODE,
+ .ntt_flag = NRS_TBF_FLAG_OPCODE,
+ .ntt_ops = &nrs_tbf_opcode_ops,
+ },
+};
+
/**
* Is called before the policy transitions into
* ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED; allocates and initializes a
@@ -1247,18 +1554,22 @@ static int nrs_tbf_start(struct ptlrpc_nrs_policy *policy, char *arg)
struct nrs_tbf_head *head;
struct nrs_tbf_ops *ops;
__u32 type;
+ int found = 0;
+ int i;
int rc = 0;
if (arg == NULL || strlen(arg) > NRS_TBF_TYPE_MAX_LEN)
GOTO(out, rc = -EINVAL);
- if (strcmp(arg, NRS_TBF_TYPE_NID) == 0) {
- ops = &nrs_tbf_nid_ops;
- type = NRS_TBF_FLAG_NID;
- } else if (strcmp(arg, NRS_TBF_TYPE_JOBID) == 0) {
- ops = &nrs_tbf_jobid_ops;
- type = NRS_TBF_FLAG_JOBID;
- } else
+ for (i = 0; i < ARRAY_SIZE(nrs_tbf_types); i++) {
+ if (strcmp(arg, nrs_tbf_types[i].ntt_name) == 0) {
+ ops = nrs_tbf_types[i].ntt_ops;
+ type = nrs_tbf_types[i].ntt_flag;
+ found = 1;
+ break;
+ }
+ }
+ if (found == 0)
GOTO(out, rc = -ENOTSUPP);
OBD_CPT_ALLOC_PTR(head, nrs_pol2cptab(policy), nrs_pol2cptid(policy));
@@ -1455,6 +1766,7 @@ static int nrs_tbf_res_get(struct ptlrpc_nrs_policy *policy,
sizeof(*cli), moving_req ? GFP_ATOMIC : __GFP_IO);
if (cli == NULL)
return -ENOMEM;
+
nrs_tbf_cli_init(head, cli, req);
tmp = head->th_ops->o_cli_findadd(head, cli);
if (tmp != cli) {
@@ -1768,26 +2080,32 @@ static int nrs_tbf_id_parse(struct nrs_tbf_cmd *cmd, char *token)
{
int rc;
- if (cmd->u.tc_start.ts_valid_type & NRS_TBF_FLAG_JOBID)
+ switch (cmd->u.tc_start.ts_valid_type) {
+ case NRS_TBF_FLAG_JOBID:
rc = nrs_tbf_jobid_parse(cmd, token);
- else if (cmd->u.tc_start.ts_valid_type & NRS_TBF_FLAG_NID)
+ break;
+ case NRS_TBF_FLAG_NID:
rc = nrs_tbf_nid_parse(cmd, token);
- else if (cmd->u.tc_start.ts_valid_type == NRS_TBF_FLAG_INVALID)
- rc = -EINVAL;
- else
- rc = 0;
+ break;
+ case NRS_TBF_FLAG_OPCODE:
+ rc = nrs_tbf_opcode_parse(cmd, token);
+ break;
+ default:
+ RETURN(-EINVAL);
+ }
return rc;
}
-
static void nrs_tbf_cmd_fini(struct nrs_tbf_cmd *cmd)
{
if (cmd->tc_cmd == NRS_CTL_TBF_START_RULE) {
- if (cmd->u.tc_start.ts_valid_type & NRS_TBF_FLAG_JOBID)
+ if (cmd->u.tc_start.ts_valid_type == NRS_TBF_FLAG_JOBID)
nrs_tbf_jobid_cmd_fini(cmd);
- else if (cmd->u.tc_start.ts_valid_type & NRS_TBF_FLAG_NID)
+ else if (cmd->u.tc_start.ts_valid_type == NRS_TBF_FLAG_NID)
nrs_tbf_nid_cmd_fini(cmd);
+ else if (cmd->u.tc_start.ts_valid_type == NRS_TBF_FLAG_OPCODE)
+ nrs_tbf_opcode_cmd_fini(cmd);
}
}
diff --git a/lustre/tests/sanityn.sh b/lustre/tests/sanityn.sh
index 4573637..7f99537 100644
--- a/lustre/tests/sanityn.sh
+++ b/lustre/tests/sanityn.sh
@@ -3075,6 +3075,39 @@ tbf_rule_operate()
error "failed to run operate '$*' on TBF rules"
}
+tbf_verify() {
+ local dir=$DIR/$tdir
+ local client1=${CLIENT1:-`hostname`}
+ local myRUNAS="$3"
+
+ mkdir $dir || error "mkdir $dir failed"
+ $LFS setstripe -c 1 $dir || error "setstripe to $dir failed"
+ chmod 777 $dir
+
+ echo "Limited write rate: $1, read rate: $2"
+ echo "Verify the write rate is under TBF control"
+ local rate=$(do_node $client1 $myRUNAS dd if=/dev/zero of=$dir/tbf \
+ bs=1M count=100 oflag=direct 2>&1 | awk '/bytes/ {print $8}')
+ echo "Write speed is $rate"
+
+ # verify the write rate does not exceed 110% of TBF limited rate
+ [ $(bc <<< "$rate < 1.1 * $1") -eq 1 ] ||
+ error "The write rate ($rate) exceeds 110% of preset rate ($1)"
+
+ cancel_lru_locks osc
+
+ echo "Verify the read rate is under TBF control"
+ rate=$(do_node $client1 $myRUNAS dd if=$dir/tbf of=/dev/null \
+ bs=1M count=100 iflag=direct 2>&1 | awk '/bytes/ {print $8}')
+ echo "Read speed is $rate"
+
+ # verify the read rate does not exceed 110% of TBF limited rate
+ [ $(bc <<< "$rate < 1.1 * $2") -eq 1 ] ||
+ error "The read rate ($rate) exceeds 110% of preset rate ($2)"
+
+ rm -rf $dir || error "rm -rf $dir failed"
+}
+
test_77e() {
local server_version=$(lustre_version_code ost1)
[[ $server_version -ge $(version_code 2.7.58) ]] ||
@@ -3323,6 +3356,35 @@ test_77i() {
}
run_test 77i "Change rank of TBF rule"
+test_77j() {
+ local idis
+ local rateis
+ if [ $(lustre_version_code ost1) -ge $(version_code 2.8.60) ]; then
+ idis="opcode="
+ rateis="rate="
+ fi
+
+ do_nodes $(comma_list $(osts_nodes)) \
+ lctl set_param jobid_var=procname_uid \
+ ost.OSS.ost_io.nrs_policies="tbf\ opcode" \
+ ost.OSS.ost_io.nrs_tbf_rule="start\ ost_r\ ${idis}{ost_read}\ ${rateis}5" \
+ ost.OSS.ost_io.nrs_tbf_rule="start\ ost_w\ ${idis}{ost_write}\ ${rateis}20"
+
+ nrs_write_read
+ tbf_verify 20 5
+
+ do_nodes $(comma_list $(osts_nodes)) \
+ lctl set_param ost.OSS.ost_io.nrs_tbf_rule="stop\ ost_r" \
+ ost.OSS.ost_io.nrs_tbf_rule="stop\ ost_w" \
+ ost.OSS.ost_io.nrs_policies="fifo"
+
+ # sleep 3 seconds to wait the tbf policy stop completely,
+ # or the next test case is possible get -EAGAIN when
+ # setting the tbf policy
+ sleep 3
+}
+run_test 77j "check TBF-OPCode NRS policy"
+
test_78() { #LU-6673
local server_version=$(lustre_version_code ost1)
[[ $server_version -ge $(version_code 2.7.58) ]] ||