From 45ccb61a6123e68c55bcab044188cda6d935bcc3 Mon Sep 17 00:00:00 2001
From: Aurelien Degremont
Date: Tue, 24 Sep 2013 14:10:09 +0200
Subject: [PATCH] LU-3999 hsm: Rename several HSM files in /proc for MDT.

Some files in /proc available for the MDT are misnamed. This can be
inconsistent or confusing for sysadmins. This patch renames them.

`actions' is used when referring to HSM requests stored in the MDT llog.
`active_requests' is used when referring to HSM requests currently being
handled by a copytool on an agent node.

Also rename some HSM variables and functions so that they match the proc
file names more consistently.

sanity-hsm tests have been updated accordingly.

Signed-off-by: Aurelien Degremont
Change-Id: If2b5c24e06d480554040d8f69b09236e0ddd98e9
Reviewed-on: http://review.whamcloud.com/7740
Tested-by: Hudson
Reviewed-by: John L. Hammond
Reviewed-by: Faccini Bruno
Tested-by: Maloo
Reviewed-by: Oleg Drokin
---
 lustre/mdt/mdt_coordinator.c      | 103 ++++++++++++++++++++------------------
 lustre/mdt/mdt_hsm_cdt_actions.c  |  44 ++++++++--------
 lustre/mdt/mdt_hsm_cdt_requests.c |  28 ++++++-----
 lustre/mdt/mdt_internal.h         |  12 ++---
 lustre/tests/sanity-hsm.sh        |  28 +++++------
 5 files changed, 110 insertions(+), 105 deletions(-)

diff --git a/lustre/mdt/mdt_coordinator.c b/lustre/mdt/mdt_coordinator.c
index 9d5d4cd..0071138 100644
--- a/lustre/mdt/mdt_coordinator.c
+++ b/lustre/mdt/mdt_coordinator.c
@@ -133,7 +133,7 @@ struct hsm_scan_data {
 	char fs_name[MTI_NAME_MAXLEN+1];
 	/* request to be send to agents */
 	int request_sz;			/** allocated size */
-	int max_request;		/** vector size */
+	int max_requests;		/** vector size */
 	int request_cnt;		/** used count */
 	struct {
 		int hal_sz;
@@ -182,14 +182,14 @@ static int mdt_coordinator_cb(const struct lu_env *env,
 		/* Are agents full? */
 		if (atomic_read(&cdt->cdt_request_count) ==
-		    cdt->cdt_max_request)
+		    cdt->cdt_max_requests)
 			break;
 		/* first search if the request if known in the list we have
 		 * build and if there is room in the request vector */
 		empty_slot = -1;
 		found = -1;
-		for (i = 0; i < hsd->max_request &&
+		for (i = 0; i < hsd->max_requests &&
 			    (empty_slot == -1 || found == -1); i++) {
 			if (hsd->request[i].hal == NULL) {
 				empty_slot = i;
@@ -308,7 +308,8 @@ static int mdt_coordinator_cb(const struct lu_env *env,
 		/* test if request too long, if yes cancel it
 		 * the same way the copy tool acknowledge a cancel request */
-		if ((last + cdt->cdt_timeout) < cfs_time_current_sec()) {
+		if ((last + cdt->cdt_active_req_timeout)
+		    < cfs_time_current_sec()) {
 			struct hsm_progress_kernel pgs;

 			dump_llog_agent_req_rec("mdt_coordinator_cb(): "
@@ -371,7 +372,7 @@ static int mdt_coordinator_cb(const struct lu_env *env,
 	case ARS_FAILED:
 	case ARS_CANCELED:
 	case ARS_SUCCEED:
-		if ((larr->arr_req_change + cdt->cdt_delay) <
+		if ((larr->arr_req_change + cdt->cdt_grace_delay) <
 		    cfs_time_current_sec())
 			RETURN(LLOG_DEL_RECORD);
 		break;
@@ -454,12 +455,12 @@ static int mdt_coordinator(void *data)
 	hsd.max_cookie = 0;
 	hsd.cookie_cnt = 0;
 	hsd.cookies = NULL;
-	/* we use a copy of cdt_max_request in the cb, so if cdt_max_request
+	/* we use a copy of cdt_max_requests in the cb, so if cdt_max_requests
 	 * increases due to a change from /proc we do not overflow the
 	 * hsd.request[] vector */
-	hsd.max_request = cdt->cdt_max_request;
-	hsd.request_sz = hsd.max_request * sizeof(*hsd.request);
+	hsd.max_requests = cdt->cdt_max_requests;
+	hsd.request_sz = hsd.max_requests * sizeof(*hsd.request);
 	OBD_ALLOC(hsd.request, hsd.request_sz);
 	if (!hsd.request)
 		GOTO(out, rc = -ENOMEM);
@@ -499,14 +500,14 @@ static int mdt_coordinator(void *data)
 		CDEBUG(D_HSM, "coordinator starts reading llog\n");

-		if (hsd.max_request != cdt->cdt_max_request) {
-			/* cdt_max_request has changed,
+		if (hsd.max_requests != cdt->cdt_max_requests) {
+			/* cdt_max_requests has changed,
 			 * we need to allocate a new buffer */
 			OBD_FREE(hsd.request, hsd.request_sz);
-			hsd.max_request = cdt->cdt_max_request;
+			hsd.max_requests = cdt->cdt_max_requests;
 			hsd.request_sz =
-				hsd.max_request * sizeof(*hsd.request);
+				hsd.max_requests * sizeof(*hsd.request);
 			OBD_ALLOC(hsd.request, hsd.request_sz);
 			if (!hsd.request) {
 				rc = -ENOMEM;
@@ -556,7 +557,7 @@ static int mdt_coordinator(void *data)
 		}
 		/* here hsd contains a list of requests to be started */
-		for (i = 0; i < hsd.max_request; i++) {
+		for (i = 0; i < hsd.max_requests; i++) {
 			struct hsm_action_list *hal;
 			struct hsm_action_item *hai;
 			__u64 *cookies;
@@ -565,7 +566,7 @@ static int mdt_coordinator(void *data)
 			/* still room for work ? */
 			if (atomic_read(&cdt->cdt_request_count) ==
-			    cdt->cdt_max_request)
+			    cdt->cdt_max_requests)
 				break;

 			if (hsd.request[i].hal == NULL)
@@ -638,7 +639,7 @@ clean_cb_alloc:
 	}
 	/* free hal allocated by callback */
-	for (i = 0; i < hsd.max_request; i++) {
+	for (i = 0; i < hsd.max_requests; i++) {
 		if (hsd.request[i].hal) {
 			OBD_FREE(hsd.request[i].hal,
 				 hsd.request[i].hal_sz);
@@ -898,11 +899,11 @@ int mdt_hsm_cdt_init(struct mdt_device *mdt)
 	/* default values for /proc tunnables
 	 * can be override by MGS conf */
 	cdt->cdt_default_archive_id = 1;
-	cdt->cdt_delay = 60;
+	cdt->cdt_grace_delay = 60;
 	cdt->cdt_loop_period = 10;
-	cdt->cdt_max_request = 3;
+	cdt->cdt_max_requests = 3;
 	cdt->cdt_policy = CDT_DEFAULT_POLICY;
-	cdt->cdt_timeout = 3600;
+	cdt->cdt_active_req_timeout = 3600;

 	RETURN(0);
 }
@@ -959,7 +960,6 @@ int mdt_hsm_cdt_start(struct mdt_device *mdt)
 	/* just need to be larger than previous one */
 	/* cdt_last_cookie is protected by cdt_llog_lock */
 	cdt->cdt_last_cookie = cfs_time_current_sec();
-	atomic_set(&cdt->cdt_request_count, 0);

 	cdt->cdt_user_request_mask = (1UL << HSMA_RESTORE);
 	cdt->cdt_group_request_mask = (1UL << HSMA_RESTORE);
@@ -1951,9 +1951,9 @@ static int lprocfs_wr_hsm_##VAR(struct file *file, const char *buffer, \
 }
 GENERATE_PROC_METHOD(cdt_loop_period)
-GENERATE_PROC_METHOD(cdt_delay)
-GENERATE_PROC_METHOD(cdt_timeout)
-GENERATE_PROC_METHOD(cdt_max_request)
+GENERATE_PROC_METHOD(cdt_grace_delay)
+GENERATE_PROC_METHOD(cdt_active_req_timeout)
+GENERATE_PROC_METHOD(cdt_max_requests)
 GENERATE_PROC_METHOD(cdt_default_archive_id)

 /*
@@ -2185,32 +2185,35 @@ lprocfs_wr_hsm_other_request_mask(struct file *file, const char __user *buf,
 }

 static struct lprocfs_vars lprocfs_mdt_hsm_vars[] = {
-	{ "agents", NULL, NULL, NULL, &mdt_hsm_agent_fops, 0 },
-	{ "agent_actions", NULL, NULL, NULL,
-		&mdt_agent_actions_fops, 0444 },
-	{ "default_archive_id", lprocfs_rd_hsm_cdt_default_archive_id,
-		lprocfs_wr_hsm_cdt_default_archive_id,
-		NULL, NULL, 0 },
-	{ "grace_delay", lprocfs_rd_hsm_cdt_delay,
-		lprocfs_wr_hsm_cdt_delay,
-		NULL, NULL, 0 },
-	{ "loop_period", lprocfs_rd_hsm_cdt_loop_period,
-		lprocfs_wr_hsm_cdt_loop_period,
-		NULL, NULL, 0 },
-	{ "max_requests", lprocfs_rd_hsm_cdt_max_request,
-		lprocfs_wr_hsm_cdt_max_request,
-		NULL, NULL, 0 },
-	{ "policy", lprocfs_rd_hsm_policy, lprocfs_wr_hsm_policy,
-		NULL, NULL, 0 },
-	{ "request_timeout", lprocfs_rd_hsm_cdt_timeout,
-		lprocfs_wr_hsm_cdt_timeout,
-		NULL, NULL, 0 },
-	{ "requests", NULL, NULL, NULL, &mdt_hsm_request_fops, 0 },
-	{ "user_request_mask", lprocfs_rd_hsm_user_request_mask,
-		lprocfs_wr_hsm_user_request_mask, },
-	{ "group_request_mask", lprocfs_rd_hsm_group_request_mask,
-		lprocfs_wr_hsm_group_request_mask, },
-	{ "other_request_mask", lprocfs_rd_hsm_other_request_mask,
-		lprocfs_wr_hsm_other_request_mask, },
-	{ NULL }
+	{ "agents", NULL, NULL, NULL, &mdt_hsm_agent_fops,
+		0 },
+	{ "actions", NULL, NULL, NULL, &mdt_hsm_actions_fops,
+		0444 },
+	{ "default_archive_id", lprocfs_rd_hsm_cdt_default_archive_id,
+		lprocfs_wr_hsm_cdt_default_archive_id,
+		NULL, NULL, 0 },
+	{ "grace_delay", lprocfs_rd_hsm_cdt_grace_delay,
+		lprocfs_wr_hsm_cdt_grace_delay,
+		NULL, NULL, 0 },
+	{ "loop_period", lprocfs_rd_hsm_cdt_loop_period,
+		lprocfs_wr_hsm_cdt_loop_period,
+		NULL, NULL, 0 },
+	{ "max_requests", lprocfs_rd_hsm_cdt_max_requests,
+		lprocfs_wr_hsm_cdt_max_requests,
+		NULL, NULL, 0 },
+	{ "policy", lprocfs_rd_hsm_policy,
+		lprocfs_wr_hsm_policy,
+		NULL, NULL, 0 },
+	{ "active_request_timeout", lprocfs_rd_hsm_cdt_active_req_timeout,
+		lprocfs_wr_hsm_cdt_active_req_timeout,
+		NULL, NULL, 0 },
+	{ "active_requests", NULL, NULL, NULL,
+		&mdt_hsm_active_requests_fops, 0 },
+	{ "user_request_mask", lprocfs_rd_hsm_user_request_mask,
+		lprocfs_wr_hsm_user_request_mask, },
+	{ "group_request_mask", lprocfs_rd_hsm_group_request_mask,
+		lprocfs_wr_hsm_group_request_mask, },
+	{ "other_request_mask", lprocfs_rd_hsm_other_request_mask,
+		lprocfs_wr_hsm_other_request_mask, },
+	{ 0 }
 };
diff --git a/lustre/mdt/mdt_hsm_cdt_actions.c b/lustre/mdt/mdt_hsm_cdt_actions.c
index e1be046..eda547e 100644
--- a/lustre/mdt/mdt_hsm_cdt_actions.c
+++ b/lustre/mdt/mdt_hsm_cdt_actions.c
@@ -25,7 +25,7 @@
  *
  */
 /*
- * lustre/mdt/mdt_agent_actions.c
+ * lustre/mdt/mdt_hsm_cdt_actions.c
  *
  * Lustre HSM
  *
@@ -349,7 +349,7 @@ struct agent_action_iterator {
  * seq_file method called to start access to /proc file
  * get llog context + llog handle
  */
-static void *mdt_agent_actions_proc_start(struct seq_file *s, loff_t *pos)
+static void *mdt_hsm_actions_proc_start(struct seq_file *s, loff_t *pos)
 {
 	struct agent_action_iterator *aai = s->private;
 	ENTRY;
@@ -379,7 +379,7 @@ static void *mdt_agent_actions_proc_start(struct seq_file *s, loff_t *pos)
 	RETURN(aai);
 }

-static void *mdt_agent_actions_proc_next(struct seq_file *s, void *v,
+static void *mdt_hsm_actions_proc_next(struct seq_file *s, void *v,
 					loff_t *pos)
 {
 	RETURN(NULL);
@@ -388,7 +388,7 @@ static void *mdt_agent_actions_proc_next(struct seq_file *s, void *v,
 /**
  * llog_cat_process() callback, used to fill a seq_file buffer
 */
-static int agent_actions_show_cb(const struct lu_env *env,
+static int hsm_actions_show_cb(const struct lu_env *env,
 				struct llog_handle *llh,
 				struct llog_rec_hdr *hdr,
 				void *data)
@@ -447,11 +447,11 @@ static int agent_actions_show_cb(const struct lu_env *env,
 }

 /**
- * mdt_agent_actions_proc_show() is called at for each seq record
+ * mdt_hsm_actions_proc_show() is called at for each seq record
  * process the llog, with a cb which fill the file_seq buffer
  * to be faster, one show will fill multiple records
  */
-static int mdt_agent_actions_proc_show(struct seq_file *s, void *v)
+static int mdt_hsm_actions_proc_show(struct seq_file *s, void *v)
 {
 	struct agent_action_iterator *aai = s->private;
 	int rc;
@@ -466,7 +466,7 @@ static int mdt_agent_actions_proc_show(struct seq_file *s, void *v)
 		RETURN(0);

 	rc = llog_cat_process(&aai->aai_env, aai->aai_ctxt->loc_handle,
-			      agent_actions_show_cb, s,
+			      hsm_actions_show_cb, s,
 			      aai->aai_cat_index, aai->aai_index + 1);
 	if (rc == 0) /* all llog parsed */
 		aai->aai_eof = true;
@@ -479,7 +479,7 @@
  * seq_file method called to stop access to /proc file
  * clean + put llog context
  */
-static void mdt_agent_actions_proc_stop(struct seq_file *s, void *v)
+static void mdt_hsm_actions_proc_stop(struct seq_file *s, void *v)
 {
 	struct agent_action_iterator *aai = s->private;
 	ENTRY;
@@ -494,14 +494,14 @@ static void mdt_agent_actions_proc_stop(struct seq_file *s, void *v)
 	return;
 }

-static const struct seq_operations mdt_agent_actions_proc_ops = {
-	.start = mdt_agent_actions_proc_start,
-	.next = mdt_agent_actions_proc_next,
-	.show = mdt_agent_actions_proc_show,
-	.stop = mdt_agent_actions_proc_stop,
+static const struct seq_operations mdt_hsm_actions_proc_ops = {
+	.start = mdt_hsm_actions_proc_start,
+	.next = mdt_hsm_actions_proc_next,
+	.show = mdt_hsm_actions_proc_show,
+	.stop = mdt_hsm_actions_proc_stop,
 };

-static int lprocfs_open_agent_actions(struct inode *inode, struct file *file)
+static int lprocfs_open_hsm_actions(struct inode *inode, struct file *file)
 {
 	struct agent_action_iterator *aai;
 	struct seq_file *s;
@@ -512,7 +512,7 @@ static int lprocfs_open_agent_actions(struct inode *inode, struct file *file)
 	if (LPROCFS_ENTRY_CHECK(PDE(inode)))
 		RETURN(-ENOENT);

-	rc = seq_open(file, &mdt_agent_actions_proc_ops);
+	rc = seq_open(file, &mdt_hsm_actions_proc_ops);
 	if (rc)
 		RETURN(rc);
@@ -546,10 +546,10 @@ out:
 }

 /**
- * lprocfs_release_agent_actions() is called at end of /proc access
- * free alloacted ressources and call cleanup lprocfs methods
+ * lprocfs_release_hsm_actions() is called at end of /proc access.
+ * It frees allocated ressources and calls cleanup lprocfs methods.
 */
-static int lprocfs_release_agent_actions(struct inode *inode, struct file *file)
+static int lprocfs_release_hsm_actions(struct inode *inode, struct file *file)
 {
 	struct seq_file *seq = file->private_data;
 	struct agent_action_iterator *aai = seq->private;
@@ -562,12 +562,12 @@ static int lprocfs_release_agent_actions(struct inode *inode, struct file *file)
 	return lprocfs_seq_release(inode, file);
 }

-/* methods to access agent actions llog through /proc */
-const struct file_operations mdt_agent_actions_fops = {
+/* Methods to access HSM action list LLOG through /proc */
+const struct file_operations mdt_hsm_actions_fops = {
 	.owner = THIS_MODULE,
-	.open = lprocfs_open_agent_actions,
+	.open = lprocfs_open_hsm_actions,
 	.read = seq_read,
 	.llseek = seq_lseek,
-	.release = lprocfs_release_agent_actions,
+	.release = lprocfs_release_hsm_actions,
 };
diff --git a/lustre/mdt/mdt_hsm_cdt_requests.c b/lustre/mdt/mdt_hsm_cdt_requests.c
index a7a92c7..796cbea 100644
--- a/lustre/mdt/mdt_hsm_cdt_requests.c
+++ b/lustre/mdt/mdt_hsm_cdt_requests.c
@@ -451,7 +451,7 @@ struct cdt_agent_req *mdt_cdt_update_request(struct coordinator *cdt,
 /**
  * seq_file method called to start access to /proc file
 */
-static void *mdt_hsm_request_proc_start(struct seq_file *s, loff_t *p)
+static void *mdt_hsm_active_requests_proc_start(struct seq_file *s, loff_t *p)
 {
 	struct mdt_device *mdt = s->private;
 	struct coordinator *cdt = &mdt->mdt_coordinator;
@@ -480,7 +480,8 @@
  * seq_file method called to get next item
  * just returns NULL at eof
 */
-static void *mdt_hsm_request_proc_next(struct seq_file *s, void *v, loff_t *p)
+static void *mdt_hsm_active_requests_proc_next(struct seq_file *s, void *v,
+					       loff_t *p)
 {
 	struct mdt_device *mdt = s->private;
 	struct coordinator *cdt = &mdt->mdt_coordinator;
@@ -502,7 +503,7 @@
 /**
  * display request data
 */
-static int mdt_hsm_request_proc_show(struct seq_file *s, void *v)
+static int mdt_hsm_active_requests_proc_show(struct seq_file *s, void *v)
 {
 	struct list_head *pos = v;
 	struct cdt_agent_req *car;
@@ -538,7 +539,7 @@
 /**
  * seq_file method called to stop access to /proc file
 */
-static void mdt_hsm_request_proc_stop(struct seq_file *s, void *v)
+static void mdt_hsm_active_requests_proc_stop(struct seq_file *s, void *v)
 {
 	struct mdt_device *mdt = s->private;
 	struct coordinator *cdt = &mdt->mdt_coordinator;
@@ -550,18 +551,19 @@
 }

 /* hsm agent list proc functions */
-static const struct seq_operations mdt_hsm_request_proc_ops = {
-	.start = mdt_hsm_request_proc_start,
-	.next = mdt_hsm_request_proc_next,
-	.show = mdt_hsm_request_proc_show,
-	.stop = mdt_hsm_request_proc_stop,
+static const struct seq_operations mdt_hsm_active_requests_proc_ops = {
+	.start = mdt_hsm_active_requests_proc_start,
+	.next = mdt_hsm_active_requests_proc_next,
+	.show = mdt_hsm_active_requests_proc_show,
+	.stop = mdt_hsm_active_requests_proc_stop,
 };

 /**
  * public function called at open of /proc file to get
  * list of agents
  */
-static int lprocfs_open_hsm_request(struct inode *inode, struct file *file)
+static int lprocfs_open_hsm_active_requests(struct inode *inode,
+					    struct file *file)
 {
 	struct seq_file *s;
 	int rc;
@@ -570,7 +572,7 @@ static int lprocfs_open_hsm_request(struct inode *inode, struct file *file)
 	if (LPROCFS_ENTRY_CHECK(PDE(inode)))
 		RETURN(-ENOENT);

-	rc = seq_open(file, &mdt_hsm_request_proc_ops);
+	rc = seq_open(file, &mdt_hsm_active_requests_proc_ops);
 	if (rc) {
 		RETURN(rc);
 	}
@@ -581,9 +583,9 @@ static int lprocfs_open_hsm_request(struct inode *inode, struct file *file)
 }

 /* methods to access hsm request list */
-const struct file_operations mdt_hsm_request_fops = {
+const struct file_operations mdt_hsm_active_requests_fops = {
 	.owner = THIS_MODULE,
-	.open = lprocfs_open_hsm_request,
+	.open = lprocfs_open_hsm_active_requests,
 	.read = seq_read,
 	.llseek = seq_lseek,
 	.release = lprocfs_seq_release,
diff --git a/lustre/mdt/mdt_internal.h b/lustre/mdt/mdt_internal.h
index 5fb3c9a..8cb416e 100644
--- a/lustre/mdt/mdt_internal.h
+++ b/lustre/mdt/mdt_internal.h
@@ -129,13 +129,13 @@ struct coordinator {
 	struct mutex		cdt_restore_lock;	/**< protect restore
							 * list */
 	cfs_time_t		cdt_loop_period;	/**< llog scan period */
-	cfs_time_t		cdt_delay;		/**< request grace
+	cfs_time_t		cdt_grace_delay;	/**< request grace
							 * delay */
-	cfs_time_t		cdt_timeout;		/**< request timeout */
-	__u32			cdt_default_archive_id;	/** archive id used
+	cfs_time_t		cdt_active_req_timeout;	/**< request timeout */
+	__u32			cdt_default_archive_id;	/**< archive id used
							 * when none are
							 * specified */
-	__u64			cdt_max_request;	/**< max count of started
+	__u64			cdt_max_requests;	/**< max count of started
							 * requests */
 	atomic_t		cdt_request_count;	/**< current count of
							 * started requests */
@@ -947,7 +947,7 @@ int mdt_hsm_ct_register(struct mdt_thread_info *info);
 int mdt_hsm_ct_unregister(struct mdt_thread_info *info);
 int mdt_hsm_request(struct mdt_thread_info *info);
 /* mdt/mdt_hsm_cdt_actions.c */
-extern const struct file_operations mdt_agent_actions_fops;
+extern const struct file_operations mdt_hsm_actions_fops;
 void dump_llog_agent_req_rec(const char *prefix,
 			     const struct llog_agent_req_rec *larr);
 int cdt_llog_process(const struct lu_env *env, struct mdt_device *mdt,
@@ -991,7 +991,7 @@ int mdt_hsm_get_running(struct mdt_thread_info *mti,
 bool mdt_hsm_restore_is_running(struct mdt_thread_info *mti,
 				const struct lu_fid *fid);
 /* mdt/mdt_hsm_cdt_requests.c */
-extern const struct file_operations mdt_hsm_request_fops;
+extern const struct file_operations mdt_hsm_active_requests_fops;
 void dump_requests(char *prefix, struct coordinator *cdt);
 struct cdt_agent_req *mdt_cdt_alloc_request(__u64 compound_id, __u32 archive_id,
 					    __u64 flags, struct obd_uuid *uuid,
diff --git a/lustre/tests/sanity-hsm.sh b/lustre/tests/sanity-hsm.sh
index 37a66ce..287e434 100644
--- a/lustre/tests/sanity-hsm.sh
+++ b/lustre/tests/sanity-hsm.sh
@@ -466,7 +466,7 @@ wait_request_state() {
 	local request=$2
 	local state=$3

-	local cmd="$LCTL get_param -n $HSM_PARAM.agent_actions"
+	local cmd="$LCTL get_param -n $HSM_PARAM.actions"
 	cmd+=" | awk '/'$fid'.*action='$request'/ {print \\\$13}' | cut -f2 -d="
 	wait_result $SINGLEMDS "$cmd" $state 100 ||
@@ -477,7 +477,7 @@ get_request_state() {
 	local fid=$1
 	local request=$2

-	do_facet $SINGLEMDS "$LCTL get_param -n $HSM_PARAM.agent_actions |"\
+	do_facet $SINGLEMDS "$LCTL get_param -n $HSM_PARAM.actions |"\
		"awk '/'$fid'.*action='$request'/ {print \\\$13}' | cut -f2 -d="
 }

@@ -485,14 +485,14 @@ get_request_count() {
 	local fid=$1
 	local request=$2

-	do_facet $SINGLEMDS "$LCTL get_param -n $HSM_PARAM.agent_actions |"\
+	do_facet $SINGLEMDS "$LCTL get_param -n $HSM_PARAM.actions |"\
		"awk -vn=0 '/'$fid'.*action='$request'/ {n++}; END {print n}'"
 }

 wait_all_done() {
 	local timeout=$1

-	local cmd="$LCTL get_param -n $HSM_PARAM.agent_actions"
+	local cmd="$LCTL get_param -n $HSM_PARAM.actions"
 	cmd+=" | egrep 'WAITING|STARTED'"

 	wait_result $SINGLEMDS "$cmd" "" $timeout ||
@@ -2382,7 +2382,7 @@ double_verify_reset_hsm_param() {
 test_100() {
 	double_verify_reset_hsm_param loop_period
 	double_verify_reset_hsm_param grace_delay
-	double_verify_reset_hsm_param request_timeout
+	double_verify_reset_hsm_param active_request_timeout
 	double_verify_reset_hsm_param max_requests
 	double_verify_reset_hsm_param default_archive_id
 }
@@ -2412,7 +2412,7 @@ test_103() {
 	echo "Current requests"
 	local res=$(do_facet $SINGLEMDS "$LCTL get_param -n\
-			$HSM_PARAM.agent_actions |\
+			$HSM_PARAM.actions |\
			grep -v CANCELED | grep -v SUCCEED | grep -v FAILED")

 	[[ -z "$res" ]] || error "Some request have not been canceled"
@@ -2434,7 +2434,7 @@ test_104() {
 	cdt_disable
 	$LFS hsm_archive --archive $HSM_ARCHIVE_NUMBER --data $DATA $f
 	local data1=$(do_facet $SINGLEMDS "$LCTL get_param -n\
-			$HSM_PARAM.agent_actions |\
+			$HSM_PARAM.actions |\
			grep $fid | cut -f16 -d=")

 	cdt_enable
@@ -2455,12 +2455,12 @@ test_105() {
 		$LFS hsm_archive $DIR/$tdir/$i
 	done
 	local reqcnt1=$(do_facet $SINGLEMDS "$LCTL get_param -n\
-			$HSM_PARAM.agent_actions |\
+			$HSM_PARAM.actions |\
			grep WAITING | wc -l")
 	cdt_restart
 	cdt_disable
 	local reqcnt2=$(do_facet $SINGLEMDS "$LCTL get_param -n\
-			$HSM_PARAM.agent_actions |\
+			$HSM_PARAM.actions |\
			grep WAITING | wc -l")
 	cdt_enable
 	cdt_purge
@@ -3133,12 +3133,12 @@ test_250() {
 	while [[ $cnt != 0 || $wt != 0 ]]; do
 		sleep 1
 		cnt=$(do_facet $SINGLEMDS "$LCTL get_param -n\
-			$HSM_PARAM.agent_actions |\
+			$HSM_PARAM.actions |\
			grep STARTED | grep -v CANCEL | wc -l")
 		[[ $cnt -le $maxrequest ]] ||
 			error "$cnt > $maxrequest too many started requests"
 		wt=$(do_facet $SINGLEMDS "$LCTL get_param\
-			$HSM_PARAM.agent_actions |\
+			$HSM_PARAM.actions |\
			grep WAITING | wc -l")
 		echo "max=$maxrequest started=$cnt waiting=$wt"
 	done
@@ -3157,8 +3157,8 @@ test_251() {
 	cdt_disable

 	# to have a short test
-	local old_to=$(get_hsm_param request_timeout)
-	set_hsm_param request_timeout 4
+	local old_to=$(get_hsm_param active_request_timeout)
+	set_hsm_param active_request_timeout 4
 	# to be sure the cdt will wake up frequently so
 	# it will be able to cancel the "old" request
 	local old_loop=$(get_hsm_param loop_period)
@@ -3170,7 +3170,7 @@ test_251() {
 	sleep 5
 	wait_request_state $fid ARCHIVE CANCELED

-	set_hsm_param request_timeout $old_to
+	set_hsm_param active_request_timeout $old_to
 	set_hsm_param loop_period $old_loop

 	copytool_cleanup
--
1.8.3.1
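
Usage sketch (not applied by git am, placed after the signature): a minimal example of
how the renamed /proc entries would be read from a shell on the MDS, mirroring the
`lctl get_param'/`lctl set_param' calls already used in sanity-hsm.sh above. The
mdt.lustre-MDT0000.hsm prefix is an assumed example MDT name (sanity-hsm.sh builds the
equivalent prefix in $HSM_PARAM); substitute the local MDT name.

    # Assumed example MDT device name; adjust to the local MDT.
    HSM=mdt.lustre-MDT0000.hsm

    # Dump the HSM requests recorded in the MDT llog (formerly "agent_actions").
    lctl get_param -n $HSM.actions

    # List the requests currently being handled by copytools (formerly "requests").
    lctl get_param -n $HSM.active_requests

    # Tune the renamed timeout, in seconds (formerly "request_timeout").
    lctl set_param $HSM.active_request_timeout=3600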