char fs_name[MTI_NAME_MAXLEN+1];
/* requests to be sent to agents */
int request_sz; /** allocated size */
- int max_request; /** vector size */
+ int max_requests; /** vector size */
int request_cnt; /** used count */
struct {
int hal_sz;
/* Are agents full? */
if (atomic_read(&cdt->cdt_request_count) ==
- cdt->cdt_max_request)
+ cdt->cdt_max_requests)
break;
/* first search if the request is known in the list we have
 * built and if there is room in the request vector */
empty_slot = -1;
found = -1;
- for (i = 0; i < hsd->max_request &&
+ for (i = 0; i < hsd->max_requests &&
(empty_slot == -1 || found == -1); i++) {
if (hsd->request[i].hal == NULL) {
empty_slot = i;
/* test if request too long, if yes cancel it
 * the same way the copy tool acknowledges a cancel request */
- if ((last + cdt->cdt_timeout) < cfs_time_current_sec()) {
+ if ((last + cdt->cdt_active_req_timeout)
+ < cfs_time_current_sec()) {
struct hsm_progress_kernel pgs;
dump_llog_agent_req_rec("mdt_coordinator_cb(): "
case ARS_FAILED:
case ARS_CANCELED:
case ARS_SUCCEED:
- if ((larr->arr_req_change + cdt->cdt_delay) <
+ if ((larr->arr_req_change + cdt->cdt_grace_delay) <
cfs_time_current_sec())
RETURN(LLOG_DEL_RECORD);
break;
hsd.max_cookie = 0;
hsd.cookie_cnt = 0;
hsd.cookies = NULL;
- /* we use a copy of cdt_max_request in the cb, so if cdt_max_request
+ /* we use a copy of cdt_max_requests in the cb, so if cdt_max_requests
* increases due to a change from /proc we do not overflow the
* hsd.request[] vector
*/
- hsd.max_request = cdt->cdt_max_request;
- hsd.request_sz = hsd.max_request * sizeof(*hsd.request);
+ hsd.max_requests = cdt->cdt_max_requests;
+ hsd.request_sz = hsd.max_requests * sizeof(*hsd.request);
OBD_ALLOC(hsd.request, hsd.request_sz);
if (!hsd.request)
GOTO(out, rc = -ENOMEM);
CDEBUG(D_HSM, "coordinator starts reading llog\n");
- if (hsd.max_request != cdt->cdt_max_request) {
- /* cdt_max_request has changed,
+ if (hsd.max_requests != cdt->cdt_max_requests) {
+ /* cdt_max_requests has changed,
* we need to allocate a new buffer
*/
OBD_FREE(hsd.request, hsd.request_sz);
- hsd.max_request = cdt->cdt_max_request;
+ hsd.max_requests = cdt->cdt_max_requests;
hsd.request_sz =
- hsd.max_request * sizeof(*hsd.request);
+ hsd.max_requests * sizeof(*hsd.request);
OBD_ALLOC(hsd.request, hsd.request_sz);
if (!hsd.request) {
rc = -ENOMEM;
}
/* here hsd contains a list of requests to be started */
- for (i = 0; i < hsd.max_request; i++) {
+ for (i = 0; i < hsd.max_requests; i++) {
struct hsm_action_list *hal;
struct hsm_action_item *hai;
__u64 *cookies;
/* still room for work ? */
if (atomic_read(&cdt->cdt_request_count) ==
- cdt->cdt_max_request)
+ cdt->cdt_max_requests)
break;
if (hsd.request[i].hal == NULL)
}
/* free hal allocated by callback */
- for (i = 0; i < hsd.max_request; i++) {
+ for (i = 0; i < hsd.max_requests; i++) {
if (hsd.request[i].hal) {
OBD_FREE(hsd.request[i].hal,
hsd.request[i].hal_sz);
/* default values for /proc tunables
 * can be overridden by MGS conf */
cdt->cdt_default_archive_id = 1;
- cdt->cdt_delay = 60;
+ cdt->cdt_grace_delay = 60;
cdt->cdt_loop_period = 10;
- cdt->cdt_max_request = 3;
+ cdt->cdt_max_requests = 3;
cdt->cdt_policy = CDT_DEFAULT_POLICY;
- cdt->cdt_timeout = 3600;
+ cdt->cdt_active_req_timeout = 3600;
RETURN(0);
}
/* just need to be larger than previous one */
/* cdt_last_cookie is protected by cdt_llog_lock */
cdt->cdt_last_cookie = cfs_time_current_sec();
-
atomic_set(&cdt->cdt_request_count, 0);
cdt->cdt_user_request_mask = (1UL << HSMA_RESTORE);
cdt->cdt_group_request_mask = (1UL << HSMA_RESTORE);
}
GENERATE_PROC_METHOD(cdt_loop_period)
-GENERATE_PROC_METHOD(cdt_delay)
-GENERATE_PROC_METHOD(cdt_timeout)
-GENERATE_PROC_METHOD(cdt_max_request)
+GENERATE_PROC_METHOD(cdt_grace_delay)
+GENERATE_PROC_METHOD(cdt_active_req_timeout)
+GENERATE_PROC_METHOD(cdt_max_requests)
GENERATE_PROC_METHOD(cdt_default_archive_id)
/*
}
static struct lprocfs_vars lprocfs_mdt_hsm_vars[] = {
- { "agents", NULL, NULL, NULL, &mdt_hsm_agent_fops, 0 },
- { "agent_actions", NULL, NULL, NULL,
- &mdt_agent_actions_fops, 0444 },
- { "default_archive_id", lprocfs_rd_hsm_cdt_default_archive_id,
- lprocfs_wr_hsm_cdt_default_archive_id,
- NULL, NULL, 0 },
- { "grace_delay", lprocfs_rd_hsm_cdt_delay,
- lprocfs_wr_hsm_cdt_delay,
- NULL, NULL, 0 },
- { "loop_period", lprocfs_rd_hsm_cdt_loop_period,
- lprocfs_wr_hsm_cdt_loop_period,
- NULL, NULL, 0 },
- { "max_requests", lprocfs_rd_hsm_cdt_max_request,
- lprocfs_wr_hsm_cdt_max_request,
- NULL, NULL, 0 },
- { "policy", lprocfs_rd_hsm_policy, lprocfs_wr_hsm_policy,
- NULL, NULL, 0 },
- { "request_timeout", lprocfs_rd_hsm_cdt_timeout,
- lprocfs_wr_hsm_cdt_timeout,
- NULL, NULL, 0 },
- { "requests", NULL, NULL, NULL, &mdt_hsm_request_fops, 0 },
- { "user_request_mask", lprocfs_rd_hsm_user_request_mask,
- lprocfs_wr_hsm_user_request_mask, },
- { "group_request_mask", lprocfs_rd_hsm_group_request_mask,
- lprocfs_wr_hsm_group_request_mask, },
- { "other_request_mask", lprocfs_rd_hsm_other_request_mask,
- lprocfs_wr_hsm_other_request_mask, },
- { NULL }
+ { "agents", NULL, NULL, NULL, &mdt_hsm_agent_fops,
+ 0 },
+ { "actions", NULL, NULL, NULL, &mdt_hsm_actions_fops,
+ 0444 },
+ { "default_archive_id", lprocfs_rd_hsm_cdt_default_archive_id,
+ lprocfs_wr_hsm_cdt_default_archive_id,
+ NULL, NULL, 0 },
+ { "grace_delay", lprocfs_rd_hsm_cdt_grace_delay,
+ lprocfs_wr_hsm_cdt_grace_delay,
+ NULL, NULL, 0 },
+ { "loop_period", lprocfs_rd_hsm_cdt_loop_period,
+ lprocfs_wr_hsm_cdt_loop_period,
+ NULL, NULL, 0 },
+ { "max_requests", lprocfs_rd_hsm_cdt_max_requests,
+ lprocfs_wr_hsm_cdt_max_requests,
+ NULL, NULL, 0 },
+ { "policy", lprocfs_rd_hsm_policy,
+ lprocfs_wr_hsm_policy,
+ NULL, NULL, 0 },
+ { "active_request_timeout", lprocfs_rd_hsm_cdt_active_req_timeout,
+ lprocfs_wr_hsm_cdt_active_req_timeout,
+ NULL, NULL, 0 },
+ { "active_requests", NULL, NULL, NULL,
+ &mdt_hsm_active_requests_fops, 0 },
+ { "user_request_mask", lprocfs_rd_hsm_user_request_mask,
+ lprocfs_wr_hsm_user_request_mask, },
+ { "group_request_mask", lprocfs_rd_hsm_group_request_mask,
+ lprocfs_wr_hsm_group_request_mask, },
+ { "other_request_mask", lprocfs_rd_hsm_other_request_mask,
+ lprocfs_wr_hsm_other_request_mask, },
+ { 0 }
};
*
*/
/*
- * lustre/mdt/mdt_agent_actions.c
+ * lustre/mdt/mdt_hsm_cdt_actions.c
*
* Lustre HSM
*
* seq_file method called to start access to /proc file
* get llog context + llog handle
*/
-static void *mdt_agent_actions_proc_start(struct seq_file *s, loff_t *pos)
+static void *mdt_hsm_actions_proc_start(struct seq_file *s, loff_t *pos)
{
struct agent_action_iterator *aai = s->private;
ENTRY;
RETURN(aai);
}
-static void *mdt_agent_actions_proc_next(struct seq_file *s, void *v,
+static void *mdt_hsm_actions_proc_next(struct seq_file *s, void *v,
loff_t *pos)
{
RETURN(NULL);
/**
* llog_cat_process() callback, used to fill a seq_file buffer
*/
-static int agent_actions_show_cb(const struct lu_env *env,
+static int hsm_actions_show_cb(const struct lu_env *env,
struct llog_handle *llh,
struct llog_rec_hdr *hdr,
void *data)
}
/**
- * mdt_agent_actions_proc_show() is called at for each seq record
+ * mdt_hsm_actions_proc_show() is called for each seq record
 * process the llog, with a cb which fills the file_seq buffer
 * to be faster, one show will fill multiple records
*/
-static int mdt_agent_actions_proc_show(struct seq_file *s, void *v)
+static int mdt_hsm_actions_proc_show(struct seq_file *s, void *v)
{
struct agent_action_iterator *aai = s->private;
int rc;
RETURN(0);
rc = llog_cat_process(&aai->aai_env, aai->aai_ctxt->loc_handle,
- agent_actions_show_cb, s,
+ hsm_actions_show_cb, s,
aai->aai_cat_index, aai->aai_index + 1);
if (rc == 0) /* all llog parsed */
aai->aai_eof = true;
* seq_file method called to stop access to /proc file
* clean + put llog context
*/
-static void mdt_agent_actions_proc_stop(struct seq_file *s, void *v)
+static void mdt_hsm_actions_proc_stop(struct seq_file *s, void *v)
{
struct agent_action_iterator *aai = s->private;
ENTRY;
return;
}
-static const struct seq_operations mdt_agent_actions_proc_ops = {
- .start = mdt_agent_actions_proc_start,
- .next = mdt_agent_actions_proc_next,
- .show = mdt_agent_actions_proc_show,
- .stop = mdt_agent_actions_proc_stop,
+static const struct seq_operations mdt_hsm_actions_proc_ops = {
+ .start = mdt_hsm_actions_proc_start,
+ .next = mdt_hsm_actions_proc_next,
+ .show = mdt_hsm_actions_proc_show,
+ .stop = mdt_hsm_actions_proc_stop,
};
-static int lprocfs_open_agent_actions(struct inode *inode, struct file *file)
+static int lprocfs_open_hsm_actions(struct inode *inode, struct file *file)
{
struct agent_action_iterator *aai;
struct seq_file *s;
if (LPROCFS_ENTRY_CHECK(PDE(inode)))
RETURN(-ENOENT);
- rc = seq_open(file, &mdt_agent_actions_proc_ops);
+ rc = seq_open(file, &mdt_hsm_actions_proc_ops);
if (rc)
RETURN(rc);
}
/**
- * lprocfs_release_agent_actions() is called at end of /proc access
- * free alloacted ressources and call cleanup lprocfs methods
+ * lprocfs_release_hsm_actions() is called at end of /proc access.
+ * It frees allocated resources and calls cleanup lprocfs methods.
*/
-static int lprocfs_release_agent_actions(struct inode *inode, struct file *file)
+static int lprocfs_release_hsm_actions(struct inode *inode, struct file *file)
{
struct seq_file *seq = file->private_data;
struct agent_action_iterator *aai = seq->private;
return lprocfs_seq_release(inode, file);
}
-/* methods to access agent actions llog through /proc */
-const struct file_operations mdt_agent_actions_fops = {
+/* Methods to access HSM action list LLOG through /proc */
+const struct file_operations mdt_hsm_actions_fops = {
.owner = THIS_MODULE,
- .open = lprocfs_open_agent_actions,
+ .open = lprocfs_open_hsm_actions,
.read = seq_read,
.llseek = seq_lseek,
- .release = lprocfs_release_agent_actions,
+ .release = lprocfs_release_hsm_actions,
};
/**
* seq_file method called to start access to /proc file
*/
-static void *mdt_hsm_request_proc_start(struct seq_file *s, loff_t *p)
+static void *mdt_hsm_active_requests_proc_start(struct seq_file *s, loff_t *p)
{
struct mdt_device *mdt = s->private;
struct coordinator *cdt = &mdt->mdt_coordinator;
* seq_file method called to get next item
* just returns NULL at eof
*/
-static void *mdt_hsm_request_proc_next(struct seq_file *s, void *v, loff_t *p)
+static void *mdt_hsm_active_requests_proc_next(struct seq_file *s, void *v,
+ loff_t *p)
{
struct mdt_device *mdt = s->private;
struct coordinator *cdt = &mdt->mdt_coordinator;
/**
* display request data
*/
-static int mdt_hsm_request_proc_show(struct seq_file *s, void *v)
+static int mdt_hsm_active_requests_proc_show(struct seq_file *s, void *v)
{
struct list_head *pos = v;
struct cdt_agent_req *car;
/**
* seq_file method called to stop access to /proc file
*/
-static void mdt_hsm_request_proc_stop(struct seq_file *s, void *v)
+static void mdt_hsm_active_requests_proc_stop(struct seq_file *s, void *v)
{
struct mdt_device *mdt = s->private;
struct coordinator *cdt = &mdt->mdt_coordinator;
}
/* hsm active requests list proc functions */
-static const struct seq_operations mdt_hsm_request_proc_ops = {
- .start = mdt_hsm_request_proc_start,
- .next = mdt_hsm_request_proc_next,
- .show = mdt_hsm_request_proc_show,
- .stop = mdt_hsm_request_proc_stop,
+static const struct seq_operations mdt_hsm_active_requests_proc_ops = {
+ .start = mdt_hsm_active_requests_proc_start,
+ .next = mdt_hsm_active_requests_proc_next,
+ .show = mdt_hsm_active_requests_proc_show,
+ .stop = mdt_hsm_active_requests_proc_stop,
};
/**
* public function called at open of /proc file to get
 * list of active requests
*/
-static int lprocfs_open_hsm_request(struct inode *inode, struct file *file)
+static int lprocfs_open_hsm_active_requests(struct inode *inode,
+ struct file *file)
{
struct seq_file *s;
int rc;
if (LPROCFS_ENTRY_CHECK(PDE(inode)))
RETURN(-ENOENT);
- rc = seq_open(file, &mdt_hsm_request_proc_ops);
+ rc = seq_open(file, &mdt_hsm_active_requests_proc_ops);
if (rc) {
RETURN(rc);
}
}
/* methods to access hsm request list */
-const struct file_operations mdt_hsm_request_fops = {
+const struct file_operations mdt_hsm_active_requests_fops = {
.owner = THIS_MODULE,
- .open = lprocfs_open_hsm_request,
+ .open = lprocfs_open_hsm_active_requests,
.read = seq_read,
.llseek = seq_lseek,
.release = lprocfs_seq_release,
struct mutex cdt_restore_lock; /**< protect restore
* list */
cfs_time_t cdt_loop_period; /**< llog scan period */
- cfs_time_t cdt_delay; /**< request grace
+ cfs_time_t cdt_grace_delay; /**< request grace
* delay */
- cfs_time_t cdt_timeout; /**< request timeout */
- __u32 cdt_default_archive_id; /** archive id used
+ cfs_time_t cdt_active_req_timeout; /**< request timeout */
+ __u32 cdt_default_archive_id; /**< archive id used
* when none are
* specified */
- __u64 cdt_max_request; /**< max count of started
+ __u64 cdt_max_requests; /**< max count of started
* requests */
atomic_t cdt_request_count; /**< current count of
* started requests */
int mdt_hsm_ct_unregister(struct mdt_thread_info *info);
int mdt_hsm_request(struct mdt_thread_info *info);
/* mdt/mdt_hsm_cdt_actions.c */
-extern const struct file_operations mdt_agent_actions_fops;
+extern const struct file_operations mdt_hsm_actions_fops;
void dump_llog_agent_req_rec(const char *prefix,
const struct llog_agent_req_rec *larr);
int cdt_llog_process(const struct lu_env *env, struct mdt_device *mdt,
bool mdt_hsm_restore_is_running(struct mdt_thread_info *mti,
const struct lu_fid *fid);
/* mdt/mdt_hsm_cdt_requests.c */
-extern const struct file_operations mdt_hsm_request_fops;
+extern const struct file_operations mdt_hsm_active_requests_fops;
void dump_requests(char *prefix, struct coordinator *cdt);
struct cdt_agent_req *mdt_cdt_alloc_request(__u64 compound_id, __u32 archive_id,
__u64 flags, struct obd_uuid *uuid,
local request=$2
local state=$3
- local cmd="$LCTL get_param -n $HSM_PARAM.agent_actions"
+ local cmd="$LCTL get_param -n $HSM_PARAM.actions"
cmd+=" | awk '/'$fid'.*action='$request'/ {print \\\$13}' | cut -f2 -d="
wait_result $SINGLEMDS "$cmd" $state 100 ||
local fid=$1
local request=$2
- do_facet $SINGLEMDS "$LCTL get_param -n $HSM_PARAM.agent_actions |"\
+ do_facet $SINGLEMDS "$LCTL get_param -n $HSM_PARAM.actions |"\
"awk '/'$fid'.*action='$request'/ {print \\\$13}' | cut -f2 -d="
}
local fid=$1
local request=$2
- do_facet $SINGLEMDS "$LCTL get_param -n $HSM_PARAM.agent_actions |"\
+ do_facet $SINGLEMDS "$LCTL get_param -n $HSM_PARAM.actions |"\
"awk -vn=0 '/'$fid'.*action='$request'/ {n++}; END {print n}'"
}
wait_all_done() {
local timeout=$1
- local cmd="$LCTL get_param -n $HSM_PARAM.agent_actions"
+ local cmd="$LCTL get_param -n $HSM_PARAM.actions"
cmd+=" | egrep 'WAITING|STARTED'"
wait_result $SINGLEMDS "$cmd" "" $timeout ||
test_100() {
double_verify_reset_hsm_param loop_period
double_verify_reset_hsm_param grace_delay
- double_verify_reset_hsm_param request_timeout
+ double_verify_reset_hsm_param active_request_timeout
double_verify_reset_hsm_param max_requests
double_verify_reset_hsm_param default_archive_id
}
echo "Current requests"
local res=$(do_facet $SINGLEMDS "$LCTL get_param -n\
- $HSM_PARAM.agent_actions |\
+ $HSM_PARAM.actions |\
grep -v CANCELED | grep -v SUCCEED | grep -v FAILED")
[[ -z "$res" ]] || error "Some request have not been canceled"
cdt_disable
$LFS hsm_archive --archive $HSM_ARCHIVE_NUMBER --data $DATA $f
local data1=$(do_facet $SINGLEMDS "$LCTL get_param -n\
- $HSM_PARAM.agent_actions |\
+ $HSM_PARAM.actions |\
grep $fid | cut -f16 -d=")
cdt_enable
$LFS hsm_archive $DIR/$tdir/$i
done
local reqcnt1=$(do_facet $SINGLEMDS "$LCTL get_param -n\
- $HSM_PARAM.agent_actions |\
+ $HSM_PARAM.actions |\
grep WAITING | wc -l")
cdt_restart
cdt_disable
local reqcnt2=$(do_facet $SINGLEMDS "$LCTL get_param -n\
- $HSM_PARAM.agent_actions |\
+ $HSM_PARAM.actions |\
grep WAITING | wc -l")
cdt_enable
cdt_purge
while [[ $cnt != 0 || $wt != 0 ]]; do
sleep 1
cnt=$(do_facet $SINGLEMDS "$LCTL get_param -n\
- $HSM_PARAM.agent_actions |\
+ $HSM_PARAM.actions |\
grep STARTED | grep -v CANCEL | wc -l")
[[ $cnt -le $maxrequest ]] ||
error "$cnt > $maxrequest too many started requests"
wt=$(do_facet $SINGLEMDS "$LCTL get_param\
- $HSM_PARAM.agent_actions |\
+ $HSM_PARAM.actions |\
grep WAITING | wc -l")
echo "max=$maxrequest started=$cnt waiting=$wt"
done
cdt_disable
# to have a short test
- local old_to=$(get_hsm_param request_timeout)
- set_hsm_param request_timeout 4
+ local old_to=$(get_hsm_param active_request_timeout)
+ set_hsm_param active_request_timeout 4
# to be sure the cdt will wake up frequently so
# it will be able to cancel the "old" request
local old_loop=$(get_hsm_param loop_period)
sleep 5
wait_request_state $fid ARCHIVE CANCELED
- set_hsm_param request_timeout $old_to
+ set_hsm_param active_request_timeout $old_to
set_hsm_param loop_period $old_loop
copytool_cleanup