4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details. A copy is
14 * included in the COPYING file that accompanied this code.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 * Copyright (c) 2011, 2012 Commissariat a l'energie atomique et aux energies
26 * Copyright (c) 2013, 2017, Intel Corporation.
27 * Use is subject to license terms.
30 * lustre/mdt/mdt_coordinator.c
32 * Lustre HSM Coordinator
34 * Author: Jacques-Charles Lafoucriere <jacques-charles.lafoucriere@cea.fr>
35 * Author: Aurelien Degremont <aurelien.degremont@cea.fr>
36 * Author: Thomas Leibovici <thomas.leibovici@cea.fr>
39 #define DEBUG_SUBSYSTEM S_MDS
41 #include <linux/kthread.h>
42 #include <obd_support.h>
43 #include <lustre_export.h>
45 #include <lprocfs_status.h>
46 #include <lustre_log.h>
47 #include <lustre_kernelcomm.h>
48 #include "mdt_internal.h"
51 * get obj and HSM attributes on a fid
52 * \param mti [IN] context
53 * \param fid [IN] object fid
54 * \param hsm [OUT] HSM meta data
55 * \retval obj or error (-ENOENT if not found)
/* Find an object by FID and read its HSM metadata into *hsm.
 * Returns the pinned object on success, or an ERR_PTR on failure
 * (-ENOENT if the object does not exist).
 * NOTE(review): intervening lines of this function are elided in this
 * excerpt; comments below describe only the statements shown. */
57 struct mdt_object *mdt_hsm_get_md_hsm(struct mdt_thread_info *mti,
58 const struct lu_fid *fid,
62 struct mdt_object *obj;
70 /* find object by FID */
71 obj = mdt_object_find(mti->mti_env, mti->mti_mdt, fid);
/* Drop the reference before reporting a missing object. */
75 if (!mdt_object_exists(obj)) {
77 mdt_object_put(mti->mti_env, obj);
78 RETURN(ERR_PTR(-ENOENT));
/* Fetch the complex attribute set (expected to include the HSM xattr). */
81 rc = mdt_attr_get_complex(mti, obj, ma);
83 mdt_object_put(mti->mti_env, obj);
/* When no HSM attribute is present, hand back zeroed HSM metadata
 * (the branch copying ma's HSM data is elided here). */
87 if (ma->ma_valid & MA_HSM)
90 memset(hsm, 0, sizeof(*hsm));
/* Debug helper: dump a hsm_action_list (HAL) header and each of its
 * hsm_action_item entries to the Lustre debug log at the given level,
 * prefixing every line with @prefix. Purely diagnostic; no side effects
 * on the HAL itself. */
95 void mdt_hsm_dump_hal(int level, const char *prefix,
96 struct hsm_action_list *hal)
99 struct hsm_action_item *hai;
102 CDEBUG(level, "%s: HAL header: version %X count %d"
103 " archive_id %d flags %#llx\n",
104 prefix, hal->hal_version, hal->hal_count,
105 hal->hal_archive_id, hal->hal_flags);
/* Walk the packed hai array; advance-to-next lines are elided in this
 * excerpt, presumably via hai_next(). */
107 hai = hai_first(hal);
108 for (i = 0; i < hal->hal_count; i++) {
/* sz is the length of the opaque data payload that follows the item. */
109 sz = hai->hai_len - sizeof(*hai);
110 CDEBUG(level, "%s %d: fid="DFID" dfid="DFID
112 " action=%s extent=%#llx-%#llx gid=%#llx"
113 " datalen=%d data=[%s]\n",
115 PFID(&hai->hai_fid), PFID(&hai->hai_dfid),
117 hsm_copytool_action2name(hai->hai_action),
118 hai->hai_extent.offset,
119 hai->hai_extent.length,
121 hai_dump_data_field(hai, buf, sizeof(buf)));
127 * data passed to llog_cat_process() callback
128 * to scan requests and take actions
/* One batch of actions destined for a single archive: wraps a
 * heap-allocated HAL (size/used-size fields are elided in this excerpt). */
130 struct hsm_scan_request {
133 struct hsm_action_list *hal;
/* Per-scan state shared between the coordinator thread and the llog
 * scanning callbacks: accumulates hsm_scan_request batches to dispatch. */
136 struct hsm_scan_data {
137 struct mdt_thread_info *hsd_mti;
138 char hsd_fsname[MTI_NAME_MAXLEN + 1];
139 /* are we scanning the logs for housekeeping, or just looking
142 bool hsd_housekeeping;
/* true once one restore has been force-scheduled in this pass */
143 bool hsd_one_restore;
/* number of actions accumulated across all requests below */
144 int hsd_action_count;
145 int hsd_request_len; /* array alloc len */
146 int hsd_request_count; /* array used count */
147 struct hsm_scan_request *hsd_request;
/* llog scan callback for records in the WAITING state: pack the record's
 * hsm_action_item into an existing compatible HAL in hsd (same archive id,
 * enough room), or allocate a new hsm_scan_request for it. Restores get
 * priority: one restore may be force-scheduled even when the request
 * budget is exhausted. Returns 0 to continue scanning or LLOG_PROC_BREAK
 * to stop.
 * NOTE(review): several lines are elided in this excerpt; comments only
 * cover the statements shown. */
150 static int mdt_cdt_waiting_cb(const struct lu_env *env,
151 struct mdt_device *mdt,
152 struct llog_handle *llh,
153 struct llog_agent_req_rec *larr,
154 struct hsm_scan_data *hsd)
156 struct coordinator *cdt = &mdt->mdt_coordinator;
157 struct hsm_scan_request *request;
158 struct hsm_action_item *hai;
163 /* Are agents full? */
164 if (atomic_read(&cdt->cdt_request_count) >= cdt->cdt_max_requests)
165 RETURN(hsd->hsd_housekeeping ? 0 : LLOG_PROC_BREAK);
167 if (hsd->hsd_action_count + atomic_read(&cdt->cdt_request_count) >=
168 cdt->cdt_max_requests) {
169 /* We cannot send any more request
171 * *** SPECIAL CASE ***
173 * Restore requests are too important not to schedule at least
174 * one, everytime we can.
/* Only a restore, and only one per pass, may bypass the budget. */
176 if (larr->arr_hai.hai_action != HSMA_RESTORE ||
177 hsd->hsd_one_restore)
178 RETURN(hsd->hsd_housekeeping ? 0 : LLOG_PROC_BREAK);
/* Rounded size this item will occupy inside a HAL buffer. */
181 hai_size = cfs_size_round(larr->arr_hai.hai_len);
182 archive_id = larr->arr_archive_id;
184 /* Can we add this action to one of the existing HALs in hsd. */
186 for (i = 0; i < hsd->hsd_request_count; i++) {
187 if (hsd->hsd_request[i].hal->hal_archive_id == archive_id &&
188 hsd->hsd_request[i].hal_used_sz + hai_size <=
190 request = &hsd->hsd_request[i];
195 /* Are we trying to force-schedule a request? */
196 if (hsd->hsd_action_count + atomic_read(&cdt->cdt_request_count) >=
197 cdt->cdt_max_requests) {
198 /* Is there really no compatible hsm_scan_request? */
/* Scan backwards over the already-visited requests for one with a
 * matching archive id that we can make room in. */
200 for (i -= 1; i >= 0; i--) {
201 if (hsd->hsd_request[i].hal->hal_archive_id ==
203 request = &hsd->hsd_request[i];
209 /* Make room for the hai */
211 /* Discard the last hai until there is enough space */
213 request->hal->hal_count--;
/* Re-walk the remaining items to recompute the used size
 * (advance step elided in this excerpt). */
215 hai = hai_first(request->hal);
216 for (i = 0; i < request->hal->hal_count; i++)
218 request->hal_used_sz -=
219 cfs_size_round(hai->hai_len);
220 hsd->hsd_action_count--;
221 } while (request->hal_used_sz + hai_size >
/* No compatible HAL to shrink: during housekeeping, drop a whole
 * trailing request instead to bound the work. */
223 } else if (hsd->hsd_housekeeping) {
224 struct hsm_scan_request *tmp;
226 /* Discard the (whole) last hal */
227 hsd->hsd_request_count--;
228 LASSERT(hsd->hsd_request_count >= 0);
229 tmp = &hsd->hsd_request[hsd->hsd_request_count];
230 hsd->hsd_action_count -= tmp->hal->hal_count;
231 LASSERT(hsd->hsd_action_count >= 0);
232 OBD_FREE(tmp->hal, tmp->hal_sz);
234 /* Bailing out, this code path is too hot */
235 RETURN(LLOG_PROC_BREAK);
/* No matching HAL found and budget allows: start a new request slot. */
241 struct hsm_action_list *hal;
243 LASSERT(hsd->hsd_request_count < hsd->hsd_request_len);
244 request = &hsd->hsd_request[hsd->hsd_request_count];
246 /* allocates hai vector size just needs to be large
248 request->hal_sz = sizeof(*request->hal) +
249 cfs_size_round(MTI_NAME_MAXLEN + 1) + 2 * hai_size;
250 OBD_ALLOC_LARGE(hal, request->hal_sz);
/* Initialize the fresh HAL header from the llog record. */
254 hal->hal_version = HAL_VERSION;
255 strlcpy(hal->hal_fsname, hsd->hsd_fsname, MTI_NAME_MAXLEN + 1);
256 hal->hal_archive_id = larr->arr_archive_id;
257 hal->hal_flags = larr->arr_flags;
259 request->hal_used_sz = hal_size(hal);
261 hsd->hsd_request_count++;
262 } else if (request->hal_sz < request->hal_used_sz + hai_size) {
263 /* Not enough room, need an extension */
/* Double the buffer, capped at LDLM_MAXREQSIZE. */
267 sz = min_t(int, 2 * request->hal_sz, LDLM_MAXREQSIZE);
268 LASSERT(request->hal_used_sz + hai_size < sz);
270 OBD_ALLOC_LARGE(hal_buffer, sz);
274 memcpy(hal_buffer, request->hal, request->hal_used_sz);
275 OBD_FREE_LARGE(request->hal, request->hal_sz);
276 request->hal = hal_buffer;
277 request->hal_sz = sz;
/* Position hai at the append point (advance step elided), then copy
 * the record's action item in. */
280 hai = hai_first(request->hal);
281 for (i = 0; i < request->hal->hal_count; i++)
284 memcpy(hai, &larr->arr_hai, larr->arr_hai.hai_len);
286 request->hal_used_sz += hai_size;
287 request->hal->hal_count++;
289 hsd->hsd_action_count++;
291 switch (hai->hai_action) {
/* Remember that a restore was scheduled, then register the record's
 * cookie -> llog position mapping (fallthrough into the common case). */
295 hsd->hsd_one_restore = true;
296 /* Intentional fallthrough */
298 cdt_agent_record_hash_add(cdt, hai->hai_cookie,
299 llh->lgh_hdr->llh_cat_idx,
300 larr->arr_hdr.lrh_index);
/* llog scan callback for records in the STARTED state (housekeeping
 * only): detect requests whose copytool has been silent longer than
 * cdt_active_req_timeout, cancel them (emit a changelog record, drop the
 * restore handle, rewrite the llog record as ARS_CANCELED) and wake the
 * coordinator since a request slot freed up.
 * NOTE(review): several lines are elided in this excerpt; comments only
 * cover the statements shown. */
306 static int mdt_cdt_started_cb(const struct lu_env *env,
307 struct mdt_device *mdt,
308 struct llog_handle *llh,
309 struct llog_agent_req_rec *larr,
310 struct hsm_scan_data *hsd)
312 struct coordinator *cdt = &mdt->mdt_coordinator;
313 struct hsm_action_item *hai = &larr->arr_hai;
314 struct cdt_agent_req *car;
315 time64_t now = ktime_get_real_seconds();
317 enum changelog_rec_flags clf_flags;
/* Timeout handling is only done on housekeeping passes. */
320 if (!hsd->hsd_housekeeping)
323 /* we search for a running request
324 * error may happen if coordinator crashes or stopped
325 * with running request
327 car = mdt_cdt_find_request(cdt, hai->hai_cookie);
/* last = most recent activity: the llog record change time, or the
 * in-memory request's last update when one exists. */
329 last = larr->arr_req_change;
331 last = car->car_req_update;
334 /* test if request too long, if yes cancel it
335 * the same way the copy tool acknowledge a cancel request */
336 if (now <= last + cdt->cdt_active_req_timeout)
337 GOTO(out_car, rc = 0);
339 dump_llog_agent_req_rec("request timed out, start cleaning", larr);
342 car->car_req_update = now;
343 mdt_hsm_agent_update_statistics(cdt, 0, 1, 0, &car->car_uuid);
344 /* Remove car from memory list (LU-9075) */
345 mdt_cdt_remove_request(cdt, hai->hai_cookie);
348 /* Emit a changelog record for the failed action.*/
350 hsm_set_cl_error(&clf_flags, ECANCELED);
/* Map the HSM action onto the corresponding changelog event
 * (case labels elided in this excerpt). */
352 switch (hai->hai_action) {
354 hsm_set_cl_event(&clf_flags, HE_ARCHIVE);
357 hsm_set_cl_event(&clf_flags, HE_RESTORE);
360 hsm_set_cl_event(&clf_flags, HE_REMOVE);
363 hsm_set_cl_event(&clf_flags, HE_CANCEL);
366 /* Unknown record type, skip changelog. */
372 mo_changelog(env, CL_HSM, clf_flags, mdt->mdt_child,
/* A canceled restore must also release its layout-lock handle. */
375 if (hai->hai_action == HSMA_RESTORE)
376 cdt_restore_handle_del(hsd->hsd_mti, cdt, &hai->hai_fid);
/* Persist the cancellation in the agent llog. */
378 larr->arr_status = ARS_CANCELED;
379 larr->arr_req_change = now;
380 rc = llog_write(hsd->hsd_mti->mti_env, llh, &larr->arr_hdr,
381 larr->arr_hdr.lrh_index);
/* If the record cannot be updated, ask the scanner to delete it. */
383 CERROR("%s: cannot update agent log: rc = %d\n",
384 mdt_obd_name(mdt), rc);
385 rc = LLOG_DEL_RECORD;
388 /* ct has completed a request, so a slot is available,
389 * signal the coordinator to find new work */
390 mdt_hsm_cdt_event(cdt);
/* Release the reference taken by mdt_cdt_find_request(). */
393 mdt_cdt_put_request(car);
399 * llog_cat_process() callback, used to:
400 * - find waiting request and start action
401 * - purge canceled and done requests
402 * \param env [IN] environment
403 * \param llh [IN] llog handle
404 * \param hdr [IN] llog record
405 * \param data [IN/OUT] cb data = struct hsm_scan_data
407 * \retval -ve failure
/* Dispatcher: routes each agent llog record to the WAITING or STARTED
 * handler by status, and garbage-collects finished records older than
 * cdt_grace_delay during housekeeping passes. */
409 static int mdt_coordinator_cb(const struct lu_env *env,
410 struct llog_handle *llh,
411 struct llog_rec_hdr *hdr,
414 struct llog_agent_req_rec *larr = (struct llog_agent_req_rec *)hdr;
415 struct hsm_scan_data *hsd = data;
416 struct mdt_device *mdt = hsd->hsd_mti->mti_mdt;
417 struct coordinator *cdt = &mdt->mdt_coordinator;
/* NOTE(review): redundant with the initializer on larr above. */
420 larr = (struct llog_agent_req_rec *)hdr;
421 dump_llog_agent_req_rec("mdt_coordinator_cb(): ", larr);
422 switch (larr->arr_status) {
424 RETURN(mdt_cdt_waiting_cb(env, mdt, llh, larr, hsd));
426 RETURN(mdt_cdt_started_cb(env, mdt, llh, larr, hsd));
/* Terminal-state records: purge only when housekeeping and past the
 * configured grace delay. */
428 if (!hsd->hsd_housekeeping)
431 if ((larr->arr_req_change + cdt->cdt_grace_delay) <
432 ktime_get_real_seconds()) {
433 cdt_agent_record_hash_del(cdt,
434 larr->arr_hai.hai_cookie);
435 RETURN(LLOG_DEL_RECORD);
442 /* Release the ressource used by the coordinator. Called when the
443 * coordinator is stopping. */
/* Tears down, under their respective locks: the in-memory request list
 * (plus cookie hash entries), the registered agent list, and the restore
 * handle list (releasing each held layout lock). */
444 static void mdt_hsm_cdt_cleanup(struct mdt_device *mdt)
446 struct coordinator *cdt = &mdt->mdt_coordinator;
447 struct cdt_agent_req *car, *tmp1;
448 struct hsm_agent *ha, *tmp2;
449 struct cdt_restore_handle *crh, *tmp3;
450 struct mdt_thread_info *cdt_mti;
/* Drain the running-request list and its cookie hash. */
453 down_write(&cdt->cdt_request_lock);
454 list_for_each_entry_safe(car, tmp1, &cdt->cdt_request_list,
456 cfs_hash_del(cdt->cdt_request_cookie_hash,
457 &car->car_hai->hai_cookie,
458 &car->car_cookie_hash);
459 list_del(&car->car_request_list);
460 mdt_cdt_put_request(car);
462 up_write(&cdt->cdt_request_lock);
/* Drop every registered copytool agent. */
464 down_write(&cdt->cdt_agent_lock);
465 list_for_each_entry_safe(ha, tmp2, &cdt->cdt_agents, ha_list) {
466 list_del(&ha->ha_list);
467 if (ha->ha_archive_cnt != 0)
468 OBD_FREE_PTR_ARRAY(ha->ha_archive_id,
472 up_write(&cdt->cdt_agent_lock);
/* Release outstanding restore handles and their layout locks. */
474 cdt_mti = lu_context_key_get(&cdt->cdt_env.le_ctx, &mdt_thread_key);
475 mutex_lock(&cdt->cdt_restore_lock);
476 list_for_each_entry_safe(crh, tmp3, &cdt->cdt_restore_handle_list,
478 list_del(&crh->crh_list);
479 /* give back layout lock */
480 mdt_object_unlock(cdt_mti, NULL, &crh->crh_lh, 1);
481 OBD_SLAB_FREE_PTR(crh, mdt_hsm_cdt_kmem);
483 mutex_unlock(&cdt->cdt_restore_lock);
487 * Coordinator state transition table, indexed on enum cdt_states, taking
488 * from and to states. For instance since CDT_INIT to CDT_RUNNING is a
489 * valid transition, cdt_transition[CDT_INIT][CDT_RUNNING] is true.
/* Row = current state, column = requested state; true means allowed. */
491 static bool cdt_transition[CDT_STATES_COUNT][CDT_STATES_COUNT] = {
492 /* from -> to: stopped init running disable stopping */
493 /* stopped */ { true, true, false, false, false },
494 /* init */ { true, false, true, false, false },
495 /* running */ { false, false, true, true, true },
496 /* disable */ { false, false, true, true, true },
497 /* stopping */ { true, false, false, false, false }
501 * Change coordinator thread state
502 * Some combinations are not valid, so catch them here.
504 * Returns 0 on success, with old_state set if not NULL, or -EINVAL if
505 * the transition was not possible.
/* Caller must hold cdt_state_lock (see set_cdt_state()); validity is
 * decided by the cdt_transition table above. */
507 static int set_cdt_state_locked(struct coordinator *cdt,
508 enum cdt_states new_state)
511 enum cdt_states state;
513 state = cdt->cdt_state;
515 if (cdt_transition[state][new_state]) {
516 cdt->cdt_state = new_state;
/* Invalid transition: log it (error-return lines elided here). */
520 "unexpected coordinator transition, from=%s, to=%s\n",
521 cdt_mdt_state2str(state), cdt_mdt_state2str(new_state));
/* Locked wrapper around set_cdt_state_locked(): serializes state
 * changes with cdt_state_lock. Returns the locked helper's result. */
528 static int set_cdt_state(struct coordinator *cdt, enum cdt_states new_state)
532 mutex_lock(&cdt->cdt_state_lock);
533 rc = set_cdt_state_locked(cdt, new_state);
534 mutex_unlock(&cdt->cdt_state_lock);
543 * \param data [IN] obd device
545 * \retval -ve failure
/* Main HSM coordinator kthread: loops waiting for events (or a 1s
 * timeout), scans the agent llog to collect waiting requests into HALs,
 * dispatches them to copytool agents, then updates the corresponding
 * llog records to ARS_STARTED/ARS_WAITING.
 * NOTE(review): many lines are elided in this excerpt; comments only
 * cover the statements shown. */
547 static int mdt_coordinator(void *data)
549 struct mdt_thread_info *mti = data;
550 struct mdt_device *mdt = mti->mti_mdt;
551 struct coordinator *cdt = &mdt->mdt_coordinator;
552 struct hsm_scan_data hsd = { NULL };
553 time64_t last_housekeeping = 0;
554 size_t request_sz = 0;
558 CDEBUG(D_HSM, "%s: coordinator thread starting, pid=%d\n",
559 mdt_obd_name(mdt), current->pid);
562 obd_uuid2fsname(hsd.hsd_fsname, mdt_obd_name(mdt),
563 sizeof(hsd.hsd_fsname));
565 set_cdt_state(cdt, CDT_RUNNING);
567 /* Inform mdt_hsm_cdt_start(). */
568 wake_up(&cdt->cdt_waitq);
575 struct hsm_record_update *updates;
577 /* Limit execution of the expensive requests traversal
578 * to at most one second. This prevents repeatedly
579 * locking/unlocking the catalog for each request
580 * and preventing other HSM operations from happening
582 wait_event_interruptible_timeout(cdt->cdt_waitq,
583 kthread_should_stop() ||
584 cdt->cdt_wakeup_coordinator,
585 cfs_time_seconds(1));
587 cdt->cdt_wakeup_coordinator = false;
588 CDEBUG(D_HSM, "coordinator resumes\n");
590 if (kthread_should_stop()) {
591 CDEBUG(D_HSM, "Coordinator stops\n");
596 /* if coordinator is suspended continue to wait */
597 if (cdt->cdt_state == CDT_DISABLE) {
598 CDEBUG(D_HSM, "disable state, coordinator sleeps\n");
602 /* If no event, and no housekeeping to do, continue to
/* Decide whether this pass is a periodic housekeeping scan or an
 * event-driven dispatch-only scan. */
604 if (last_housekeeping + cdt->cdt_loop_period <=
605 ktime_get_real_seconds()) {
606 last_housekeeping = ktime_get_real_seconds();
607 hsd.hsd_housekeeping = true;
608 } else if (cdt->cdt_event) {
609 hsd.hsd_housekeeping = false;
614 cdt->cdt_event = false;
616 CDEBUG(D_HSM, "coordinator starts reading llog\n");
/* Resize the hsm_scan_request array when the cdt_max_requests
 * tunable changed since the last pass. */
618 if (hsd.hsd_request_len != cdt->cdt_max_requests) {
619 /* cdt_max_requests has changed,
620 * we need to allocate a new buffer
622 struct hsm_scan_request *tmp = NULL;
623 int max_requests = cdt->cdt_max_requests;
624 OBD_ALLOC_LARGE(tmp, max_requests *
625 sizeof(struct hsm_scan_request));
627 CERROR("Failed to resize request buffer, "
628 "keeping it at %d\n",
629 hsd.hsd_request_len);
631 if (hsd.hsd_request != NULL)
632 OBD_FREE_LARGE(hsd.hsd_request,
635 hsd.hsd_request_len = max_requests;
636 request_sz = hsd.hsd_request_len *
637 sizeof(struct hsm_scan_request);
638 hsd.hsd_request = tmp;
/* Reset per-pass accumulation state before scanning the llog. */
642 hsd.hsd_action_count = 0;
643 hsd.hsd_request_count = 0;
644 hsd.hsd_one_restore = false;
646 rc = cdt_llog_process(mti->mti_env, mdt, mdt_coordinator_cb,
651 CDEBUG(D_HSM, "found %d requests to send\n",
652 hsd.hsd_request_count);
654 if (list_empty(&cdt->cdt_agents)) {
655 CDEBUG(D_HSM, "no agent available, "
656 "coordinator sleeps\n");
660 /* Compute how many HAI we have in all the requests */
662 for (i = 0; i < hsd.hsd_request_count; i++) {
663 const struct hsm_scan_request *request =
666 updates_cnt += request->hal->hal_count;
669 /* Allocate a temporary array to store the cookies to
670 * update, and their status. */
671 updates_sz = updates_cnt * sizeof(*updates);
672 OBD_ALLOC_LARGE(updates, updates_sz);
673 if (updates == NULL) {
674 CERROR("%s: Cannot allocate memory (%d bytes) "
675 "for %d updates. Too many HSM requests?\n",
676 mdt_obd_name(mdt), updates_sz, updates_cnt);
680 /* here hsd contains a list of requests to be started */
681 for (i = 0; i < hsd.hsd_request_count; i++) {
682 struct hsm_scan_request *request = &hsd.hsd_request[i];
683 struct hsm_action_list *hal = request->hal;
684 struct hsm_action_item *hai;
687 /* still room for work ? */
688 if (atomic_read(&cdt->cdt_request_count) >=
689 cdt->cdt_max_requests)
692 rc = mdt_hsm_agent_send(mti, hal, 0);
693 /* if failure, we suppose it is temporary
694 * if the copy tool failed to do the request
695 * it has to use hsm_progress
698 /* set up cookie vector to set records status
699 * after copy tools start or failed
701 hai = hai_first(hal);
702 for (j = 0; j < hal->hal_count; j++) {
703 updates[update_idx].cookie = hai->hai_cookie;
/* Failed sends stay WAITING for a retry next pass. */
704 updates[update_idx].status =
705 (rc ? ARS_WAITING : ARS_STARTED);
/* Batch-update the llog records for all dispatched cookies. */
712 rc = mdt_agent_record_update(mti->mti_env, mdt,
713 updates, update_idx);
715 CERROR("%s: mdt_agent_record_update() failed, "
716 "rc=%d, cannot update records "
718 mdt_obd_name(mdt), rc, update_idx);
721 OBD_FREE_LARGE(updates, updates_sz);
724 /* free hal allocated by callback */
725 for (i = 0; i < hsd.hsd_request_count; i++) {
726 struct hsm_scan_request *request = &hsd.hsd_request[i];
728 OBD_FREE_LARGE(request->hal, request->hal_sz);
/* Thread exit path: free the scan array and release coordinator
 * resources. */
732 if (hsd.hsd_request != NULL)
733 OBD_FREE_LARGE(hsd.hsd_request, request_sz);
735 mdt_hsm_cdt_cleanup(mdt);
738 CERROR("%s: coordinator thread exiting, process=%d, rc=%d\n",
739 mdt_obd_name(mdt), current->pid, rc);
741 CDEBUG(D_HSM, "%s: coordinator thread exiting, process=%d,"
743 mdt_obd_name(mdt), current->pid);
/* Register a restore in progress for @fid: allocate a cdt_restore_handle,
 * take an EX layout lock on the object (held for the whole restore), and
 * add the handle to cdt_restore_handle_list. Fails with -EAGAIN when the
 * coordinator is stopped/stopping.
 * NOTE(review): some lines are elided in this excerpt. */
748 int cdt_restore_handle_add(struct mdt_thread_info *mti, struct coordinator *cdt,
749 const struct lu_fid *fid,
750 const struct hsm_extent *he)
752 struct cdt_restore_handle *crh;
753 struct mdt_object *obj;
757 OBD_SLAB_ALLOC_PTR(crh, mdt_hsm_cdt_kmem);
762 /* in V1 all file is restored
763 * crh->extent.start = he->offset;
764 * crh->extent.end = he->offset + he->length;
/* V1 semantics: cover the whole file regardless of the requested offset. */
766 crh->crh_extent.start = 0;
767 crh->crh_extent.end = he->length;
768 /* get the layout lock */
769 mdt_lock_reg_init(&crh->crh_lh, LCK_EX);
770 obj = mdt_object_find_lock(mti, &crh->crh_fid, &crh->crh_lh,
771 MDS_INODELOCK_LAYOUT);
773 GOTO(out_crh, rc = PTR_ERR(obj));
775 /* We do not keep a reference on the object during the restore
776 * which can be very long. */
777 mdt_object_put(mti->mti_env, obj);
/* Refuse new restore handles once the coordinator is shutting down. */
779 mutex_lock(&cdt->cdt_restore_lock);
780 if (unlikely(cdt->cdt_state == CDT_STOPPED ||
781 cdt->cdt_state == CDT_STOPPING)) {
782 mutex_unlock(&cdt->cdt_restore_lock);
783 GOTO(out_lh, rc = -EAGAIN);
786 list_add_tail(&crh->crh_list, &cdt->cdt_restore_handle_list);
787 mutex_unlock(&cdt->cdt_restore_lock);
/* Error unwinding: release the layout lock, then free the handle. */
791 mdt_object_unlock(mti, NULL, &crh->crh_lh, 1);
793 OBD_SLAB_FREE_PTR(crh, mdt_hsm_cdt_kmem);
799 * lookup a restore handle by FID
800 * caller needs to hold cdt_restore_lock
801 * \param cdt [IN] coordinator
802 * \param fid [IN] FID
803 * \retval cdt_restore_handle found
804 * \retval NULL not found
/* Linear scan of the restore-handle list; small list assumed. */
806 struct cdt_restore_handle *cdt_restore_handle_find(struct coordinator *cdt,
807 const struct lu_fid *fid)
809 struct cdt_restore_handle *crh;
812 list_for_each_entry(crh, &cdt->cdt_restore_handle_list, crh_list) {
813 if (lu_fid_eq(&crh->crh_fid, fid))
/* Remove the restore handle for @fid (if any): unlink it from the list
 * under cdt_restore_lock, release the EX layout lock it held, and free
 * it. No-op when no handle matches (not-found branch elided here). */
820 void cdt_restore_handle_del(struct mdt_thread_info *mti,
821 struct coordinator *cdt, const struct lu_fid *fid)
823 struct cdt_restore_handle *crh;
825 /* give back layout lock */
826 mutex_lock(&cdt->cdt_restore_lock);
827 crh = cdt_restore_handle_find(cdt, fid);
829 list_del(&crh->crh_list);
830 mutex_unlock(&cdt->cdt_restore_lock);
835 /* XXX We pass a NULL object since the restore handle does not
836 * keep a reference on the object being restored. */
837 mdt_object_unlock(mti, NULL, &crh->crh_lh, 1);
838 OBD_SLAB_FREE_PTR(crh, mdt_hsm_cdt_kmem);
842 * data passed to llog_cat_process() callback
843 * to scan requests and take actions
/* Context for hsm_restore_cb(): just the thread info it should use. */
845 struct hsm_restore_data {
846 struct mdt_thread_info *hrd_mti;
850 * llog_cat_process() callback, used to:
851 * - find restore request and allocate the restore handle
852 * \param env [IN] environment
853 * \param llh [IN] llog handle
854 * \param hdr [IN] llog record
855 * \param data [IN/OUT] cb data = struct hsm_restore_data
857 * \retval -ve failure
/* Startup replay: bump cdt_last_cookie past every seen cookie, demote
 * STARTED restores back to WAITING, and re-take the layout lock for each
 * non-final restore via cdt_restore_handle_add().
 * NOTE(review): some lines are elided in this excerpt. */
859 static int hsm_restore_cb(const struct lu_env *env,
860 struct llog_handle *llh,
861 struct llog_rec_hdr *hdr, void *data)
863 struct llog_agent_req_rec *larr;
864 struct hsm_restore_data *hrd;
865 struct hsm_action_item *hai;
866 struct mdt_thread_info *mti;
867 struct coordinator *cdt;
873 cdt = &mti->mti_mdt->mdt_coordinator;
875 larr = (struct llog_agent_req_rec *)hdr;
876 hai = &larr->arr_hai;
877 if (hai->hai_cookie >= cdt->cdt_last_cookie) {
878 /* update the cookie to avoid collision */
879 cdt->cdt_last_cookie = hai->hai_cookie + 1;
/* Only in-flight restores need a handle; everything else is skipped. */
882 if (hai->hai_action != HSMA_RESTORE ||
883 agent_req_in_final_state(larr->arr_status))
886 /* restore request not in a final state */
888 /* force replay of restore requests left in started state from previous
889 * CDT context, to be canceled later if finally found to be incompatible
890 * when being re-started */
891 if (larr->arr_status == ARS_STARTED) {
892 larr->arr_status = ARS_WAITING;
893 larr->arr_req_change = ktime_get_real_seconds();
894 rc = llog_write(env, llh, hdr, hdr->lrh_index);
/* Recreate the restore handle (and its layout lock) for this FID. */
899 rc = cdt_restore_handle_add(mti, cdt, &hai->hai_fid, &hai->hai_extent);
905 * restore coordinator state at startup
906 * the goal is to take a layout lock for each registered restore request
907 * \param mti [IN] context
/* Walk the agent llog with hsm_restore_cb() to rebuild restore handles. */
909 static int mdt_hsm_pending_restore(struct mdt_thread_info *mti)
911 struct hsm_restore_data hrd;
917 rc = cdt_llog_process(mti->mti_env, mti->mti_mdt, hsm_restore_cb, &hrd,
/* Initialize the coordinator thread's credentials: a root-like identity
 * (id assignments elided in this excerpt) with filesystem + NFSD
 * capabilities, no supplementary groups, and audit enabled. */
923 int hsm_init_ucred(struct lu_ucred *uc)
926 uc->uc_valid = UCRED_OLD;
935 uc->uc_suppgids[0] = -1;
936 uc->uc_suppgids[1] = -1;
937 uc->uc_cap = cap_combine(CAP_FS_SET, CAP_NFSD_SET);
940 uc->uc_identity = NULL;
941 /* always record internal HSM activity if also enabled globally */
942 uc->uc_enable_audit = 1;
948 * initialize coordinator struct
949 * \param mdt [IN] device
951 * \retval -ve failure
/* One-time setup of the coordinator: locks, lists, the request-cookie
 * and agent-record hashes, the lu_env/session for the coordinator
 * thread, its credentials, and default tunables. Error paths unwind in
 * reverse order of construction. */
953 int mdt_hsm_cdt_init(struct mdt_device *mdt)
955 struct coordinator *cdt = &mdt->mdt_coordinator;
956 struct mdt_thread_info *cdt_mti = NULL;
960 init_waitqueue_head(&cdt->cdt_waitq);
961 init_rwsem(&cdt->cdt_llog_lock);
962 init_rwsem(&cdt->cdt_agent_lock);
963 init_rwsem(&cdt->cdt_request_lock);
964 mutex_init(&cdt->cdt_restore_lock);
965 mutex_init(&cdt->cdt_state_lock);
966 set_cdt_state(cdt, CDT_STOPPED);
968 INIT_LIST_HEAD(&cdt->cdt_request_list);
969 INIT_LIST_HEAD(&cdt->cdt_agents);
970 INIT_LIST_HEAD(&cdt->cdt_restore_handle_list);
/* Hash of in-flight requests keyed by action cookie. */
972 cdt->cdt_request_cookie_hash = cfs_hash_create("REQUEST_COOKIE_HASH",
979 &cdt_request_cookie_hash_ops,
981 if (cdt->cdt_request_cookie_hash == NULL)
/* Hash mapping cookies to their agent llog record positions. */
984 cdt->cdt_agent_record_hash = cfs_hash_create("AGENT_RECORD_HASH",
991 &cdt_agent_record_hash_ops,
993 if (cdt->cdt_agent_record_hash == NULL)
994 GOTO(out_request_cookie_hash, rc = -ENOMEM);
996 rc = lu_env_init(&cdt->cdt_env, LCT_MD_THREAD);
998 GOTO(out_agent_record_hash, rc);
1000 /* for mdt_ucred(), lu_ucred stored in lu_ucred_key */
1001 rc = lu_context_init(&cdt->cdt_session, LCT_SERVER_SESSION);
1005 lu_context_enter(&cdt->cdt_session);
1006 cdt->cdt_env.le_ses = &cdt->cdt_session;
1008 cdt_mti = lu_context_key_get(&cdt->cdt_env.le_ctx, &mdt_thread_key);
1009 LASSERT(cdt_mti != NULL);
1011 cdt_mti->mti_env = &cdt->cdt_env;
1012 cdt_mti->mti_mdt = mdt;
1014 hsm_init_ucred(mdt_ucred(cdt_mti));
1016 /* default values for sysfs tunnables
1017 * can be override by MGS conf */
1018 cdt->cdt_default_archive_id = 1;
1019 cdt->cdt_grace_delay = 60;
1020 cdt->cdt_loop_period = 10;
1021 cdt->cdt_max_requests = 3;
1022 cdt->cdt_policy = CDT_DEFAULT_POLICY;
1023 cdt->cdt_active_req_timeout = 3600;
1025 /* by default do not remove archives on last unlink */
1026 cdt->cdt_remove_archive_on_last_unlink = false;
/* Error unwinding, in reverse order of the allocations above. */
1031 lu_env_fini(&cdt->cdt_env);
1032 out_agent_record_hash:
1033 cfs_hash_putref(cdt->cdt_agent_record_hash);
1034 cdt->cdt_agent_record_hash = NULL;
1035 out_request_cookie_hash:
1036 cfs_hash_putref(cdt->cdt_request_cookie_hash);
1037 cdt->cdt_request_cookie_hash = NULL;
1043 * free a coordinator thread
1044 * \param mdt [IN] device
/* Counterpart of mdt_hsm_cdt_init(): tear down the session, lu_env and
 * both hashes. */
1046 int mdt_hsm_cdt_fini(struct mdt_device *mdt)
1048 struct coordinator *cdt = &mdt->mdt_coordinator;
1051 lu_context_exit(cdt->cdt_env.le_ses);
1052 lu_context_fini(cdt->cdt_env.le_ses);
1054 lu_env_fini(&cdt->cdt_env);
1056 cfs_hash_putref(cdt->cdt_agent_record_hash);
1057 cdt->cdt_agent_record_hash = NULL;
1059 cfs_hash_putref(cdt->cdt_request_cookie_hash);
1060 cdt->cdt_request_cookie_hash = NULL;
1066 * start a coordinator thread
1067 * \param mdt [IN] device
1069 * \retval -ve failure
/* Transition STOPPED->INIT, reset counters and masks, wait for the MDD
 * HSM llog to be configured, replay pending restores, then spawn the
 * mdt_coordinator kthread and wait until it leaves CDT_INIT.
 * NOTE(review): some lines are elided in this excerpt. */
1071 static int mdt_hsm_cdt_start(struct mdt_device *mdt)
1073 struct coordinator *cdt = &mdt->mdt_coordinator;
1074 struct mdt_thread_info *cdt_mti;
1078 struct task_struct *task;
1081 /* functions defined but not yet used
1082 * this avoid compilation warning
1084 ptr = dump_requests;
/* CDT_INIT is only reachable from CDT_STOPPED, so this also rejects
 * double starts. */
1086 rc = set_cdt_state(cdt, CDT_INIT);
1088 CERROR("%s: Coordinator already started or stopping\n",
1093 BUILD_BUG_ON(BIT(CDT_POLICY_SHIFT_COUNT - 1) != CDT_POLICY_LAST);
1094 cdt->cdt_policy = CDT_DEFAULT_POLICY;
1096 /* just need to be larger than previous one */
1097 /* cdt_last_cookie is protected by cdt_llog_lock */
1098 cdt->cdt_last_cookie = ktime_get_real_seconds();
1099 atomic_set(&cdt->cdt_request_count, 0);
1100 atomic_set(&cdt->cdt_archive_count, 0);
1101 atomic_set(&cdt->cdt_restore_count, 0);
1102 atomic_set(&cdt->cdt_remove_count, 0);
/* By default only RESTORE may be requested by non-root users. */
1103 cdt->cdt_user_request_mask = (1UL << HSMA_RESTORE);
1104 cdt->cdt_group_request_mask = (1UL << HSMA_RESTORE);
1105 cdt->cdt_other_request_mask = (1UL << HSMA_RESTORE);
1107 /* wait until MDD initialize hsm actions llog */
1108 while (!test_bit(MDT_FL_CFGLOG, &mdt->mdt_state) && i < obd_timeout) {
1109 schedule_timeout_interruptible(cfs_time_seconds(1));
1112 if (!test_bit(MDT_FL_CFGLOG, &mdt->mdt_state))
1113 CWARN("%s: trying to init HSM before MDD\n", mdt_obd_name(mdt));
1115 /* to avoid deadlock when start is made through sysfs
1116 * sysfs entries are created by the coordinator thread
1118 /* set up list of started restore requests */
1119 cdt_mti = lu_context_key_get(&cdt->cdt_env.le_ctx, &mdt_thread_key);
1120 rc = mdt_hsm_pending_restore(cdt_mti);
1122 CERROR("%s: cannot take the layout locks needed"
1123 " for registered restore: %d\n",
1124 mdt_obd_name(mdt), rc);
/* Read-only backend: skip running the coordinator (handling elided). */
1126 if (mdt->mdt_bottom->dd_rdonly)
1129 task = kthread_run(mdt_coordinator, cdt_mti, "hsm_cdtr");
/* Thread creation failure: roll the state back to STOPPED. */
1132 set_cdt_state(cdt, CDT_STOPPED);
1133 CERROR("%s: error starting coordinator thread: %d\n",
1134 mdt_obd_name(mdt), rc);
1136 cdt->cdt_task = task;
/* Block until mdt_coordinator() signals it has entered RUNNING. */
1137 wait_event(cdt->cdt_waitq,
1138 cdt->cdt_state != CDT_INIT);
1139 CDEBUG(D_HSM, "%s: coordinator thread started\n",
1148 * stop a coordinator thread
1149 * \param mdt [IN] device
/* Move to CDT_STOPPING, stop the kthread (which runs its own cleanup),
 * then record CDT_STOPPED. */
1151 int mdt_hsm_cdt_stop(struct mdt_device *mdt)
1153 struct coordinator *cdt = &mdt->mdt_coordinator;
1157 /* stop coordinator thread */
1158 rc = set_cdt_state(cdt, CDT_STOPPING);
1160 kthread_stop(cdt->cdt_task);
1161 cdt->cdt_task = NULL;
1162 set_cdt_state(cdt, CDT_STOPPED);
/* Mark @fid's HSM state with HS_EXISTS for @archive_id, persisting the
 * HSM xattr only when the flag or archive id actually changes. */
1168 static int mdt_hsm_set_exists(struct mdt_thread_info *mti,
1169 const struct lu_fid *fid,
1172 struct mdt_object *obj;
1176 obj = mdt_hsm_get_md_hsm(mti, fid, &mh);
1178 GOTO(out, rc = PTR_ERR(obj));
/* Already marked for this archive: nothing to write back. */
1180 if (mh.mh_flags & HS_EXISTS &&
1181 mh.mh_arch_id == archive_id)
1182 GOTO(out_obj, rc = 0);
1184 mh.mh_flags |= HS_EXISTS;
1185 mh.mh_arch_id = archive_id;
1186 rc = mdt_hsm_attr_set(mti, obj, &mh);
1189 mdt_object_put(mti->mti_env, obj);
1195 * register all requests from an hal in the memory list
1196 * \param mti [IN] context
1197 * \param hal [IN] request
1198 * \param uuid [OUT] in case of CANCEL, the uuid of the agent
1199 * which is running the CT
1201 * \retval -ve failure
/* For each hai in @hal: CANCEL marks the target's llog record canceled
 * and flags the running request (without memorizing the cancel itself);
 * ARCHIVE first sets HS_EXISTS on the file; all non-cancel actions get a
 * cdt_agent_req allocated and added to the coordinator's memory list.
 * NOTE(review): some lines are elided in this excerpt. */
1203 int mdt_hsm_add_hal(struct mdt_thread_info *mti,
1204 struct hsm_action_list *hal, struct obd_uuid *uuid)
1206 struct mdt_device *mdt = mti->mti_mdt;
1207 struct coordinator *cdt = &mdt->mdt_coordinator;
1208 struct hsm_action_item *hai;
1212 /* register request in memory list */
1213 hai = hai_first(hal);
1214 for (i = 0; i < hal->hal_count; i++, hai = hai_next(hai)) {
1215 struct cdt_agent_req *car;
1217 /* in case of a cancel request, we first mark the ondisk
1218 * record of the request we want to stop as canceled
1219 * this does not change the cancel record
1220 * it will be done when updating the request status
1222 if (hai->hai_action == HSMA_CANCEL) {
1223 struct hsm_record_update update = {
1224 .cookie = hai->hai_cookie,
1225 .status = ARS_CANCELED,
1228 rc = mdt_agent_record_update(mti->mti_env, mti->mti_mdt,
1231 CERROR("%s: mdt_agent_record_update() failed, "
1232 "rc=%d, cannot update status to %s "
1233 "for cookie %#llx\n",
1234 mdt_obd_name(mdt), rc,
1235 agent_req_status2name(ARS_CANCELED),
1240 /* find the running request to set it canceled */
1241 car = mdt_cdt_find_request(cdt, hai->hai_cookie);
1243 car->car_canceled = 1;
1244 /* uuid has to be changed to the one running the
1245 * request to cancel */
1246 *uuid = car->car_uuid;
1247 mdt_cdt_put_request(car);
1249 /* no need to memorize cancel request
1250 * this also avoid a deadlock when we receive
1251 * a purge all requests command
/* Archiving implies the copy will exist: record that up front. */
1256 if (hai->hai_action == HSMA_ARCHIVE) {
1257 rc = mdt_hsm_set_exists(mti, &hai->hai_fid,
1258 hal->hal_archive_id);
/* Track the action in memory; on add failure the request is freed
 * (success/error plumbing partially elided here). */
1265 car = mdt_cdt_alloc_request(hal->hal_archive_id, hal->hal_flags,
1268 GOTO(out, rc = PTR_ERR(car));
1270 rc = mdt_cdt_add_request(cdt, car);
1272 mdt_cdt_free_request(car);
1279 * swap layouts between 2 fids
1280 * \param mti [IN] context
1283 * \param mh_common [IN] MD HSM
/* Finalize a restore: lock the copytool's volatile file @dfid, stamp it
 * with the (RELEASED/DIRTY-cleared) HSM xattr, swap layouts with @obj
 * using SWAP_LAYOUTS_MDS_HSM, and downgrade SOM. @obj's layout lock is
 * assumed held by the caller. */
1285 static int hsm_swap_layouts(struct mdt_thread_info *mti,
1286 struct mdt_object *obj, const struct lu_fid *dfid,
1287 struct md_hsm *mh_common)
1289 struct mdt_object *dobj;
1290 struct mdt_lock_handle *dlh;
1294 if (!mdt_object_exists(obj))
1295 GOTO(out, rc = -ENOENT);
1297 /* we already have layout lock on obj so take only
1299 dlh = &mti->mti_lh[MDT_LH_OLD];
1300 mdt_lock_reg_init(dlh, LCK_EX);
1301 dobj = mdt_object_find_lock(mti, dfid, dlh, MDS_INODELOCK_LAYOUT);
1303 GOTO(out, rc = PTR_ERR(dobj));
1305 /* if copy tool closes the volatile before sending the final
1306 * progress through llapi_hsm_copy_end(), all the objects
1307 * are removed and mdd_swap_layout LBUG */
1308 if (!mdt_object_exists(dobj)) {
1309 CERROR("%s: Copytool has closed volatile file "DFID"\n",
1310 mdt_obd_name(mti->mti_mdt), PFID(dfid));
1311 GOTO(out_dobj, rc = -ENOENT);
1313 /* Since we only handle restores here, unconditionally use
1314 * SWAP_LAYOUTS_MDS_HSM flag to ensure original layout will
1315 * be preserved in case of failure during swap_layout and not
1316 * leave a file in an intermediate but incoherent state.
1317 * But need to setup HSM xattr of data FID before, reuse
1318 * mti and mh presets for FID in hsm_cdt_request_completed(),
1319 * only need to clear RELEASED and DIRTY.
1321 mh_common->mh_flags &= ~(HS_RELEASED | HS_DIRTY);
1322 rc = mdt_hsm_attr_set(mti, dobj, mh_common);
1324 rc = mo_swap_layouts(mti->mti_env,
1325 mdt_object_child(obj),
1326 mdt_object_child(dobj),
1327 SWAP_LAYOUTS_MDS_HSM);
/* Restored data invalidates any strict Size-On-MDS info; downgrade
 * is best-effort (failure is only logged). */
1329 rc = mdt_lsom_downgrade(mti, obj);
1332 "%s: File fid="DFID" SOM "
1333 "downgrade failed, rc = %d\n",
1334 mdt_obd_name(mti->mti_mdt),
1335 PFID(mdt_object_fid(obj)), rc);
1338 mdt_object_unlock_put(mti, dobj, dlh, 1);
/*
 * NOTE(review): elided listing — case labels, braces and RETURN are missing.
 * Post-process a completed copytool request: decide the final record status
 * (*status), build the CL_HSM changelog flags, update the file's HSM xattr
 * when needed, and for restores swap layouts and release the layout lock.
 */
1344 * update status of a completed request
1345 * \param mti [IN] context
1346 * \param pgs [IN] progress of the copy tool
1348 * \retval -ve failure
1350 static int hsm_cdt_request_completed(struct mdt_thread_info *mti,
1351 struct hsm_progress_kernel *pgs,
1352 const struct cdt_agent_req *car,
1353 enum agent_req_status *status)
1355 const struct lu_env *env = mti->mti_env;
1356 struct mdt_device *mdt = mti->mti_mdt;
1357 struct coordinator *cdt = &mdt->mdt_coordinator;
1358 struct mdt_object *obj = NULL;
1359 enum changelog_rec_flags clf_flags = 0;
1362 bool need_changelog = true;
1366 /* default is to retry */
1367 *status = ARS_WAITING;
1369 /* find object by FID, mdt_hsm_get_md_hsm() returns obj or err
1370 * if error/removed continue anyway to get correct reporting done */
1371 obj = mdt_hsm_get_md_hsm(mti, &car->car_hai->hai_fid, &mh);
1372 /* we will update MD HSM only if needed */
1373 is_mh_changed = false;
1375 /* no need to change mh->mh_arch_id
1376 * mdt_hsm_get_md_hsm() got it from disk and it is still valid
/* error path: map the copytool's errno into a final status and a
 * changelog error flag */
1378 if (pgs->hpk_errval != 0) {
1379 switch (pgs->hpk_errval) {
1381 /* the copy tool does not support cancel
1382 * so the cancel request is failed
1383 * As we cannot distinguish a cancel progress
1384 * from another action progress (they have the
1385 * same cookie), we suppose here the CT returns
1386 * ENOSYS only if does not support cancel
1388 /* this can also happen when cdt calls it to
1389 * for a timed out request */
1390 *status = ARS_FAILED;
1391 /* to have a cancel event in changelog */
1392 pgs->hpk_errval = ECANCELED;
1395 /* the request record has already been set to
1396 * ARS_CANCELED, this set the cancel request
1398 *status = ARS_SUCCEED;
1401 /* retry only if current policy or requested, and
1402 * object is not on error/removed */
1403 *status = (cdt->cdt_policy & CDT_NORETRY_ACTION ||
1404 !(pgs->hpk_flags & HP_FLAG_RETRY) ||
1405 IS_ERR(obj)) ? ARS_FAILED : ARS_WAITING;
/* changelog error field is narrow: clamp oversized errnos to
 * CLF_HSM_ERROVERFLOW rather than storing a truncated value */
1409 if (pgs->hpk_errval > CLF_HSM_MAXERROR) {
1410 CERROR("%s: Request %#llx on "DFID
1411 " failed, error code %d too large\n",
1413 pgs->hpk_cookie, PFID(&pgs->hpk_fid),
1415 hsm_set_cl_error(&clf_flags, CLF_HSM_ERROVERFLOW);
1418 hsm_set_cl_error(&clf_flags, pgs->hpk_errval);
1421 switch (car->car_hai->hai_action) {
1423 hsm_set_cl_event(&clf_flags, HE_ARCHIVE);
1426 hsm_set_cl_event(&clf_flags, HE_RESTORE);
1429 hsm_set_cl_event(&clf_flags, HE_REMOVE);
1432 hsm_set_cl_event(&clf_flags, HE_CANCEL);
1433 CERROR("%s: Failed request %#llx on "DFID
1434 " cannot be a CANCEL\n",
1437 PFID(&pgs->hpk_fid));
1440 CERROR("%s: Failed request %#llx on "DFID
1441 " %d is an unknown action\n",
1443 pgs->hpk_cookie, PFID(&pgs->hpk_fid),
1444 car->car_hai->hai_action);
/* success path: record the event and update in-memory HSM flags (mh)
 * according to the completed action */
1449 *status = ARS_SUCCEED;
1450 switch (car->car_hai->hai_action) {
1452 hsm_set_cl_event(&clf_flags, HE_ARCHIVE);
1453 /* set ARCHIVE keep EXIST and clear LOST and
1455 mh.mh_arch_ver = pgs->hpk_data_version;
1456 mh.mh_flags |= HS_ARCHIVED;
1457 mh.mh_flags &= ~(HS_LOST|HS_DIRTY);
1458 is_mh_changed = true;
1461 hsm_set_cl_event(&clf_flags, HE_RESTORE);
1463 /* do not clear RELEASED and DIRTY here
1464 * this will occur in hsm_swap_layouts()
1467 /* Restoring has changed the file version on
1469 mh.mh_arch_ver = pgs->hpk_data_version;
1470 is_mh_changed = true;
1473 hsm_set_cl_event(&clf_flags, HE_REMOVE);
1474 /* clear ARCHIVED EXISTS and LOST */
1475 mh.mh_flags &= ~(HS_ARCHIVED | HS_EXISTS | HS_LOST);
1476 is_mh_changed = true;
1479 hsm_set_cl_event(&clf_flags, HE_CANCEL);
1480 CERROR("%s: Successful request %#llx on "DFID" cannot be a CANCEL\n",
1483 PFID(&pgs->hpk_fid));
1486 CERROR("%s: Successful request %#llx on "DFID" %d is an unknown action\n",
1488 pgs->hpk_cookie, PFID(&pgs->hpk_fid),
1489 car->car_hai->hai_action);
1495 /* rc != 0 means error when analysing action, it may come from
1496 * a crasy CT no need to manage DIRTY
1497 * and if mdt_hsm_get_md_hsm() has returned an error, mh has not been
1500 if (rc == 0 && !IS_ERR(obj))
1501 hsm_set_cl_flags(&clf_flags,
1502 mh.mh_flags & HS_DIRTY ? CLF_HSM_DIRTY : 0);
1504 /* unlock is done later, after layout lock management */
1505 if (is_mh_changed && !IS_ERR(obj))
1506 rc = mdt_hsm_attr_set(mti, obj, &mh);
1508 /* we give back layout lock only if restore was successful or
1509 * if no retry will be attempted and if object is still alive,
1510 * in other cases we just unlock the object */
1511 if (car->car_hai->hai_action == HSMA_RESTORE) {
1512 struct mdt_lock_handle *lh;
1514 /* restore in data FID done, we swap the layouts
1515 * only if restore is successful */
1516 if (pgs->hpk_errval == 0 && !IS_ERR(obj)) {
1517 rc = hsm_swap_layouts(mti, obj, &car->car_hai->hai_dfid,
1520 if (cdt->cdt_policy & CDT_NORETRY_ACTION)
1521 *status = ARS_FAILED;
1522 pgs->hpk_errval = -rc;
1525 /* we have to retry, so keep layout lock */
1526 if (*status == ARS_WAITING)
1529 /* restore special case, need to create ChangeLog record
1530 * before to give back layout lock to avoid concurrent
1531 * file updater to post out of order ChangeLog */
1532 mo_changelog(env, CL_HSM, clf_flags, mdt->mdt_child,
1533 &car->car_hai->hai_fid);
1534 need_changelog = false;
1536 cdt_restore_handle_del(mti, cdt, &car->car_hai->hai_fid);
1537 if (!IS_ERR_OR_NULL(obj)) {
/* original typo "upadated" preserved: taking then dropping an EX
 * UPDATE lock forces clients to refetch attributes */
1538 /* flush UPDATE lock so attributes are upadated */
1539 lh = &mti->mti_lh[MDT_LH_OLD];
1540 mdt_lock_reg_init(lh, LCK_EX);
1541 mdt_object_lock(mti, obj, lh, MDS_INODELOCK_UPDATE);
1542 mdt_object_unlock(mti, obj, lh, 1);
1549 /* always add a ChangeLog record */
1551 mo_changelog(env, CL_HSM, clf_flags, mdt->mdt_child,
1552 &car->car_hai->hai_fid);
1555 mdt_object_put(mti->mti_env, obj);
/*
 * NOTE(review): elided listing — several statements/braces are missing.
 * Entry point for copytool progress messages: validates the progress
 * against the in-memory request (car), then on HP_FLAG_COMPLETED finalizes
 * the request via hsm_cdt_request_completed(), persists the llog record
 * first (LU-9075 ordering) and only then drops the in-memory request.
 */
1561 * update status of a request
1562 * \param mti [IN] context
1563 * \param pgs [IN] progress of the copy tool
1565 * \retval -ve failure
1567 int mdt_hsm_update_request_state(struct mdt_thread_info *mti,
1568 struct hsm_progress_kernel *pgs)
1570 struct mdt_device *mdt = mti->mti_mdt;
1571 struct coordinator *cdt = &mdt->mdt_coordinator;
1572 struct cdt_agent_req *car;
1576 /* no coordinator started, so we cannot serve requests */
1577 if (cdt->cdt_state == CDT_STOPPED)
1580 /* first do sanity checks */
1581 car = mdt_cdt_update_request(cdt, pgs);
1583 CERROR("%s: Cannot find running request for cookie %#llx"
1586 pgs->hpk_cookie, PFID(&pgs->hpk_fid));
1588 RETURN(PTR_ERR(car));
1591 CDEBUG(D_HSM, "Progress received for fid="DFID" cookie=%#llx"
1592 " action=%s flags=%d err=%d fid="DFID" dfid="DFID"\n",
1593 PFID(&pgs->hpk_fid), pgs->hpk_cookie,
1594 hsm_copytool_action2name(car->car_hai->hai_action),
1595 pgs->hpk_flags, pgs->hpk_errval,
1596 PFID(&car->car_hai->hai_dfid));
1599 /* progress is done on FID or data FID depending of the action and
1600 * of the copy progress */
1601 /* for restore progress is used to send back the data FID to cdt */
/* first restore progress: dfid still equals fid, so capture the
 * volatile data FID the copytool reports */
1602 if (car->car_hai->hai_action == HSMA_RESTORE &&
1603 lu_fid_eq(&car->car_hai->hai_fid, &car->car_hai->hai_dfid))
1604 car->car_hai->hai_dfid = pgs->hpk_fid;
1606 if ((car->car_hai->hai_action == HSMA_RESTORE ||
1607 car->car_hai->hai_action == HSMA_ARCHIVE) &&
1608 (!lu_fid_eq(&pgs->hpk_fid, &car->car_hai->hai_dfid) &&
1609 !lu_fid_eq(&pgs->hpk_fid, &car->car_hai->hai_fid))) {
1610 CERROR("%s: Progress on "DFID" for cookie %#llx"
1611 " does not match request FID "DFID" nor data FID "
1614 PFID(&pgs->hpk_fid), pgs->hpk_cookie,
1615 PFID(&car->car_hai->hai_fid),
1616 PFID(&car->car_hai->hai_dfid));
1617 GOTO(out, rc = -EINVAL);
/* an error report without HP_FLAG_COMPLETED is contradictory */
1620 if (pgs->hpk_errval != 0 && !(pgs->hpk_flags & HP_FLAG_COMPLETED)) {
1621 CERROR("%s: Progress on "DFID" for cookie %#llx action=%s"
1622 " is not coherent (err=%d and not completed"
1625 PFID(&pgs->hpk_fid), pgs->hpk_cookie,
1626 hsm_copytool_action2name(car->car_hai->hai_action),
1627 pgs->hpk_errval, pgs->hpk_flags);
1628 GOTO(out, rc = -EINVAL);
1631 /* now progress is valid */
1633 /* we use a root like ucred */
1634 hsm_init_ucred(mdt_ucred(mti));
1636 if (pgs->hpk_flags & HP_FLAG_COMPLETED) {
1637 enum agent_req_status status;
1638 struct hsm_record_update update;
1641 rc = hsm_cdt_request_completed(mti, pgs, car, &status);
1643 CDEBUG(D_HSM, "updating record: fid="DFID" cookie=%#llx action=%s "
1645 PFID(&pgs->hpk_fid), pgs->hpk_cookie,
1646 hsm_copytool_action2name(car->car_hai->hai_action),
1647 agent_req_status2name(status));
1649 /* update record first (LU-9075) */
1650 update.cookie = pgs->hpk_cookie;
1651 update.status = status;
1653 rc1 = mdt_agent_record_update(mti->mti_env, mdt,
1656 CERROR("%s: mdt_agent_record_update() failed,"
1657 " rc=%d, cannot update status to %s"
1658 " for cookie %#llx\n",
1659 mdt_obd_name(mdt), rc1,
1660 agent_req_status2name(status),
/* preserve the first error: rc from completion wins over rc1 */
1662 rc = (rc != 0 ? rc : rc1);
1664 /* then remove request from memory list (LU-9075) */
1665 mdt_cdt_remove_request(cdt, pgs->hpk_cookie);
1667 /* ct has completed a request, so a slot is available,
1668 * signal the coordinator to find new work */
1669 mdt_hsm_cdt_event(cdt);
1671 /* if copytool send a progress on a canceled request
1672 * we inform copytool it should stop
1674 if (car->car_canceled == 1)
1680 /* remove ref got from mdt_cdt_update_request() */
1681 mdt_cdt_put_request(car);
/* Callback cookie for mdt_cancel_all_cb(); only the mdt pointer is visible
 * here (listing elided — other members may exist in the full source). */
1688 * data passed to llog_cat_process() callback
1689 * to cancel requests
1691 struct hsm_cancel_all_data {
1692 struct mdt_device *mdt;
/*
 * llog iteration callback: flip every WAITING/STARTED agent record to
 * ARS_CANCELED in place and rewrite it at the same llog index.
 * NOTE(review): elided listing — final RETURN/braces not visible.
 */
1696 * llog_cat_process() callback, used to:
1697 * - purge all requests
1698 * \param env [IN] environment
1699 * \param llh [IN] llog handle
1700 * \param hdr [IN] llog record
1701 * \param data [IN] cb data = struct hsm_cancel_all_data
1703 * \retval -ve failure
1705 static int mdt_cancel_all_cb(const struct lu_env *env,
1706 struct llog_handle *llh,
1707 struct llog_rec_hdr *hdr, void *data)
1709 struct llog_agent_req_rec *larr;
1710 struct hsm_cancel_all_data *hcad;
1714 larr = (struct llog_agent_req_rec *)hdr;
1716 if (larr->arr_status == ARS_WAITING ||
1717 larr->arr_status == ARS_STARTED) {
1718 larr->arr_status = ARS_CANCELED;
/* stamp the cancellation time before rewriting the record in place */
1719 larr->arr_req_change = ktime_get_real_seconds();
1720 rc = llog_write(env, llh, hdr, hdr->lrh_index);
/*
 * "purge" command implementation: temporarily disable the coordinator,
 * send an HSMA_CANCEL compound to the agent for every in-flight request,
 * cancel all on-disk llog records, then restore the previous cdt state.
 * NOTE(review): elided listing — some braces/returns are missing.
 */
1727 * cancel all actions
1728 * \param obd [IN] MDT device
1730 static int hsm_cancel_all_actions(struct mdt_device *mdt)
1733 struct lu_context session;
1734 struct mdt_thread_info *mti;
1735 struct coordinator *cdt = &mdt->mdt_coordinator;
1736 struct cdt_agent_req *car;
1737 struct hsm_action_list *hal = NULL;
1738 struct hsm_action_item *hai;
1739 struct hsm_cancel_all_data hcad;
1740 int hal_sz = 0, hal_len, rc;
1741 enum cdt_states old_state;
1744 rc = lu_env_init(&env, LCT_MD_THREAD);
1748 /* for mdt_ucred(), lu_ucred stored in lu_ucred_key */
1749 rc = lu_context_init(&session, LCT_SERVER_SESSION);
1753 lu_context_enter(&session);
1754 env.le_ses = &session;
1756 mti = lu_context_key_get(&env.le_ctx, &mdt_thread_key);
1757 LASSERT(mti != NULL);
1759 mti->mti_env = &env;
1762 hsm_init_ucred(mdt_ucred(mti));
1764 mutex_lock(&cdt->cdt_state_lock);
1765 old_state = cdt->cdt_state;
1767 /* disable coordinator */
1768 rc = set_cdt_state_locked(cdt, CDT_DISABLE);
1770 GOTO(out_cdt_state_unlock, rc);
1772 /* send cancel to all running requests */
1773 down_read(&cdt->cdt_request_lock);
1774 list_for_each_entry(car, &cdt->cdt_request_list, car_request_list) {
1775 mdt_cdt_get_request(car);
1776 /* request is not yet removed from list, it will be done
1777 * when copytool will return progress
/* do not cancel a cancel: skip requests that already are HSMA_CANCEL */
1780 if (car->car_hai->hai_action == HSMA_CANCEL) {
1781 mdt_cdt_put_request(car);
/* size the HAL buffer: header + fsname + this request's item; the
 * buffer is grown lazily and reused across loop iterations */
1786 hal_len = sizeof(*hal) + cfs_size_round(MTI_NAME_MAXLEN + 1) +
1787 cfs_size_round(car->car_hai->hai_len);
1789 if (hal_len > hal_sz && hal_sz > 0) {
1790 /* not enough room, free old buffer */
1791 OBD_FREE(hal, hal_sz);
1795 /* empty buffer, allocate one */
1798 OBD_ALLOC(hal, hal_sz);
1800 mdt_cdt_put_request(car);
1801 up_read(&cdt->cdt_request_lock);
1802 GOTO(out_cdt_state, rc = -ENOMEM);
1806 hal->hal_version = HAL_VERSION;
1807 obd_uuid2fsname(hal->hal_fsname, mdt_obd_name(mdt),
1809 hal->hal_fsname[MTI_NAME_MAXLEN] = '\0';
1810 hal->hal_archive_id = car->car_archive_id;
1811 hal->hal_flags = car->car_flags;
1814 hai = hai_first(hal);
1815 memcpy(hai, car->car_hai, car->car_hai->hai_len);
1816 hai->hai_action = HSMA_CANCEL;
1819 /* it is possible to safely call mdt_hsm_agent_send()
1820 * (ie without a deadlock on cdt_request_lock), because the
1821 * write lock is taken only if we are not in purge mode
1822 * (mdt_hsm_agent_send() does not call mdt_cdt_add_request()
1823 * nor mdt_cdt_remove_request())
1825 /* no conflict with cdt thread because cdt is disable and we
1826 * have the request lock */
1827 mdt_hsm_agent_send(mti, hal, 1);
1829 mdt_cdt_put_request(car);
1831 up_read(&cdt->cdt_request_lock);
1834 OBD_FREE(hal, hal_sz);
1836 /* cancel all on-disk records */
1839 rc = cdt_llog_process(mti->mti_env, mti->mti_mdt, mdt_cancel_all_cb,
1840 &hcad, 0, 0, WRITE);
1842 /* Enable coordinator, unless the coordinator was stopping. */
1843 set_cdt_state_locked(cdt, old_state);
1844 out_cdt_state_unlock:
1845 mutex_unlock(&cdt->cdt_state_lock);
1847 lu_context_exit(&session);
1848 lu_context_fini(&session);
/*
 * Decide whether an HSM action makes sense given the file's current HSM
 * flags (e.g. don't archive a clean already-archived file, only restore a
 * released+archived+not-lost file). NOTE(review): elided listing — the
 * case labels and final RETURN are missing from this view.
 */
1856 * check if a request is compatible with file status
1857 * \param hai [IN] request description
1858 * \param archive_id [IN] request archive id
1859 * \param rq_flags [IN] request flags
1860 * \param hsm [IN] file HSM metadata
1863 bool mdt_hsm_is_action_compat(const struct hsm_action_item *hai,
1864 u32 archive_id, u64 rq_flags,
1865 const struct md_hsm *hsm)
1867 int is_compat = false;
1871 hsm_flags = hsm->mh_flags;
1872 switch (hai->hai_action) {
/* archive: allowed when not NOARCHIVE and (dirty or never archived) */
1874 if (!(hsm_flags & HS_NOARCHIVE) &&
1875 (hsm_flags & HS_DIRTY || !(hsm_flags & HS_ARCHIVED)))
/* an existing copy pins the archive id: a different id is refused */
1878 if (hsm_flags & HS_EXISTS &&
1880 archive_id != hsm->mh_arch_id)
/* restore: clean, released, archived and not lost */
1885 if (!(hsm_flags & HS_DIRTY) && (hsm_flags & HS_RELEASED) &&
1886 hsm_flags & HS_ARCHIVED && !(hsm_flags & HS_LOST))
/* remove: must not be released, and have an archived/existing copy */
1890 if (!(hsm_flags & HS_RELEASED) &&
1891 (hsm_flags & (HS_ARCHIVED | HS_EXISTS)))
1898 CDEBUG(D_HSM, "fid="DFID" action=%s flags=%#llx"
1899 " extent=%#llx-%#llx hsm_flags=%.8X %s\n",
1900 PFID(&hai->hai_fid),
1901 hsm_copytool_action2name(hai->hai_action), rq_flags,
1902 hai->hai_extent.offset, hai->hai_extent.length,
1904 (is_compat ? "compatible" : "uncompatible"));
/* Policy bit <-> name table; terminated by a zero-bit sentinel entry
 * (iterators below stop at .bit == 0). Listing elided: the sentinel row
 * itself is not visible here. */
1910 * sysfs interface used to get/set HSM behaviour (cdt->cdt_policy)
1912 static const struct {
1916 } hsm_policy_names[] = {
1917 { CDT_NONBLOCKING_RESTORE, "NonBlockingRestore", "NBR"},
1918 { CDT_NORETRY_ACTION, "NoRetryAction", "NRA"},
/* Look up a policy bit by full name or nickname; linear scan of the
 * sentinel-terminated table. Elided view: the "not found" return (0,
 * presumably) is not visible — confirm against full source. */
1923 * convert a policy name to a bit
1924 * \param name [IN] policy name
1926 * \retval policy bit
1928 static __u64 hsm_policy_str2bit(const char *name)
1932 for (i = 0; hsm_policy_names[i].bit != 0; i++)
1933 if (strcmp(hsm_policy_names[i].nickname, name) == 0 ||
1934 strcmp(hsm_policy_names[i].name, name) == 0)
1935 return hsm_policy_names[i].bit;
/*
 * Print the policy mask as a space-separated list of names; a set bit is
 * shown as "[Name]", a clear bit as "Name". Optionally prefixed with the
 * hex mask. NOTE(review): elided — the per-bit computation and the
 * trailing-space trim are not fully visible.
 */
1940 * convert a policy bit field to a string
1941 * \param mask [IN] policy bit field
1942 * \param hexa [IN] print mask before bit names
1943 * \param buffer [OUT] string
1944 * \param count [IN] size of buffer
1946 static void hsm_policy_bit2str(struct seq_file *m, const __u64 mask,
1954 seq_printf(m, "(%#llx) ", mask);
1956 for (i = 0; i < CDT_POLICY_SHIFT_COUNT; i++) {
1959 for (j = 0; hsm_policy_names[j].bit != 0; j++) {
1960 if (hsm_policy_names[j].bit == bit)
1964 seq_printf(m, "[%s] ", hsm_policy_names[j].name);
1966 seq_printf(m, "%s ", hsm_policy_names[j].name);
1968 /* remove last ' ' */
1973 /* methods to read/write HSM policy flags */
/* debugfs read: render cdt_policy without the hex-mask prefix */
1974 static int mdt_hsm_policy_seq_show(struct seq_file *m, void *data)
1976 struct mdt_device *mdt = m->private;
1977 struct coordinator *cdt = &mdt->mdt_coordinator;
1980 hsm_policy_bit2str(m, cdt->cdt_policy, false);
/*
 * debugfs write: parse a whitespace-separated policy list. Unsigned names
 * go into set_mask; '+'/'-' prefixes accumulate add/remove masks. With no
 * signed token the policy is replaced outright; otherwise set/add bits are
 * OR'ed in and remove bits cleared. NOTE(review): elided — sign handling
 * and the '+' branch are only partially visible.
 */
1985 mdt_hsm_policy_seq_write(struct file *file, const char __user *buffer,
1986 size_t count, loff_t *off)
1988 struct seq_file *m = file->private_data;
1989 struct mdt_device *mdt = m->private;
1990 struct coordinator *cdt = &mdt->mdt_coordinator;
1991 char *start, *token, sign;
1994 __u64 add_mask, remove_mask, set_mask;
/* bound the copied user buffer to one page (incl. NUL) */
1998 if (count + 1 > PAGE_SIZE)
2001 OBD_ALLOC(buf, count + 1);
2005 if (copy_from_user(buf, buffer, count))
2006 GOTO(out, rc = -EFAULT);
2011 CDEBUG(D_HSM, "%s: receive new policy: '%s'\n", mdt_obd_name(mdt),
2014 add_mask = remove_mask = set_mask = 0;
2016 token = strsep(&start, "\n ");
2022 if (sign == '-' || sign == '+')
2025 policy = hsm_policy_str2bit(token);
2027 CWARN("%s: '%s' is unknown, "
2028 "supported policies are:\n", mdt_obd_name(mdt),
2030 hsm_policy_bit2str(m, 0, false);
2031 GOTO(out, rc = -EINVAL);
2035 remove_mask |= policy;
2045 } while (start != NULL);
2047 CDEBUG(D_HSM, "%s: new policy: rm=%#llx add=%#llx set=%#llx\n",
2048 mdt_obd_name(mdt), remove_mask, add_mask, set_mask);
2050 /* if no sign in all string, it is a clear and set
2051 * if some sign found, all unsigned are converted
2053 * P1 P2 = set to P1 and P2
2054 * P1 -P2 = add P1 clear P2 same as +P1 -P2
2056 if (remove_mask == 0 && add_mask == 0) {
2057 cdt->cdt_policy = set_mask;
2059 cdt->cdt_policy |= set_mask | add_mask;
2060 cdt->cdt_policy &= ~remove_mask;
2063 GOTO(out, rc = count);
2066 OBD_FREE(buf, count + 1);
2069 LDEBUGFS_SEQ_FOPS(mdt_hsm_policy);
/* sysfs RW attribute: coordinator main-loop period (seconds, presumably —
 * confirm units against full source). store rejects 0 via the final
 * `val ? count : -EINVAL`, but note the assignment happens first. */
2071 ssize_t loop_period_show(struct kobject *kobj, struct attribute *attr,
2074 struct coordinator *cdt = container_of(kobj, struct coordinator,
2077 return scnprintf(buf, PAGE_SIZE, "%u\n", cdt->cdt_loop_period);
2080 ssize_t loop_period_store(struct kobject *kobj, struct attribute *attr,
2081 const char *buffer, size_t count)
2083 struct coordinator *cdt = container_of(kobj, struct coordinator,
2088 rc = kstrtouint(buffer, 0, &val);
2093 cdt->cdt_loop_period = val;
2095 return val ? count : -EINVAL;
2097 LUSTRE_RW_ATTR(loop_period);
/* sysfs RW attribute: cdt_grace_delay. Same pattern as loop_period:
 * 0 is rejected, but only after the field was already written. */
2099 ssize_t grace_delay_show(struct kobject *kobj, struct attribute *attr,
2102 struct coordinator *cdt = container_of(kobj, struct coordinator,
2105 return scnprintf(buf, PAGE_SIZE, "%u\n", cdt->cdt_grace_delay);
2108 ssize_t grace_delay_store(struct kobject *kobj, struct attribute *attr,
2109 const char *buffer, size_t count)
2111 struct coordinator *cdt = container_of(kobj, struct coordinator,
2116 rc = kstrtouint(buffer, 0, &val);
2121 cdt->cdt_grace_delay = val;
2123 return val ? count : -EINVAL;
2125 LUSTRE_RW_ATTR(grace_delay);
/* sysfs RW attribute: timeout applied to active copytool requests.
 * Shown with %d though parsed as unsigned — field is int; confirm. */
2127 ssize_t active_request_timeout_show(struct kobject *kobj,
2128 struct attribute *attr,
2131 struct coordinator *cdt = container_of(kobj, struct coordinator,
2134 return scnprintf(buf, PAGE_SIZE, "%d\n", cdt->cdt_active_req_timeout);
2137 ssize_t active_request_timeout_store(struct kobject *kobj,
2138 struct attribute *attr,
2139 const char *buffer, size_t count)
2141 struct coordinator *cdt = container_of(kobj, struct coordinator,
2146 rc = kstrtouint(buffer, 0, &val);
2151 cdt->cdt_active_req_timeout = val;
2153 return val ? count : -EINVAL;
2155 LUSTRE_RW_ATTR(active_request_timeout);
/* sysfs RW attribute: maximum number of concurrent HSM requests (u64). */
2157 ssize_t max_requests_show(struct kobject *kobj, struct attribute *attr,
2160 struct coordinator *cdt = container_of(kobj, struct coordinator,
2163 return scnprintf(buf, PAGE_SIZE, "%llu\n", cdt->cdt_max_requests);
2166 ssize_t max_requests_store(struct kobject *kobj, struct attribute *attr,
2167 const char *buffer, size_t count)
2169 struct coordinator *cdt = container_of(kobj, struct coordinator,
2171 unsigned long long val;
2174 rc = kstrtoull(buffer, 0, &val);
2179 cdt->cdt_max_requests = val;
2181 return val ? count : -EINVAL;
2183 LUSTRE_RW_ATTR(max_requests);
/* sysfs RW attribute: archive id used when a request specifies none. */
2185 ssize_t default_archive_id_show(struct kobject *kobj, struct attribute *attr,
2188 struct coordinator *cdt = container_of(kobj, struct coordinator,
2191 return scnprintf(buf, PAGE_SIZE, "%u\n", cdt->cdt_default_archive_id);
2194 ssize_t default_archive_id_store(struct kobject *kobj, struct attribute *attr,
2195 const char *buffer, size_t count)
2197 struct coordinator *cdt = container_of(kobj, struct coordinator,
2202 rc = kstrtouint(buffer, 0, &val);
2207 cdt->cdt_default_archive_id = val;
2209 return val ? count : -EINVAL;
2211 LUSTRE_RW_ATTR(default_archive_id);
/*
 * hsm_control command dispatcher: "enabled" (start/resume), "shutdown",
 * "disabled", "purge" (cancel everything), "help". Matching is by prefix
 * (strncmp with the command's own length). NOTE(review): elided — several
 * error/return statements between branches are missing from this view.
 */
2214 * procfs write method for MDT/hsm_control
2215 * proc entry is in mdt directory so data is mdt obd_device pointer
2217 #define CDT_ENABLE_CMD "enabled"
2218 #define CDT_STOP_CMD "shutdown"
2219 #define CDT_DISABLE_CMD "disabled"
2220 #define CDT_PURGE_CMD "purge"
2221 #define CDT_HELP_CMD "help"
2222 #define CDT_MAX_CMD_LEN 10
2224 ssize_t hsm_control_store(struct kobject *kobj, struct attribute *attr,
2225 const char *buffer, size_t count)
2227 struct obd_device *obd = container_of(kobj, struct obd_device,
2229 struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
2230 struct coordinator *cdt = &(mdt->mdt_coordinator);
/* reject empty input and anything longer than the longest command */
2234 if (count == 0 || count >= CDT_MAX_CMD_LEN)
2237 if (strncmp(buffer, CDT_ENABLE_CMD, strlen(CDT_ENABLE_CMD)) == 0) {
2238 if (cdt->cdt_state == CDT_DISABLE) {
2239 rc = set_cdt_state(cdt, CDT_RUNNING);
2240 mdt_hsm_cdt_event(cdt);
2241 wake_up(&cdt->cdt_waitq);
2242 } else if (cdt->cdt_state == CDT_RUNNING) {
2245 rc = mdt_hsm_cdt_start(mdt);
2247 } else if (strncmp(buffer, CDT_STOP_CMD, strlen(CDT_STOP_CMD)) == 0) {
2248 if (cdt->cdt_state == CDT_STOPPING) {
2249 CERROR("%s: Coordinator is already stopping\n",
2252 } else if (cdt->cdt_state == CDT_STOPPED) {
2255 rc = mdt_hsm_cdt_stop(mdt);
2257 } else if (strncmp(buffer, CDT_DISABLE_CMD,
2258 strlen(CDT_DISABLE_CMD)) == 0) {
2259 if ((cdt->cdt_state == CDT_STOPPING) ||
2260 (cdt->cdt_state == CDT_STOPPED)) {
2261 CERROR("%s: Coordinator is stopped\n",
2265 rc = set_cdt_state(cdt, CDT_DISABLE);
2267 } else if (strncmp(buffer, CDT_PURGE_CMD,
2268 strlen(CDT_PURGE_CMD)) == 0) {
2269 rc = hsm_cancel_all_actions(mdt);
2270 } else if (strncmp(buffer, CDT_HELP_CMD,
2271 strlen(CDT_HELP_CMD)) == 0) {
/* unknown command / help: list the valid commands */
2279 CERROR("%s: Valid coordinator control commands are: "
2280 "%s %s %s %s %s\n", mdt_obd_name(mdt),
2281 CDT_ENABLE_CMD, CDT_STOP_CMD, CDT_DISABLE_CMD,
2282 CDT_PURGE_CMD, CDT_HELP_CMD);
/* sysfs read of hsm_control: report the coordinator state as a string. */
2290 ssize_t hsm_control_show(struct kobject *kobj, struct attribute *attr,
2293 struct obd_device *obd = container_of(kobj, struct obd_device,
2295 struct coordinator *cdt;
2297 cdt = &(mdt_dev(obd->obd_lu_dev)->mdt_coordinator);
2299 return scnprintf(buf, PAGE_SIZE, "%s\n",
2300 cdt_mdt_state2str(cdt->cdt_state));
/* Print each action name whose bit is set in the mask, space-separated.
 * NOTE(review): `1UL << i` over all 64 bit positions of a __u64 mask is
 * suspect on 32-bit (unsigned long is 32 bits there) — confirm. */
2304 mdt_hsm_request_mask_show(struct seq_file *m, __u64 mask)
2310 for (i = 0; i < 8 * sizeof(mask); i++) {
2311 if (mask & (1UL << i)) {
2312 seq_printf(m, "%s%s", first ? "" : " ",
2313 hsm_copytool_action2name(i));
/* Thin debugfs show wrappers: one per permission class (user/group/other),
 * all delegating to mdt_hsm_request_mask_show() on the matching mask. */
2323 mdt_hsm_user_request_mask_seq_show(struct seq_file *m, void *data)
2325 struct mdt_device *mdt = m->private;
2326 struct coordinator *cdt = &mdt->mdt_coordinator;
2328 return mdt_hsm_request_mask_show(m, cdt->cdt_user_request_mask);
2332 mdt_hsm_group_request_mask_seq_show(struct seq_file *m, void *data)
2334 struct mdt_device *mdt = m->private;
2335 struct coordinator *cdt = &mdt->mdt_coordinator;
2337 return mdt_hsm_request_mask_show(m, cdt->cdt_group_request_mask);
2341 mdt_hsm_other_request_mask_seq_show(struct seq_file *m, void *data)
2343 struct mdt_device *mdt = m->private;
2344 struct coordinator *cdt = &mdt->mdt_coordinator;
2346 return mdt_hsm_request_mask_show(m, cdt->cdt_other_request_mask);
/* Case-insensitive action-name lookup (inverse of action2name).
 * Elided view: the return values for NOOP/REMOVE/CANCEL and the
 * unknown-name fallback are not visible here. */
2349 static inline enum hsm_copytool_action
2350 hsm_copytool_name2action(const char *name)
2352 if (strcasecmp(name, "NOOP") == 0
2354 else if (strcasecmp(name, "ARCHIVE") == 0)
2355 return HSMA_ARCHIVE;
2356 else if (strcasecmp(name, "RESTORE") == 0)
2357 return HSMA_RESTORE;
2358 else if (strcasecmp(name, "REMOVE") == 0)
2360 else if (strcasecmp(name, "CANCEL") == 0)
/*
 * Shared write helper for the three request-mask files: copy the user
 * buffer (bounded < 4096), tokenize on whitespace, map each token to an
 * action bit and rebuild the mask from scratch into *mask.
 * NOTE(review): elided — the final *mask assignment is not visible.
 */
2367 mdt_write_hsm_request_mask(struct file *file, const char __user *user_buf,
2368 size_t user_count, __u64 *mask)
2370 char *buf, *pos, *name;
2376 if (!(user_count < 4096))
2379 buf_size = user_count + 1;
2381 OBD_ALLOC(buf, buf_size);
2385 if (copy_from_user(buf, user_buf, buf_size - 1))
2386 GOTO(out, rc = -EFAULT);
2388 buf[buf_size - 1] = '\0';
2391 while ((name = strsep(&pos, " \t\v\n")) != NULL) {
2397 action = hsm_copytool_name2action(name);
2399 GOTO(out, rc = -EINVAL);
2401 new_mask |= (1UL << action);
2407 OBD_FREE(buf, buf_size);
/* Thin debugfs write wrappers, mirrors of the show wrappers above: each
 * forwards to mdt_write_hsm_request_mask() with its class's mask field. */
2413 mdt_hsm_user_request_mask_seq_write(struct file *file, const char __user *buf,
2414 size_t count, loff_t *off)
2416 struct seq_file *m = file->private_data;
2417 struct mdt_device *mdt = m->private;
2418 struct coordinator *cdt = &mdt->mdt_coordinator;
2420 return mdt_write_hsm_request_mask(file, buf, count,
2421 &cdt->cdt_user_request_mask);
2425 mdt_hsm_group_request_mask_seq_write(struct file *file, const char __user *buf,
2426 size_t count, loff_t *off)
2428 struct seq_file *m = file->private_data;
2429 struct mdt_device *mdt = m->private;
2430 struct coordinator *cdt = &mdt->mdt_coordinator;
2432 return mdt_write_hsm_request_mask(file, buf, count,
2433 &cdt->cdt_group_request_mask);
2437 mdt_hsm_other_request_mask_seq_write(struct file *file, const char __user *buf,
2438 size_t count, loff_t *off)
2440 struct seq_file *m = file->private_data;
2441 struct mdt_device *mdt = m->private;
2442 struct coordinator *cdt = &mdt->mdt_coordinator;
2444 return mdt_write_hsm_request_mask(file, buf, count,
2445 &cdt->cdt_other_request_mask);
/* sysfs boolean: remove the archived copy when the last link to a file is
 * unlinked. Parsed with kstrtobool, so "0/1/y/n/on/off" forms apply. */
2448 static ssize_t remove_archive_on_last_unlink_show(struct kobject *kobj,
2449 struct attribute *attr,
2452 struct coordinator *cdt = container_of(kobj, struct coordinator,
2455 return scnprintf(buf, PAGE_SIZE, "%u\n",
2456 cdt->cdt_remove_archive_on_last_unlink);
2459 static ssize_t remove_archive_on_last_unlink_store(struct kobject *kobj,
2460 struct attribute *attr,
2464 struct coordinator *cdt = container_of(kobj, struct coordinator,
2469 rc = kstrtobool(buffer, &val);
2473 cdt->cdt_remove_archive_on_last_unlink = val;
2476 LUSTRE_RW_ATTR(remove_archive_on_last_unlink);
/* generate the debugfs file_operations for the three mask files */
2478 LDEBUGFS_SEQ_FOPS(mdt_hsm_user_request_mask);
2479 LDEBUGFS_SEQ_FOPS(mdt_hsm_group_request_mask);
2480 LDEBUGFS_SEQ_FOPS(mdt_hsm_other_request_mask);
2482 /* Read-only sysfs files for request counters */
/* current number of in-flight archive requests (atomic counter) */
2483 static ssize_t archive_count_show(struct kobject *kobj, struct attribute *attr,
2486 struct coordinator *cdt = container_of(kobj, struct coordinator,
2489 return scnprintf(buf, PAGE_SIZE, "%d\n",
2490 atomic_read(&cdt->cdt_archive_count));
2492 LUSTRE_RO_ATTR(archive_count);
/* current number of in-flight restore requests */
2494 static ssize_t restore_count_show(struct kobject *kobj, struct attribute *attr,
2497 struct coordinator *cdt = container_of(kobj, struct coordinator,
2500 return scnprintf(buf, PAGE_SIZE, "%d\n",
2501 atomic_read(&cdt->cdt_restore_count));
2503 LUSTRE_RO_ATTR(restore_count);
/* current number of in-flight remove requests */
2505 static ssize_t remove_count_show(struct kobject *kobj, struct attribute *attr,
2508 struct coordinator *cdt = container_of(kobj, struct coordinator,
2511 return scnprintf(buf, PAGE_SIZE, "%d\n",
2512 atomic_read(&cdt->cdt_remove_count));
2514 LUSTRE_RO_ATTR(remove_count);
/* debugfs file table for the hsm/ directory; registered by
 * hsm_cdt_tunables_init() below. Elided: the terminating { NULL } entry
 * is not visible in this view. */
2516 static struct ldebugfs_vars ldebugfs_mdt_hsm_vars[] = {
2518 .fops = &mdt_hsm_agent_fops },
2519 { .name = "actions",
2520 .fops = &mdt_hsm_actions_fops,
2521 .proc_mode = 0444 },
2523 .fops = &mdt_hsm_policy_fops },
2524 { .name = "active_requests",
2525 .fops = &mdt_hsm_active_requests_fops },
2526 { .name = "user_request_mask",
2527 .fops = &mdt_hsm_user_request_mask_fops, },
2528 { .name = "group_request_mask",
2529 .fops = &mdt_hsm_group_request_mask_fops, },
2530 { .name = "other_request_mask",
2531 .fops = &mdt_hsm_other_request_mask_fops, },
/* sysfs attributes attached to the "hsm" kobject via hsm_ktype.
 * Elided: the NULL terminator entry is not visible in this view. */
2535 static struct attribute *hsm_attrs[] = {
2536 &lustre_attr_loop_period.attr,
2537 &lustre_attr_grace_delay.attr,
2538 &lustre_attr_active_request_timeout.attr,
2539 &lustre_attr_max_requests.attr,
2540 &lustre_attr_default_archive_id.attr,
2541 &lustre_attr_remove_archive_on_last_unlink.attr,
2542 &lustre_attr_archive_count.attr,
2543 &lustre_attr_restore_count.attr,
2544 &lustre_attr_remove_count.attr,
/* kobject release: tear down the debugfs tree and signal
 * hsm_cdt_tunables_fini() (which waits on cdt_kobj_unregister). */
2548 static void hsm_kobj_release(struct kobject *kobj)
2550 struct coordinator *cdt = container_of(kobj, struct coordinator,
2553 debugfs_remove_recursive(cdt->cdt_debugfs_dir);
2554 cdt->cdt_debugfs_dir = NULL;
2556 complete(&cdt->cdt_kobj_unregister);
/* kobj_type for the "hsm" sysfs directory; uses .default_attrs (pre-5.2
 * style — newer kernels use .default_groups). */
2559 static struct kobj_type hsm_ktype = {
2560 .default_attrs = hsm_attrs,
2561 .sysfs_ops = &lustre_sysfs_ops,
2562 .release = hsm_kobj_release,
/*
 * Register the "hsm" kobject under the MDT obd kset and create the debugfs
 * files. On kobject_init_and_add() failure the kobject must still be put
 * (kobject API contract) — hence the kobject_put on the error path.
 */
2566 * create sysfs entries for coordinator
2569 * \retval -ve failure
2571 int hsm_cdt_tunables_init(struct mdt_device *mdt)
2573 struct coordinator *cdt = &mdt->mdt_coordinator;
2574 struct obd_device *obd = mdt2obd_dev(mdt);
2577 init_completion(&cdt->cdt_kobj_unregister);
2578 rc = kobject_init_and_add(&cdt->cdt_hsm_kobj, &hsm_ktype,
2579 &obd->obd_kset.kobj, "%s", "hsm");
2581 kobject_put(&cdt->cdt_hsm_kobj);
2585 /* init debugfs entries, failure is not critical */
2586 cdt->cdt_debugfs_dir = debugfs_create_dir("hsm",
2587 obd->obd_debugfs_entry);
2588 ldebugfs_add_vars(cdt->cdt_debugfs_dir, ldebugfs_mdt_hsm_vars, mdt);
/* Drop the hsm kobject reference and block until hsm_kobj_release() has
 * run (it completes cdt_kobj_unregister after removing debugfs). */
2594 * remove sysfs entries for coordinator
2598 void hsm_cdt_tunables_fini(struct mdt_device *mdt)
2600 struct coordinator *cdt = &mdt->mdt_coordinator;
2602 kobject_put(&cdt->cdt_hsm_kobj);
2603 wait_for_completion(&cdt->cdt_kobj_unregister);