/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/mgs/mgs_nids.c
 *
 * NID table management for lustre.
 *
 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */
#define DEBUG_SUBSYSTEM S_MGS
#define D_MGS D_CONFIG

#include <linux/kthread.h>
#include <linux/pagemap.h>

#include <obd_class.h>
#include <lustre_disk.h>

#include "mgs_internal.h"

static time64_t ir_timeout;
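
/*
 * Sanity check: targets on tbl->mn_targets must carry strictly increasing
 * versions (0 means "not assigned yet" and is skipped), so that a reader
 * can resume an interrupted transfer from any version number.
 */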
static int nidtbl_is_sane(struct mgs_nidtbl *tbl)
{
	struct mgs_nidtbl_target *tgt;
	__u64 version = 0;

	LASSERT(mutex_is_locked(&tbl->mn_lock));
	list_for_each_entry(tgt, &tbl->mn_targets, mnt_list) {
		if (!tgt->mnt_version)
			continue;
		if (version >= tgt->mnt_version)
			return 0;
		version = tgt->mnt_version;
	}
	return 1;
}
/*
 * Fetch nidtbl entries whose versions are not less than @version.
 * Entries are packed into @pages in units of @unit_size bytes; an entry
 * must not cross a unit boundary.
 */
static int mgs_nidtbl_read(struct obd_export *exp, struct mgs_nidtbl *tbl,
			   struct mgs_config_res *res, struct page **pages,
			   int nrpages, int units_total, int unit_size)
{
	struct mgs_nidtbl_target *tgt;
	struct mgs_nidtbl_entry *entry;
	struct mgs_nidtbl_entry *last_in_unit = NULL;
	struct mgs_target_info *mti;
	__u64 version = res->mcr_offset;
	bool nobuf = false;
	void *buf = NULL;
	int bytes_in_unit = 0;
	int units_in_page = 0;
	int index = 0;
	int rc = 0;

	/* make sure unit_size is a power of 2 */
	LASSERT((unit_size & (unit_size - 1)) == 0);
	LASSERT(nrpages << PAGE_SHIFT >= units_total * unit_size);
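
	/*
	 * Pages are carved into unit_size-byte units and entries are packed
	 * unit by unit; an entry that does not fit into the space left in
	 * the current unit opens a new one, and the slack is folded into
	 * the previous entry's mne_length.
	 */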
	mutex_lock(&tbl->mn_lock);
	LASSERT(nidtbl_is_sane(tbl));

	/* no more entries? */
	if (version > tbl->mn_version) {
		version = tbl->mn_version;
		GOTO(out, rc = 0);
	}
	/* Walk all targets and pack every entry whose version is not older
	 * than the one requested. Each entry carries its target type
	 * (mne_type), so an MDT llog consumer picks up entries for OSTs,
	 * while a client consumes entries for both OSTs and MDTs.
	 */
	list_for_each_entry(tgt, &tbl->mn_targets, mnt_list) {
		int entry_len = sizeof(*entry);

		if (tgt->mnt_version < version)
			continue;

		/* write target recovery information */
		mti = &tgt->mnt_mti;
		LASSERT(mti->mti_nid_count < MTI_NIDS_MAX);
		entry_len += mti->mti_nid_count * sizeof(lnet_nid_t);

		if (entry_len > unit_size) {
			CWARN("nidtbl: too large entry: entry length %d, unit size: %d\n",
			      entry_len, unit_size);
			GOTO(out, rc = -EOVERFLOW);
		}

		if (bytes_in_unit < entry_len) {
			if (units_total == 0) {
				nobuf = true;
				break;
			}

			/* check if we need to consume remaining bytes. */
			if (last_in_unit != NULL && bytes_in_unit) {
				last_in_unit->mne_length += bytes_in_unit;
				rc += bytes_in_unit;
				buf += bytes_in_unit;
				last_in_unit = NULL;
			}
			LASSERT((rc & (unit_size - 1)) == 0);

			if (units_in_page == 0) {
				/* allocate a new page */
				pages[index] = alloc_page(GFP_KERNEL);
				if (pages[index] == NULL) {
					rc = -ENOMEM;
					break;
				}

				/* destroy the previous mapping */
				if (index > 0)
					kunmap(pages[index - 1]);

				/* reassign buffer */
				buf = kmap(pages[index]);
				++index;

				units_in_page = PAGE_SIZE / unit_size;
				LASSERT(units_in_page > 0);
			}

			/* allocate a unit */
			LASSERT(((long)buf & (unit_size - 1)) == 0);
			bytes_in_unit = unit_size;
			--units_in_page;
			--units_total;
		}

		/* fill in the entry */
		entry = (struct mgs_nidtbl_entry *)buf;
		entry->mne_version = tgt->mnt_version;
		entry->mne_instance = mti->mti_instance;
		entry->mne_index = mti->mti_stripe_index;
		entry->mne_length = entry_len;
		entry->mne_type = tgt->mnt_type;
		entry->mne_nid_type = 0;
		entry->mne_nid_size = sizeof(lnet_nid_t);
		entry->mne_nid_count = mti->mti_nid_count;
		memcpy(entry->u.nids, mti->mti_nids,
		       mti->mti_nid_count * sizeof(lnet_nid_t));

		version = tgt->mnt_version;
		rc += entry_len;
		buf += entry_len;

		bytes_in_unit -= entry_len;
		last_in_unit = entry;

		CDEBUG(D_MGS, "fsname %s, entry size %d, pages %d/%d/%d/%d.\n",
		       tbl->mn_fsdb->fsdb_name, entry_len,
		       bytes_in_unit, index, nrpages, units_total);
	}
out:
	if (index > 0)
		kunmap(pages[index - 1]);

	LASSERT(version <= tbl->mn_version);
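	/*
	 * Tell the client how far it has got: when the buffer was too small
	 * (nobuf), mcr_offset is the last version actually packed so the
	 * client can resume from there; otherwise it is the current table
	 * version, i.e. the client is now up to date.
	 */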
	res->mcr_size = tbl->mn_version;
	res->mcr_offset = nobuf ? version : tbl->mn_version;
	mutex_unlock(&tbl->mn_lock);
	LASSERT(ergo(version == 1, rc == 0)); /* first fetch of the log */

	CDEBUG(D_MGS, "Read IR logs %s return with %d, version %llu\n",
	       tbl->mn_fsdb->fsdb_name, rc, version);
	RETURN(rc);
}
static int nidtbl_update_version(const struct lu_env *env,
				 struct mgs_device *mgs,
				 struct mgs_nidtbl *tbl)
{
	struct dt_object *fsdb;
	struct thandle *th;
	__u64 version;
	loff_t off = 0;
	struct lu_buf buf = {
		.lb_buf = &version,
		.lb_len = sizeof(version)
	};
	int rc;

	if (mgs->mgs_bottom->dd_rdonly)
		RETURN(0);

	LASSERT(mutex_is_locked(&tbl->mn_lock));

	fsdb = local_file_find_or_create(env, mgs->mgs_los, mgs->mgs_nidtbl_dir,
					 tbl->mn_fsdb->fsdb_name,
					 S_IFREG | S_IRUGO | S_IWUSR);
	if (IS_ERR(fsdb))
		RETURN(PTR_ERR(fsdb));

	th = dt_trans_create(env, mgs->mgs_bottom);
	if (IS_ERR(th))
		GOTO(out_put, rc = PTR_ERR(th));

	th->th_sync = 1; /* update the table synchronously */
	rc = dt_declare_record_write(env, fsdb, &buf, off, th);
	if (rc)
		GOTO(out, rc);

	rc = dt_trans_start_local(env, mgs->mgs_bottom, th);
	if (rc)
		GOTO(out, rc);

	version = cpu_to_le64(tbl->mn_version);
	rc = dt_record_write(env, fsdb, &buf, &off, th);
out:
	dt_trans_stop(env, mgs->mgs_bottom, th);
out_put:
	dt_object_put(env, fsdb);
	RETURN(rc);
}
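
/*
 * A freshly created table starts at version 2: version 1 is effectively
 * reserved to mean "the log is being fetched for the first time" (see the
 * ergo() assertion in mgs_nidtbl_read()).
 */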
#define MGS_NIDTBL_VERSION_INIT 2

static int nidtbl_read_version(const struct lu_env *env,
			       struct mgs_device *mgs, struct mgs_nidtbl *tbl,
			       __u64 *version)
{
	struct dt_object *fsdb;
	struct lu_fid fid;
	__u64 tmpver;
	loff_t off = 0;
	struct lu_buf buf = {
		.lb_buf = &tmpver,
		.lb_len = sizeof(tmpver)
	};
	int rc;

	LASSERT(mutex_is_locked(&tbl->mn_lock));
	LASSERT(mgs->mgs_nidtbl_dir);

	rc = dt_lookup_dir(env, mgs->mgs_nidtbl_dir, tbl->mn_fsdb->fsdb_name,
			   &fid);
	if (rc == -ENOENT) {
		*version = MGS_NIDTBL_VERSION_INIT;
		RETURN(0);
	}
	if (rc < 0)
		RETURN(rc);

	fsdb = dt_locate_at(env, mgs->mgs_bottom, &fid,
			    &mgs->mgs_dt_dev.dd_lu_dev, NULL);
	if (IS_ERR(fsdb))
		RETURN(PTR_ERR(fsdb));

	rc = dt_read(env, fsdb, &buf, &off);
	if (rc == buf.lb_len) {
		*version = le64_to_cpu(tmpver);
		rc = 0;
	} else if (rc == 0) {
		*version = MGS_NIDTBL_VERSION_INIT;
	} else {
		CERROR("%s: failed to read version file %s: rc = %d\n",
		       mgs->mgs_obd->obd_name, tbl->mn_fsdb->fsdb_name, rc);
	}
	dt_object_put(env, fsdb);
	RETURN(rc);
}
static int mgs_nidtbl_write(const struct lu_env *env, struct fs_db *fsdb,
			    struct mgs_target_info *mti)
{
	struct mgs_nidtbl *tbl;
	struct mgs_nidtbl_target *tgt;
	bool found = false;
	int type = mti->mti_flags & LDD_F_SV_TYPE_MASK;
	int rc = 0;

	type &= ~LDD_F_SV_TYPE_MGS;

	tbl = &fsdb->fsdb_nidtbl;
	mutex_lock(&tbl->mn_lock);
	list_for_each_entry(tgt, &tbl->mn_targets, mnt_list) {
		struct mgs_target_info *info = &tgt->mnt_mti;

		if (type == tgt->mnt_type &&
		    mti->mti_stripe_index == info->mti_stripe_index) {
			found = true;
			break;
		}
	}
	if (!found) {
		OBD_ALLOC_PTR(tgt);
		if (tgt == NULL)
			GOTO(out, rc = -ENOMEM);

		INIT_LIST_HEAD(&tgt->mnt_list);
		tgt->mnt_version = 0; /* 0 means invalid */
		tgt->mnt_type = type;

		++tbl->mn_nr_targets;
	}
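
	/*
	 * Bump the table version and move the target to the list tail, so
	 * the list stays sorted by version - exactly the invariant that
	 * nidtbl_is_sane() checks.
	 */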
	tgt->mnt_version = ++tbl->mn_version;
	tgt->mnt_mti = *mti;

	list_move_tail(&tgt->mnt_list, &tbl->mn_targets);

	rc = nidtbl_update_version(env, fsdb->fsdb_mgs, tbl);
out:
	mutex_unlock(&tbl->mn_lock);
	if (rc)
		CERROR("Failed to write NID table version for file system %s: %d\n",
		       fsdb->fsdb_name, rc);
	RETURN(rc);
}
static void mgs_nidtbl_fini_fs(struct fs_db *fsdb)
{
	struct mgs_nidtbl *tbl = &fsdb->fsdb_nidtbl;
	struct list_head head = LIST_HEAD_INIT(head);

	mutex_lock(&tbl->mn_lock);
	tbl->mn_nr_targets = 0;
	list_splice_init(&tbl->mn_targets, &head);
	mutex_unlock(&tbl->mn_lock);

	while (!list_empty(&head)) {
		struct mgs_nidtbl_target *tgt;

		tgt = list_entry(head.next, struct mgs_nidtbl_target,
				 mnt_list);
		list_del(&tgt->mnt_list);
		OBD_FREE_PTR(tgt);
	}
}
static int mgs_nidtbl_init_fs(const struct lu_env *env, struct fs_db *fsdb)
{
	struct mgs_nidtbl *tbl = &fsdb->fsdb_nidtbl;
	int rc;

	INIT_LIST_HEAD(&tbl->mn_targets);
	mutex_init(&tbl->mn_lock);
	tbl->mn_nr_targets = 0;
	tbl->mn_fsdb = fsdb;
	mutex_lock(&tbl->mn_lock);
	rc = nidtbl_read_version(env, fsdb->fsdb_mgs, tbl, &tbl->mn_version);
	mutex_unlock(&tbl->mn_lock);
	if (rc < 0)
		CERROR("%s: IR: failed to read current version, rc = %d\n",
		       fsdb->fsdb_mgs->mgs_obd->obd_name, rc);
	else
		CDEBUG(D_MGS, "IR: current version is %llu\n",
		       tbl->mn_version);

	return rc;
}
/* --------- Imperative Recovery relies on nidtbl stuff ------- */
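/*
 * With Imperative Recovery (IR), the MGS notifies clients of a restarted
 * target by revoking the CONFIG_T_RECOVER ldlm lock; the lock callback
 * prompts clients to fetch the updated nidtbl entries built above instead
 * of waiting for recovery timers to expire.
 */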
void mgs_ir_notify_complete(struct fs_db *fsdb)
{
	struct timespec64 ts;
	ktime_t delta;

	atomic_set(&fsdb->fsdb_notify_phase, 0);

	/* do statistics */
	fsdb->fsdb_notify_count++;
	delta = ktime_sub(ktime_get(), fsdb->fsdb_notify_start);
	fsdb->fsdb_notify_total = ktime_add(fsdb->fsdb_notify_total, delta);
	if (ktime_after(delta, fsdb->fsdb_notify_max))
		fsdb->fsdb_notify_max = delta;

	ts = ktime_to_timespec64(fsdb->fsdb_notify_max);
	CDEBUG(D_MGS, "Revoke recover lock of %s completed after %lld.%09lds\n",
	       fsdb->fsdb_name, (s64)ts.tv_sec, ts.tv_nsec);
}
static int mgs_ir_notify(void *arg)
{
	struct fs_db *fsdb = arg;
	struct ldlm_res_id resid;
	char name[sizeof(fsdb->fsdb_name) + 16];

	LASSERTF(sizeof(name) < 40, "name is too large to fit on the stack\n");

	snprintf(name, sizeof(name) - 1, "mgs_%s_notify", fsdb->fsdb_name);
	complete(&fsdb->fsdb_notify_comp);
	set_user_nice(current, -2);
	mgc_fsname2resid(fsdb->fsdb_name, &resid, CONFIG_T_RECOVER);

	while (1) {
		struct l_wait_info lwi = { 0 };

		l_wait_event(fsdb->fsdb_notify_waitq,
			     fsdb->fsdb_notify_stop ||
			     atomic_read(&fsdb->fsdb_notify_phase),
			     &lwi);
		if (fsdb->fsdb_notify_stop)
			break;

		CDEBUG(D_MGS, "%s woken up, phase is %d\n",
		       name, atomic_read(&fsdb->fsdb_notify_phase));
		fsdb->fsdb_notify_start = ktime_get();
		mgs_revoke_lock(fsdb->fsdb_mgs, fsdb, CONFIG_T_RECOVER);
	}

	complete(&fsdb->fsdb_notify_comp);
	return 0;
}
int mgs_ir_init_fs(const struct lu_env *env, struct mgs_device *mgs,
		   struct fs_db *fsdb)
{
	struct task_struct *task;

	if (!ir_timeout)
		ir_timeout = (time64_t)OBD_IR_MGS_TIMEOUT;

	fsdb->fsdb_ir_state = IR_FULL;
	if (mgs->mgs_start_time + ir_timeout > ktime_get_real_seconds())
		fsdb->fsdb_ir_state = IR_STARTUP;
	fsdb->fsdb_nonir_clients = 0;

	/* start notify thread */
	fsdb->fsdb_mgs = mgs;
	task = kthread_run(mgs_ir_notify, fsdb,
			   "mgs_%s_notify", fsdb->fsdb_name);
	if (!IS_ERR(task))
		wait_for_completion(&fsdb->fsdb_notify_comp);
	else
		CERROR("Failed to start notify thread: %ld\n", PTR_ERR(task));

	mgs_nidtbl_init_fs(env, fsdb);
	return 0;
}
void mgs_ir_fini_fs(struct mgs_device *mgs, struct fs_db *fsdb)
{
	if (test_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags))
		return;

	mgs_fsc_cleanup_by_fsdb(fsdb);

	mgs_nidtbl_fini_fs(fsdb);

	LASSERT(list_empty(&fsdb->fsdb_clients));

	fsdb->fsdb_notify_stop = 1;
	wake_up(&fsdb->fsdb_notify_waitq);
	wait_for_completion(&fsdb->fsdb_notify_comp);
}
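
/*
 * IR state machine: a file system is in IR_STARTUP for the first
 * ir_timeout seconds after the MGS starts; afterwards it graduates to
 * IR_FULL, or to IR_PARTIAL when non-IR-capable clients are attached.
 */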
/* caller must have held fsdb_mutex */
static inline void ir_state_graduate(struct fs_db *fsdb)
{
	if (fsdb->fsdb_ir_state == IR_STARTUP) {
		if (ktime_get_real_seconds() >
		    fsdb->fsdb_mgs->mgs_start_time + ir_timeout) {
			fsdb->fsdb_ir_state = IR_FULL;
			if (fsdb->fsdb_nonir_clients)
				fsdb->fsdb_ir_state = IR_PARTIAL;
		}
	}
}
int mgs_ir_update(const struct lu_env *env, struct mgs_device *mgs,
		  struct mgs_target_info *mti)
{
	struct fs_db *fsdb;
	bool notify = true;
	int rc;

	if (mti->mti_instance == 0)
		return -EINVAL;

	rc = mgs_find_or_make_fsdb(env, mgs, mti->mti_fsname, &fsdb);
	if (rc)
		return rc;

	rc = mgs_nidtbl_write(env, fsdb, mti);
	if (rc)
		GOTO(out, rc);

	/* check the IR state of the file system and update target flags */
	mutex_lock(&fsdb->fsdb_mutex);
	ir_state_graduate(fsdb);
	switch (fsdb->fsdb_ir_state) {
	case IR_FULL:
		mti->mti_flags |= LDD_F_IR_CAPABLE;
		break;
	case IR_DISABLED:
		notify = false;
		/* fallthrough */
	case IR_STARTUP:
	case IR_PARTIAL:
		break;
	}
	mutex_unlock(&fsdb->fsdb_mutex);

	LASSERT(ergo(mti->mti_flags & LDD_F_IR_CAPABLE, notify));
	if (notify) {
		CDEBUG(D_MGS, "Try to revoke recover lock of %s\n",
		       fsdb->fsdb_name);
		atomic_inc(&fsdb->fsdb_notify_phase);
		wake_up(&fsdb->fsdb_notify_waitq);
	}

out:
	mgs_put_fsdb(mgs, fsdb);
	return rc;
}
/* NID table can be cached by two entities: clients and MDTs */
enum {
	IR_CLIENT_CACHE = 1,
	IR_MDT_CACHE
};
static int delogname(char *logname, char *fsname, int *typ)
{
	char *ptr;
	int type;
	int len;

	ptr = strrchr(logname, '-');
	if (ptr == NULL)
		return -EINVAL;

	/*
	 * Decouple the file system name: the recovery llog name has the
	 * form "<fsname>-cliir" (cached by clients) or "<fsname>-mdtir"
	 * (cached by MDTs).
	 */
	if (strncmp(ptr, "-mdtir", 6) == 0)
		type = IR_MDT_CACHE;
	else if (strncmp(ptr, "-cliir", 6) == 0)
		type = IR_CLIENT_CACHE;
	else
		return -EINVAL;

	len = ptr - logname;
	if (len == 0)
		return -EINVAL;

	memcpy(fsname, logname, len);
	fsname[len] = 0;
	if (typ != NULL)
		*typ = type;
	return 0;
}
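
/*
 * For example, delogname("testfs-cliir", fsname, &type) stores "testfs"
 * in fsname and reports the client cache type ("testfs" is an
 * illustrative file system name).
 */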
int mgs_get_ir_logs(struct ptlrpc_request *req)
{
	struct lu_env *env = req->rq_svc_thread->t_env;
	struct mgs_device *mgs = exp2mgs_dev(req->rq_export);
	struct fs_db *fsdb = NULL;
	struct mgs_config_body *body;
	struct mgs_config_res *res;
	struct ptlrpc_bulk_desc *desc;
	struct l_wait_info lwi;
	char fsname[16];
	long bufsize;
	int unit_size;
	int type;
	int rc = 0;
	int i;
	int bytes;
	int page_count;
	int nrpages;
	struct page **pages = NULL;

	body = req_capsule_client_get(&req->rq_pill, &RMF_MGS_CONFIG_BODY);
	if (body == NULL)
		RETURN(-EINVAL);

	if (body->mcb_type != CONFIG_T_RECOVER)
		RETURN(-EINVAL);

	rc = delogname(body->mcb_name, fsname, &type);
	if (rc)
		RETURN(rc);

	bufsize = body->mcb_units << body->mcb_bits;
	nrpages = (bufsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (nrpages > PTLRPC_MAX_BRW_PAGES)
		RETURN(-EINVAL);

	rc = mgs_find_or_make_fsdb(env, mgs, fsname, &fsdb);
	if (rc)
		RETURN(rc);

	CDEBUG(D_MGS, "Reading IR log %s bufsize %ld.\n",
	       body->mcb_name, bufsize);

	OBD_ALLOC(pages, sizeof(*pages) * nrpages);
	if (pages == NULL)
		GOTO(out, rc = -ENOMEM);

	res = req_capsule_server_get(&req->rq_pill, &RMF_MGS_CONFIG_RES);
	if (res == NULL)
		GOTO(out, rc = -EINVAL);

	res->mcr_offset = body->mcb_offset;
	unit_size = min_t(int, 1 << body->mcb_bits, PAGE_SIZE);
	bytes = mgs_nidtbl_read(req->rq_export, &fsdb->fsdb_nidtbl, res,
				pages, nrpages, bufsize / unit_size,
				unit_size);
	if (bytes < 0)
		GOTO(out, rc = bytes);
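
	/*
	 * The packed nidtbl pages are returned to the client with a bulk
	 * PUT instead of being inlined in the reply, since the payload may
	 * span many pages.
	 */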
	/* start bulk transfer */
	page_count = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
	LASSERT(page_count <= nrpages);
	desc = ptlrpc_prep_bulk_exp(req, page_count, 1,
				    PTLRPC_BULK_PUT_SOURCE |
				    PTLRPC_BULK_BUF_KIOV,
				    MGS_BULK_PORTAL,
				    &ptlrpc_bulk_kiov_pin_ops);
	if (desc == NULL)
		GOTO(out, rc = -ENOMEM);

	for (i = 0; i < page_count && bytes > 0; i++) {
		desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0,
						 min_t(int, bytes,
						       PAGE_SIZE));
		bytes -= PAGE_SIZE;
	}

	rc = target_bulk_io(req->rq_export, desc, &lwi);
	ptlrpc_free_bulk(desc);

out:
	if (pages != NULL) {
		for (i = 0; i < nrpages; i++) {
			if (pages[i] == NULL)
				break;
			__free_page(pages[i]);
		}
		OBD_FREE(pages, sizeof(*pages) * nrpages);
	}
	if (fsdb != NULL)
		mgs_put_fsdb(mgs, fsdb);

	RETURN(rc);
}
static int lprocfs_ir_set_state(struct fs_db *fsdb, const char *buf)
{
	const char *strings[] = IR_STRINGS;
	int state = -1;
	int i;

	for (i = 0; i < ARRAY_SIZE(strings); i++) {
		if (strcmp(strings[i], buf) == 0) {
			state = i;
			break;
		}
	}
	if (state < 0)
		return -EINVAL;

	CDEBUG(D_MGS, "change IR state of %s from %s to %s\n",
	       fsdb->fsdb_name, strings[fsdb->fsdb_ir_state], strings[state]);
	mutex_lock(&fsdb->fsdb_mutex);
	if (state == IR_FULL && fsdb->fsdb_nonir_clients)
		state = IR_PARTIAL;
	fsdb->fsdb_ir_state = state;
	mutex_unlock(&fsdb->fsdb_mutex);

	return 0;
}
static int lprocfs_ir_set_timeout(struct fs_db *fsdb, const char *buf)
{
	return -EINVAL;
}

static int lprocfs_ir_clear_stats(struct fs_db *fsdb, const char *buf)
{
	if (buf[0] != 0)
		return -EINVAL;

	fsdb->fsdb_notify_total = ktime_set(0, 0);
	fsdb->fsdb_notify_max = ktime_set(0, 0);
	fsdb->fsdb_notify_count = 0;
	return 0;
}
static struct lproc_ir_cmd {
	char *name;
	int namelen;
	int (*handler)(struct fs_db *, const char *);
} ir_cmds[] = {
	{ "state=",   6, lprocfs_ir_set_state },
	{ "timeout=", 8, lprocfs_ir_set_timeout },
	{ "0",        1, lprocfs_ir_clear_stats }
};
int lprocfs_wr_ir_state(struct file *file, const char __user *buffer,
			size_t count, void *data)
{
	struct fs_db *fsdb = data;
	char *kbuf;
	char *ptr;
	int rc = 0;

	if (count == 0 || count >= PAGE_SIZE)
		return -EINVAL;

	OBD_ALLOC(kbuf, count + 1);
	if (kbuf == NULL)
		return -ENOMEM;

	if (copy_from_user(kbuf, buffer, count)) {
		OBD_FREE(kbuf, count + 1);
		return -EFAULT;
	}

	kbuf[count] = 0; /* buffer is supposed to end with 0 */
	if (kbuf[count - 1] == '\n')
		kbuf[count - 1] = 0;
	ptr = kbuf;

	/* fsname=<file system name> must be the 1st entry */
	while (ptr != NULL) {
		char *tmpptr;
		int i;

		tmpptr = strchr(ptr, ';');
		if (tmpptr != NULL)
			*tmpptr++ = 0;

		rc = -EINVAL;
		for (i = 0; i < ARRAY_SIZE(ir_cmds); i++) {
			struct lproc_ir_cmd *cmd;
			int cmdlen;

			cmd = &ir_cmds[i];
			cmdlen = cmd->namelen;
			if (strncmp(cmd->name, ptr, cmdlen) == 0) {
				ptr += cmdlen;
				rc = cmd->handler(fsdb, ptr);
				break;
			}
		}
		if (rc)
			break;

		ptr = tmpptr;
	}
	if (rc)
		CERROR("Unable to process command: %s(%d)\n", ptr, rc);
	OBD_FREE(kbuf, count + 1);
	return rc ?: count;
}
int lprocfs_rd_ir_state(struct seq_file *seq, void *data)
{
	struct fs_db *fsdb = data;
	struct mgs_nidtbl *tbl = &fsdb->fsdb_nidtbl;
	const char *ir_strings[] = IR_STRINGS;
	struct timespec64 ts_max;
	struct timespec64 ts;

	/* mgs_live_seq_show() already holds fsdb_mutex. */
	ir_state_graduate(fsdb);

	seq_printf(seq, "\nimperative_recovery_state:\n");
	seq_printf(seq, "    state: %s\n"
			"    nonir_clients: %d\n"
			"    nidtbl_version: %lld\n",
		   ir_strings[fsdb->fsdb_ir_state], fsdb->fsdb_nonir_clients,
		   tbl->mn_version);

	ts = ktime_to_timespec64(fsdb->fsdb_notify_total);
	ts_max = ktime_to_timespec64(fsdb->fsdb_notify_max);

	seq_printf(seq, "    notify_duration_total: %lld.%09ld\n"
			"    notify_duration_max: %lld.%09ld\n"
			"    notify_count: %u\n",
		   (s64)ts.tv_sec, ts.tv_nsec,
		   (s64)ts_max.tv_sec, ts_max.tv_nsec,
		   fsdb->fsdb_notify_count);

	return 0;
}
int lprocfs_ir_timeout_seq_show(struct seq_file *m, void *data)
{
	return lprocfs_u64_seq_show(m, &ir_timeout);
}

ssize_t lprocfs_ir_timeout_seq_write(struct file *file,
				     const char __user *buffer,
				     size_t count, loff_t *off)
{
	return lprocfs_wr_uint(file, buffer, count, &ir_timeout);
}
/* --------------- Handle non IR support clients --------------- */
/* attach a Lustre file system to an export */
int mgs_fsc_attach(const struct lu_env *env, struct obd_export *exp,
		   char *fsname)
{
	struct mgs_export_data *data = &exp->u.eu_mgs_data;
	struct mgs_device *mgs = exp2mgs_dev(exp);
	struct fs_db *fsdb = NULL;
	struct mgs_fsc *fsc = NULL;
	struct mgs_fsc *new_fsc = NULL;
	bool found = false;
	int rc;

	rc = mgs_find_or_make_fsdb(env, mgs, fsname, &fsdb);
	if (rc)
		RETURN(rc);

	/* pre-allocate a new fsc: we may need it while holding the
	 * spinlock, where allocation is not allowed.
	 */
	OBD_ALLOC_PTR(new_fsc);
	if (new_fsc == NULL)
		GOTO(out, rc = -ENOMEM);

	INIT_LIST_HEAD(&new_fsc->mfc_export_list);
	INIT_LIST_HEAD(&new_fsc->mfc_fsdb_list);
	new_fsc->mfc_fsdb = fsdb;
	new_fsc->mfc_export = class_export_get(exp);
	new_fsc->mfc_ir_capable = !!(exp_connect_flags(exp) &
				     OBD_CONNECT_IMP_RECOV);

	rc = -EEXIST;
	mutex_lock(&fsdb->fsdb_mutex);

	/* search the export list first because it is usually shorter. */
	spin_lock(&data->med_lock);
	list_for_each_entry(fsc, &data->med_clients, mfc_export_list) {
		if (strcmp(fsname, fsc->mfc_fsdb->fsdb_name) == 0) {
			found = true;
			break;
		}
	}
	if (!found) {
		fsc = new_fsc;
		new_fsc = NULL;

		/* add it into the export list. */
		list_add(&fsc->mfc_export_list, &data->med_clients);

		/* add it into the fsdb list. */
		list_add(&fsc->mfc_fsdb_list, &fsdb->fsdb_clients);
		if (!fsc->mfc_ir_capable) {
			++fsdb->fsdb_nonir_clients;
			if (fsdb->fsdb_ir_state == IR_FULL)
				fsdb->fsdb_ir_state = IR_PARTIAL;
		}
		rc = 0;
	}
	spin_unlock(&data->med_lock);
	mutex_unlock(&fsdb->fsdb_mutex);

	if (new_fsc) {
		class_export_put(new_fsc->mfc_export);
		OBD_FREE_PTR(new_fsc);
	}
out:
	mgs_put_fsdb(mgs, fsdb);
	RETURN(rc);
}
void mgs_fsc_cleanup(struct obd_export *exp)
{
	struct mgs_export_data *data = &exp->u.eu_mgs_data;
	struct mgs_fsc *fsc, *tmp;
	struct list_head head = LIST_HEAD_INIT(head);

	spin_lock(&data->med_lock);
	list_splice_init(&data->med_clients, &head);
	spin_unlock(&data->med_lock);

	list_for_each_entry_safe(fsc, tmp, &head, mfc_export_list) {
		struct fs_db *fsdb = fsc->mfc_fsdb;

		LASSERT(fsc->mfc_export == exp);

		mutex_lock(&fsdb->fsdb_mutex);
		list_del_init(&fsc->mfc_fsdb_list);
		if (fsc->mfc_ir_capable == 0) {
			--fsdb->fsdb_nonir_clients;
			LASSERT(fsdb->fsdb_ir_state != IR_FULL);
			if (fsdb->fsdb_nonir_clients == 0 &&
			    fsdb->fsdb_ir_state == IR_PARTIAL)
				fsdb->fsdb_ir_state = IR_FULL;
		}
		mutex_unlock(&fsdb->fsdb_mutex);
		list_del_init(&fsc->mfc_export_list);
		class_export_put(fsc->mfc_export);
		OBD_FREE_PTR(fsc);
	}
}
/* must be called with fsdb->fsdb_mutex held */
void mgs_fsc_cleanup_by_fsdb(struct fs_db *fsdb)
{
	struct mgs_fsc *fsc, *tmp;

	list_for_each_entry_safe(fsc, tmp, &fsdb->fsdb_clients,
				 mfc_fsdb_list) {
		struct mgs_export_data *data =
			&fsc->mfc_export->u.eu_mgs_data;

		LASSERT(fsdb == fsc->mfc_fsdb);
		list_del_init(&fsc->mfc_fsdb_list);

		spin_lock(&data->med_lock);
		list_del_init(&fsc->mfc_export_list);
		spin_unlock(&data->med_lock);
		class_export_put(fsc->mfc_export);
		OBD_FREE_PTR(fsc);
	}

	fsdb->fsdb_nonir_clients = 0;
	if (fsdb->fsdb_ir_state == IR_PARTIAL)
		fsdb->fsdb_ir_state = IR_FULL;
}