1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/mgs/mgs_nids.c
38 * NID table management for lustre.
40 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
46 #define DEBUG_SUBSYSTEM S_MGS
47 #define D_MGS D_CONFIG
50 #include <linux/module.h>
51 #include <linux/pagemap.h>
57 #include <obd_class.h>
58 #include <lustre_log.h>
60 #include <libcfs/list.h>
61 #include <linux/lvfs.h>
62 #include <lustre_fsfilt.h>
63 #include <lustre_disk.h>
64 #include <lustre_param.h>
65 #include "mgs_internal.h"
/* Imperative Recovery grace period: set from OBD_IR_MGS_TIMEOUT in
 * mgs_ir_init_fs() and tunable at runtime through lprocfs_wr_ir_timeout().
 * It is added to mgs_start_time and compared with cfs_time_current_sec(),
 * so the unit is seconds. */
67 static unsigned int ir_timeout;
/* Sanity check for a NID table: verify that, under tbl->mn_lock, every
 * target on tbl->mn_targets has a non-zero version and that versions are
 * strictly increasing along the list.
 * NOTE(review): this listing elides several lines of the function (the
 * 'version' declaration, the early-return branches and the final return),
 * so only the visible logic is documented here. */
69 static int nidtbl_is_sane(struct mgs_nidtbl *tbl)
71         struct mgs_nidtbl_target *tgt;
/* caller must hold the table mutex for the whole walk */
74         LASSERT(cfs_mutex_is_locked(&tbl->mn_lock));
75         cfs_list_for_each_entry(tgt, &tbl->mn_targets, mnt_list) {
/* version 0 marks an invalid/uninitialized target (see mgs_nidtbl_write) */
76                 if (!tgt->mnt_version)
/* a version not greater than its predecessor breaks monotonic ordering */
79                 if (version >= tgt->mnt_version)
82                 version = tgt->mnt_version;
88 * Fetch nidtbl entries whose version are not less than @version
89 * nidtbl entries will be packed in @pages by @unit_size units - entries
90 * shouldn't cross unit boundaries.
/* Pack nidtbl entries with version >= res->mcr_offset into @pages.
 * Entries are grouped into @unit_size chunks (unit_size is a power of two,
 * at most one page) and never cross a unit boundary; when an entry does not
 * fit in the remainder of a unit, the previous entry's mne_length is padded
 * to cover the leftover bytes and packing restarts at the next unit.
 * Entries are byte-swapped on the wire by lustre_swab_mgs_nidtbl_entry().
 * On return res->mcr_size is the current table version and res->mcr_offset
 * tells the client where to resume (last packed version if we ran out of
 * buffer space, else the full table version).
 * NOTE(review): this listing elides many lines (declarations of rc/index/
 * nobuf/buf, several closing braces and GOTO paths); comments below only
 * describe what is visible. */
92 static int mgs_nidtbl_read(struct obd_device *unused, struct mgs_nidtbl *tbl,
93 struct mgs_config_res *res, cfs_page_t **pages,
94 int nrpages, int units_total, int unit_size)
96 struct mgs_nidtbl_target *tgt;
97 struct mgs_nidtbl_entry *entry;
98 struct mgs_nidtbl_entry *last_in_unit = NULL;
99 struct mgs_target_info *mti;
/* client resumes from the version it last saw */
100 __u64 version = res->mcr_offset;
103 int bytes_in_unit = 0;
104 int units_in_page = 0;
109 /* make sure unit_size is power 2 */
110 LASSERT((unit_size & (unit_size - 1)) == 0);
/* the supplied pages must be able to hold all requested units */
111 LASSERT(nrpages << CFS_PAGE_SHIFT >= units_total * unit_size);
113 cfs_mutex_lock(&tbl->mn_lock);
114 LASSERT(nidtbl_is_sane(tbl));
116 /* no more entries ? */
117 if (version > tbl->mn_version) {
118 version = tbl->mn_version;
122 /* iterate over all targets to compose a bitmap by the type of llog.
123 * If the llog is for MDTs, llog entries for OSTs will be returned;
124 * otherwise, it's for clients, then llog entries for both OSTs and
125 * MDTs will be returned.
127 cfs_list_for_each_entry(tgt, &tbl->mn_targets, mnt_list) {
128 int entry_len = sizeof(*entry);
/* skip targets the client has already seen */
130 if (tgt->mnt_version < version)
133 /* write target recover information */
135 LASSERT(mti->mti_nid_count < MTI_NIDS_MAX);
/* entry is variable-length: header plus one lnet_nid_t per NID */
136 entry_len += mti->mti_nid_count * sizeof(lnet_nid_t);
/* an entry may never span units, so it must fit in one */
138 if (entry_len > unit_size) {
139 CWARN("nidtbl: too large entry: entry length %d,"
140 "unit size: %d\n", entry_len, unit_size);
141 GOTO(out, rc = -EOVERFLOW);
/* current unit cannot hold this entry: close it and open a new one */
144 if (bytes_in_unit < entry_len) {
/* buffer exhausted - client will resume from 'version' */
145 if (units_total == 0) {
150 /* check if we need to consume remaining bytes. */
151 if (last_in_unit != NULL && bytes_in_unit) {
152 /* entry has been swapped. */
/* mne_length is already in wire byte order, so swab it back to
 * host order, extend it over the unit's trailing pad, and swab
 * it to wire order again */
153 __swab32s(&last_in_unit->mne_length);
154 last_in_unit->mne_length += bytes_in_unit;
155 __swab32s(&last_in_unit->mne_length);
157 buf += bytes_in_unit;
/* total bytes produced so far stays unit-aligned */
160 LASSERT((rc & (unit_size - 1)) == 0);
162 if (units_in_page == 0) {
163 /* allocate a new page */
164 pages[index] = cfs_alloc_page(CFS_ALLOC_STD);
165 if (pages[index] == NULL) {
170 /* destroy previous map */
172 cfs_kunmap(pages[index - 1]);
174 /* reassign buffer */
175 buf = cfs_kmap(pages[index]);
178 units_in_page = CFS_PAGE_SIZE / unit_size;
179 LASSERT(units_in_page > 0);
182 /* allocate an unit */
/* units are naturally aligned within the page */
183 LASSERT(((long)buf & (unit_size - 1)) == 0);
184 bytes_in_unit = unit_size;
/* fill the entry in host order, then swab it for the wire */
190 entry = (struct mgs_nidtbl_entry *)buf;
191 entry->mne_version = tgt->mnt_version;
192 entry->mne_instance = mti->mti_instance;
193 entry->mne_index = mti->mti_stripe_index;
194 entry->mne_length = entry_len;
195 entry->mne_type = tgt->mnt_type;
196 entry->mne_nid_type = 0;
197 entry->mne_nid_size = sizeof(lnet_nid_t);
198 entry->mne_nid_count = mti->mti_nid_count;
199 memcpy(entry->u.nids, mti->mti_nids,
200 mti->mti_nid_count * sizeof(lnet_nid_t));
201 lustre_swab_mgs_nidtbl_entry(entry);
/* remember the highest version actually packed */
203 version = tgt->mnt_version;
207 bytes_in_unit -= entry_len;
208 last_in_unit = entry;
210 CDEBUG(D_MGS, "fsname %s, entry size %d, pages %d/%d/%d/%d.\n",
211 tbl->mn_fsdb->fsdb_name, entry_len,
212 bytes_in_unit, index, nrpages, units_total);
/* drop the kmap of the last mapped page */
215 cfs_kunmap(pages[index - 1]);
217 LASSERT(version <= tbl->mn_version);
218 res->mcr_size = tbl->mn_version;
/* if we ran out of buffer, tell the client to resume at 'version';
 * otherwise the whole table fit and the client is fully up to date */
219 res->mcr_offset = nobuf ? version : tbl->mn_version;
220 cfs_mutex_unlock(&tbl->mn_lock);
221 LASSERT(ergo(version == 1, rc == 0)); /* get the log first time */
223 CDEBUG(D_MGS, "Read IR logs %s return with %d, version %llu\n",
224 tbl->mn_fsdb->fsdb_name, rc, version);
/* Persist tbl->mn_version (little-endian) to the per-filesystem file
 * MGS_NIDTBL_DIR/<fsname> under the MGS lvfs context, then fsync the MGS
 * superblock so the version survives a crash.  Caller must hold
 * tbl->mn_lock.
 * NOTE(review): the listing elides the 'version'/'off'/'rc' declarations,
 * the l_filp_open error branch and the filp_close call. */
228 static int nidtbl_update_version(struct obd_device *obd, struct mgs_nidtbl *tbl)
230 struct lvfs_run_ctxt saved;
231 struct file *file = NULL;
/* room for "MGS_NIDTBL_DIR/" plus a short fsname */
232 char filename[sizeof(MGS_NIDTBL_DIR) + 9];
238 LASSERT(cfs_mutex_is_locked(&tbl->mn_lock));
239 LASSERT(sizeof(filename) < 32);
241 sprintf(filename, "%s/%s",
242 MGS_NIDTBL_DIR, tbl->mn_fsdb->fsdb_name);
/* switch to the MGS filesystem context for the local file I/O */
244 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
246 file = l_filp_open(filename, O_RDWR|O_CREAT, 0660);
/* stored on disk in little-endian byte order */
248 version = cpu_to_le64(tbl->mn_version);
249 rc = lustre_fwrite(file, &version, sizeof(version), &off);
250 if (rc == sizeof(version))
/* make the new version durable before it is served to clients */
253 fsfilt_sync(obd, obd->u.mgs.mgs_sb);
258 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
262 #define MGS_NIDTBL_VERSION_INIT 2
/* Read the persisted NID table version from MGS_NIDTBL_DIR/<fsname>.
 * If the file does not exist (first use) the version starts at
 * MGS_NIDTBL_VERSION_INIT.  Caller must hold tbl->mn_lock.
 * NOTE(review): the listing elides the open-failure branch, the short-read
 * handling and the filp_close call. */
264 static int nidtbl_read_version(struct obd_device *obd, struct mgs_nidtbl *tbl)
266 struct lvfs_run_ctxt saved;
267 struct file *file = NULL;
268 char filename[sizeof(MGS_NIDTBL_DIR) + 9];
274 LASSERT(cfs_mutex_is_locked(&tbl->mn_lock));
275 LASSERT(sizeof(filename) < 32);
277 sprintf(filename, "%s/%s",
278 MGS_NIDTBL_DIR, tbl->mn_fsdb->fsdb_name);
280 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
282 file = l_filp_open(filename, O_RDONLY, 0);
284 rc = lustre_fread(file, &version, sizeof(version), &off);
285 if (rc == sizeof(version))
/* NOTE(review): converting a value just read from disk with cpu_to_le64
 * looks like it should be le64_to_cpu; the two are identical on
 * little-endian hosts but differ on big-endian — verify intent. */
286 rc = cpu_to_le64(version);
/* no version file yet: start from the initial version */
288 rc = MGS_NIDTBL_VERSION_INIT;
290 CERROR("read version file %s error %d\n", filename, rc);
295 rc = MGS_NIDTBL_VERSION_INIT;
298 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
/* Record (or refresh) target @mti in the fsdb's NID table: find an existing
 * slot by (type, stripe index) or allocate a new one, bump the table
 * version, stamp it on the target, move the target to the list tail (so
 * mn_targets stays sorted by version), and persist the new version.
 * NOTE(review): the listing elides the allocation of 'tgt', the copy of
 * *mti into tgt->mnt_mti, the loop break and several braces. */
302 static int mgs_nidtbl_write(struct fs_db *fsdb, struct mgs_target_info *mti)
304 struct mgs_nidtbl *tbl;
305 struct mgs_nidtbl_target *tgt;
307 int type = mti->mti_flags & LDD_F_SV_TYPE_MASK;
/* a combined MGS/MDT registers as MDT here; drop the MGS bit */
311 type &= ~LDD_F_SV_TYPE_MGS;
314 tbl = &fsdb->fsdb_nidtbl;
315 cfs_mutex_lock(&tbl->mn_lock);
/* look for an existing entry for this (type, stripe index) pair */
316 cfs_list_for_each_entry(tgt, &tbl->mn_targets, mnt_list) {
317 struct mgs_target_info *info = &tgt->mnt_mti;
318 if (type == tgt->mnt_type &&
319 mti->mti_stripe_index == info->mti_stripe_index) {
327 GOTO(out, rc = -ENOMEM);
329 CFS_INIT_LIST_HEAD(&tgt->mnt_list);
331 tgt->mnt_version = 0; /* 0 means invalid */
332 tgt->mnt_type = type;
334 ++tbl->mn_nr_targets;
/* stamp the target with a fresh, strictly increasing version */
337 tgt->mnt_version = ++tbl->mn_version;
/* keep mn_targets ordered by version: newest at the tail */
340 cfs_list_move_tail(&tgt->mnt_list, &tbl->mn_targets);
/* persist the bumped version so it survives MGS restart */
342 rc = nidtbl_update_version(fsdb->fsdb_obd, tbl);
346 cfs_mutex_unlock(&tbl->mn_lock);
348 CERROR("Write NID table version for file system %s error %d\n",
349 fsdb->fsdb_name, rc);
/* Tear down a filesystem's NID table: detach all targets under the lock by
 * splicing them onto a private list, then free them outside the lock.
 * NOTE(review): the listing elides the 'head' list declaration and the
 * per-target free call. */
353 static void mgs_nidtbl_fini_fs(struct fs_db *fsdb)
355 struct mgs_nidtbl *tbl = &fsdb->fsdb_nidtbl;
358 cfs_mutex_lock(&tbl->mn_lock);
359 tbl->mn_nr_targets = 0;
/* empty mn_targets atomically; freeing happens after the unlock */
360 cfs_list_splice_init(&tbl->mn_targets, &head);
361 cfs_mutex_unlock(&tbl->mn_lock);
363 while (!cfs_list_empty(&head)) {
364 struct mgs_nidtbl_target *tgt;
365 tgt = list_entry(head.next, struct mgs_nidtbl_target, mnt_list);
366 cfs_list_del(&tgt->mnt_list);
/* Initialize a filesystem's NID table: empty target list, fresh mutex, and
 * the version restored from disk (or MGS_NIDTBL_VERSION_INIT on first use)
 * via nidtbl_read_version(). */
371 static int mgs_nidtbl_init_fs(struct fs_db *fsdb)
373 struct mgs_nidtbl *tbl = &fsdb->fsdb_nidtbl;
375 CFS_INIT_LIST_HEAD(&tbl->mn_targets);
376 cfs_mutex_init(&tbl->mn_lock);
377 tbl->mn_nr_targets = 0;
/* nidtbl_read_version() asserts mn_lock is held */
379 cfs_mutex_lock(&tbl->mn_lock);
380 tbl->mn_version = nidtbl_read_version(fsdb->fsdb_obd, tbl);
381 cfs_mutex_unlock(&tbl->mn_lock);
382 CDEBUG(D_MGS, "IR: current version is %llu\n", tbl->mn_version);
387 /* --------- Imperative Recovery relies on nidtbl stuff ------- */
/* Called when a recovery-lock revocation round finishes: reset the notify
 * phase and fold the elapsed time into the fsdb notify statistics
 * (count / total / max), which are reported by lprocfs_rd_ir_state().
 * NOTE(review): the 'tv' declaration is elided from this listing. */
388 void mgs_ir_notify_complete(struct fs_db *fsdb)
391 cfs_duration_t delta;
/* phase 0 = idle; mgs_ir_update() increments it to request a revoke */
393 cfs_atomic_set(&fsdb->fsdb_notify_phase, 0);
396 fsdb->fsdb_notify_count++;
397 delta = cfs_time_sub(cfs_time_current(), fsdb->fsdb_notify_start);
398 fsdb->fsdb_notify_total += delta;
399 if (delta > fsdb->fsdb_notify_max)
400 fsdb->fsdb_notify_max = delta;
402 cfs_duration_usec(delta, &tv);
403 CDEBUG(D_MGS, "Revoke recover lock of %s completed after %ld.%06lds\n",
404 fsdb->fsdb_name, tv.tv_sec, tv.tv_usec);
/* Per-filesystem notify thread (started by mgs_ir_init_fs()): sleeps until
 * either a revoke is requested (fsdb_notify_phase != 0) or shutdown is
 * signalled (fsdb_notify_stop), and on wakeup revokes the CONFIG_T_RECOVER
 * lock so IR-capable clients refetch the recover log.  Completes
 * fsdb_notify_comp once at startup and once at exit so init/fini can
 * synchronize with it.
 * NOTE(review): the listing elides the daemonize/thread-naming call, the
 * enclosing for(;;) loop header and the final return. */
407 static int mgs_ir_notify(void *arg)
409 struct fs_db *fsdb = arg;
410 struct ldlm_res_id resid;
412 char name[sizeof(fsdb->fsdb_name) + 20];
414 LASSERTF(sizeof(name) < 32, "name is too large to be in stack.\n");
415 sprintf(name, "mgs_%s_notify", fsdb->fsdb_name);
/* tell mgs_ir_init_fs() the thread is up */
418 cfs_complete(&fsdb->fsdb_notify_comp);
/* slightly boost priority: revocation latency affects recovery time */
420 set_user_nice(current, -2);
422 mgc_fsname2resid(fsdb->fsdb_name, &resid, CONFIG_T_RECOVER);
424 struct l_wait_info lwi = { 0 };
426 l_wait_event(fsdb->fsdb_notify_waitq,
427 fsdb->fsdb_notify_stop ||
428 cfs_atomic_read(&fsdb->fsdb_notify_phase),
/* shutdown requested by mgs_ir_fini_fs() */
430 if (fsdb->fsdb_notify_stop)
433 CDEBUG(D_MGS, "%s woken up, phase is %d\n",
434 name, cfs_atomic_read(&fsdb->fsdb_notify_phase));
/* timestamp for the duration stats in mgs_ir_notify_complete() */
436 fsdb->fsdb_notify_start = cfs_time_current();
437 mgs_revoke_lock(fsdb->fsdb_obd, fsdb, CONFIG_T_RECOVER);
/* tell mgs_ir_fini_fs() the thread has exited */
440 cfs_complete(&fsdb->fsdb_notify_comp);
/* Per-filesystem IR setup: choose the initial IR state (IR_STARTUP while
 * the MGS is still inside its post-start grace window, IR_FULL otherwise),
 * start the notify thread, and initialize the NID table.
 * NOTE(review): the listing elides the 'if (!ir_timeout)' guard around the
 * default assignment, the rc declaration, and the error/return paths. */
444 int mgs_ir_init_fs(struct obd_device *obd, struct fs_db *fsdb)
446 struct mgs_obd *mgs = &obd->u.mgs;
/* default grace period before IR is considered fully functional */
450 ir_timeout = OBD_IR_MGS_TIMEOUT;
452 fsdb->fsdb_ir_state = IR_FULL;
/* within ir_timeout seconds of MGS start, stay in STARTUP state */
453 if (cfs_time_before(cfs_time_current_sec(),
454 mgs->mgs_start_time + ir_timeout))
455 fsdb->fsdb_ir_state = IR_STARTUP;
456 fsdb->fsdb_nonir_clients = 0;
457 CFS_INIT_LIST_HEAD(&fsdb->fsdb_clients);
459 /* start notify thread */
460 fsdb->fsdb_obd = obd;
461 cfs_atomic_set(&fsdb->fsdb_notify_phase, 0);
462 cfs_waitq_init(&fsdb->fsdb_notify_waitq);
463 cfs_init_completion(&fsdb->fsdb_notify_comp);
464 rc = cfs_create_thread(mgs_ir_notify, fsdb, CFS_DAEMON_FLAGS);
/* wait for mgs_ir_notify() to signal it is running */
466 cfs_wait_for_completion(&fsdb->fsdb_notify_comp);
468 CERROR("Start notify thread error %d\n", rc);
470 mgs_nidtbl_init_fs(fsdb);
/* Per-filesystem IR teardown: skip the MGS's own self fsdb, detach all
 * client records and the NID table, then stop the notify thread and wait
 * for it to exit. */
474 void mgs_ir_fini_fs(struct obd_device *obd, struct fs_db *fsdb)
/* the MGS self-entry never had IR state set up */
476 if (cfs_test_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags))
479 mgs_fsc_cleanup_by_fsdb(fsdb);
481 mgs_nidtbl_fini_fs(fsdb);
483 LASSERT(cfs_list_empty(&fsdb->fsdb_clients));
/* wake the notify thread and wait for its exit completion */
485 fsdb->fsdb_notify_stop = 1;
486 cfs_waitq_signal(&fsdb->fsdb_notify_waitq);
487 cfs_wait_for_completion(&fsdb->fsdb_notify_comp);
490 /* caller must have held fsdb_sem */
/* caller must have held fsdb_sem */
/* Promote the IR state out of IR_STARTUP once the MGS post-start grace
 * window (ir_timeout seconds) has elapsed: to IR_PARTIAL if any non-IR
 * clients are attached, IR_FULL otherwise. */
491 static inline void ir_state_graduate(struct fs_db *fsdb)
493 struct mgs_obd *mgs = &fsdb->fsdb_obd->u.mgs;
495 if (fsdb->fsdb_ir_state == IR_STARTUP) {
496 if (cfs_time_before(mgs->mgs_start_time + ir_timeout,
497 cfs_time_current_sec())) {
498 fsdb->fsdb_ir_state = IR_FULL;
/* any non-IR client downgrades full IR to partial */
499 if (fsdb->fsdb_nonir_clients)
500 fsdb->fsdb_ir_state = IR_PARTIAL;
/* Handle a target registration update for IR: record the target's NIDs in
 * the nidtbl, re-evaluate the IR state, mark the request IR-capable when
 * appropriate, and kick the notify thread to revoke the recover lock so
 * clients refetch the log.
 * NOTE(review): the listing elides the fsdb/rc/notify declarations, error
 * checks after the calls, the remaining switch cases and the return. */
505 int mgs_ir_update(struct obd_device *obd, struct mgs_target_info *mti)
/* instance 0 is not a valid IR registration */
511 if (mti->mti_instance == 0)
514 rc = mgs_find_or_make_fsdb(obd, mti->mti_fsname, &fsdb);
518 rc = mgs_nidtbl_write(fsdb, mti);
523 cfs_down(&fsdb->fsdb_sem);
/* leave IR_STARTUP if the grace window has passed */
524 ir_state_graduate(fsdb);
525 switch (fsdb->fsdb_ir_state) {
527 mti->mti_flags |= LDD_F_IR_CAPABLE;
537 cfs_up(&fsdb->fsdb_sem);
539 LASSERT(ergo(mti->mti_flags & LDD_F_IR_CAPABLE, notify));
541 CDEBUG(D_MGS, "Try to revoke recover lock of %s\n",
/* request a revoke round and wake the notify thread */
543 cfs_atomic_inc(&fsdb->fsdb_notify_phase);
544 cfs_waitq_signal(&fsdb->fsdb_notify_waitq);
549 /* NID table can be cached by two entities: Clients and MDTs */
/* Split a recover-llog name of the form "<fsname>-mdtir" or
 * "<fsname>-cliir" into the filesystem name (copied to @fsname) and the
 * cache type (*typ).
 * NOTE(review): the listing elides the missing-dash error path, the type
 * assignments, the 'len' computation and the NUL-termination/return. */
555 static int delogname(char *logname, char *fsname, int *typ)
/* find the final "-suffix" component */
561 ptr = strrchr(logname, '-');
565 /* decouple file system name. The llog name may be:
566 * - "prefix-fsname", prefix is "cliir" or "mdtir"
/* "-mdtir": log cached by MDTs; "-cliir": log cached by clients */
568 if (strncmp(ptr, "-mdtir", 6) == 0)
570 else if (strncmp(ptr, "-cliir", 6) == 0)
579 memcpy(fsname, logname, len);
/* Serve an MGS_CONFIG_READ(CONFIG_T_RECOVER) request: validate the body,
 * resolve the fsdb from the llog name, size the reply buffer from
 * mcb_units/mcb_bits (capped at PTLRPC_MAX_BRW_PAGES pages), pack nidtbl
 * entries with mgs_nidtbl_read(), and bulk-PUT the pages to the client.
 * NOTE(review): the listing elides many declarations (fsdb, fsname, rc,
 * bytes, i, type, unit_size, bufsize, nrpages, page_count), several error
 * branches and the out: label/return. */
586 int mgs_get_ir_logs(struct ptlrpc_request *req)
588 struct obd_device *obd = req->rq_export->exp_obd;
590 struct mgs_config_body *body;
591 struct mgs_config_res *res;
592 struct ptlrpc_bulk_desc *desc;
593 struct l_wait_info lwi;
604 cfs_page_t **pages = NULL;
607 body = req_capsule_client_get(&req->rq_pill, &RMF_MGS_CONFIG_BODY);
/* only recover logs are served here */
611 if (body->mcb_type != CONFIG_T_RECOVER)
/* parse "<fsname>-{mdtir|cliir}" from the requested llog name */
614 rc = delogname(body->mcb_name, fsname, &type);
618 rc = mgs_find_or_make_fsdb(obd, fsname, &fsdb);
/* client-requested buffer: mcb_units units of 2^mcb_bits bytes */
622 bufsize = body->mcb_units << body->mcb_bits;
623 nrpages = (bufsize + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
/* cap at what a single bulk transfer can carry */
624 if (nrpages > PTLRPC_MAX_BRW_PAGES)
627 CDEBUG(D_MGS, "Reading IR log %s bufsize %ld.\n",
628 body->mcb_name, bufsize);
630 OBD_ALLOC(pages, sizeof(*pages) * nrpages);
634 rc = req_capsule_server_pack(&req->rq_pill);
638 res = req_capsule_server_get(&req->rq_pill, &RMF_MGS_CONFIG_RES);
640 GOTO(out, rc = -EINVAL);
/* client resumes reading from the version in mcb_offset */
642 res->mcr_offset = body->mcb_offset;
/* a unit never exceeds one page */
643 unit_size = min_t(int, 1 << body->mcb_bits, CFS_PAGE_SIZE);
644 bytes = mgs_nidtbl_read(obd, &fsdb->fsdb_nidtbl, res, pages, nrpages,
645 bufsize / unit_size, unit_size);
647 GOTO(out, rc = bytes);
649 /* start bulk transfer */
650 page_count = (bytes + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
651 LASSERT(page_count <= nrpages);
652 desc = ptlrpc_prep_bulk_exp(req, page_count,
653 BULK_PUT_SOURCE, MGS_BULK_PORTAL);
655 GOTO(out, rc = -ENOMEM);
/* queue the packed pages; the last page may be partially filled */
657 for (i = 0; i < page_count && bytes > 0; i++) {
658 ptlrpc_prep_bulk_page(desc, pages[i], 0,
659 min_t(int, bytes, CFS_PAGE_SIZE));
660 bytes -= CFS_PAGE_SIZE;
663 rc = target_bulk_io(req->rq_export, desc, &lwi);
664 ptlrpc_free_bulk(desc);
/* free whatever pages mgs_nidtbl_read() actually allocated */
668 for (i = 0; i < nrpages; i++) {
669 if (pages[i] == NULL)
671 cfs_free_page(pages[i]);
673 OBD_FREE(pages, sizeof(*pages) * nrpages);
/* lprocfs "state=" handler: map the state name in @buf onto an IR state
 * index via IR_STRINGS and apply it under fsdb_sem.  A request for IR_FULL
 * while non-IR clients are attached is rejected (elided branch between the
 * check and the assignment).
 * NOTE(review): the listing elides the 'state'/'i' declarations, the
 * not-found error path and the returns. */
678 static int lprocfs_ir_set_state(struct fs_db *fsdb, const char *buf)
680 const char *strings[] = IR_STRINGS;
/* translate the textual state name to its index */
684 for (i = 0; i < ARRAY_SIZE(strings); i++) {
685 if (strcmp(strings[i], buf) == 0) {
693 CDEBUG(D_MGS, "change fsr state of %s from %s to %s\n",
694 fsdb->fsdb_name, strings[fsdb->fsdb_ir_state], strings[state]);
695 cfs_down(&fsdb->fsdb_sem);
/* cannot be FULL while non-IR clients are connected */
696 if (state == IR_FULL && fsdb->fsdb_nonir_clients)
698 fsdb->fsdb_ir_state = state;
699 cfs_up(&fsdb->fsdb_sem);
/* lprocfs "timeout=" handler (body elided in this listing; registered in
 * the ir_cmds table below L725). */
704 static int lprocfs_ir_set_timeout(struct fs_db *fsdb, const char *buf)
/* lprocfs "0" command handler: reset the notify statistics reported by
 * lprocfs_rd_ir_state().
 * NOTE(review): an argument-validation branch and the return appear to be
 * elided from this listing. */
709 static int lprocfs_ir_clear_stats(struct fs_db *fsdb, const char *buf)
714 fsdb->fsdb_notify_total = 0;
715 fsdb->fsdb_notify_max = 0;
716 fsdb->fsdb_notify_count = 0;
/* Command table for lprocfs_wr_ir_state(): each entry maps a "name=" prefix
 * (namelen bytes) to its handler; the handler receives the text after the
 * prefix.  NOTE(review): the struct's name/namelen fields and the array
 * declarator line are elided from this listing. */
720 static struct lproc_ir_cmd {
723 int (*handler)(struct fs_db *, const char *);
725 { "state=", 6, lprocfs_ir_set_state },
726 { "timeout=", 8, lprocfs_ir_set_timeout },
727 { "0", 1, lprocfs_ir_clear_stats }
/* lprocfs write entry point: copy the user buffer into a kernel buffer,
 * strip a trailing newline, split on ';', and dispatch each token to the
 * matching handler in ir_cmds[].
 * NOTE(review): the listing elides several declarations (ptr/tmpptr/i/rc),
 * token-advance logic, error branches and the return. */
730 int lprocfs_wr_ir_state(struct file *file, const char *buffer,
731 unsigned long count, void *data)
733 struct fs_db *fsdb = data;
/* bound the copy to one page */
738 if (count > CFS_PAGE_SIZE)
741 OBD_ALLOC(kbuf, count + 1);
745 if (copy_from_user(kbuf, buffer, count)) {
/* NOTE(review): buffer was allocated with count + 1 bytes (line 741)
 * but is freed here with only count — OBD_FREE size mismatch; compare
 * with the free at line 784.  Verify against upstream. */
746 OBD_FREE(kbuf, count);
750 kbuf[count] = 0; /* buffer is supposed to end with 0 */
/* drop a trailing newline from echo(1)-style writes */
751 if (kbuf[count - 1] == '\n')
755 /* fsname=<file system name> must be the 1st entry */
/* tokens are separated by ';' */
756 while (ptr != NULL) {
760 tmpptr = strchr(ptr, ';');
765 for (i = 0; i < ARRAY_SIZE(ir_cmds); i++) {
766 struct lproc_ir_cmd *cmd;
770 cmdlen = cmd->namelen;
/* prefix match; the handler sees the text after "name=" */
771 if (strncmp(cmd->name, ptr, cmdlen) == 0) {
773 rc = cmd->handler(fsdb, ptr);
783 CERROR("Unable to process command: %s(%d)\n", ptr, rc);
784 OBD_FREE(kbuf, count + 1);
/* lprocfs read: report the current IR state, non-IR client count, nidtbl
 * version, and the notify-duration statistics gathered by
 * mgs_ir_notify_complete(). */
788 int lprocfs_rd_ir_state(struct seq_file *seq, void *data)
790 struct fs_db *fsdb = data;
791 struct mgs_nidtbl *tbl = &fsdb->fsdb_nidtbl;
792 const char *ir_strings[] = IR_STRINGS;
793 struct timeval tv_max;
796 /* mgs_live_seq_show() already holds fsdb_sem. */
/* state may graduate out of STARTUP lazily, on read */
797 ir_state_graduate(fsdb);
799 seq_printf(seq, "\nimperative_recovery_state:\n");
802 " nonir_clients: %d\n"
803 " nidtbl_version: %lld\n",
804 ir_strings[fsdb->fsdb_ir_state], fsdb->fsdb_nonir_clients,
807 cfs_duration_usec(fsdb->fsdb_notify_total, &tv);
808 cfs_duration_usec(fsdb->fsdb_notify_max, &tv_max);
810 seq_printf(seq, " notify_duration_total: %lu.%06lu\n"
/* NOTE(review): "duation" is a typo in the emitted key, but it is
 * user-visible output that monitoring tools may already parse, so it
 * is deliberately left unchanged here. */
811 " notify_duation_max: %lu.%06lu\n"
812 " notify_count: %u\n",
813 tv.tv_sec, tv.tv_usec,
814 tv_max.tv_sec, tv_max.tv_usec,
815 fsdb->fsdb_notify_count);
/* lprocfs read: print the global IR timeout (seconds).
 * NOTE(review): the opening/closing braces (and likely an *eof update) are
 * elided from this listing. */
820 int lprocfs_rd_ir_timeout(char *page, char **start, off_t off, int count,
821 int *eof, void *data)
824 return snprintf(page, count, "%d\n", ir_timeout);
/* lprocfs write: parse the user buffer as an unsigned int into the global
 * ir_timeout via the shared lprocfs_wr_uint() helper. */
827 int lprocfs_wr_ir_timeout(struct file *file, const char *buffer,
828 unsigned long count, void *data)
830 return lprocfs_wr_uint(file, buffer, count, &ir_timeout);
833 /* --------------- Handle non IR support clients --------------- */
834 /* attach a lustre file system to an export */
/* Attach a filesystem client record (mgs_fsc) to an export: pre-allocate
 * the record outside the spinlock, then under fsdb_sem + med_lock either
 * find an existing record for @fsname on the export or link the new one
 * into both the export's med_clients list and the fsdb's fsdb_clients
 * list.  A non-IR-capable client degrades IR_FULL to IR_PARTIAL.  If the
 * record already existed, the pre-allocated one is released at the end.
 * NOTE(review): the listing elides the fsdb/rc declarations, allocation-
 * failure branch, the "found" handling inside the loop, and the return. */
835 int mgs_fsc_attach(struct obd_export *exp, char *fsname)
837 struct mgs_export_data *data = &exp->u.eu_mgs_data;
838 struct obd_device *obd = exp->exp_obd;
840 struct mgs_fsc *fsc = NULL;
841 struct mgs_fsc *new_fsc = NULL;
846 rc = mgs_find_or_make_fsdb(obd, fsname, &fsdb);
850 /* allocate a new fsc in case we need it in spinlock. */
851 OBD_ALLOC_PTR(new_fsc);
855 CFS_INIT_LIST_HEAD(&new_fsc->mfc_export_list);
856 CFS_INIT_LIST_HEAD(&new_fsc->mfc_fsdb_list);
857 new_fsc->mfc_fsdb = fsdb;
/* hold a reference on the export for the record's lifetime */
858 new_fsc->mfc_export = class_export_get(exp);
859 new_fsc->mfc_ir_capable =
860 !!(exp->exp_connect_flags & OBD_CONNECT_IMP_RECOV);
863 cfs_down(&fsdb->fsdb_sem);
865 /* tend to find it in export list because this list is shorter. */
866 cfs_spin_lock(&data->med_lock);
867 cfs_list_for_each_entry(fsc, &data->med_clients, mfc_export_list) {
868 if (strcmp(fsname, fsc->mfc_fsdb->fsdb_name) == 0) {
877 /* add it into export list. */
878 cfs_list_add(&fsc->mfc_export_list, &data->med_clients);
880 /* add into fsdb list. */
881 cfs_list_add(&fsc->mfc_fsdb_list, &fsdb->fsdb_clients);
882 if (!fsc->mfc_ir_capable) {
/* one non-IR client is enough to make IR only partial */
883 ++fsdb->fsdb_nonir_clients;
884 if (fsdb->fsdb_ir_state == IR_FULL)
885 fsdb->fsdb_ir_state = IR_PARTIAL;
889 cfs_spin_unlock(&data->med_lock);
890 cfs_up(&fsdb->fsdb_sem);
/* record already existed: release the unused pre-allocation */
893 class_export_put(new_fsc->mfc_export);
894 OBD_FREE_PTR(new_fsc);
/* Detach and free every mgs_fsc attached to @exp (export teardown path):
 * splice the export's client list out under med_lock, then for each record
 * unlink it from its fsdb under fsdb_sem, re-evaluate the IR state for
 * departing non-IR clients, and drop the export reference.
 * NOTE(review): the 'head' list declaration and the per-record free are
 * elided from this listing. */
899 void mgs_fsc_cleanup(struct obd_export *exp)
901 struct mgs_export_data *data = &exp->u.eu_mgs_data;
902 struct mgs_fsc *fsc, *tmp;
/* empty med_clients atomically; records are processed lock-free after */
905 cfs_spin_lock(&data->med_lock);
906 cfs_list_splice_init(&data->med_clients, &head);
907 cfs_spin_unlock(&data->med_lock);
909 cfs_list_for_each_entry_safe(fsc, tmp, &head, mfc_export_list) {
910 struct fs_db *fsdb = fsc->mfc_fsdb;
912 LASSERT(fsc->mfc_export == exp);
914 cfs_down(&fsdb->fsdb_sem);
915 cfs_list_del_init(&fsc->mfc_fsdb_list);
916 if (fsc->mfc_ir_capable == 0) {
/* last non-IR client leaving lets PARTIAL return to FULL */
917 --fsdb->fsdb_nonir_clients;
918 LASSERT(fsdb->fsdb_ir_state != IR_FULL);
919 if (fsdb->fsdb_nonir_clients == 0 &&
920 fsdb->fsdb_ir_state == IR_PARTIAL)
921 fsdb->fsdb_ir_state = IR_FULL;
923 cfs_up(&fsdb->fsdb_sem);
924 cfs_list_del_init(&fsc->mfc_export_list);
/* drop the reference taken in mgs_fsc_attach() */
925 class_export_put(fsc->mfc_export);
930 /* must be called with fsdb->fsdb_sem held */
931 void mgs_fsc_cleanup_by_fsdb(struct fs_db *fsdb)
933 struct mgs_fsc *fsc, *tmp;
935 cfs_list_for_each_entry_safe(fsc, tmp, &fsdb->fsdb_clients,
937 struct mgs_export_data *data = &fsc->mfc_export->u.eu_mgs_data;
939 LASSERT(fsdb == fsc->mfc_fsdb);
940 cfs_list_del_init(&fsc->mfc_fsdb_list);
942 cfs_spin_lock(&data->med_lock);
943 cfs_list_del_init(&fsc->mfc_export_list);
944 cfs_spin_unlock(&data->med_lock);
945 class_export_put(fsc->mfc_export);
949 fsdb->fsdb_nonir_clients = 0;
950 if (fsdb->fsdb_ir_state == IR_PARTIAL)
951 fsdb->fsdb_ir_state = IR_FULL;