4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, Whamcloud, Inc.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/mgs/mgs_nids.c
38 * NID table management for lustre.
40 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
43 #define DEBUG_SUBSYSTEM S_MGS
44 #define D_MGS D_CONFIG
47 #include <linux/pagemap.h>
51 #include <obd_class.h>
52 #include <lustre_disk.h>
54 #include "mgs_internal.h"
/* Imperative Recovery startup grace period in seconds.  Stays 0 until
 * mgs_ir_init_fs() sets it to OBD_IR_MGS_TIMEOUT; also readable/writable
 * through lprocfs_rd_ir_timeout()/lprocfs_wr_ir_timeout() below. */
56 static unsigned int ir_timeout;
/* Debug-only invariant check: targets on tbl->mn_targets must appear in
 * strictly increasing mnt_version order (version 0 marks an invalid,
 * not-yet-versioned entry and is skipped).  Caller must hold tbl->mn_lock.
 * NOTE(review): returns a boolean; presumably 0 when an ordering violation
 * is found — confirm against the full source. */
58 static int nidtbl_is_sane(struct mgs_nidtbl *tbl)
60 struct mgs_nidtbl_target *tgt;
63 LASSERT(cfs_mutex_is_locked(&tbl->mn_lock));
64 cfs_list_for_each_entry(tgt, &tbl->mn_targets, mnt_list) {
/* version 0 == invalid entry, ignore it for ordering purposes */
65 if (!tgt->mnt_version)
/* a non-increasing version means the list ordering is broken */
68 if (version >= tgt->mnt_version)
71 version = tgt->mnt_version;
77 * Fetch nidtbl entries whose versions are not less than @version
78 * nidtbl entries will be packed in @pages by @unit_size units - entries
79 * shouldn't cross unit boundaries.
/*
 * Pack nidtbl entries with version >= res->mcr_offset into @pages for a
 * bulk reply to a client/MDT fetching recovery (IR) logs.
 *
 * Entries are packed in @unit_size chunks (unit_size is a power of two,
 * at most one page) and never cross a unit boundary; when an entry does
 * not fit in the current unit, the previous entry's mne_length is widened
 * to consume the leftover bytes so the reader can skip them.
 *
 * \param exp         client export; exp_need_mne_swab enables compat
 *                    swabbing of entries for pre-2.6.50 (2.2) clients
 *                    (see LU-1644 comment below)
 * \param tbl         per-filesystem NID table, read under tbl->mn_lock
 * \param res         in: mcr_offset is the first version wanted;
 *                    out: mcr_size is the current table version and
 *                    mcr_offset the continuation point for the next read
 * \param pages       page array filled lazily as units are consumed
 * \param nrpages     number of slots in @pages
 * \param units_total total units the client-side buffer can hold
 * \param unit_size   unit granularity in bytes
 * \retval            rc: bytes packed on success, negative errno on error
 */
81 static int mgs_nidtbl_read(struct obd_export *exp, struct mgs_nidtbl *tbl,
82 struct mgs_config_res *res, cfs_page_t **pages,
83 int nrpages, int units_total, int unit_size)
85 struct mgs_nidtbl_target *tgt;
86 struct mgs_nidtbl_entry *entry;
87 struct mgs_nidtbl_entry *last_in_unit = NULL;
88 struct mgs_target_info *mti;
89 __u64 version = res->mcr_offset;
92 int bytes_in_unit = 0;
93 int units_in_page = 0;
98 /* make sure unit_size is power 2 */
99 LASSERT((unit_size & (unit_size - 1)) == 0);
100 LASSERT(nrpages << CFS_PAGE_SHIFT >= units_total * unit_size);
102 cfs_mutex_lock(&tbl->mn_lock);
103 LASSERT(nidtbl_is_sane(tbl));
105 /* no more entries ? */
106 if (version > tbl->mn_version) {
107 version = tbl->mn_version;
111 /* iterate over all targets to compose a bitmap by the type of llog.
112 * If the llog is for MDTs, llog entries for OSTs will be returned;
113 * otherwise, it's for clients, then llog entries for both OSTs and
114 * MDTs will be returned.
116 cfs_list_for_each_entry(tgt, &tbl->mn_targets, mnt_list) {
/* fixed header plus one lnet_nid_t per NID of the target */
117 int entry_len = sizeof(*entry);
/* older than what the client already has: skip */
119 if (tgt->mnt_version < version)
122 /* write target recover information */
124 LASSERT(mti->mti_nid_count < MTI_NIDS_MAX);
125 entry_len += mti->mti_nid_count * sizeof(lnet_nid_t);
/* a single entry must fit in one unit by design */
127 if (entry_len > unit_size) {
128 CWARN("nidtbl: too large entry: entry length %d,"
129 "unit size: %d\n", entry_len, unit_size);
130 GOTO(out, rc = -EOVERFLOW);
/* current unit exhausted: pad it out and open a new one */
133 if (bytes_in_unit < entry_len) {
134 if (units_total == 0) {
139 /* check if we need to consume remaining bytes. */
140 if (last_in_unit != NULL && bytes_in_unit) {
141 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 6, 50, 0)
142 /* May need to swab back to update the length.*/
143 if (exp->exp_need_mne_swab)
144 lustre_swab_mgs_nidtbl_entry(last_in_unit);
/* widen previous entry to cover the unit's tail padding */
146 last_in_unit->mne_length += bytes_in_unit;
147 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 6, 50, 0)
148 if (exp->exp_need_mne_swab)
149 lustre_swab_mgs_nidtbl_entry(last_in_unit);
152 buf += bytes_in_unit;
/* total bytes packed so far stays unit-aligned */
155 LASSERT((rc & (unit_size - 1)) == 0);
156 if (units_in_page == 0) {
158 /* allocate a new page */
159 pages[index] = cfs_alloc_page(CFS_ALLOC_STD);
160 if (pages[index] == NULL) {
165 /* destroy previous map */
167 cfs_kunmap(pages[index - 1]);
169 /* reassign buffer */
170 buf = cfs_kmap(pages[index]);
173 units_in_page = CFS_PAGE_SIZE / unit_size;
174 LASSERT(units_in_page > 0);
177 /* allocate an unit */
178 LASSERT(((long)buf & (unit_size - 1)) == 0);
179 bytes_in_unit = unit_size;
185 entry = (struct mgs_nidtbl_entry *)buf;
186 entry->mne_version = tgt->mnt_version;
187 entry->mne_instance = mti->mti_instance;
188 entry->mne_index = mti->mti_stripe_index;
189 entry->mne_length = entry_len;
190 entry->mne_type = tgt->mnt_type;
191 entry->mne_nid_type = 0;
192 entry->mne_nid_size = sizeof(lnet_nid_t);
193 entry->mne_nid_count = mti->mti_nid_count;
194 memcpy(entry->u.nids, mti->mti_nids,
195 mti->mti_nid_count * sizeof(lnet_nid_t));
197 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 6, 50, 0)
198 /* For LU-1644, swab entry for 2.2 clients. */
199 if (exp->exp_need_mne_swab)
200 lustre_swab_mgs_nidtbl_entry(entry);
203 version = tgt->mnt_version;
207 bytes_in_unit -= entry_len;
208 last_in_unit = entry;
210 CDEBUG(D_MGS, "fsname %s, entry size %d, pages %d/%d/%d/%d.\n",
211 tbl->mn_fsdb->fsdb_name, entry_len,
212 bytes_in_unit, index, nrpages, units_total);
/* unmap the last mapped page before returning */
215 cfs_kunmap(pages[index - 1]);
217 LASSERT(version <= tbl->mn_version);
218 res->mcr_size = tbl->mn_version;
/* NOTE(review): nobuf presumably set when the buffer filled before all
 * entries were packed; then the client must resume from @version —
 * confirm against the elided lines of the full source. */
219 res->mcr_offset = nobuf ? version : tbl->mn_version;
220 cfs_mutex_unlock(&tbl->mn_lock);
221 LASSERT(ergo(version == 1, rc == 0)); /* get the log first time */
223 CDEBUG(D_MGS, "Read IR logs %s return with %d, version %llu\n",
224 tbl->mn_fsdb->fsdb_name, rc, version);
/* Persist tbl->mn_version (little-endian __u64) to the per-filesystem
 * version file (named after fsdb_name) under mgs->mgs_nidtbl_dir, using a
 * synchronous local transaction so the version survives an MGS crash.
 * Caller must hold tbl->mn_lock.  Returns 0 or negative errno. */
228 static int nidtbl_update_version(const struct lu_env *env,
229 struct mgs_device *mgs,
230 struct mgs_nidtbl *tbl)
232 struct dt_object *fsdb;
235 struct lu_buf buf = {
237 .lb_len = sizeof(version)
243 LASSERT(cfs_mutex_is_locked(&tbl->mn_lock));
/* create the version file on first use; mode 0644 */
245 fsdb = local_file_find_or_create(env, mgs->mgs_los, mgs->mgs_nidtbl_dir,
246 tbl->mn_fsdb->fsdb_name,
247 S_IFREG | S_IRUGO | S_IWUSR);
249 RETURN(PTR_ERR(fsdb));
251 th = dt_trans_create(env, mgs->mgs_bottom);
253 GOTO(out_put, rc = PTR_ERR(th));
255 th->th_sync = 1; /* update table synchronously */
256 rc = dt_declare_record_write(env, fsdb, buf.lb_len, off, th);
260 rc = dt_trans_start_local(env, mgs->mgs_bottom, th);
/* store on disk in little-endian, independent of host byte order */
264 version = cpu_to_le64(tbl->mn_version);
265 rc = dt_record_write(env, fsdb, &buf, &off, th);
268 dt_trans_stop(env, mgs->mgs_bottom, th);
270 lu_object_put(env, &fsdb->do_lu);
/* Initial nidtbl version for a filesystem with no persisted version file.
 * NOTE(review): starts at 2, apparently so that version 1 can mean "client
 * fetching the log for the first time" (see the ergo LASSERT in
 * mgs_nidtbl_read) — confirm. */
274 #define MGS_NIDTBL_VERSION_INIT 2
/* Read the persisted nidtbl version for tbl's filesystem into *version.
 * Falls back to MGS_NIDTBL_VERSION_INIT when the version file does not
 * exist or is empty.  Caller must hold tbl->mn_lock.  Returns 0 or
 * negative errno. */
276 static int nidtbl_read_version(const struct lu_env *env,
277 struct mgs_device *mgs, struct mgs_nidtbl *tbl,
280 struct dt_object *fsdb;
283 struct lu_buf buf = {
285 .lb_len = sizeof(tmpver)
291 LASSERT(cfs_mutex_is_locked(&tbl->mn_lock));
293 LASSERT(mgs->mgs_nidtbl_dir);
294 rc = dt_lookup_dir(env, mgs->mgs_nidtbl_dir, tbl->mn_fsdb->fsdb_name,
/* version file not found: fresh filesystem, use the initial version */
297 *version = MGS_NIDTBL_VERSION_INIT;
303 fsdb = dt_locate_at(env, mgs->mgs_bottom, &fid,
304 &mgs->mgs_dt_dev.dd_lu_dev);
306 RETURN(PTR_ERR(fsdb));
308 rc = dt_read(env, fsdb, &buf, &off);
309 if (rc == buf.lb_len) {
/* full record read: convert from on-disk little-endian */
310 *version = le64_to_cpu(tmpver);
312 } else if (rc == 0) {
/* empty file: treat like a missing one */
313 *version = MGS_NIDTBL_VERSION_INIT;
315 CERROR("%s: read version file %s error %d\n",
316 mgs->mgs_obd->obd_name, tbl->mn_fsdb->fsdb_name, rc);
318 lu_object_put(env, &fsdb->do_lu);
/* Record (or refresh) a target's NIDs in the filesystem's NID table:
 * find the existing entry by (type, stripe index) or allocate a new one,
 * assign it the next table version, move it to the list tail (keeping
 * mn_targets sorted by version), and persist the new version to disk.
 * Returns 0 or negative errno. */
322 static int mgs_nidtbl_write(const struct lu_env *env, struct fs_db *fsdb,
323 struct mgs_target_info *mti)
325 struct mgs_nidtbl *tbl;
326 struct mgs_nidtbl_target *tgt;
328 int type = mti->mti_flags & LDD_F_SV_TYPE_MASK;
/* the MGS bit is irrelevant for nidtbl bookkeeping */
332 type &= ~LDD_F_SV_TYPE_MGS;
335 tbl = &fsdb->fsdb_nidtbl;
336 cfs_mutex_lock(&tbl->mn_lock);
337 cfs_list_for_each_entry(tgt, &tbl->mn_targets, mnt_list) {
338 struct mgs_target_info *info = &tgt->mnt_mti;
/* a target is identified by its service type and stripe index */
339 if (type == tgt->mnt_type &&
340 mti->mti_stripe_index == info->mti_stripe_index) {
348 GOTO(out, rc = -ENOMEM);
350 CFS_INIT_LIST_HEAD(&tgt->mnt_list);
352 tgt->mnt_version = 0; /* 0 means invalid */
353 tgt->mnt_type = type;
355 ++tbl->mn_nr_targets;
/* bump the table version and stamp this target with it */
358 tgt->mnt_version = ++tbl->mn_version;
/* tail position keeps the list sorted by increasing version */
361 cfs_list_move_tail(&tgt->mnt_list, &tbl->mn_targets);
363 rc = nidtbl_update_version(env, fsdb->fsdb_mgs, tbl);
367 cfs_mutex_unlock(&tbl->mn_lock);
369 CERROR("Write NID table version for file system %s error %d\n",
370 fsdb->fsdb_name, rc);
/* Tear down a filesystem's NID table: detach all targets onto a private
 * list under mn_lock, then free them outside the lock (avoids holding the
 * mutex across the frees). */
374 static void mgs_nidtbl_fini_fs(struct fs_db *fsdb)
376 struct mgs_nidtbl *tbl = &fsdb->fsdb_nidtbl;
379 cfs_mutex_lock(&tbl->mn_lock);
380 tbl->mn_nr_targets = 0;
381 cfs_list_splice_init(&tbl->mn_targets, &head);
382 cfs_mutex_unlock(&tbl->mn_lock);
384 while (!cfs_list_empty(&head)) {
385 struct mgs_nidtbl_target *tgt;
386 tgt = list_entry(head.next, struct mgs_nidtbl_target, mnt_list);
387 cfs_list_del(&tgt->mnt_list);
/* Initialize a filesystem's NID table: empty target list, fresh mutex,
 * and mn_version restored from the persisted version file (or the initial
 * value if none exists).  Returns 0 or negative errno from the read. */
392 static int mgs_nidtbl_init_fs(const struct lu_env *env, struct fs_db *fsdb)
394 struct mgs_nidtbl *tbl = &fsdb->fsdb_nidtbl;
397 CFS_INIT_LIST_HEAD(&tbl->mn_targets);
398 cfs_mutex_init(&tbl->mn_lock);
399 tbl->mn_nr_targets = 0;
401 cfs_mutex_lock(&tbl->mn_lock);
402 rc = nidtbl_read_version(env, fsdb->fsdb_mgs, tbl, &tbl->mn_version);
403 cfs_mutex_unlock(&tbl->mn_lock);
405 CERROR("%s: IR: failed to read current version, rc = %d\n",
406 fsdb->fsdb_mgs->mgs_obd->obd_name, rc);
408 CDEBUG(D_MGS, "IR: current version is %llu\n",
414 /* --------- Imperative Recovery relies on nidtbl stuff ------- */
/* Called when a recover-lock revocation round finishes: clear the pending
 * notify phase and fold the elapsed time into the per-fs notify
 * statistics (count, total, max) shown via lprocfs_rd_ir_state(). */
415 void mgs_ir_notify_complete(struct fs_db *fsdb)
418 cfs_duration_t delta;
420 cfs_atomic_set(&fsdb->fsdb_notify_phase, 0);
/* statistics */
423 fsdb->fsdb_notify_count++;
424 delta = cfs_time_sub(cfs_time_current(), fsdb->fsdb_notify_start);
425 fsdb->fsdb_notify_total += delta;
426 if (delta > fsdb->fsdb_notify_max)
427 fsdb->fsdb_notify_max = delta;
429 cfs_duration_usec(delta, &tv);
430 CDEBUG(D_MGS, "Revoke recover lock of %s completed after %ld.%06lds\n",
431 fsdb->fsdb_name, tv.tv_sec, tv.tv_usec);
/* Per-filesystem notify kernel thread ("mgs_<fsname>_notify"): waits for
 * fsdb_notify_phase to be raised by mgs_ir_update(), then revokes the
 * CONFIG_T_RECOVER lock so clients refetch the NID table.  Exits when
 * fsdb_notify_stop is set by mgs_ir_fini_fs(); completion is signalled
 * both at startup and at exit via fsdb_notify_comp. */
434 static int mgs_ir_notify(void *arg)
436 struct fs_db *fsdb = arg;
437 struct ldlm_res_id resid;
439 char name[sizeof(fsdb->fsdb_name) + 20];
/* name[] lives on the kernel stack, so keep it small */
441 LASSERTF(sizeof(name) < 32, "name is too large to be in stack.\n");
442 sprintf(name, "mgs_%s_notify", fsdb->fsdb_name);
/* tell the spawner that the thread is up and running */
445 cfs_complete(&fsdb->fsdb_notify_comp);
447 set_user_nice(current, -2);
449 mgc_fsname2resid(fsdb->fsdb_name, &resid, CONFIG_T_RECOVER);
451 struct l_wait_info lwi = { 0 };
453 l_wait_event(fsdb->fsdb_notify_waitq,
454 fsdb->fsdb_notify_stop ||
455 cfs_atomic_read(&fsdb->fsdb_notify_phase),
457 if (fsdb->fsdb_notify_stop)
460 CDEBUG(D_MGS, "%s woken up, phase is %d\n",
461 name, cfs_atomic_read(&fsdb->fsdb_notify_phase));
/* timestamp used by mgs_ir_notify_complete() for the duration stats */
463 fsdb->fsdb_notify_start = cfs_time_current();
464 mgs_revoke_lock(fsdb->fsdb_mgs, fsdb, CONFIG_T_RECOVER);
/* signal mgs_ir_fini_fs() that the thread has exited */
467 cfs_complete(&fsdb->fsdb_notify_comp);
/* Set up Imperative Recovery state for one filesystem: choose the initial
 * IR state (IR_STARTUP inside the post-MGS-start grace window, IR_FULL
 * otherwise), init the non-IR client bookkeeping, spawn the notify
 * thread, and initialize the NID table.  Returns 0 or negative errno. */
471 int mgs_ir_init_fs(const struct lu_env *env, struct mgs_device *mgs,
/* lazy one-time init of the module-wide grace period */
477 ir_timeout = OBD_IR_MGS_TIMEOUT;
479 fsdb->fsdb_ir_state = IR_FULL;
480 if (cfs_time_before(cfs_time_current_sec(),
481 mgs->mgs_start_time + ir_timeout))
482 fsdb->fsdb_ir_state = IR_STARTUP;
483 fsdb->fsdb_nonir_clients = 0;
484 CFS_INIT_LIST_HEAD(&fsdb->fsdb_clients);
486 /* start notify thread */
487 fsdb->fsdb_mgs = mgs;
488 cfs_atomic_set(&fsdb->fsdb_notify_phase, 0);
489 cfs_waitq_init(&fsdb->fsdb_notify_waitq);
490 cfs_init_completion(&fsdb->fsdb_notify_comp);
491 rc = cfs_create_thread(mgs_ir_notify, fsdb, CFS_DAEMON_FLAGS);
/* wait until mgs_ir_notify() confirms it is running */
493 cfs_wait_for_completion(&fsdb->fsdb_notify_comp);
495 CERROR("Start notify thread error %d\n", rc);
497 mgs_nidtbl_init_fs(env, fsdb);
/* Tear down IR state for one filesystem: skip the MGS's own fsdb, detach
 * all client records, free the NID table, then stop the notify thread and
 * wait for it to exit. */
501 void mgs_ir_fini_fs(struct mgs_device *mgs, struct fs_db *fsdb)
503 if (cfs_test_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags))
506 mgs_fsc_cleanup_by_fsdb(fsdb);
508 mgs_nidtbl_fini_fs(fsdb);
510 LASSERT(cfs_list_empty(&fsdb->fsdb_clients));
/* wake the notify thread and wait for it to signal its exit */
512 fsdb->fsdb_notify_stop = 1;
513 cfs_waitq_signal(&fsdb->fsdb_notify_waitq);
514 cfs_wait_for_completion(&fsdb->fsdb_notify_comp);
517 /* caller must have held fsdb_mutex */
/* Promote a filesystem out of IR_STARTUP once the post-start grace period
 * (ir_timeout seconds after mgs_start_time) has elapsed: to IR_FULL, or
 * IR_PARTIAL if any non-IR-capable clients are connected. */
518 static inline void ir_state_graduate(struct fs_db *fsdb)
520 if (fsdb->fsdb_ir_state == IR_STARTUP) {
521 if (cfs_time_before(fsdb->fsdb_mgs->mgs_start_time + ir_timeout,
522 cfs_time_current_sec())) {
523 fsdb->fsdb_ir_state = IR_FULL;
524 if (fsdb->fsdb_nonir_clients)
525 fsdb->fsdb_ir_state = IR_PARTIAL;
/* Handle a target registration/update for IR: record the target's NIDs in
 * the nidtbl, mark the target IR-capable when the fs is in a capable
 * state, and kick the notify thread to revoke the recover lock so clients
 * refetch the table.  Returns 0 or negative errno. */
530 int mgs_ir_update(const struct lu_env *env, struct mgs_device *mgs,
531 struct mgs_target_info *mti)
/* instance 0 is not a valid target registration */
537 if (mti->mti_instance == 0)
540 rc = mgs_find_or_make_fsdb(env, mgs, mti->mti_fsname, &fsdb);
544 rc = mgs_nidtbl_write(env, fsdb, mti);
/* check ir state */
549 cfs_mutex_lock(&fsdb->fsdb_mutex);
550 ir_state_graduate(fsdb);
551 switch (fsdb->fsdb_ir_state) {
553 mti->mti_flags |= LDD_F_IR_CAPABLE;
563 cfs_mutex_unlock(&fsdb->fsdb_mutex);
565 LASSERT(ergo(mti->mti_flags & LDD_F_IR_CAPABLE, notify));
567 CDEBUG(D_MGS, "Try to revoke recover lock of %s\n",
/* raise the phase and wake mgs_ir_notify() to do the revocation */
569 cfs_atomic_inc(&fsdb->fsdb_notify_phase);
570 cfs_waitq_signal(&fsdb->fsdb_notify_waitq);
575 /* NID table can be cached by two entities: Clients and MDTs */
/* Split an IR llog name of the form "<fsname>-mdtir" or "<fsname>-cliir"
 * into its filesystem name (copied into @fsname) and cache type (*typ).
 * NOTE(review): comment at original line 592 says prefix-fsname, but the
 * code parses a "-mdtir"/"-cliir" suffix via strrchr — the suffix form is
 * what is actually matched.  Returns 0 or negative errno. */
581 static int delogname(char *logname, char *fsname, int *typ)
587 ptr = strrchr(logname, '-');
591 /* decouple file system name. The llog name may be:
592 * - "prefix-fsname", prefix is "cliir" or "mdtir"
594 if (strncmp(ptr, "-mdtir", 6) == 0)
596 else if (strncmp(ptr, "-cliir", 6) == 0)
605 memcpy(fsname, logname, len);
/* MGS request handler: serve an IR (recovery) log to a client or MDT.
 * Parses the requested log name, reads the matching nidtbl entries into a
 * freshly allocated page array sized from the client's buffer geometry
 * (mcb_units << mcb_bits), and bulk-transfers the packed pages back.
 * All pages are freed before return.  Returns 0 or negative errno. */
612 int mgs_get_ir_logs(struct ptlrpc_request *req)
614 struct lu_env *env = req->rq_svc_thread->t_env;
615 struct mgs_device *mgs = exp2mgs_dev(req->rq_export);
617 struct mgs_config_body *body;
618 struct mgs_config_res *res;
619 struct ptlrpc_bulk_desc *desc;
620 struct l_wait_info lwi;
630 cfs_page_t **pages = NULL;
633 body = req_capsule_client_get(&req->rq_pill, &RMF_MGS_CONFIG_BODY);
/* only recovery-type config logs are served here */
637 if (body->mcb_type != CONFIG_T_RECOVER)
640 rc = delogname(body->mcb_name, fsname, &type);
644 rc = mgs_find_or_make_fsdb(env, mgs, fsname, &fsdb);
/* client buffer size determines how many pages we may fill */
648 bufsize = body->mcb_units << body->mcb_bits;
649 nrpages = (bufsize + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
650 if (nrpages > PTLRPC_MAX_BRW_PAGES)
653 CDEBUG(D_MGS, "Reading IR log %s bufsize %ld.\n",
654 body->mcb_name, bufsize);
656 OBD_ALLOC(pages, sizeof(*pages) * nrpages);
660 rc = req_capsule_server_pack(&req->rq_pill);
664 res = req_capsule_server_get(&req->rq_pill, &RMF_MGS_CONFIG_RES);
666 GOTO(out, rc = -EINVAL);
668 res->mcr_offset = body->mcb_offset;
/* unit size: client's 2^mcb_bits, capped at one page */
669 unit_size = min_t(int, 1 << body->mcb_bits, CFS_PAGE_SIZE);
670 bytes = mgs_nidtbl_read(req->rq_export, &fsdb->fsdb_nidtbl, res,
671 pages, nrpages, bufsize / unit_size, unit_size);
673 GOTO(out, rc = bytes);
675 /* start bulk transfer */
676 page_count = (bytes + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
677 LASSERT(page_count <= nrpages);
678 desc = ptlrpc_prep_bulk_exp(req, page_count,
679 BULK_PUT_SOURCE, MGS_BULK_PORTAL);
681 GOTO(out, rc = -ENOMEM);
/* last page may be partial: clamp its length to the bytes left */
683 for (i = 0; i < page_count && bytes > 0; i++) {
684 ptlrpc_prep_bulk_page_pin(desc, pages[i], 0,
685 min_t(int, bytes, CFS_PAGE_SIZE));
686 bytes -= CFS_PAGE_SIZE;
689 rc = target_bulk_io(req->rq_export, desc, &lwi);
690 ptlrpc_free_bulk_pin(desc);
/* free every page that was actually allocated (array may be sparse) */
693 for (i = 0; i < nrpages; i++) {
694 if (pages[i] == NULL)
696 cfs_free_page(pages[i]);
698 OBD_FREE(pages, sizeof(*pages) * nrpages);
/* lprocfs "state=" handler: map the state name in @buf to an IR state
 * index and apply it under fsdb_mutex.  A request for IR_FULL while
 * non-IR clients are connected is presumably downgraded/rejected (the
 * branch body is elided here — confirm against full source). */
702 static int lprocfs_ir_set_state(struct fs_db *fsdb, const char *buf)
704 const char *strings[] = IR_STRINGS;
708 for (i = 0; i < ARRAY_SIZE(strings); i++) {
709 if (strcmp(strings[i], buf) == 0) {
717 CDEBUG(D_MGS, "change fsr state of %s from %s to %s\n",
718 fsdb->fsdb_name, strings[fsdb->fsdb_ir_state], strings[state]);
719 cfs_mutex_lock(&fsdb->fsdb_mutex);
720 if (state == IR_FULL && fsdb->fsdb_nonir_clients)
722 fsdb->fsdb_ir_state = state;
723 cfs_mutex_unlock(&fsdb->fsdb_mutex);
/* lprocfs "timeout=" handler; body elided from this view — presumably
 * updates ir_timeout from @buf.  TODO(review): confirm against full
 * source. */
728 static int lprocfs_ir_set_timeout(struct fs_db *fsdb, const char *buf)
/* lprocfs "0" handler: reset the notify statistics reported by
 * lprocfs_rd_ir_state(). */
733 static int lprocfs_ir_clear_stats(struct fs_db *fsdb, const char *buf)
738 fsdb->fsdb_notify_total = 0;
739 fsdb->fsdb_notify_max = 0;
740 fsdb->fsdb_notify_count = 0;
/* Command table for lprocfs_wr_ir_state(): each entry maps a command
 * prefix (and its length) to a handler that receives the text after the
 * prefix. */
744 static struct lproc_ir_cmd {
747 int (*handler)(struct fs_db *, const char *);
749 { "state=", 6, lprocfs_ir_set_state },
750 { "timeout=", 8, lprocfs_ir_set_timeout },
751 { "0", 1, lprocfs_ir_clear_stats }
/* lprocfs write handler for per-fs IR control: copy the user buffer into
 * a kernel buffer, split it on ';' and dispatch each token to the first
 * matching entry of ir_cmds[] (see table above).
 *
 * FIX: the copy_from_user() failure path freed the buffer with size
 * `count` although it was allocated with `count + 1`; OBD_FREE must be
 * given the exact allocation size or Lustre's memory accounting is
 * corrupted (the success path at the bottom already frees count + 1). */
754 int lprocfs_wr_ir_state(struct file *file, const char *buffer,
755 unsigned long count, void *data)
757 struct fs_db *fsdb = data;
762 if (count > CFS_PAGE_SIZE)
765 OBD_ALLOC(kbuf, count + 1);
769 if (copy_from_user(kbuf, buffer, count)) {
770 OBD_FREE(kbuf, count + 1);
774 kbuf[count] = 0; /* buffer is supposed to end with 0 */
/* strip a single trailing newline from echo-style writes */
775 if (kbuf[count - 1] == '\n')
779 /* fsname=<file system name> must be the 1st entry */
780 while (ptr != NULL) {
/* commands are ';'-separated */
784 tmpptr = strchr(ptr, ';');
789 for (i = 0; i < ARRAY_SIZE(ir_cmds); i++) {
790 struct lproc_ir_cmd *cmd;
794 cmdlen = cmd->namelen;
795 if (strncmp(cmd->name, ptr, cmdlen) == 0) {
/* handler gets the text after the matched prefix */
797 rc = cmd->handler(fsdb, ptr);
807 CERROR("Unable to process command: %s(%d)\n", ptr, rc);
808 OBD_FREE(kbuf, count + 1);
/* lprocfs read handler: print the filesystem's IR state, non-IR client
 * count, nidtbl version, and notify-thread timing statistics into @seq. */
812 int lprocfs_rd_ir_state(struct seq_file *seq, void *data)
814 struct fs_db *fsdb = data;
815 struct mgs_nidtbl *tbl = &fsdb->fsdb_nidtbl;
816 const char *ir_strings[] = IR_STRINGS;
817 struct timeval tv_max;
820 /* mgs_live_seq_show() already holds fsdb_mutex. */
821 ir_state_graduate(fsdb);
823 seq_printf(seq, "\nimperative_recovery_state:\n");
826 " nonir_clients: %d\n"
827 " nidtbl_version: %lld\n",
828 ir_strings[fsdb->fsdb_ir_state], fsdb->fsdb_nonir_clients,
831 cfs_duration_usec(fsdb->fsdb_notify_total, &tv);
832 cfs_duration_usec(fsdb->fsdb_notify_max, &tv_max);
834 seq_printf(seq, " notify_duration_total: %lu.%06lu\n"
/* NOTE(review): "duation" is a typo in the emitted label, but the proc
 * output may be parsed by tools, so it is deliberately left unchanged. */
835 " notify_duation_max: %lu.%06lu\n"
836 " notify_count: %u\n",
837 tv.tv_sec, tv.tv_usec,
838 tv_max.tv_sec, tv_max.tv_usec,
839 fsdb->fsdb_notify_count);
/* lprocfs read handler: report the global IR startup timeout (seconds). */
844 int lprocfs_rd_ir_timeout(char *page, char **start, off_t off, int count,
845 int *eof, void *data)
848 return snprintf(page, count, "%d\n", ir_timeout);
/* lprocfs write handler: update the global IR startup timeout via the
 * generic unsigned-int parser. */
851 int lprocfs_wr_ir_timeout(struct file *file, const char *buffer,
852 unsigned long count, void *data)
854 return lprocfs_wr_uint(file, buffer, count, &ir_timeout);
857 /* --------------- Handle non IR support clients --------------- */
858 /* attach a lustre file system to an export */
/* Attach a client export to a lustre file system for IR tracking: link a
 * mgs_fsc record into both the export's client list and the fsdb's client
 * list.  A non-IR-capable client bumps fsdb_nonir_clients and can demote
 * the fs from IR_FULL to IR_PARTIAL.  The record is preallocated before
 * taking med_lock (a spinlock) since allocation may sleep; if the export
 * already has a record for this fs the preallocation is released.
 * Returns 0 or negative errno. */
859 int mgs_fsc_attach(const struct lu_env *env, struct obd_export *exp,
862 struct mgs_export_data *data = &exp->u.eu_mgs_data;
863 struct mgs_device *mgs = exp2mgs_dev(exp);
865 struct mgs_fsc *fsc = NULL;
866 struct mgs_fsc *new_fsc = NULL;
871 rc = mgs_find_or_make_fsdb(env, mgs, fsname, &fsdb);
875 /* allocate a new fsc in case we need it in spinlock. */
876 OBD_ALLOC_PTR(new_fsc);
880 CFS_INIT_LIST_HEAD(&new_fsc->mfc_export_list);
881 CFS_INIT_LIST_HEAD(&new_fsc->mfc_fsdb_list);
882 new_fsc->mfc_fsdb = fsdb;
/* hold a reference on the export for the lifetime of the fsc */
883 new_fsc->mfc_export = class_export_get(exp);
884 new_fsc->mfc_ir_capable =
885 !!(exp->exp_connect_flags & OBD_CONNECT_IMP_RECOV);
888 cfs_mutex_lock(&fsdb->fsdb_mutex);
890 /* tend to find it in export list because this list is shorter. */
891 cfs_spin_lock(&data->med_lock);
892 cfs_list_for_each_entry(fsc, &data->med_clients, mfc_export_list) {
893 if (strcmp(fsname, fsc->mfc_fsdb->fsdb_name) == 0) {
902 /* add it into export list. */
903 cfs_list_add(&fsc->mfc_export_list, &data->med_clients);
905 /* add into fsdb list. */
906 cfs_list_add(&fsc->mfc_fsdb_list, &fsdb->fsdb_clients);
907 if (!fsc->mfc_ir_capable) {
908 ++fsdb->fsdb_nonir_clients;
/* one non-IR client is enough to lose full IR capability */
909 if (fsdb->fsdb_ir_state == IR_FULL)
910 fsdb->fsdb_ir_state = IR_PARTIAL;
914 cfs_spin_unlock(&data->med_lock);
915 cfs_mutex_unlock(&fsdb->fsdb_mutex);
/* preallocated record not used: drop its export ref and free it */
918 class_export_put(new_fsc->mfc_export);
919 OBD_FREE_PTR(new_fsc);
/* Detach all per-fs client records from a disconnecting export: splice
 * the export's list out under med_lock, then for each record undo the
 * fsdb-side bookkeeping under fsdb_mutex (possibly restoring IR_FULL once
 * the last non-IR client is gone) and drop the export reference. */
926 void mgs_fsc_cleanup(struct obd_export *exp)
927 struct mgs_export_data *data = &exp->u.eu_mgs_data;
928 struct mgs_fsc *fsc, *tmp;
930 cfs_spin_lock(&data->med_lock);
931 cfs_list_splice_init(&data->med_clients, &head);
932 cfs_spin_unlock(&data->med_lock);
934 cfs_list_for_each_entry_safe(fsc, tmp, &head, mfc_export_list) {
935 struct fs_db *fsdb = fsc->mfc_fsdb;
937 LASSERT(fsc->mfc_export == exp);
939 cfs_mutex_lock(&fsdb->fsdb_mutex);
940 cfs_list_del_init(&fsc->mfc_fsdb_list);
941 if (fsc->mfc_ir_capable == 0) {
942 --fsdb->fsdb_nonir_clients;
943 LASSERT(fsdb->fsdb_ir_state != IR_FULL);
/* last non-IR client gone: the fs regains full IR capability */
944 if (fsdb->fsdb_nonir_clients == 0 &&
945 fsdb->fsdb_ir_state == IR_PARTIAL)
946 fsdb->fsdb_ir_state = IR_FULL;
948 cfs_mutex_unlock(&fsdb->fsdb_mutex);
949 cfs_list_del_init(&fsc->mfc_export_list);
950 class_export_put(fsc->mfc_export);
955 /* must be called with fsdb->fsdb_mutex held */
/* Detach every client record from a filesystem being torn down: remove
 * each record from both the fsdb list and its export's list (the latter
 * under that export's med_lock), drop the export references, then reset
 * the non-IR client count and restore IR_FULL if the fs was IR_PARTIAL. */
956 void mgs_fsc_cleanup_by_fsdb(struct fs_db *fsdb)
958 struct mgs_fsc *fsc, *tmp;
960 cfs_list_for_each_entry_safe(fsc, tmp, &fsdb->fsdb_clients,
962 struct mgs_export_data *data = &fsc->mfc_export->u.eu_mgs_data;
964 LASSERT(fsdb == fsc->mfc_fsdb);
965 cfs_list_del_init(&fsc->mfc_fsdb_list);
967 cfs_spin_lock(&data->med_lock);
968 cfs_list_del_init(&fsc->mfc_export_list);
969 cfs_spin_unlock(&data->med_lock);
970 class_export_put(fsc->mfc_export);
/* all clients are gone, so there can be no non-IR clients left */
974 fsdb->fsdb_nonir_clients = 0;
975 if (fsdb->fsdb_ir_state == IR_PARTIAL)
976 fsdb->fsdb_ir_state = IR_FULL;