1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/obdfilter/filter.c
38 * Author: Peter Braam <braam@clusterfs.com>
39 * Author: Andreas Dilger <adilger@clusterfs.com>
43 * Invariant: Get O/R i_mutex for lookup, if needed, before any journal ops
44 * (which need to get journal_lock, may block if journal full).
46 * Invariant: Call filter_start_transno() before any journal ops to avoid the
47 * same deadlock problem. We can (and want) to get rid of the
48 * transno sem in favour of the dir/inode i_mutex to avoid single
49 * threaded operation on the OST.
52 #define DEBUG_SUBSYSTEM S_FILTER
53 #ifndef AUTOCONF_INCLUDED
54 #include <linux/config.h>
56 #include <linux/module.h>
58 #include <linux/dcache.h>
59 #include <linux/init.h>
60 #include <linux/version.h>
61 #include <linux/sched.h>
62 #include <linux/mount.h>
63 #include <linux/buffer_head.h>
65 #include <obd_class.h>
67 #include <lustre_dlm.h>
68 #include <lustre_fsfilt.h>
69 #include <lprocfs_status.h>
70 #include <lustre_log.h>
71 #include <libcfs/list.h>
72 #include <lustre_disk.h>
73 #include <lustre_quota.h>
74 #include <linux/slab.h>
75 #include <lustre_param.h>
76 #include <lustre/ll_fiemap.h>
78 #include "filter_internal.h"
/* Callback table handed to the lvfs layer; defined later in this file. */
80 static struct lvfs_callback_ops filter_lvfs_ops;
/* Slab cache backing struct filter_mod_data entries (see filter_fmd_get /
 * filter_fmd_put_nolock below, which allocate/free from this cache). */
81 cfs_mem_cache_t *ll_fmd_cachep;
/* Journal commit callback: asserts the export still belongs to @obd,
 * notifies the target layer that @transno has committed for @exp, then
 * drops the export reference that was taken when the callback was
 * registered (see class_export_get() in filter_finish_transno). */
83 static void filter_commit_cb(struct obd_device *obd, __u64 transno,
84 void *cb_data, int error)
86 struct obd_export *exp = cb_data;
87 LASSERTF(exp->exp_obd == obd,
88 "%s: bad export (%p), obd (%p) != exp->exp_obd (%p)\n",
89 obd->obd_name, exp, obd, exp->exp_obd);
90 obd_transno_commit_cb(obd, transno, exp, error);
91 class_export_put(exp);
/* VBR (version-based recovery) check: compare the client's expected
 * pre-operation version (oti->oti_pre_version) with the inode's current
 * version.  On mismatch, mark the export's exp_vbr_failed flag under
 * exp_lock; the current version is stored back into oti_pre_version.
 * NOTE(review): return paths fall in lines elided from this excerpt. */
94 int filter_version_get_check(struct obd_export *exp,
95 struct obd_trans_info *oti, struct inode *inode)
99 if (inode == NULL || oti == NULL)
102 curr_version = fsfilt_get_version(exp->exp_obd, inode);
/* fsfilt backend may not support versions; -EOPNOTSUPP is tolerated */
103 if ((__s64)curr_version == -EOPNOTSUPP)
105 /* VBR: version is checked always because costs nothing */
106 if (oti->oti_pre_version != 0 &&
107 oti->oti_pre_version != curr_version) {
108 CDEBUG(D_INODE, "Version mismatch "LPX64" != "LPX64"\n",
109 oti->oti_pre_version, curr_version);
110 spin_lock(&exp->exp_lock);
111 exp->exp_vbr_failed = 1;
112 spin_unlock(&exp->exp_lock);
115 oti->oti_pre_version = curr_version;
119 /* Assumes caller has already pushed us into the kernel context. */
/* Assign (or accept a replayed) transaction number for this request,
 * record it in the per-client lsd_client_data slot of the last_rcvd
 * file, and schedule/execute a journal commit callback so the client is
 * told when the transno is durable.  All last_transno updates happen
 * under filter->fo_translock. */
120 int filter_finish_transno(struct obd_export *exp, struct inode *inode,
121 struct obd_trans_info *oti,
122 int rc, int force_sync)
124 struct filter_obd *filter = &exp->exp_obd->u.filter;
125 struct filter_export_data *fed = &exp->exp_filter_data;
126 struct lsd_client_data *lcd = fed->fed_lcd;
129 int err, log_pri = D_RPCTRACE;
131 /* Propagate error code. */
/* Non-replayable targets keep no transno state; nothing to record. */
135 if (!exp->exp_obd->obd_replayable || oti == NULL)
138 /* we don't allocate new transnos for replayed requests */
139 spin_lock(&filter->fo_translock);
140 if (oti->oti_transno == 0) {
/* new request: take the next server-wide transno */
141 last_rcvd = le64_to_cpu(filter->fo_fsd->lsd_last_transno) + 1;
142 filter->fo_fsd->lsd_last_transno = cpu_to_le64(last_rcvd);
/* replay: keep the client's transno, only advance the server high-water */
144 last_rcvd = oti->oti_transno;
145 if (last_rcvd > le64_to_cpu(filter->fo_fsd->lsd_last_transno))
146 filter->fo_fsd->lsd_last_transno = cpu_to_le64(last_rcvd);
148 oti->oti_transno = last_rcvd;
/* a transno at or below the client's recorded one needs no lcd update */
149 if (last_rcvd <= le64_to_cpu(lcd->lcd_last_transno)) {
150 spin_unlock(&filter->fo_translock);
153 lcd->lcd_last_transno = cpu_to_le64(last_rcvd);
154 lcd->lcd_pre_versions[0] = cpu_to_le64(oti->oti_pre_version);
155 lcd->lcd_last_xid = cpu_to_le64(oti->oti_xid);
156 target_trans_table_update(exp, last_rcvd);
158 spin_unlock(&filter->fo_translock);
/* VBR: stamp the object's inode with the transno as its version */
161 fsfilt_set_version(exp->exp_obd, inode, last_rcvd);
163 off = fed->fed_lr_off;
165 CERROR("%s: client idx %d is %lld\n", exp->exp_obd->obd_name,
166 fed->fed_lr_idx, fed->fed_lr_off);
169 class_export_get(exp); /* released when the cb is called */
/* presumably fsfilt_add_journal_cb returns nonzero to request a sync
 * write when no async commit cb could be registered — TODO confirm */
171 force_sync = fsfilt_add_journal_cb(exp->exp_obd,
177 err = fsfilt_write_record(exp->exp_obd, filter->fo_rcvd_filp,
178 lcd, sizeof(*lcd), &off,
179 force_sync | exp->exp_need_sync);
/* synchronous path: invoke the commit callback directly */
181 filter_commit_cb(exp->exp_obd, last_rcvd, exp, err);
189 CDEBUG(log_pri, "wrote trans "LPU64" for client %s at #%d: err = %d\n",
190 last_rcvd, lcd->lcd_uuid, fed->fed_lr_idx, err);
/* Debug-logging wrapper around dentry release: asserts the dentry is
 * still referenced before dropping.  The actual dput happens in lines
 * elided from this excerpt. */
195 void f_dput(struct dentry *dentry)
197 /* Can't go inside filter_ddelete because it can block */
198 CDEBUG(D_INODE, "putting %s: %p, count = %d\n",
199 dentry->d_name.name, dentry, atomic_read(&dentry->d_count) - 1);
200 LASSERT(atomic_read(&dentry->d_count) > 0);
/* Initialize the per-histogram spinlocks of a brw_stats structure. */
205 static void init_brw_stats(struct brw_stats *brw_stats)
208 for (i = 0; i < BRW_LAST; i++)
209 spin_lock_init(&brw_stats->hist[i].oh_lock);
/* Allocate and populate an lprocfs stats block covering all obd ops
 * plus the filter-specific read/write byte counters.  *stats receives
 * the allocated block; allocation-failure handling falls in lines
 * elided from this excerpt. */
212 static int lprocfs_init_rw_stats(struct obd_device *obd,
213 struct lprocfs_stats **stats)
/* one counter per obd_ops slot, plus the LPROC_FILTER_* extras */
217 num_stats = (sizeof(*obd->obd_type->typ_ops) / sizeof(void *)) +
218 LPROC_FILTER_LAST - 1;
219 *stats = lprocfs_alloc_stats(num_stats, LPROCFS_STATS_FLAG_NOPERCPU);
223 lprocfs_init_ops_stats(LPROC_FILTER_LAST, *stats);
224 lprocfs_counter_init(*stats, LPROC_FILTER_READ_BYTES,
225 LPROCFS_CNTR_AVGMINMAX, "read_bytes", "bytes");
226 lprocfs_counter_init(*stats, LPROC_FILTER_WRITE_BYTES,
227 LPROCFS_CNTR_AVGMINMAX, "write_bytes", "bytes");
232 /* brw_stats are 2128, ops are 3916, ldlm are 204, so 6248 bytes per client,
233 plus the procfs overhead :( */
/* Set up per-export (per-client-NID) procfs statistics: brw_stats
 * histogram file, rw op stats, and ldlm stats.  The self-export is
 * skipped.  Error unwinding falls in lines elided from this excerpt. */
234 static int filter_export_stats_init(struct obd_device *obd,
235 struct obd_export *exp,
239 struct proc_dir_entry *brw_entry;
243 if (obd_uuid_equals(&exp->exp_client_uuid, &obd->obd_uuid))
244 /* Self-export gets no proc entry */
246 rc = lprocfs_exp_setup(exp, (lnet_nid_t *)client_nid,
249 /* Mask error for already created
258 struct nid_stat *tmp = exp->exp_nid_stats;
259 LASSERT(tmp != NULL);
261 OBD_ALLOC(tmp->nid_brw_stats, sizeof(struct brw_stats));
262 if (tmp->nid_brw_stats == NULL)
265 init_brw_stats(tmp->nid_brw_stats);
267 brw_entry = create_proc_entry("brw_stats", 0644,
268 exp->exp_nid_stats->nid_proc);
269 if (brw_entry == NULL)
272 brw_entry->proc_fops = &filter_per_nid_stats_fops;
273 brw_entry->data = exp->exp_nid_stats;
275 rc = lprocfs_init_rw_stats(obd, &exp->exp_nid_stats->nid_stats);
279 rc = lprocfs_register_stats(tmp->nid_proc, "stats",
284 /* Always add in ldlm_stats */
285 tmp->nid_ldlm_stats =
286 lprocfs_alloc_stats(LDLM_LAST_OPC - LDLM_FIRST_OPC,
287 LPROCFS_STATS_FLAG_NOPERCPU);
288 if (tmp->nid_ldlm_stats == NULL)
291 lprocfs_init_ldlm_stats(tmp->nid_ldlm_stats);
293 rc = lprocfs_register_stats(tmp->nid_proc, "ldlm_stats",
294 tmp->nid_ldlm_stats);
300 /* VBR: to determine the delayed client the lcd should be updated for each new
/* Bring a client's on-disk last_epoch up to the server's current
 * start_epoch and write its lsd_client_data record back to the
 * last_rcvd file at the client's fixed offset (fed_lr_off).  Skipped
 * when the client's epoch is already current. */
302 static int filter_update_client_epoch(struct obd_export *exp)
304 struct filter_export_data *fed = &exp->exp_filter_data;
305 struct filter_obd *filter = &exp->exp_obd->u.filter;
306 struct lvfs_run_ctxt saved;
307 loff_t off = fed->fed_lr_off;
310 /* VBR: set client last_epoch to current epoch */
311 if (le32_to_cpu(fed->fed_lcd->lcd_last_epoch) >=
312 le32_to_cpu(filter->fo_fsd->lsd_start_epoch))
314 fed->fed_lcd->lcd_last_epoch = filter->fo_fsd->lsd_start_epoch;
/* switch to the server's filesystem context for the vfs write */
315 push_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
316 rc = fsfilt_write_record(exp->exp_obd, filter->fo_rcvd_filp,
317 fed->fed_lcd, sizeof(*fed->fed_lcd), &off,
319 pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
321 CDEBUG(D_INFO, "update client idx %u last_epoch %#x (%#x)\n",
322 fed->fed_lr_idx, le32_to_cpu(fed->fed_lcd->lcd_last_epoch),
323 le32_to_cpu(filter->fo_fsd->lsd_start_epoch));
328 /* Called after recovery is done on server */
/* VBR: bump the server epoch (stored in the high bits of
 * lsd_last_transno and in lsd_start_epoch) under fo_translock, then
 * propagate the new epoch to every export that participated in
 * recovery (found via the delayed reply queue) and sync the server
 * data to disk. */
329 static void filter_update_last_epoch(struct obd_device *obd)
331 struct ptlrpc_request *req;
332 struct filter_obd *filter = &obd->u.filter;
333 struct lr_server_data *fsd = filter->fo_fsd;
336 /* Increase server epoch after recovery */
337 spin_lock(&filter->fo_translock);
338 /* VBR: increase the epoch and store it in lsd */
339 start_epoch = lr_epoch(le64_to_cpu(fsd->lsd_last_transno)) + 1;
340 fsd->lsd_last_transno = cpu_to_le64((__u64)start_epoch << LR_EPOCH_BITS);
341 fsd->lsd_start_epoch = cpu_to_le32(start_epoch);
342 spin_unlock(&filter->fo_translock);
344 /* go through delayed reply queue to find all exports participate in
345 * recovery and set new epoch for them */
346 list_for_each_entry(req, &obd->obd_delayed_reply_queue, rq_list) {
347 LASSERT(!req->rq_export->exp_delayed);
348 filter_update_client_epoch(req->rq_export);
/* force_sync=1: make the new epoch durable before proceeding */
350 filter_update_server_data(obd, filter->fo_rcvd_filp, fsd, 1);
/* obd post-recovery hook: once recovery is complete, advance the
 * server's VBR start epoch.  Additional steps fall in lines elided
 * from this excerpt. */
353 static int filter_postrecov(struct obd_device *obd)
360 LASSERT(!obd->obd_recovering);
361 /* VBR: update start_epoch on server */
362 filter_update_last_epoch(obd);
367 /* Add client data to the FILTER. We use a bitmap to locate a free space
368 * in the last_rcvd file if cl_idx is -1 (i.e. a new client).
369 * Otherwise, we have just read the data from the last_rcvd file and
370 * we know its offset. */
371 static int filter_client_add(struct obd_device *obd, struct obd_export *exp,
374 struct filter_obd *filter = &obd->u.filter;
375 struct filter_export_data *fed = &exp->exp_filter_data;
376 unsigned long *bitmap = filter->fo_last_rcvd_slots;
377 int new_client = (cl_idx == -1);
381 LASSERT(bitmap != NULL);
382 LASSERTF(cl_idx > -2, "%d\n", cl_idx);
/* the self-export (uuid equals the obd's own) never gets a slot */
385 if (strcmp(fed->fed_lcd->lcd_uuid, obd->obd_uuid.uuid) == 0)
388 /* VBR: remove expired exports before searching for free slot */
390 class_disconnect_expired_exports(obd);
392 /* the bitmap operations can handle cl_idx > sizeof(long) * 8, so
393 * there's no need for extra complication here
/* new client: claim the first free bitmap slot, retrying on races */
396 cl_idx = find_first_zero_bit(bitmap, LR_MAX_CLIENTS);
398 if (cl_idx >= LR_MAX_CLIENTS) {
399 CERROR("no room for %u clients - fix LR_MAX_CLIENTS\n",
403 if (test_and_set_bit(cl_idx, bitmap)) {
404 cl_idx = find_next_zero_bit(bitmap, LR_MAX_CLIENTS,
/* existing client (cl_idx given): its bit must not already be set */
409 if (test_and_set_bit(cl_idx, bitmap)) {
410 CERROR("FILTER client %d: bit already set in bitmap!\n",
/* record the slot index and its byte offset in the last_rcvd file */
416 fed->fed_lr_idx = cl_idx;
417 fed->fed_lr_off = le32_to_cpu(filter->fo_fsd->lsd_client_start) +
418 cl_idx * le16_to_cpu(filter->fo_fsd->lsd_client_size);
419 LASSERTF(fed->fed_lr_off > 0, "fed_lr_off = %llu\n", fed->fed_lr_off);
421 CDEBUG(D_INFO, "client at index %d (%llu) with UUID '%s' added\n",
422 fed->fed_lr_idx, fed->fed_lr_off, fed->fed_lcd->lcd_uuid);
425 struct lvfs_run_ctxt saved;
426 loff_t off = fed->fed_lr_off;
430 CDEBUG(D_INFO, "writing client lcd at idx %u (%llu) (len %u)\n",
431 fed->fed_lr_idx,off,(unsigned int)sizeof(*fed->fed_lcd));
433 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
434 /* Transaction needed to fix bug 1403 */
435 handle = fsfilt_start(obd,
436 filter->fo_rcvd_filp->f_dentry->d_inode,
437 FSFILT_OP_SETATTR, NULL);
438 if (IS_ERR(handle)) {
439 rc = PTR_ERR(handle);
440 CERROR("unable to start transaction: rc %d\n", rc);
/* stamp the client record with the current epoch and request time */
442 fed->fed_lcd->lcd_last_epoch =
443 filter->fo_fsd->lsd_start_epoch;
444 exp->exp_last_request_time = cfs_time_current_sec();
445 rc = fsfilt_add_journal_cb(obd, 0, handle,
446 target_client_add_cb, exp);
448 spin_lock(&exp->exp_lock);
449 exp->exp_need_sync = 1;
450 spin_unlock(&exp->exp_lock);
452 rc = fsfilt_write_record(obd, filter->fo_rcvd_filp,
454 sizeof(*fed->fed_lcd),
455 &off, rc /* sync if no cb */);
457 filter->fo_rcvd_filp->f_dentry->d_inode,
460 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
463 CERROR("error writing %s client idx %u: rc %d\n",
464 LAST_RCVD, fed->fed_lr_idx, rc);
/* All-zero record used to overwrite a freed client's last_rcvd slot. */
471 struct lsd_client_data zero_lcd; /* globals are implicitly zeroed */
/* Release a client's last_rcvd slot on disconnect: zero its on-disk
 * record (unless failing over), refresh the server data, clear the
 * slot's bitmap bit, and free the in-memory lsd_client_data. */
473 static int filter_client_free(struct obd_export *exp)
475 struct filter_export_data *fed = &exp->exp_filter_data;
476 struct filter_obd *filter = &exp->exp_obd->u.filter;
477 struct obd_device *obd = exp->exp_obd;
478 struct lvfs_run_ctxt saved;
483 if (fed->fed_lcd == NULL)
486 /* XXX if lcd_uuid were a real obd_uuid, I could use obd_uuid_equals */
/* self-export has no slot to free */
487 if (strcmp(fed->fed_lcd->lcd_uuid, obd->obd_uuid.uuid ) == 0)
490 LASSERT(filter->fo_last_rcvd_slots != NULL);
492 off = fed->fed_lr_off;
494 CDEBUG(D_INFO, "freeing client at idx %u, offset %lld with UUID '%s'\n",
495 fed->fed_lr_idx, fed->fed_lr_off, fed->fed_lcd->lcd_uuid);
497 /* Don't clear fed_lr_idx here as it is likely also unset. At worst
498 * we leak a client slot that will be cleaned on the next recovery. */
500 CERROR("%s: client idx %d has med_off %lld\n",
501 obd->obd_name, fed->fed_lr_idx, off);
502 GOTO(free, rc = -EINVAL);
505 /* Clear the bit _after_ zeroing out the client so we don't
506 race with filter_client_add and zero out new clients.*/
507 if (!test_bit(fed->fed_lr_idx, filter->fo_last_rcvd_slots)) {
508 CERROR("FILTER client %u: bit already clear in bitmap!!\n",
513 if (!(exp->exp_flags & OBD_OPT_FAILOVER)) {
514 /* Don't force sync on disconnect if aborting recovery,
515 * or it does num_clients * num_osts. b=17194 */
516 int need_sync = exp->exp_need_sync &&
517 !(exp->exp_flags&OBD_OPT_ABORT_RECOV);
519 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
/* overwrite the client's slot with zeros to invalidate it on disk */
520 rc = fsfilt_write_record(obd, filter->fo_rcvd_filp, &zero_lcd,
521 sizeof(zero_lcd), &off, 0);
523 /* Make sure the server's last_transno is up to date. Do this
524 * after the client is freed so we know all the client's
525 * transactions have been committed. */
527 filter_update_server_data(obd, filter->fo_rcvd_filp,
528 filter->fo_fsd, need_sync);
529 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
531 CDEBUG(rc == 0 ? D_INFO : D_ERROR,
532 "zero out client %s at idx %u/%llu in %s %ssync rc %d\n",
533 fed->fed_lcd->lcd_uuid, fed->fed_lr_idx, fed->fed_lr_off,
534 LAST_RCVD, need_sync ? "" : "a", rc);
537 if (!test_and_clear_bit(fed->fed_lr_idx, filter->fo_last_rcvd_slots)) {
538 CERROR("FILTER client %u: bit already clear in bitmap!!\n",
545 OBD_FREE_PTR(fed->fed_lcd);
551 /* drop fmd reference, free it if last ref. must be called with fed_lock held.*/
552 static inline void filter_fmd_put_nolock(struct filter_export_data *fed,
553 struct filter_mod_data *fmd)
555 LASSERT_SPIN_LOCKED(&fed->fed_lock);
/* last reference: unlink from fed_mod_list and return to the slab */
556 if (--fmd->fmd_refcount == 0) {
557 /* XXX when we have persistent reservations and the handle
558 * is stored herein we need to drop it here. */
559 fed->fed_mod_count--;
560 list_del(&fmd->fmd_list);
561 OBD_SLAB_FREE(fmd, ll_fmd_cachep, sizeof(*fmd));
565 /* drop fmd reference, free it if last ref */
/* Locked wrapper: takes fed_lock and drops the caller's fmd reference. */
566 void filter_fmd_put(struct obd_export *exp, struct filter_mod_data *fmd)
568 struct filter_export_data *fed;
573 fed = &exp->exp_filter_data;
574 spin_lock(&fed->fed_lock);
575 filter_fmd_put_nolock(fed, fmd); /* caller reference */
576 spin_unlock(&fed->fed_lock);
579 /* expire entries from the end of the list if there are too many
580 * or they are too old */
/* Walks fed_mod_list (oldest first) dropping list references for
 * entries past fo_fmd_max_age or beyond fo_fmd_max_num; @keep is the
 * entry the caller wants preserved (handled in an elided line). */
581 static void filter_fmd_expire_nolock(struct filter_obd *filter,
582 struct filter_export_data *fed,
583 struct filter_mod_data *keep)
585 struct filter_mod_data *fmd, *tmp;
587 list_for_each_entry_safe(fmd, tmp, &fed->fed_mod_list, fmd_list) {
/* stop once entries are both fresh and within the count limit */
591 if (time_before(jiffies, fmd->fmd_expire) &&
592 fed->fed_mod_count < filter->fo_fmd_max_num)
595 list_del_init(&fmd->fmd_list);
596 filter_fmd_put_nolock(fed, fmd); /* list reference */
/* Locked wrapper: expire aged/overflowing fmd entries for an export. */
600 void filter_fmd_expire(struct obd_export *exp)
602 spin_lock(&exp->exp_filter_data.fed_lock);
603 filter_fmd_expire_nolock(&exp->exp_obd->u.filter,
604 &exp->exp_filter_data, NULL);
605 spin_unlock(&exp->exp_filter_data.fed_lock);
608 /* find specified objid, group in export fmd list.
609 * caller must hold fed_lock and take fmd reference itself */
610 static struct filter_mod_data *filter_fmd_find_nolock(struct filter_obd *filter,
611 struct filter_export_data *fed,
612 obd_id objid, obd_gr group)
614 struct filter_mod_data *found = NULL, *fmd;
616 LASSERT_SPIN_LOCKED(&fed->fed_lock);
/* search newest-first; on hit, move the entry to the list tail (MRU)
 * and refresh its expiry time */
618 list_for_each_entry_reverse(fmd, &fed->fed_mod_list, fmd_list) {
619 if (fmd->fmd_id == objid && fmd->fmd_gr == group) {
621 list_del(&fmd->fmd_list);
622 list_add_tail(&fmd->fmd_list, &fed->fed_mod_list);
623 fmd->fmd_expire = jiffies + filter->fo_fmd_max_age;
/* opportunistically expire stale entries, sparing the one found */
628 filter_fmd_expire_nolock(filter, fed, found);
633 /* Find fmd based on objid and group, or return NULL if not found. */
/* Locked wrapper around filter_fmd_find_nolock; on success takes a
 * caller reference that must be dropped with filter_fmd_put(). */
634 struct filter_mod_data *filter_fmd_find(struct obd_export *exp,
635 obd_id objid, obd_gr group)
637 struct filter_mod_data *fmd;
639 spin_lock(&exp->exp_filter_data.fed_lock);
640 fmd = filter_fmd_find_nolock(&exp->exp_obd->u.filter,
641 &exp->exp_filter_data, objid, group);
643 fmd->fmd_refcount++; /* caller reference */
644 spin_unlock(&exp->exp_filter_data.fed_lock);
649 /* Find fmd based on objid and group, or create a new one if none is found.
650 * It is possible for this function to return NULL under memory pressure,
651 * or if objid = 0 is passed (which will only cause old entries to expire).
652 * Currently this is not fatal because any fmd state is transient and
653 * may also be freed when it gets sufficiently old. */
654 struct filter_mod_data *filter_fmd_get(struct obd_export *exp,
655 obd_id objid, obd_gr group)
657 struct filter_export_data *fed = &exp->exp_filter_data;
658 struct filter_mod_data *found = NULL, *fmd_new = NULL;
/* allocate optimistically before taking the lock */
660 OBD_SLAB_ALLOC(fmd_new, ll_fmd_cachep, CFS_ALLOC_IO, sizeof(*fmd_new));
662 spin_lock(&fed->fed_lock);
663 found = filter_fmd_find_nolock(&exp->exp_obd->u.filter,fed,objid,group);
/* miss: insert the fresh entry at the MRU tail with a list reference */
666 list_add_tail(&fmd_new->fmd_list, &fed->fed_mod_list);
667 fmd_new->fmd_id = objid;
668 fmd_new->fmd_gr = group;
669 fmd_new->fmd_refcount++; /* list reference */
671 fed->fed_mod_count++;
/* hit (or allocation unneeded): return the preallocation to the slab */
673 OBD_SLAB_FREE(fmd_new, ll_fmd_cachep, sizeof(*fmd_new));
677 found->fmd_refcount++; /* caller reference */
678 found->fmd_expire = jiffies +
679 exp->exp_obd->u.filter.fo_fmd_max_age;
682 spin_unlock(&fed->fed_lock);
688 /* drop fmd list reference so it will disappear when last reference is put.
689 * This isn't so critical because it would in fact only affect the one client
690 * that is doing the unlink and at worst we have an stale entry referencing
691 * an object that should never be used again. */
692 static void filter_fmd_drop(struct obd_export *exp, obd_id objid, obd_gr group)
694 struct filter_mod_data *found = NULL;
696 spin_lock(&exp->exp_filter_data.fed_lock);
/* NOTE(review): this call passes fed where other callers pass the
 * filter_obd first — argument list may be abridged in this excerpt */
697 found = filter_fmd_find_nolock(&exp->exp_filter_data, objid, group);
699 list_del_init(&found->fmd_list);
700 filter_fmd_put_nolock(&exp->exp_filter_data, found);
702 spin_unlock(&exp->exp_filter_data.fed_lock);
/* no-op stub used when the feature is compiled out (see #ifdef above,
 * elided from this excerpt) */
705 #define filter_fmd_drop(exp, objid, group)
708 /* remove all entries from fmd list */
/* Called on export teardown: drop the list reference of every fmd so
 * each is freed once its last caller reference goes away. */
709 static void filter_fmd_cleanup(struct obd_export *exp)
711 struct filter_export_data *fed = &exp->exp_filter_data;
712 struct filter_mod_data *fmd = NULL, *tmp;
714 spin_lock(&fed->fed_lock);
715 list_for_each_entry_safe(fmd, tmp, &fed->fed_mod_list, fmd_list) {
716 list_del_init(&fmd->fmd_list);
717 filter_fmd_put_nolock(fed, fmd);
719 spin_unlock(&fed->fed_lock);
/* obd export-init hook: set up the per-export fmd lock and list, mark
 * the export as connecting, and delegate the rest to ldlm. */
722 static int filter_init_export(struct obd_export *exp)
724 spin_lock_init(&exp->exp_filter_data.fed_lock);
725 INIT_LIST_HEAD(&exp->exp_filter_data.fed_mod_list);
727 spin_lock(&exp->exp_lock);
728 exp->exp_connecting = 1;
729 spin_unlock(&exp->exp_lock);
731 return ldlm_init_export(exp);
/* Release the in-memory server data and the client-slot bitmap
 * allocated by filter_init_server_data(). */
734 static int filter_free_server_data(struct filter_obd *filter)
736 OBD_FREE(filter->fo_fsd, sizeof(*filter->fo_fsd));
737 filter->fo_fsd = NULL;
738 OBD_FREE(filter->fo_last_rcvd_slots, LR_MAX_CLIENTS / 8);
739 filter->fo_last_rcvd_slots = NULL;
743 /* assumes caller is already in kernel ctxt */
/* Write the lr_server_data header back to the last_rcvd file,
 * optionally forcing a synchronous write.  lsd_compat14 mirrors
 * lsd_last_transno for pre-common-LR (1.4-era) readers. */
744 int filter_update_server_data(struct obd_device *obd, struct file *filp,
745 struct lr_server_data *fsd, int force_sync)
751 CDEBUG(D_INODE, "server uuid : %s\n", fsd->lsd_uuid);
752 CDEBUG(D_INODE, "server last_rcvd : "LPU64"\n",
753 le64_to_cpu(fsd->lsd_last_transno));
754 CDEBUG(D_INODE, "server last_mount: "LPU64"\n",
755 le64_to_cpu(fsd->lsd_mount_count));
757 fsd->lsd_compat14 = fsd->lsd_last_transno;
758 rc = fsfilt_write_record(obd, filp, fsd, sizeof(*fsd), &off,force_sync);
760 CERROR("error writing lr_server_data: rc = %d\n", rc);
/* Persist the last allocated object id for @group to its LAST_ID file
 * (little-endian on disk).  Fails early if the group's file was never
 * set up. */
765 int filter_update_last_objid(struct obd_device *obd, obd_gr group,
768 struct filter_obd *filter = &obd->u.filter;
774 if (filter->fo_last_objid_files[group] == NULL) {
775 CERROR("Object group "LPU64" not fully setup; not updating "
776 "last_objid\n", group);
780 CDEBUG(D_INODE, "%s: server last_objid for group "LPU64": "LPU64"\n",
781 obd->obd_name, group, filter->fo_last_objids[group]);
783 tmp = cpu_to_le64(filter->fo_last_objids[group]);
784 rc = fsfilt_write_record(obd, filter->fo_last_objid_files[group],
785 &tmp, sizeof(tmp), &off, force_sync);
787 CERROR("error writing group "LPU64" last objid: rc = %d\n",
792 /* assumes caller has already in kernel ctxt */
/* Read (or initialize, for a fresh disk) the last_rcvd file: the
 * lr_server_data header plus one lsd_client_data record per client.
 * For each valid client record, recreate its export and mark it as
 * needing replay; decide whether recovery is required and prime the
 * recovery parameters.  Error unwinding labels (err_client/err_fsd)
 * are partially elided from this excerpt. */
793 static int filter_init_server_data(struct obd_device *obd, struct file * filp)
795 struct filter_obd *filter = &obd->u.filter;
796 struct lr_server_data *fsd;
797 struct lsd_client_data *lcd = NULL;
798 struct inode *inode = filp->f_dentry->d_inode;
799 unsigned long last_rcvd_size = i_size_read(inode);
800 struct lustre_mount_info *lmi;
807 /* ensure padding in the struct is the correct size */
808 CLASSERT (offsetof(struct lr_server_data, lsd_padding) +
809 sizeof(fsd->lsd_padding) == LR_SERVER_SIZE);
810 CLASSERT (offsetof(struct lsd_client_data, lcd_padding) +
811 sizeof(lcd->lcd_padding) == LR_CLIENT_SIZE);
813 OBD_ALLOC(fsd, sizeof(*fsd));
816 filter->fo_fsd = fsd;
818 OBD_ALLOC(filter->fo_last_rcvd_slots, LR_MAX_CLIENTS / 8);
819 if (filter->fo_last_rcvd_slots == NULL) {
820 OBD_FREE(fsd, sizeof(*fsd));
/* empty last_rcvd => brand-new target: seed default server data */
824 if (last_rcvd_size == 0) {
825 LCONSOLE_WARN("%s: new disk, initializing\n", obd->obd_name);
827 memcpy(fsd->lsd_uuid, obd->obd_uuid.uuid,sizeof(fsd->lsd_uuid));
828 fsd->lsd_last_transno = 0;
829 mount_count = fsd->lsd_mount_count = 0;
830 fsd->lsd_server_size = cpu_to_le32(LR_SERVER_SIZE);
831 fsd->lsd_client_start = cpu_to_le32(LR_CLIENT_START);
832 fsd->lsd_client_size = cpu_to_le16(LR_CLIENT_SIZE);
833 fsd->lsd_subdir_count = cpu_to_le16(FILTER_SUBDIR_COUNT);
834 filter->fo_subdir_count = FILTER_SUBDIR_COUNT;
835 fsd->lsd_feature_incompat = cpu_to_le32(OBD_INCOMPAT_OST);
/* existing target: load and validate the on-disk header */
837 rc = fsfilt_read_record(obd, filp, fsd, sizeof(*fsd), &off);
839 CDEBUG(D_INODE,"OBD filter: error reading %s: rc %d\n",
843 if (strcmp(fsd->lsd_uuid, obd->obd_uuid.uuid) != 0) {
844 LCONSOLE_ERROR_MSG(0x134, "Trying to start OBD %s using"
845 " the wrong disk %s. Were the /dev/ "
846 "assignments rearranged?\n",
847 obd->obd_uuid.uuid, fsd->lsd_uuid);
848 GOTO(err_fsd, rc = -EINVAL);
850 mount_count = le64_to_cpu(fsd->lsd_mount_count);
851 filter->fo_subdir_count = le16_to_cpu(fsd->lsd_subdir_count);
853 /* Assume old last_rcvd format unless I_C_LR is set */
854 if (!(fsd->lsd_feature_incompat &
855 cpu_to_le32(OBD_INCOMPAT_COMMON_LR)))
856 fsd->lsd_last_transno = fsd->lsd_compat14;
/* refuse to mount if the disk carries features we don't understand */
860 if (fsd->lsd_feature_incompat & ~cpu_to_le32(FILTER_INCOMPAT_SUPP)) {
861 CERROR("%s: unsupported incompat filesystem feature(s) %x\n",
862 obd->obd_name, le32_to_cpu(fsd->lsd_feature_incompat) &
863 ~FILTER_INCOMPAT_SUPP);
864 GOTO(err_fsd, rc = -EINVAL);
866 if (fsd->lsd_feature_rocompat & ~cpu_to_le32(FILTER_ROCOMPAT_SUPP)) {
867 CERROR("%s: unsupported read-only filesystem feature(s) %x\n",
868 obd->obd_name, le32_to_cpu(fsd->lsd_feature_rocompat) &
869 ~FILTER_ROCOMPAT_SUPP);
870 /* Do something like remount filesystem read-only */
871 GOTO(err_fsd, rc = -EINVAL);
874 target_trans_table_init(obd);
875 start_epoch = le32_to_cpu(fsd->lsd_start_epoch);
877 CDEBUG(D_INODE, "%s: server start_epoch : %#x\n",
878 obd->obd_name, start_epoch);
879 CDEBUG(D_INODE, "%s: server last_transno : "LPX64"\n",
880 obd->obd_name, le64_to_cpu(fsd->lsd_last_transno));
881 CDEBUG(D_INODE, "%s: server mount_count: "LPU64"\n",
882 obd->obd_name, mount_count + 1);
883 CDEBUG(D_INODE, "%s: server data size: %u\n",
884 obd->obd_name, le32_to_cpu(fsd->lsd_server_size));
885 CDEBUG(D_INODE, "%s: per-client data start: %u\n",
886 obd->obd_name, le32_to_cpu(fsd->lsd_client_start));
887 CDEBUG(D_INODE, "%s: per-client data size: %u\n",
888 obd->obd_name, le32_to_cpu(fsd->lsd_client_size));
889 CDEBUG(D_INODE, "%s: server subdir_count: %u\n",
890 obd->obd_name, le16_to_cpu(fsd->lsd_subdir_count));
891 CDEBUG(D_INODE, "%s: last_rcvd clients: %lu\n", obd->obd_name,
892 last_rcvd_size <= le32_to_cpu(fsd->lsd_client_start) ? 0 :
893 (last_rcvd_size - le32_to_cpu(fsd->lsd_client_start)) /
894 le16_to_cpu(fsd->lsd_client_size));
896 if (!obd->obd_replayable) {
897 CWARN("%s: recovery support OFF\n", obd->obd_name);
/* iterate every client slot present in the file */
901 for (cl_idx = 0, off = le32_to_cpu(fsd->lsd_client_start);
902 off < last_rcvd_size; cl_idx++) {
904 struct obd_export *exp;
905 struct filter_export_data *fed;
910 GOTO(err_client, rc = -ENOMEM);
913 /* Don't assume off is incremented properly by
914 * fsfilt_read_record(), in case sizeof(*lcd)
915 * isn't the same as fsd->lsd_client_size. */
916 off = le32_to_cpu(fsd->lsd_client_start) +
917 cl_idx * le16_to_cpu(fsd->lsd_client_size);
918 rc = fsfilt_read_record(obd, filp, lcd, sizeof(*lcd), &off);
920 CERROR("error reading FILT %s idx %d off %llu: rc %d\n",
921 LAST_RCVD, cl_idx, off, rc);
922 break; /* read error shouldn't cause startup to fail */
/* empty uuid => slot was zeroed by filter_client_free(); skip */
925 if (lcd->lcd_uuid[0] == '\0') {
926 CDEBUG(D_INFO, "skipping zeroed client at offset %d\n",
931 check_lcd(obd->obd_name, cl_idx, lcd);
933 last_rcvd = le64_to_cpu(lcd->lcd_last_transno);
935 /* These exports are cleaned up by filter_disconnect(), so they
936 * need to be set up like real exports as filter_connect() does.
938 exp = class_new_export(obd, (struct obd_uuid *)lcd->lcd_uuid);
939 CDEBUG(D_HA, "RCVRNG CLIENT uuid: %s idx: %d lr: "LPU64
940 " srv lr: "LPU64"\n", lcd->lcd_uuid, cl_idx,
941 last_rcvd, le64_to_cpu(fsd->lsd_last_transno));
943 if (PTR_ERR(exp) == -EALREADY) {
944 /* export already exists, zero out this one */
945 CERROR("Zeroing out duplicate export due to "
947 lcd->lcd_uuid[0] = '\0';
949 GOTO(err_client, rc = PTR_ERR(exp));
952 fed = &exp->exp_filter_data;
954 filter_export_stats_init(obd, exp, 0, NULL);
955 rc = filter_client_add(obd, exp, cl_idx);
956 /* can't fail for existing client */
957 LASSERTF(rc == 0, "rc = %d\n", rc);
959 /* VBR: set export last committed */
960 exp->exp_last_committed = last_rcvd;
961 /* read last time from disk */
962 exp->exp_last_request_time = target_trans_table_last_time(exp);
964 spin_lock(&exp->exp_lock);
965 exp->exp_replay_needed = 1;
966 exp->exp_connecting = 0;
967 exp->exp_in_recovery = 0;
968 spin_unlock(&exp->exp_lock);
970 spin_lock_bh(&obd->obd_processing_task_lock);
971 obd->obd_recoverable_clients++;
972 obd->obd_max_recoverable_clients++;
973 spin_unlock_bh(&obd->obd_processing_task_lock);
975 /* VBR: if epoch too old mark export as delayed,
976 * if epoch is zero then client is pre-vbr one */
977 if (start_epoch > le32_to_cpu(lcd->lcd_last_epoch) &&
978 le32_to_cpu(lcd->lcd_last_epoch) != 0)
979 class_set_export_delayed(exp);
982 class_export_put(exp);
985 /* Need to check last_rcvd even for duplicated exports. */
986 CDEBUG(D_OTHER, "client at idx %d has last_rcvd = "LPX64"\n",
/* server last_transno must cover the newest client transno seen */
989 if (last_rcvd > le64_to_cpu(fsd->lsd_last_transno))
990 fsd->lsd_last_transno = cpu_to_le64(last_rcvd);
996 obd->obd_last_committed = le64_to_cpu(fsd->lsd_last_transno);
998 if (obd->obd_recoverable_clients) {
999 CWARN("RECOVERY: service %s, %d recoverable clients, "
1000 "%d delayed clients, last_rcvd "LPU64"\n",
1001 obd->obd_name, obd->obd_recoverable_clients,
1002 obd->obd_delayed_clients,
1003 le64_to_cpu(fsd->lsd_last_transno));
1004 obd->obd_next_recovery_transno = obd->obd_last_committed + 1;
1005 obd->obd_recovering = 1;
1006 obd->obd_recovery_start = 0;
1007 obd->obd_recovery_end = 0;
1009 LASSERT(!obd->obd_recovering);
1010 /* VBR: update boot epoch after recovery */
1011 filter_update_last_epoch(obd);
/* recovery timeouts: defaults, possibly overridden by mount options */
1014 obd->obd_recovery_timeout = OBD_RECOVERY_TIME_SOFT;
1015 obd->obd_recovery_time_hard = OBD_RECOVERY_TIME_HARD;
1017 lmi = server_find_mount_locked(obd->obd_name);
1019 struct lustre_sb_info *lsi = s2lsi(lmi->lmi_sb);
1021 if (lsi->lsi_lmd && lsi->lsi_lmd->lmd_recovery_time_soft)
1022 obd->obd_recovery_timeout =
1023 lsi->lsi_lmd->lmd_recovery_time_soft;
1025 if (lsi->lsi_lmd && lsi->lsi_lmd->lmd_recovery_time_hard)
1026 obd->obd_recovery_time_hard =
1027 lsi->lsi_lmd->lmd_recovery_time_hard;
1031 filter->fo_mount_count = mount_count + 1;
1032 fsd->lsd_mount_count = cpu_to_le64(filter->fo_mount_count);
1034 /* save it, so mount count and last_transno is current */
1035 rc = filter_update_server_data(obd, filp, filter->fo_fsd, 1);
1037 GOTO(err_client, rc);
1042 class_disconnect_exports(obd);
1044 filter_free_server_data(filter);
/* Tear down everything filter_prep_groups() built: per-group O/
 * dentries, LAST_ID files, subdirectory dentries, the last-objid
 * array, and finally the O/ directory dentry itself.  Each array is
 * freed only if it was allocated. */
1048 static int filter_cleanup_groups(struct obd_device *obd)
1050 struct filter_obd *filter = &obd->u.filter;
1052 struct dentry *dentry;
1056 if (filter->fo_dentry_O_groups != NULL) {
1057 for (i = 0; i < FILTER_GROUPS; i++) {
1058 dentry = filter->fo_dentry_O_groups[i];
1062 OBD_FREE(filter->fo_dentry_O_groups,
1063 FILTER_GROUPS * sizeof(*filter->fo_dentry_O_groups));
1064 filter->fo_dentry_O_groups = NULL;
1066 if (filter->fo_last_objid_files != NULL) {
1067 for (i = 0; i < FILTER_GROUPS; i++) {
1068 filp = filter->fo_last_objid_files[i];
1070 filp_close(filp, 0);
1072 OBD_FREE(filter->fo_last_objid_files,
1073 FILTER_GROUPS * sizeof(*filter->fo_last_objid_files));
1074 filter->fo_last_objid_files = NULL;
1076 if (filter->fo_dentry_O_sub != NULL) {
1077 for (i = 0; i < filter->fo_subdir_count; i++) {
1078 dentry = filter->fo_dentry_O_sub[i];
1082 OBD_FREE(filter->fo_dentry_O_sub,
1083 filter->fo_subdir_count *
1084 sizeof(*filter->fo_dentry_O_sub));
1085 filter->fo_dentry_O_sub = NULL;
1087 if (filter->fo_last_objids != NULL) {
1088 OBD_FREE(filter->fo_last_objids,
1089 FILTER_GROUPS * sizeof(*filter->fo_last_objids));
1090 filter->fo_last_objids = NULL;
1092 if (filter->fo_dentry_O != NULL) {
1093 f_dput(filter->fo_dentry_O);
1094 filter->fo_dentry_O = NULL;
1099 /* FIXME: object groups */
/* Build the on-disk object namespace: the O/ directory, one
 * subdirectory and LAST_ID file per object group, and (when
 * fo_subdir_count is set) the d0..dN hash subdirectories under group
 * 0.  Each group's last object id is loaded from its LAST_ID file, or
 * initialized to FILTER_INIT_OBJID on a fresh file.  All failures
 * unwind through filter_cleanup_groups(). */
1100 static int filter_prep_groups(struct obd_device *obd)
1102 struct filter_obd *filter = &obd->u.filter;
1103 struct dentry *dentry, *O_dentry;
1105 int i, rc = 0, cleanup_phase = 0;
1108 O_dentry = simple_mkdir(cfs_fs_pwd(current->fs), filter->fo_vfsmnt,
1110 CDEBUG(D_INODE, "got/created O: %p\n", O_dentry);
1111 if (IS_ERR(O_dentry)) {
1112 rc = PTR_ERR(O_dentry);
1113 CERROR("cannot open/create O: rc = %d\n", rc);
1116 filter->fo_dentry_O = O_dentry;
1117 cleanup_phase = 1; /* O_dentry */
1119 OBD_ALLOC(filter->fo_last_objids, FILTER_GROUPS * sizeof(__u64));
1120 if (filter->fo_last_objids == NULL)
1121 GOTO(cleanup, rc = -ENOMEM);
1122 cleanup_phase = 2; /* groups */
1124 OBD_ALLOC(filter->fo_dentry_O_groups, FILTER_GROUPS * sizeof(dentry));
1125 if (filter->fo_dentry_O_groups == NULL)
1126 GOTO(cleanup, rc = -ENOMEM);
1127 OBD_ALLOC(filter->fo_last_objid_files, FILTER_GROUPS * sizeof(filp));
1128 if (filter->fo_last_objid_files == NULL)
1129 GOTO(cleanup, rc = -ENOMEM);
1131 for (i = 0; i < FILTER_GROUPS; i++) {
/* create/open the numeric group directory O/<i> */
1135 sprintf(name, "%d", i);
1136 dentry = simple_mkdir(O_dentry, filter->fo_vfsmnt,
1138 CDEBUG(D_INODE, "got/created O/%s: %p\n", name, dentry);
1139 if (IS_ERR(dentry)) {
1140 rc = PTR_ERR(dentry);
1141 CERROR("cannot lookup/create O/%s: rc = %d\n",
1145 filter->fo_dentry_O_groups[i] = dentry;
1147 sprintf(name, "O/%d/LAST_ID", i);
1148 filp = filp_open(name, O_CREAT | O_RDWR, 0700);
1151 CERROR("cannot create %s: rc = %d\n", name, rc);
1154 filter->fo_last_objid_files[i] = filp;
/* empty LAST_ID file: seed and persist the initial object id */
1156 if (i_size_read(filp->f_dentry->d_inode) == 0) {
1157 filter->fo_last_objids[i] = FILTER_INIT_OBJID;
1158 rc = filter_update_last_objid(obd, i, 1);
1164 rc = fsfilt_read_record(obd, filp, &filter->fo_last_objids[i],
1165 sizeof(__u64), &off);
1167 CDEBUG(D_INODE,"OBD filter: error reading %s: rc %d\n",
/* on-disk value is little-endian; convert in place */
1171 filter->fo_last_objids[i] =
1172 le64_to_cpu(filter->fo_last_objids[i]);
1173 CDEBUG(D_HA, "%s: server last_objid group %d: "LPU64"\n",
1174 obd->obd_name, i, filter->fo_last_objids[i]);
1177 if (filter->fo_subdir_count) {
1178 O_dentry = filter->fo_dentry_O_groups[0];
1179 OBD_ALLOC(filter->fo_dentry_O_sub,
1180 filter->fo_subdir_count * sizeof(dentry));
1181 if (filter->fo_dentry_O_sub == NULL)
1182 GOTO(cleanup, rc = -ENOMEM);
1184 for (i = 0; i < filter->fo_subdir_count; i++) {
1186 snprintf(dir, sizeof(dir), "d%u", i);
1188 dentry = simple_mkdir(O_dentry, filter->fo_vfsmnt,
1190 CDEBUG(D_INODE, "got/created O/0/%s: %p\n", dir,dentry);
1191 if (IS_ERR(dentry)) {
1192 rc = PTR_ERR(dentry);
1193 CERROR("can't lookup/create O/0/%s: rc = %d\n",
1197 filter->fo_dentry_O_sub[i] = dentry;
1203 filter_cleanup_groups(obd);
1207 /* setup the object store with correct subdirectories */
/*
 * filter_prep(): open the filter's bookkeeping files and object tree.
 *
 * With the obd lvfs context pushed:
 *  - opens/creates LAST_RCVD (client recovery state), verifying it is a
 *    regular file, and initializes server data from it;
 *  - verifies the backing filesystem supports i_op->create/->unlink
 *    (filter_vfs_unlink() calls ->unlink directly);
 *  - opens/creates the HEALTH_CHECK file and runs an I/O health check;
 *  - builds the object group tree via filter_prep_groups().
 * Errors unwind in reverse order (health check file, rcvd file).
 * Returns 0 or a negative errno.
 */
1208 static int filter_prep(struct obd_device *obd)
1210 struct lvfs_run_ctxt saved;
1211 struct filter_obd *filter = &obd->u.filter;
1213 struct inode *inode;
/* All file operations below are relative to the obd's lvfs root. */
1217 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1218 file = filp_open(LAST_RCVD, O_RDWR | O_CREAT | O_LARGEFILE, 0700);
1219 if (!file || IS_ERR(file)) {
1221 CERROR("OBD filter: cannot open/create %s: rc = %d\n",
1225 filter->fo_rcvd_filp = file;
1226 if (!S_ISREG(file->f_dentry->d_inode->i_mode)) {
1227 CERROR("%s is not a regular file!: mode = %o\n", LAST_RCVD,
1228 file->f_dentry->d_inode->i_mode);
1229 GOTO(err_filp, rc = -ENOENT);
1232 inode = file->f_dentry->d_parent->d_inode;
1233 /* We use i_op->unlink directly in filter_vfs_unlink() */
1234 if (!inode->i_op || !inode->i_op->create || !inode->i_op->unlink) {
1235 CERROR("%s: filesystem does not support create/unlink ops\n",
1237 GOTO(err_filp, rc = -EOPNOTSUPP);
/* Load per-client recovery records from LAST_RCVD. */
1240 rc = filter_init_server_data(obd, file);
1242 CERROR("cannot read %s: rc = %d\n", LAST_RCVD, rc);
1245 /* open and create health check io file*/
1246 file = filp_open(HEALTH_CHECK, O_RDWR | O_CREAT, 0644);
1249 CERROR("OBD filter: cannot open/create %s rc = %d\n",
1253 filter->fo_obt.obt_health_check_filp = file;
1254 if (!S_ISREG(file->f_dentry->d_inode->i_mode)) {
1255 CERROR("%s is not a regular file!: mode = %o\n", HEALTH_CHECK,
1256 file->f_dentry->d_inode->i_mode);
1257 GOTO(err_health_check, rc = -ENOENT);
/* Make sure we can actually do I/O to the device before going live. */
1259 rc = lvfs_check_io_health(obd, file);
1261 GOTO(err_health_check, rc);
1263 rc = filter_prep_groups(obd);
1265 GOTO(err_server_data, rc);
1267 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
/* Error unwinding: release resources in reverse order of acquisition. */
1272 //class_disconnect_exports(obd, 0);
1273 filter_free_server_data(filter);
1275 if (filp_close(filter->fo_obt.obt_health_check_filp, 0))
1276 CERROR("can't close %s after error\n", HEALTH_CHECK);
1277 filter->fo_obt.obt_health_check_filp = NULL;
1279 if (filp_close(filter->fo_rcvd_filp, 0))
1280 CERROR("can't close %s after error\n", LAST_RCVD);
1281 filter->fo_rcvd_filp = NULL;
1285 /* cleanup the filter: write last used object id to status file */
/*
 * filter_post(): flush persistent state at shutdown.
 *
 * Mirror of filter_prep(): writes the server data back to LAST_RCVD,
 * writes each group's last object id (forcing sync on the final group),
 * closes both bookkeeping files, and tears down the group tree and
 * server data.  Errors are logged but not propagated - this runs on the
 * teardown path where there is no caller to recover.
 */
1286 static void filter_post(struct obd_device *obd)
1288 struct lvfs_run_ctxt saved;
1289 struct filter_obd *filter = &obd->u.filter;
1292 /* XXX: filter_update_lastobjid used to call fsync_dev. It might be
1293 * best to start a transaction with h_sync, because we removed this
1296 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1297 rc = filter_update_server_data(obd, filter->fo_rcvd_filp,
1300 CERROR("error writing server data: rc = %d\n", rc);
/* Persist every group's last objid; sync only on the last iteration. */
1302 for (i = 0; i < FILTER_GROUPS; i++) {
1303 rc = filter_update_last_objid(obd, i, (i == FILTER_GROUPS - 1));
1305 CERROR("error writing group %d lastobjid: rc = %d\n",
1309 rc = filp_close(filter->fo_rcvd_filp, 0);
1310 filter->fo_rcvd_filp = NULL;
1312 CERROR("error closing %s: rc = %d\n", LAST_RCVD, rc);
1314 rc = filp_close(filter->fo_obt.obt_health_check_filp, 0);
1315 filter->fo_obt.obt_health_check_filp = NULL;
1317 CERROR("error closing %s: rc = %d\n", HEALTH_CHECK, rc);
1319 filter_cleanup_groups(obd);
1320 filter_free_server_data(filter);
1321 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
/*
 * filter_set_last_id(): record the last allocated object id for @group.
 * Serialized by fo_objidlock against concurrent readers/writers of the
 * fo_last_objids[] array.
 *
 * NOTE(review): the assertion allows group == FILTER_GROUPS, yet
 * fo_last_objids[] is allocated with FILTER_GROUPS entries (see
 * filter_prep_groups) and filter_parent() asserts group < FILTER_GROUPS.
 * This "<=" looks like an off-by-one - confirm against callers.
 */
1324 static void filter_set_last_id(struct filter_obd *filter,
1325 obd_id id, obd_gr group)
1327 LASSERT(filter->fo_fsd != NULL);
1328 LASSERT(group <= FILTER_GROUPS);
1330 spin_lock(&filter->fo_objidlock);
1331 filter->fo_last_objids[group] = id;
1332 spin_unlock(&filter->fo_objidlock);
/*
 * filter_last_id(): return the last allocated object id for @group,
 * read under fo_objidlock for a consistent 64-bit value.
 *
 * NOTE(review): same "group <= FILTER_GROUPS" boundary question as
 * filter_set_last_id() - the array has FILTER_GROUPS entries.
 */
1335 obd_id filter_last_id(struct filter_obd *filter, obd_gr group)
1338 LASSERT(filter->fo_fsd != NULL);
1339 LASSERT(group <= FILTER_GROUPS);
1340 LASSERT(filter->fo_last_objids != NULL);
1342 /* FIXME: object groups */
1343 spin_lock(&filter->fo_objidlock);
1344 id = filter->fo_last_objids[group];
1345 spin_unlock(&filter->fo_objidlock);
/*
 * filter_lock_dentry(): take the parent directory's i_mutex before a
 * lookup/create/unlink under it (see the locking invariant at the top
 * of this file).  Released by filter_parent_unlock().
 */
1350 static int filter_lock_dentry(struct obd_device *obd, struct dentry *dparent)
1352 LOCK_INODE_MUTEX(dparent->d_inode);
1356 /* We never dget the object parent, so DON'T dput it either */
/*
 * filter_parent(): map (group, objid) to the cached parent directory
 * dentry.  Non-zero groups (and group 0 without subdirs) use the group
 * directory; group 0 with subdirs hashes the objid into a "d%u" subdir.
 *
 * NOTE(review): the "objid & (fo_subdir_count - 1)" mask only
 * distributes correctly if fo_subdir_count is a power of two - confirm
 * where fo_subdir_count is set.
 */
1357 struct dentry *filter_parent(struct obd_device *obd, obd_gr group, obd_id objid)
1359 struct filter_obd *filter = &obd->u.filter;
1360 LASSERT(group < FILTER_GROUPS); /* FIXME: object groups */
1362 if (group > 0 || filter->fo_subdir_count == 0)
1363 return filter->fo_dentry_O_groups[group];
1365 return filter->fo_dentry_O_sub[objid & (filter->fo_subdir_count - 1)];
1368 /* We never dget the object parent, so DON'T dput it either */
/*
 * filter_parent_lock(): filter_parent() plus i_mutex on the result.
 * Returns the locked parent dentry, or ERR_PTR on failure; also logs a
 * warning via fsfilt_check_slow() if acquiring the lock took too long.
 */
1369 struct dentry *filter_parent_lock(struct obd_device *obd, obd_gr group,
1372 unsigned long now = jiffies;
1373 struct dentry *dparent = filter_parent(obd, group, objid);
1376 if (IS_ERR(dparent))
1379 rc = filter_lock_dentry(obd, dparent);
1380 fsfilt_check_slow(obd, now, "parent lock");
1381 return rc ? ERR_PTR(rc) : dparent;
1384 /* We never dget the object parent, so DON'T dput it either */
/* Drop the parent i_mutex taken by filter_parent_lock(). */
1385 static void filter_parent_unlock(struct dentry *dparent)
1387 UNLOCK_INODE_MUTEX(dparent->d_inode);
1390 /* How to get files, dentries, inodes from object id's.
1392 * If dir_dentry is passed, the caller has already locked the parent
1393 * appropriately for this operation (normally a write lock). If
1394 * dir_dentry is NULL, we do a read lock while we do the lookup to
1395 * avoid races with create/destroy and such changing the directory
1396 * internal to the filesystem code. */
/*
 * filter_fid2dentry(): look up the dentry for object (group, id).
 *
 * @dir_dentry: parent dentry if the caller already holds its lock, or
 *              NULL to have this function lock/unlock the parent itself.
 * Returns a dentry with an elevated refcount (caller must dput), or
 * ERR_PTR: -ENOENT under fail-loc or for a bad inode, -ESTALE for id 0.
 */
1397 struct dentry *filter_fid2dentry(struct obd_device *obd,
1398 struct dentry *dir_dentry,
1399 obd_gr group, obd_id id)
1401 struct dentry *dparent = dir_dentry;
1402 struct dentry *dchild;
/* Injected lookup failure for testing; skipped during orphan recovery
 * because failing those lookups causes later LBUGs in precreate. */
1407 if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOENT) &&
1408 !obd->u.filter.fo_destroy_in_progress) {
1409 /* don't fail lookups for orphan recovery, it causes
1410 * later LBUGs when objects still exist during precreate */
1411 CDEBUG(D_INFO, "*** obd_fail_loc=%x ***\n",OBD_FAIL_OST_ENOENT);
1412 RETURN(ERR_PTR(-ENOENT));
1416 CERROR("fatal: invalid object id 0\n");
1417 RETURN(ERR_PTR(-ESTALE));
/* Object names are the decimal objid within the group directory. */
1420 len = sprintf(name, LPU64, id);
1421 if (dir_dentry == NULL) {
1422 dparent = filter_parent_lock(obd, group, id);
1423 if (IS_ERR(dparent)) {
1424 CERROR("%s: error getting object "LPU64":"LPU64
1425 " parent: rc %ld\n", obd->obd_name,
1426 id, group, PTR_ERR(dparent));
1430 CDEBUG(D_INODE, "looking up object O/%.*s/%s\n",
1431 dparent->d_name.len, dparent->d_name.name, name);
1432 dchild = /*ll_*/lookup_one_len(name, dparent, len);
/* Only unlock if we took the lock ourselves above. */
1433 if (dir_dentry == NULL)
1434 filter_parent_unlock(dparent);
1435 if (IS_ERR(dchild)) {
1436 CERROR("%s: child lookup error %ld\n", obd->obd_name,
1441 if (dchild->d_inode != NULL && is_bad_inode(dchild->d_inode)) {
1442 CERROR("%s: got bad object "LPU64" inode %lu\n",
1443 obd->obd_name, id, dchild->d_inode->i_ino);
1445 RETURN(ERR_PTR(-ENOENT));
1448 CDEBUG(D_INODE, "got child objid %s: %p, count = %d\n",
1449 name, dchild, atomic_read(&dchild->d_count));
1451 LASSERT(atomic_read(&dchild->d_count) > 0);
/*
 * filter_prepare_destroy(): take a server-local PW extent lock covering
 * the whole object ([0, OBD_OBJECT_EOF]) before destroying it, with
 * LDLM_AST_DISCARD_DATA so clients drop (not flush) cached pages for
 * the doomed object.  @lockh receives the lock handle; released later
 * by filter_fini_destroy().  Note: no glimpse AST is set on this lock -
 * filter_intent_policy() relies on that to detect in-flight destroys.
 */
1456 static int filter_prepare_destroy(struct obd_device *obd, obd_id objid,
1457 struct lustre_handle *lockh)
1459 int flags = LDLM_AST_DISCARD_DATA, rc;
1460 struct ldlm_res_id res_id = { .name = { objid } };
1461 ldlm_policy_data_t policy = { .l_extent = { 0, OBD_OBJECT_EOF } };
1464 /* Tell the clients that the object is gone now and that they should
1465 * throw away any cached pages. */
1466 rc = ldlm_cli_enqueue_local(obd->obd_namespace, &res_id, LDLM_EXTENT,
1467 &policy, LCK_PW, &flags, ldlm_blocking_ast,
1468 ldlm_completion_ast, NULL, NULL, 0, NULL,
1471 if (rc != ELDLM_OK) {
1473 CERROR("%s: failed to get lock to destroy objid "LPU64" (%d)\n",
1474 obd->obd_name, objid, rc);
/*
 * filter_fini_destroy(): release the PW destroy lock taken by
 * filter_prepare_destroy(), if it was successfully acquired.
 */
1479 static void filter_fini_destroy(struct obd_device *obd,
1480 struct lustre_handle *lockh)
1482 if (lustre_handle_is_used(lockh))
1483 ldlm_lock_decref(lockh, LCK_PW);
1486 /* This is vfs_unlink() without down(i_sem). If we call regular vfs_unlink()
1487 * we have 2.6 lock ordering issues with filter_commitrw_write() as it takes
1488 * i_sem before starting a handle, while filter_destroy() + vfs_unlink do the
1489 * reverse. Caller must take i_sem before starting the transaction and we
1490 * drop it here before the inode is removed from the dentry. bug 4180/6984 */
/*
 * filter_vfs_unlink(): open-coded vfs_unlink() for the lock-ordering
 * reasons described above.  Preconditions (asserted): both @dir and the
 * child inode mutexes are already held by the caller, and the child
 * inode exists.  Performs the same permission/sticky/append/immutable
 * checks as the VFS, runs the security hook, calls ->unlink directly,
 * and drops the child i_mutex before the inode reference can be lost.
 */
1491 int filter_vfs_unlink(struct inode *dir, struct dentry *dentry,
1492 struct vfsmount *mnt)
1497 /* don't need dir->i_zombie for 2.4, it is for rename/unlink of dir
1498 * itself we already hold dir->i_mutex for child create/unlink ops */
1499 LASSERT(dentry->d_inode != NULL);
/* trylock failing (== 0) proves the mutexes are already held. */
1500 LASSERT(TRYLOCK_INODE_MUTEX(dir) == 0);
1501 LASSERT(TRYLOCK_INODE_MUTEX(dentry->d_inode) == 0);
/* Child may have been moved away from @dir concurrently. */
1505 if (/*!dentry->d_inode ||*/dentry->d_parent->d_inode != dir)
1506 GOTO(out, rc = -ENOENT);
1508 rc = ll_permission(dir, MAY_WRITE | MAY_EXEC, NULL);
1513 GOTO(out, rc = -EPERM);
1515 /* check_sticky() */
1516 if ((dentry->d_inode->i_uid != current->fsuid &&
1517 !cfs_capable(CFS_CAP_FOWNER)) || IS_APPEND(dentry->d_inode) ||
1518 IS_IMMUTABLE(dentry->d_inode))
1519 GOTO(out, rc = -EPERM);
1521 /* NOTE: This might need to go outside i_mutex, though it isn't clear if
1522 * that was done because of journal_start (which is already done
1523 * here) or some other ordering issue. */
1526 rc = ll_security_inode_unlink(dir, dentry, mnt);
/* Call the fs-specific unlink directly (see header comment). */
1529 rc = dir->i_op->unlink(dir, dentry);
1531 /* need to drop i_mutex before we lose inode reference */
1532 UNLOCK_INODE_MUTEX(dentry->d_inode);
1539 /* Caller must hold LCK_PW on parent and push us into kernel context.
1540 * Caller must hold child i_mutex, we drop it always.
1541 * Caller is also required to ensure that dchild->d_inode exists. */
/*
 * filter_destroy_internal(): unlink one object file.  Warns (but still
 * proceeds) if the inode has unexpected extra links or references,
 * since objects are expected to have exactly one of each here.
 */
1542 static int filter_destroy_internal(struct obd_device *obd, obd_id objid,
1543 obd_gr group, struct dentry *dparent,
1544 struct dentry *dchild)
1546 struct inode *inode = dchild->d_inode;
1547 struct filter_obd *filter = &obd->u.filter;
/* Sanity check: a filter object should have nlink==1 and i_count==1. */
1550 if (inode->i_nlink != 1 || atomic_read(&inode->i_count) != 1) {
1551 CERROR("destroying objid %.*s ino %lu nlink %lu count %d\n",
1552 dchild->d_name.len, dchild->d_name.name, inode->i_ino,
1553 (unsigned long)inode->i_nlink,
1554 atomic_read(&inode->i_count));
1557 rc = filter_vfs_unlink(dparent->d_inode, dchild, filter->fo_vfsmnt);
1559 CERROR("error unlinking objid %.*s: rc %d\n",
1560 dchild->d_name.len, dchild->d_name.name, rc);
/*
 * Argument bundle passed through interval_iterate_reverse() to
 * filter_intent_cb(); carries the best glimpse victim found so far
 * (plus, per the callback below, the file size and a liblustre flag).
 */
1564 struct filter_intent_args {
1565 struct ldlm_lock **victim;
/*
 * filter_intent_cb(): interval-tree visitor used by
 * filter_intent_policy() to pick the glimpse victim.
 *
 * Iterated in reverse (highest extents first); stops as soon as an
 * interval no longer extends past the known file size, since locks
 * entirely below the size cannot change it.  Among qualifying non-
 * liblustre locks it keeps, in *arg->victim, a referenced lock with the
 * lowest extent start; liblustre-only holders just clear *arg->liblustre.
 */
1570 static enum interval_iter filter_intent_cb(struct interval_node *n,
1573 struct ldlm_interval *node = (struct ldlm_interval *)n;
1574 struct filter_intent_args *arg = (struct filter_intent_args*)args;
1575 __u64 size = arg->size;
1576 struct ldlm_lock **v = arg->victim;
1577 struct ldlm_lock *lck;
1579 /* If the interval is lower than the current file size,
1581 if (interval_high(n) <= size)
1582 return INTERVAL_ITER_STOP;
1584 list_for_each_entry(lck, &node->li_group, l_sl_policy) {
1585 /* Don't send glimpse ASTs to liblustre clients.
1586 * They aren't listening for them, and they do
1587 * entirely synchronous I/O anyways. */
1588 if (lck->l_export == NULL ||
1589 lck->l_export->exp_libclient == 1)
/* Found a real (non-liblustre) lock holder. */
1592 if (*arg->liblustre)
1593 *arg->liblustre = 0;
1596 *v = LDLM_LOCK_GET(lck);
/* Prefer the lock whose extent starts lowest. */
1597 } else if ((*v)->l_policy_data.l_extent.start <
1598 lck->l_policy_data.l_extent.start) {
1600 *v = LDLM_LOCK_GET(lck);
1603 /* the same policy group - every lock has the
1604 * same extent, so needn't do it any more */
1608 return INTERVAL_ITER_CONT;
/*
 * filter_intent_policy(): LDLM intent handler for glimpse locks on OST
 * objects.
 *
 * A glimpse request wants the current object size without necessarily
 * taking a lock.  Flow:
 *  1. pack the reply (lock reply + LVB);
 *  2. run the extent policy; if the lock is grantable immediately,
 *     grant it (ELDLM_LOCK_REPLACED), except to liblustre clients who
 *     get ELDLM_LOCK_ABORTED with just the LVB;
 *  3. otherwise pick the PW lock extending furthest past the cached
 *     LVB size (filter_intent_cb) and send it a glimpse AST to refresh
 *     the LVB, falling back to an on-disk LVB update when only
 *     liblustre holders exist or the AST fails;
 *  4. always return ELDLM_LOCK_ABORTED in the non-granted cases - the
 *     client gets the size from the reply LVB, not a lock.
 */
1611 static int filter_intent_policy(struct ldlm_namespace *ns,
1612 struct ldlm_lock **lockp, void *req_cookie,
1613 ldlm_mode_t mode, int flags, void *data)
1615 struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
1616 struct ptlrpc_request *req = req_cookie;
1617 struct ldlm_lock *lock = *lockp, *l = NULL;
1618 struct ldlm_resource *res = lock->l_resource;
1619 ldlm_processing_policy policy;
1620 struct ost_lvb *res_lvb, *reply_lvb;
1621 struct ldlm_reply *rep;
1623 int idx, rc, tmpflags = 0, only_liblustre = 1;
1624 struct ldlm_interval_tree *tree;
1625 struct filter_intent_args arg;
1626 __u32 repsize[3] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
1627 [DLM_LOCKREPLY_OFF] = sizeof(*rep),
1628 [DLM_REPLY_REC_OFF] = sizeof(*reply_lvb) };
1631 policy = ldlm_get_processing_policy(res);
1632 LASSERT(policy != NULL);
1633 LASSERT(req != NULL);
1635 rc = lustre_pack_reply(req, 3, repsize, NULL);
1637 RETURN(req->rq_status = rc);
1639 rep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF, sizeof(*rep));
1640 LASSERT(rep != NULL);
1642 reply_lvb = lustre_msg_buf(req->rq_repmsg, DLM_REPLY_REC_OFF,
1643 sizeof(*reply_lvb));
1644 LASSERT(reply_lvb != NULL);
1646 //fixup_handle_for_resent_req(req, lock, &lockh);
1648 /* Call the extent policy function to see if our request can be
1649 * granted, or is blocked.
1650 * If the OST lock has LDLM_FL_HAS_INTENT set, it means a glimpse
1651 * lock, and should not be granted if the lock will be blocked.
1654 LASSERT(ns == res->lr_namespace);
1656 rc = policy(lock, &tmpflags, 0, &err, &rpc_list);
1657 check_res_locked(res);
1659 /* FIXME: we should change the policy function slightly, to not make
1660 * this list at all, since we just turn around and free it */
1661 while (!list_empty(&rpc_list)) {
1662 struct ldlm_lock *wlock =
1663 list_entry(rpc_list.next, struct ldlm_lock, l_cp_ast);
1664 LASSERT((lock->l_flags & LDLM_FL_AST_SENT) == 0);
1665 LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
1666 lock->l_flags &= ~LDLM_FL_CP_REQD;
1667 list_del_init(&wlock->l_cp_ast);
1668 LDLM_LOCK_PUT(wlock);
1671 /* The lock met with no resistance; we're finished. */
1672 if (rc == LDLM_ITER_CONTINUE) {
1673 /* do not grant locks to the liblustre clients: they cannot
1674 * handle ASTs robustly. We need to do this while still
1675 * holding ns_lock to avoid the lock remaining on the res_link
1676 * list (and potentially being added to l_pending_list by an
1677 * AST) when we are going to drop this lock ASAP. */
1678 if (lock->l_export->exp_libclient ||
1679 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_GLIMPSE, 2)) {
1680 ldlm_resource_unlink_lock(lock);
1681 err = ELDLM_LOCK_ABORTED;
1683 err = ELDLM_LOCK_REPLACED;
1689 /* Do not grant any lock, but instead send GL callbacks. The extent
1690 * policy nicely created a list of all PW locks for us. We will choose
1691 * the highest of those which are larger than the size in the LVB, if
1692 * any, and perform a glimpse callback. */
1693 res_lvb = res->lr_lvb_data;
1694 LASSERT(res_lvb != NULL);
1695 *reply_lvb = *res_lvb;
1698 * ->ns_lock guarantees that no new locks are granted, and,
1699 * therefore, that res->lr_lvb_data cannot increase beyond the
1700 * end of already granted lock. As a result, it is safe to
1701 * check against "stale" reply_lvb->lvb_size value without
/* Walk each mode's interval tree (PW modes only matter for size). */
1704 arg.size = reply_lvb->lvb_size;
1706 arg.liblustre = &only_liblustre;
1707 for (idx = 0; idx < LCK_MODE_NUM; idx++) {
1708 tree = &res->lr_itree[idx];
1709 if (tree->lit_mode == LCK_PR)
1712 interval_iterate_reverse(tree->lit_root,
1713 filter_intent_cb, &arg);
1717 /* There were no PW locks beyond the size in the LVB; finished. */
1719 if (only_liblustre) {
1720 /* If we discovered a liblustre client with a PW lock,
1721 * however, the LVB may be out of date! The LVB is
1722 * updated only on glimpse (which we don't do for
1723 * liblustre clients) and cancel (which the client
1724 * obviously has not yet done). So if it has written
1725 * data but kept the lock, the LVB is stale and needs
1726 * to be updated from disk.
1728 * Of course, this will all disappear when we switch to
1729 * taking liblustre locks on the OST. */
1730 ldlm_res_lvbo_update(res, NULL, 0, 1);
1732 RETURN(ELDLM_LOCK_ABORTED);
1736 * This check is for lock taken in filter_prepare_destroy() that does
1737 * not have l_glimpse_ast set. So the logic is: if there is a lock
1738 * with no l_glimpse_ast set, this object is being destroyed already.
1740 * Hence, if you are grabbing DLM locks on the server, always set
1741 * non-NULL glimpse_ast (e.g., ldlm_request.c:ldlm_glimpse_ast()).
1743 if (l->l_glimpse_ast == NULL) {
1744 /* We are racing with unlink(); just return -ENOENT */
1745 rep->lock_policy_res1 = -ENOENT;
1749 LASSERTF(l->l_glimpse_ast != NULL, "l == %p", l);
1750 rc = l->l_glimpse_ast(l, NULL); /* this will update the LVB */
1751 /* Update the LVB from disk if the AST failed (this is a legal race) */
1753 * XXX nikita: situation when ldlm_server_glimpse_ast() failed before
1754 * sending ast is not handled. This can result in lost client writes.
1757 ldlm_res_lvbo_update(res, NULL, 0, 1);
/* Re-copy the (possibly refreshed) resource LVB into the reply. */
1760 *reply_lvb = *res_lvb;
1766 RETURN(ELDLM_LOCK_ABORTED);
1770 * per-obd_device iobuf pool.
1772 * To avoid memory deadlocks in low-memory setups, amount of dynamic
1773 * allocations in write-path has to be minimized (see bug 5137).
1775 * Pages, niobuf_local's and niobuf_remote's are pre-allocated and attached to
1776 * OST threads (see ost_thread_{init,done}()).
1778 * "iobuf's" used by filter cannot be attached to OST thread, however, because
1779 * at the OST layer there are only (potentially) multiple obd_device of type
1780 * unknown at the time of OST thread creation.
1782 * Instead array of iobuf's is attached to struct filter_obd (->fo_iobuf_pool
1783 * field). This array has size OST_MAX_THREADS, so that each OST thread uses
1784 * it's very own iobuf.
1788 * filter_kiobuf_pool_init()
1790 * filter_kiobuf_pool_done()
1792 * filter_iobuf_get()
1794 * operate on this array. They are "generic" in a sense that they don't depend
1795 * on actual type of iobuf's (the latter depending on Linux kernel version).
1799 * destroy pool created by filter_iobuf_pool_init
/*
 * Frees every allocated iobuf slot, then the pool array itself, and
 * clears fo_iobuf_pool.  Safe against sparsely-populated pools (slots
 * are filled lazily by filter_iobuf_get()).
 */
1801 static void filter_iobuf_pool_done(struct filter_obd *filter)
1803 struct filter_iobuf **pool;
1808 pool = filter->fo_iobuf_pool;
1810 for (i = 0; i < filter->fo_iobuf_count; ++ i) {
1811 if (pool[i] != NULL)
1812 filter_free_iobuf(pool[i]);
1814 OBD_FREE(pool, filter->fo_iobuf_count * sizeof pool[0]);
1815 filter->fo_iobuf_pool = NULL;
1821 * pre-allocate pool of iobuf's to be used by filter_{prep,commit}rw_write().
/*
 * Allocates an OSS_THREADS_MAX-sized array of iobuf pointers (slots are
 * populated lazily, per thread, in filter_iobuf_get()).  Returns 0 or
 * -ENOMEM (per the allocation failure branch).
 */
1823 static int filter_iobuf_pool_init(struct filter_obd *filter)
1829 OBD_ALLOC_GFP(filter->fo_iobuf_pool, OSS_THREADS_MAX * sizeof(*pool),
1831 if (filter->fo_iobuf_pool == NULL)
1834 filter->fo_iobuf_count = OSS_THREADS_MAX;
1839 /* Return iobuf allocated for @thread_id. We don't know in advance how
1840 * many threads there will be so we allocate a large empty array and only
1841 * fill in those slots that are actually in use.
1842 * If we haven't allocated a pool entry for this thread before, do so now. */
/*
 * filter_iobuf_get(): per-OST-thread iobuf lookup with lazy allocation.
 * Threads are identified via oti->oti_thread->t_id; callers with no
 * thread context (thread_id == -1) get a freshly allocated iobuf that
 * is not cached in the pool (pool_place stays NULL).
 */
1843 void *filter_iobuf_get(struct filter_obd *filter, struct obd_trans_info *oti)
1845 int thread_id = (oti && oti->oti_thread) ?
1846 oti->oti_thread->t_id : -1;
1847 struct filter_iobuf *pool = NULL;
1848 struct filter_iobuf **pool_place = NULL;
1850 if (thread_id >= 0) {
1851 LASSERT(thread_id < filter->fo_iobuf_count);
1852 pool = *(pool_place = &filter->fo_iobuf_pool[thread_id]);
/* First use by this thread (or anonymous caller): allocate now. */
1855 if (unlikely(pool == NULL)) {
1856 pool = filter_alloc_iobuf(filter, OBD_BRW_WRITE,
1857 PTLRPC_MAX_BRW_PAGES);
1858 if (pool_place != NULL)
1865 /* mount the file system (secretly). lustre_cfg parameters are:
1868 * 3 = flags: failover=f, failout=n
/*
 * filter_common_setup(): bring up an OST filter device.
 *
 * Mounts (or reuses, via server_get_mount) the backing filesystem,
 * binds fsfilt ops, initializes the iobuf pool, rejects read-only
 * devices, parses the failover/failout flag from lcfg buf 3, sets up
 * the lvfs context, locks/stats/caches defaults, the object store
 * (filter_prep), the LDLM namespace with the glimpse intent policy,
 * llog, and quota, then prints a mount/recovery banner.  Errors unwind
 * through the err_* labels in reverse order of setup.
 *
 * lcfg bufs: 1 = device, 2 = fstype, 3 = flags, 4 = mount options.
 * Returns 0 or a negative errno.
 */
1871 int filter_common_setup(struct obd_device *obd, obd_count len, void *buf,
1874 struct lustre_cfg* lcfg = buf;
1875 struct filter_obd *filter = &obd->u.filter;
1876 struct vfsmount *mnt;
1877 struct lustre_mount_info *lmi;
1878 struct obd_uuid uuid;
1882 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
1883 struct request_queue *q;
1888 if (lcfg->lcfg_bufcount < 3 ||
1889 LUSTRE_CFG_BUFLEN(lcfg, 1) < 1 ||
1890 LUSTRE_CFG_BUFLEN(lcfg, 2) < 1)
1893 lmi = server_get_mount(obd->obd_name);
1895 /* We already mounted in lustre_fill_super.
1896 lcfg bufs 1, 2, 4 (device, fstype, mount opts) are ignored.*/
1897 struct lustre_sb_info *lsi = s2lsi(lmi->lmi_sb);
1899 obd->obd_fsops = fsfilt_get_ops(MT_STR(lsi->lsi_ldd));
1901 /* old path - used by lctl */
1902 CERROR("Using old MDS mount method\n");
1903 mnt = ll_kern_mount(lustre_cfg_string(lcfg, 2),
1904 MS_NOATIME|MS_NODIRATIME,
1905 lustre_cfg_string(lcfg, 1), option);
1908 LCONSOLE_ERROR_MSG(0x135, "Can't mount disk %s (%d)\n",
1909 lustre_cfg_string(lcfg, 1), rc);
1913 obd->obd_fsops = fsfilt_get_ops(lustre_cfg_string(lcfg, 2));
1915 if (IS_ERR(obd->obd_fsops))
1916 GOTO(err_mntput, rc = PTR_ERR(obd->obd_fsops));
1918 rc = filter_iobuf_pool_init(filter);
/* Refuse to run on a device administratively marked read-only. */
1922 if (lvfs_check_rdonly(lvfs_sbdev(mnt->mnt_sb))) {
1923 CERROR("%s: Underlying device is marked as read-only. "
1924 "Setup failed\n", obd->obd_name);
1925 GOTO(err_ops, rc = -EROFS);
1928 /* failover is the default */
1929 obd->obd_replayable = 1;
/* lcfg buf 3: an 'n' (failout) disables recovery/replay. */
1931 if (lcfg->lcfg_bufcount > 3 && LUSTRE_CFG_BUFLEN(lcfg, 3) > 0) {
1932 str = lustre_cfg_string(lcfg, 3);
1933 if (strchr(str, 'n')) {
1934 CWARN("%s: recovery disabled\n", obd->obd_name);
1935 obd->obd_replayable = 0;
1939 filter->fo_vfsmnt = mnt;
1940 obd->u.obt.obt_sb = mnt->mnt_sb;
1941 obd->u.obt.obt_stale_export_age = STALE_EXPORT_MAXTIME_DEFAULT;
1942 spin_lock_init(&obd->u.obt.obt_trans_table_lock);
1944 filter->fo_fstype = mnt->mnt_sb->s_type->name;
1945 CDEBUG(D_SUPER, "%s: mnt = %p\n", filter->fo_fstype, mnt);
1947 rc = fsfilt_setup(obd, obd->u.obt.obt_sb);
/* lvfs context used by push_ctxt()/pop_ctxt() in filter_prep() etc. */
1951 OBD_SET_CTXT_MAGIC(&obd->obd_lvfs_ctxt);
1952 obd->obd_lvfs_ctxt.pwdmnt = mnt;
1953 obd->obd_lvfs_ctxt.pwd = mnt->mnt_root;
1954 obd->obd_lvfs_ctxt.fs = get_ds();
1955 obd->obd_lvfs_ctxt.cb_ops = filter_lvfs_ops;
/* Locks, lists, stats, and cache/grant tunables with their defaults. */
1957 filter->fo_destroy_in_progress = 0;
1958 sema_init(&filter->fo_create_lock, 1);
1959 spin_lock_init(&filter->fo_translock);
1960 spin_lock_init(&filter->fo_objidlock);
1961 INIT_LIST_HEAD(&filter->fo_export_list);
1962 sema_init(&filter->fo_alloc_lock, 1);
1963 init_brw_stats(&filter->fo_filter_stats);
1964 filter->fo_read_cache = 1; /* enable read-only cache by default */
1965 filter->fo_writethrough_cache = 1; /* enable writethrough cache */
1966 filter->fo_readcache_max_filesize = FILTER_MAX_CACHE_SIZE;
1967 filter->fo_fmd_max_num = FILTER_FMD_MAX_NUM_DEFAULT;
1968 filter->fo_fmd_max_age = FILTER_FMD_MAX_AGE_DEFAULT;
1969 filter->fo_syncjournal = 1; /* Sync journals on i/o by default b=19128 */
1971 rc = filter_prep(obd);
/* Per-device LDLM namespace; intent handler serves glimpse requests. */
1975 sprintf(ns_name, "filter-%s", obd->obd_uuid.uuid);
1976 obd->obd_namespace = ldlm_namespace_new(obd, ns_name, LDLM_NAMESPACE_SERVER,
1977 LDLM_NAMESPACE_GREEDY);
1978 if (obd->obd_namespace == NULL)
1979 GOTO(err_post, rc = -ENOMEM);
1980 obd->obd_namespace->ns_lvbp = obd;
1981 obd->obd_namespace->ns_lvbo = &filter_lvbo;
1982 ldlm_register_intent(obd->obd_namespace, filter_intent_policy);
1984 ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
1985 "filter_ldlm_cb_client", &obd->obd_ldlm_client);
1987 rc = obd_llog_init(obd, obd, NULL);
1989 CERROR("failed to setup llogging subsystems\n");
1993 rc = lquota_setup(filter_quota_interface_ref, obd);
/* Advisory only: warn when the block queue limits I/O below 1 RPC. */
1997 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
1998 q = bdev_get_queue(mnt->mnt_sb->s_bdev);
1999 if (q->max_sectors < q->max_hw_sectors &&
2000 q->max_sectors < PTLRPC_MAX_BRW_SIZE >> 9)
2001 LCONSOLE_INFO("%s: underlying device %s should be tuned "
2002 "for larger I/O requests: max_sectors = %u "
2003 "could be up to max_hw_sectors=%u\n",
2004 obd->obd_name, mnt->mnt_sb->s_id,
2005 q->max_sectors, q->max_hw_sectors);
2008 uuid_ptr = fsfilt_uuid(obd, obd->u.obt.obt_sb);
2009 if (uuid_ptr != NULL) {
2010 class_uuid_unparse(uuid_ptr, &uuid);
2016 label = fsfilt_get_label(obd, obd->u.obt.obt_sb);
/* Startup banner: recovery countdown if clients must reconnect. */
2018 if (obd->obd_recovering) {
2019 LCONSOLE_WARN("OST %s now serving %s (%s%s%s), but will be in "
2020 "recovery for at least %d:%.02d, or until %d "
2021 "client%s reconnect%s.\n",
2022 obd->obd_name, lustre_cfg_string(lcfg, 1),
2023 label ?: "", label ? "/" : "", str,
2024 obd->obd_recovery_timeout / 60,
2025 obd->obd_recovery_timeout % 60,
2026 obd->obd_recoverable_clients,
2027 obd->obd_recoverable_clients == 1 ? "":"s",
2028 obd->obd_recoverable_clients == 1 ? "s":"");
2030 LCONSOLE_INFO("OST %s now serving %s (%s%s%s) with recovery "
2031 "%s\n", obd->obd_name, lustre_cfg_string(lcfg, 1),
2032 label ?: "", label ? "/" : "", str,
2033 obd->obd_replayable ? "enabled" : "disabled");
/* Error unwinding, reverse order of setup. */
2041 fsfilt_put_ops(obd->obd_fsops);
2042 filter_iobuf_pool_done(filter);
2044 server_put_mount(obd->obd_name, mnt);
2045 obd->u.obt.obt_sb = 0;
/*
 * filter_setup(): obd_ops setup entry point for the filter device.
 *
 * Allocates a zeroed page to hold the mount-options string (a full page
 * is required by 2.6.9 selinux do_kern_mount, see comment below), sets
 * up lprocfs counters and the exports proc directory *before* the
 * filter proper so stats can be registered incrementally, copies lcfg
 * buf 4 (mount options) into the page, and delegates to
 * filter_common_setup().  On failure the proc state is torn down.
 */
2049 static int filter_setup(struct obd_device *obd, obd_count len, void *buf)
2051 struct lprocfs_static_vars lvars;
2052 struct lustre_cfg* lcfg = buf;
/* obt must be the first member of filter_obd (aliased via u.obt). */
2057 CLASSERT(offsetof(struct obd_device, u.obt) ==
2058 offsetof(struct obd_device, u.filter.fo_obt));
2060 if (!LUSTRE_CFG_BUFLEN(lcfg, 1) || !LUSTRE_CFG_BUFLEN(lcfg, 2))
2063 /* 2.6.9 selinux wants a full option page for do_kern_mount (bug6471) */
2064 OBD_PAGE_ALLOC(page, CFS_ALLOC_STD);
2067 addr = (unsigned long)cfs_page_address(page);
2068 clear_page((void *)addr);
2070 /* lprocfs must be setup before the filter so state can be safely added
2071 * to /proc incrementally as the filter is setup */
2072 lprocfs_filter_init_vars(&lvars);
2073 if (lprocfs_obd_setup(obd, lvars.obd_vars) == 0 &&
2074 lprocfs_alloc_obd_stats(obd, LPROC_FILTER_LAST) == 0) {
2075 /* Init obdfilter private stats here */
2076 lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_READ_BYTES,
2077 LPROCFS_CNTR_AVGMINMAX,
2078 "read_bytes", "bytes");
2079 lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_WRITE_BYTES,
2080 LPROCFS_CNTR_AVGMINMAX,
2081 "write_bytes", "bytes");
2082 lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_GET_PAGE,
2083 LPROCFS_CNTR_AVGMINMAX|LPROCFS_CNTR_STDDEV,
2084 "get_page", "usec");
2085 lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_NO_PAGE,
2086 LPROCFS_CNTR_AVGMINMAX,
2087 "get_page failures", "num");
2088 lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_CACHE_ACCESS,
2089 LPROCFS_CNTR_AVGMINMAX,
2090 "cache_access", "pages");
2091 lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_CACHE_HIT,
2092 LPROCFS_CNTR_AVGMINMAX,
2093 "cache_hit", "pages");
2094 lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_CACHE_MISS,
2095 LPROCFS_CNTR_AVGMINMAX,
2096 "cache_miss", "pages");
2097 lproc_filter_attach_seqstat(obd);
2098 #ifdef HAVE_DELAYED_RECOVERY
2099 lprocfs_obd_attach_stale_exports(obd);
2101 obd->obd_proc_exports_entry = proc_mkdir("exports",
2102 obd->obd_proc_entry);
2104 if (obd->obd_proc_exports_entry)
2105 lprocfs_add_simple(obd->obd_proc_exports_entry, "clear",
2106 lprocfs_nid_stats_clear_read,
2107 lprocfs_nid_stats_clear_write, obd, NULL);
/* Pass mount options (lcfg buf 4) to the real setup via the page. */
2109 memcpy((void *)addr, lustre_cfg_buf(lcfg, 4),
2110 LUSTRE_CFG_BUFLEN(lcfg, 4));
2111 rc = filter_common_setup(obd, len, buf, (void *)addr);
2112 OBD_PAGE_FREE(page);
/* Failure: tear down the proc entries registered above. */
2115 remove_proc_entry("clear", obd->obd_proc_exports_entry);
2116 lprocfs_free_per_client_stats(obd);
2117 lprocfs_free_obd_stats(obd);
2118 lprocfs_obd_cleanup(obd);
/* llog operation tables: the MDS-OST replication ops are cloned from
 * llog_client_ops at runtime in filter_llog_init(); the size-orig ops
 * use the standard obd-origin setup/cleanup/add handlers. */
2124 static struct llog_operations filter_mds_ost_repl_logops /* initialized below*/;
2125 static struct llog_operations filter_size_orig_logops = {
2126 lop_setup: llog_obd_origin_setup,
2127 lop_cleanup: llog_obd_origin_cleanup,
2128 lop_add: llog_obd_origin_add
/*
 * filter_llog_init(): set up the filter's two llog contexts.
 *
 * Starts the llog recovery thread (fo_lcm), patches the replication
 * ops table (client ops + repl cancel/connect/sync), and registers the
 * LLOG_MDS_OST_REPL_CTXT (with filter_recov_log_mds_ost_cb as the
 * recovery callback) and LLOG_SIZE_ORIG_CTXT contexts.  On failure the
 * already-created context and the recovery thread are torn down via
 * the cleanup_* labels.
 */
2131 static int filter_llog_init(struct obd_device *obd, struct obd_device *disk_obd,
2134 struct filter_obd *filter = &obd->u.filter;
2135 struct llog_ctxt *ctxt;
2139 filter->fo_lcm = llog_recov_thread_init(obd->obd_name);
2140 if (!filter->fo_lcm)
/* Build the repl ops from the client ops at runtime. */
2143 filter_mds_ost_repl_logops = llog_client_ops;
2144 filter_mds_ost_repl_logops.lop_cancel = llog_obd_repl_cancel;
2145 filter_mds_ost_repl_logops.lop_connect = llog_obd_repl_connect;
2146 filter_mds_ost_repl_logops.lop_sync = llog_obd_repl_sync;
2148 rc = llog_setup(obd, LLOG_MDS_OST_REPL_CTXT, disk_obd, 0, NULL,
2149 &filter_mds_ost_repl_logops);
2151 GOTO(cleanup_lcm, rc);
2153 /* FIXME - assign unlink_cb for filter's recovery */
2154 ctxt = llog_get_context(obd, LLOG_MDS_OST_REPL_CTXT);
2155 ctxt->llog_proc_cb = filter_recov_log_mds_ost_cb;
2156 ctxt->loc_lcm = filter->fo_lcm;
2157 llog_ctxt_put(ctxt);
2159 rc = llog_setup(obd, LLOG_SIZE_ORIG_CTXT, disk_obd, 0, NULL,
2160 &filter_size_orig_logops);
2162 GOTO(cleanup_ctxt, rc);
/* Error path: release the repl context, then the recovery thread. */
2165 ctxt = llog_get_context(obd, LLOG_MDS_OST_REPL_CTXT);
2169 llog_recov_thread_fini(filter->fo_lcm, 1);
2170 filter->fo_lcm = NULL;
/*
 * filter_llog_finish(): tear down both llog contexts.
 *
 * For the MDS-OST replication context: sync outstanding cancel records,
 * drop the import reference taken in llog_receptor_accept() (under
 * loc_sem), stop the recovery thread, then clean the context up.  The
 * size-orig context just gets cleaned up.  Returns the first non-zero
 * of the two cleanup results (rc before rc2).
 */
2174 static int filter_llog_finish(struct obd_device *obd, int count)
2176 struct filter_obd *filter = &obd->u.filter;
2177 struct llog_ctxt *ctxt;
2178 int rc = 0, rc2 = 0;
2181 ctxt = llog_get_context(obd, LLOG_MDS_OST_REPL_CTXT);
2184 * Make sure that no cached llcds left in recov_thread. We
2185 * actually do sync in disconnect time, but disconnect may
2186 * not come being marked rq_no_resend = 1.
2188 llog_sync(ctxt, NULL);
2191 * Balance class_import_get() called in llog_receptor_accept().
2192 * This is safe to do here, as llog is already synchronized and
2193 * its import may go.
2195 mutex_down(&ctxt->loc_sem);
2196 if (ctxt->loc_imp) {
2197 class_import_put(ctxt->loc_imp);
2198 ctxt->loc_imp = NULL;
2200 mutex_up(&ctxt->loc_sem);
/* Stop the recovery thread; force only when the obd is forced down. */
2203 if (filter->fo_lcm) {
2204 llog_recov_thread_fini(filter->fo_lcm, obd->obd_force);
2205 filter->fo_lcm = NULL;
2209 rc = llog_cleanup(ctxt);
2211 ctxt = llog_get_context(obd, LLOG_SIZE_ORIG_CTXT);
2213 rc2 = llog_cleanup(ctxt);
/*
 * filter_precleanup(): staged pre-shutdown hook.  At the EXPORTS stage
 * it aborts recovery and finishes llog; other stages are no-ops here.
 */
2220 static int filter_precleanup(struct obd_device *obd,
2221 enum obd_cleanup_stage stage)
2227 case OBD_CLEANUP_EARLY:
2229 case OBD_CLEANUP_EXPORTS:
2230 target_cleanup_recovery(obd);
2231 rc = filter_llog_finish(obd, 0);
2233 case OBD_CLEANUP_SELF_EXP:
2235 case OBD_CLEANUP_OBD:
/*
 * filter_cleanup(): full teardown of the filter device.
 *
 * Disconnects any remaining exports (waiting for the zombie queue so
 * last_rcvd slots get their final update), removes proc entries, stops
 * quota, frees the LDLM namespace, turns quota accounting off, shrinks
 * the dentry cache, releases the mount, and frees fsfilt ops and the
 * iobuf pool.  Safe to call when obt_sb is already NULL (partial setup).
 */
2241 static int filter_cleanup(struct obd_device *obd)
2243 struct filter_obd *filter = &obd->u.filter;
2247 LCONSOLE_WARN("%s: shutting down for failover; client state "
2248 "will be preserved.\n", obd->obd_name);
2250 if (!list_empty(&obd->obd_exports)) {
2251 CERROR("%s: still has clients!\n", obd->obd_name);
2252 class_disconnect_exports(obd);
2253 if (!list_empty(&obd->obd_exports)) {
2254 CERROR("still has exports after forced cleanup?\n");
2259 /* some exports may still be in the zombie queue, so we make sure that
2260 * all the exports have been processed, otherwise the last_rcvd slot
2261 * may not be updated on time */
2262 obd_zombie_barrier();
2264 remove_proc_entry("clear", obd->obd_proc_exports_entry);
2265 lprocfs_free_per_client_stats(obd);
2266 lprocfs_free_obd_stats(obd);
2267 lprocfs_obd_cleanup(obd);
2269 lquota_cleanup(filter_quota_interface_ref, obd);
2271 ldlm_namespace_free(obd->obd_namespace, NULL, obd->obd_force);
2272 obd->obd_namespace = NULL;
/* Nothing mounted (setup failed early): skip the fs teardown. */
2274 if (obd->u.obt.obt_sb == NULL)
2279 LL_DQUOT_OFF(obd->u.obt.obt_sb, 0);
2280 shrink_dcache_sb(obd->u.obt.obt_sb);
2282 server_put_mount(obd->obd_name, filter->fo_vfsmnt);
2283 obd->u.obt.obt_sb = NULL;
2285 fsfilt_put_ops(obd->obd_fsops);
2287 filter_iobuf_pool_done(filter);
2289 LCONSOLE_INFO("OST %s has stopped.\n", obd->obd_name);
/*
 * filter_connect_internal(): negotiate connect data with a client.
 * Masks the client's requested connect flags down to what this OST
 * supports, grants initial space if OBD_CONNECT_GRANT is set, validates
 * (or records on first connect) the OST index against last_rcvd,
 * clamps the BRW size, and selects the checksum types to use.
 * Shared by filter_connect() and filter_reconnect().
 * NOTE(review): extract omits intermediate lines; RETURN statements and
 * some closing braces are not visible here.
 */
2294 static int filter_connect_internal(struct obd_export *exp,
2295                                    struct obd_connect_data *data)
2300         CDEBUG(D_RPCTRACE, "%s: cli %s/%p ocd_connect_flags: "LPX64
2301                " ocd_version: %x ocd_grant: %d ocd_index: %u\n",
2302                exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
2303                data->ocd_connect_flags, data->ocd_version,
2304                data->ocd_grant, data->ocd_index);
2306         data->ocd_connect_flags &= OST_CONNECT_SUPPORTED;
2307         exp->exp_connect_flags = data->ocd_connect_flags;
2308         data->ocd_version = LUSTRE_VERSION_CODE;
2310         /* Kindly make sure the SKIP_ORPHAN flag is from MDS. */
2311         if (!ergo(data->ocd_connect_flags & OBD_CONNECT_SKIP_ORPHAN,
2312                   data->ocd_connect_flags & OBD_CONNECT_MDS))
2315         if (exp->exp_connect_flags & OBD_CONNECT_GRANT) {
2316                 struct filter_obd *filter = &exp->exp_obd->u.filter;
2317                 struct filter_export_data *fed = &exp->exp_filter_data;
2318                 obd_size left, want;
                     /* Grant accounting is protected by obd_osfs_lock. */
2320                 spin_lock(&exp->exp_obd->obd_osfs_lock);
2321                 left = filter_grant_space_left(exp);
2322                 want = data->ocd_grant;
2323                 filter_grant(exp, fed->fed_grant, want, left);
2324                 data->ocd_grant = fed->fed_grant;
2325                 spin_unlock(&exp->exp_obd->obd_osfs_lock);
2327                 CDEBUG(D_CACHE, "%s: cli %s/%p ocd_grant: %d want: "
2328                        LPU64" left: "LPU64"\n", exp->exp_obd->obd_name,
2329                        exp->exp_client_uuid.uuid, exp,
2330                        data->ocd_grant, want, left);
2332                 filter->fo_tot_granted_clients ++;
2335         if (data->ocd_connect_flags & OBD_CONNECT_INDEX) {
2336                 struct filter_obd *filter = &exp->exp_obd->u.filter;
2337                 struct lr_server_data *lsd = filter->fo_fsd;
2338                 int index = le32_to_cpu(lsd->lsd_ost_index);
2340                 if (!(lsd->lsd_feature_compat &
2341                       cpu_to_le32(OBD_COMPAT_OST))) {
2342                         /* this will only happen on the first connect */
2343                         lsd->lsd_ost_index = cpu_to_le32(data->ocd_index);
2344                         lsd->lsd_feature_compat |= cpu_to_le32(OBD_COMPAT_OST);
2345                         filter_update_server_data(exp->exp_obd,
2346                                                   filter->fo_rcvd_filp, lsd, 1);
2347                 } else if (index != data->ocd_index) {
                             /* Client thinks it is talking to a different OST
                              * index than what last_rcvd records — refuse. */
2348                         LCONSOLE_ERROR_MSG(0x136, "Connection from %s to index "
2349                                            "%u doesn't match actual OST index "
2350                                            "%u in last_rcvd file, bad "
2352                                            obd_export_nid2str(exp), index,
2358         if (OBD_FAIL_CHECK(OBD_FAIL_OST_BRW_SIZE)) {
                     /* Fault-injection hook: force a small BRW size. */
2359                 data->ocd_brw_size = 65536;
2360         } else if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) {
2361                 data->ocd_brw_size = min(data->ocd_brw_size,
2362                                          (__u32)(PTLRPC_MAX_BRW_PAGES <<
2364                 LASSERT(data->ocd_brw_size);
2367         if (data->ocd_connect_flags & OBD_CONNECT_CKSUM) {
2368                 __u32 cksum_types = data->ocd_cksum_types;
2370                 /* The client set in ocd_cksum_types the checksum types it
2371                  * supports. We have to mask off the algorithms that we don't
2373                 if (cksum_types & OBD_CKSUM_ALL)
2374                         data->ocd_cksum_types &= OBD_CKSUM_ALL;
2376                         data->ocd_cksum_types = OBD_CKSUM_CRC32;
2378                 CDEBUG(D_RPCTRACE, "%s: cli %s supports cksum type %x, return "
2379                        "%x\n", exp->exp_obd->obd_name,
2380                        obd_export_nid2str(exp), cksum_types,
2381                        data->ocd_cksum_types);
2383                 /* This client does not support OBD_CONNECT_CKSUM
2384                  * fall back to CRC32 */
2385                 CDEBUG(D_RPCTRACE, "%s: cli %s does not support "
2386                        "OBD_CONNECT_CKSUM, CRC32 will be used\n",
2387                        exp->exp_obd->obd_name,
2388                        obd_export_nid2str(exp));
2391         /* FIXME: Do the same with the MDS UUID and fsd_peeruuid.
2392          * FIXME: We don't strictly need the COMPAT flag for that,
2393          * FIXME: as fsd_peeruuid[0] will tell us if that is set.
2394          * FIXME: We needed it for the index, as index 0 is valid. */
/*
 * filter_reconnect(): handle a client reconnecting to an existing export.
 * Re-runs the connect-data negotiation and, on success, (re)initializes
 * per-export stats.  Parameters are validated for NULL before use.
 * NOTE(review): extract omits intermediate lines (ENTRY/RETURN and the
 * rc check between the two calls are not visible).
 */
2399 static int filter_reconnect(struct obd_export *exp, struct obd_device *obd,
2400                             struct obd_uuid *cluuid,
2401                             struct obd_connect_data *data,
2407         if (exp == NULL || obd == NULL || cluuid == NULL)
2410         rc = filter_connect_internal(exp, data);
2412                 filter_export_stats_init(obd, exp, 1, localdata);
/* nearly identical to mds_connect */
/*
 * filter_connect(): create a new export for a connecting client.
 * Checks for aborted recovery, performs class_connect(), negotiates
 * connect data, and — if the device is replayable — allocates and
 * registers per-client last_rcvd data (lsd_client_data).  The cleanup
 * tail disconnects and releases the export on failure.
 * NOTE(review): extract omits intermediate lines (GOTO labels, rc
 * checks and RETURN); leading numbers are original file line numbers.
 */
2418 static int filter_connect(struct lustre_handle *conn, struct obd_device *obd,
2419                           struct obd_uuid *cluuid,
2420                           struct obd_connect_data *data,
2423         struct obd_export *exp;
2424         struct filter_export_data *fed;
2425         struct lsd_client_data *lcd = NULL;
2429         if (conn == NULL || obd == NULL || cluuid == NULL)
2432         /* Check for aborted recovery. */
2433         target_recovery_check_and_stop(obd);
2435         rc = class_connect(conn, obd, cluuid);
2438         exp = class_conn2export(conn);
2439         LASSERT(exp != NULL);
2441         fed = &exp->exp_filter_data;
2443         rc = filter_connect_internal(exp, data);
2447         filter_export_stats_init(obd, exp, 0, localdata);
             /* Non-replayable device keeps no per-client recovery state. */
2449         if (!obd->obd_replayable)
2450                 GOTO(cleanup, rc = 0);
2454                 CERROR("filter: out of memory for client data\n");
2455                 GOTO(cleanup, rc = -ENOMEM);
2458         memcpy(lcd->lcd_uuid, cluuid, sizeof(lcd->lcd_uuid));
2461         rc = filter_client_add(obd, exp, -1);
             /* Error path: drop client data and tear the export down. */
2469                 fed->fed_lcd = NULL;
2471                 class_disconnect(exp);
2472                 lprocfs_exp_cleanup(exp);
2474         class_export_put(exp);
/* Do extra sanity checks for grant accounting. We do this at connect,
 * disconnect, and statfs RPC time, so it shouldn't be too bad. We can
 * always get rid of it or turn it off when we know accounting is good. */
/*
 * filter_grant_sanity_check(): verify that the per-export grant/dirty/
 * pending counters sum up to the filter-wide totals.  Walks every export
 * under obd_osfs_lock + obd_dev_lock, accumulates the per-export values,
 * then compares against fo_tot_* outside the locks (CERROR only — these
 * checks never assert the totals, only per-export bounds via LASSERTF).
 * Skipped entirely for devices with > 100 exports to bound the cost.
 */
2483 static void filter_grant_sanity_check(struct obd_device *obd, const char *func)
2485         struct filter_export_data *fed;
2486         struct obd_export *exp;
2487         obd_size maxsize = obd->obd_osfs.os_blocks * obd->obd_osfs.os_bsize;
2488         obd_size tot_dirty = 0, tot_pending = 0, tot_granted = 0;
2489         obd_size fo_tot_dirty, fo_tot_pending, fo_tot_granted;
2491         if (list_empty(&obd->obd_exports))
2494         /* We don't want to do this for large machines that do lots of
2495            mounts or unmounts. It burns... */
2496         if (obd->obd_num_exports > 100)
2499         spin_lock(&obd->obd_osfs_lock);
2500         spin_lock(&obd->obd_dev_lock);
2501         list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
2503                 fed = &exp->exp_filter_data;
2504                 if (fed->fed_grant < 0 || fed->fed_pending < 0 ||
2507                 if (maxsize > 0) { /* we may not have done a statfs yet */
2508                         LASSERTF(fed->fed_grant + fed->fed_pending <= maxsize,
2509                                  "%s: cli %s/%p %ld+%ld > "LPU64"\n", func,
2510                                  exp->exp_client_uuid.uuid, exp,
2511                                  fed->fed_grant, fed->fed_pending, maxsize);
2512                         LASSERTF(fed->fed_dirty <= maxsize,
2513                                  "%s: cli %s/%p %ld > "LPU64"\n", func,
2514                                  exp->exp_client_uuid.uuid, exp,
2515                                  fed->fed_dirty, maxsize);
2518                         CERROR("%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
2519                                obd->obd_name, exp->exp_client_uuid.uuid, exp,
2520                                fed->fed_dirty, fed->fed_pending,fed->fed_grant);
2522                 CDEBUG(D_CACHE, "%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
2523                        obd->obd_name, exp->exp_client_uuid.uuid, exp,
2524                        fed->fed_dirty, fed->fed_pending,fed->fed_grant);
2525                 tot_granted += fed->fed_grant + fed->fed_pending;
2526                 tot_pending += fed->fed_pending;
2527                 tot_dirty += fed->fed_dirty;
             /* Snapshot the filter-wide totals while still locked. */
2529         fo_tot_granted = obd->u.filter.fo_tot_granted;
2530         fo_tot_pending = obd->u.filter.fo_tot_pending;
2531         fo_tot_dirty = obd->u.filter.fo_tot_dirty;
2532         spin_unlock(&obd->obd_dev_lock);
2533         spin_unlock(&obd->obd_osfs_lock);
2535         /* Do these assertions outside the spinlocks so we don't kill system */
2536         if (tot_granted != fo_tot_granted)
2537                 CERROR("%s: tot_granted "LPU64" != fo_tot_granted "LPU64"\n",
2538                        func, tot_granted, fo_tot_granted);
2539         if (tot_pending != fo_tot_pending)
2540                 CERROR("%s: tot_pending "LPU64" != fo_tot_pending "LPU64"\n",
2541                        func, tot_pending, fo_tot_pending);
2542         if (tot_dirty != fo_tot_dirty)
2543                 CERROR("%s: tot_dirty "LPU64" != fo_tot_dirty "LPU64"\n",
2544                        func, tot_dirty, fo_tot_dirty);
2545         if (tot_pending > tot_granted)
2546                 CERROR("%s: tot_pending "LPU64" > tot_granted "LPU64"\n",
2547                        func, tot_pending, tot_granted);
2548         if (tot_granted > maxsize)
2549                 CERROR("%s: tot_granted "LPU64" > maxsize "LPU64"\n",
2550                        func, tot_granted, maxsize);
2551         if (tot_dirty > maxsize)
2552                 CERROR("%s: tot_dirty "LPU64" > maxsize "LPU64"\n",
2553                        func, tot_dirty, maxsize);
/* Remove this client from the grant accounting totals. We also remove
 * the export from the obd device under the osfs and dev locks to ensure
 * that the filter_grant_sanity_check() calculations are always valid.
 * The client should do something similar when it invalidates its import. */
/*
 * filter_grant_discard(): subtract one export's grant and dirty counts
 * from the filter-wide totals and unhook it from obd_exports.
 * fo_tot_pending is deliberately NOT decremented here — in-flight bulk
 * writes release it via filter_grant_commit() (see comment below).
 * Each LASSERTF guards against underflow of the unsigned totals.
 */
2560 static void filter_grant_discard(struct obd_export *exp)
2562         struct obd_device *obd = exp->exp_obd;
2563         struct filter_obd *filter = &obd->u.filter;
2564         struct filter_export_data *fed = &exp->exp_filter_data;
2566         spin_lock(&obd->obd_osfs_lock);
2567         spin_lock(&obd->obd_dev_lock);
2568         list_del_init(&exp->exp_obd_chain);
2569         spin_unlock(&obd->obd_dev_lock);
2571         LASSERTF(filter->fo_tot_granted >= fed->fed_grant,
2572                  "%s: tot_granted "LPU64" cli %s/%p fed_grant %ld\n",
2573                  obd->obd_name, filter->fo_tot_granted,
2574                  exp->exp_client_uuid.uuid, exp, fed->fed_grant);
2575         filter->fo_tot_granted -= fed->fed_grant;
2576         LASSERTF(filter->fo_tot_pending >= fed->fed_pending,
2577                  "%s: tot_pending "LPU64" cli %s/%p fed_pending %ld\n",
2578                  obd->obd_name, filter->fo_tot_pending,
2579                  exp->exp_client_uuid.uuid, exp, fed->fed_pending);
2580         /* fo_tot_pending is handled in filter_grant_commit as bulk finishes */
2581         LASSERTF(filter->fo_tot_dirty >= fed->fed_dirty,
2582                  "%s: tot_dirty "LPU64" cli %s/%p fed_dirty %ld\n",
2583                  obd->obd_name, filter->fo_tot_dirty,
2584                  exp->exp_client_uuid.uuid, exp, fed->fed_dirty);
2585         filter->fo_tot_dirty -= fed->fed_dirty;
2589         spin_unlock(&obd->obd_osfs_lock);
/*
 * filter_destroy_export(): release all OST-side state for a dying export:
 * quota info, target/LDLM export state, last_rcvd client slot (if
 * replayable), then syncs the backing fs and discards grant accounting.
 * The grant sanity check is skipped on forced (failover) teardown.
 * NOTE(review): extract omits intermediate lines; the self-export
 * early-return body after the obd_uuid_equals() check is not visible.
 */
2592 static int filter_destroy_export(struct obd_export *exp)
2596         if (exp->exp_filter_data.fed_pending)
2597                 CERROR("%s: cli %s/%p has %lu pending on destroyed export\n",
2598                        exp->exp_obd->obd_name, exp->exp_client_uuid.uuid,
2599                        exp, exp->exp_filter_data.fed_pending);
2601         lquota_clearinfo(filter_quota_interface_ref, exp, exp->exp_obd);
2603         target_destroy_export(exp);
2604         ldlm_destroy_export(exp);
             /* The device's own "self" export carries no client state. */
2606         if (obd_uuid_equals(&exp->exp_client_uuid, &exp->exp_obd->obd_uuid))
2610         if (exp->exp_obd->obd_replayable)
2611                 filter_client_free(exp);
2613                 fsfilt_sync(exp->exp_obd, exp->exp_obd->u.obt.obt_sb);
2615         filter_grant_discard(exp);
2616         filter_fmd_cleanup(exp);
2618         if (!(exp->exp_flags & OBD_OPT_FORCE))
2619                 filter_grant_sanity_check(exp->exp_obd, __FUNCTION__);
/* also incredibly similar to mds_disconnect */
/*
 * filter_disconnect(): orderly client disconnect.  Flushes pending llog
 * cancel records to the peer, adjusts the granted-clients count for
 * GRANT_SHRINK clients, discards this export's grant accounting, clears
 * quota state, then class_disconnect()s early so the client cannot keep
 * using the export, and finally cancels its LDLM locks.
 * NOTE(review): extract omits intermediate lines (ENTRY/RETURN and
 * closing braces are not visible).
 */
2625 static int filter_disconnect(struct obd_export *exp)
2627         struct obd_device *obd = exp->exp_obd;
2628         struct llog_ctxt *ctxt;
             /* Hold a ref so the export survives until we are done. */
2633         class_export_get(exp);
2635         /* Flush any remaining cancel messages out to the target */
2636         ctxt = llog_get_context(obd, LLOG_MDS_OST_REPL_CTXT);
2638                 if (ctxt->loc_imp == exp->exp_imp_reverse)
2639                         CDEBUG(D_RPCTRACE, "Reverse import disconnect\n");
2640                 llog_sync(ctxt, exp);
2641                 llog_ctxt_put(ctxt);
2644         if (exp->exp_connect_flags & OBD_CONNECT_GRANT_SHRINK) {
2645                 struct filter_obd *filter = &exp->exp_obd->u.filter;
2646                 if (filter->fo_tot_granted_clients > 0)
2647                         filter->fo_tot_granted_clients --;
2650         if (!(exp->exp_flags & OBD_OPT_FORCE))
2651                 filter_grant_sanity_check(obd, __FUNCTION__);
2652         filter_grant_discard(exp);
2654         lquota_clearinfo(filter_quota_interface_ref, exp, exp->exp_obd);
2656         /* Disconnect early so that clients can't keep using export */
2657         rc = class_disconnect(exp);
2658         if (exp->exp_obd->obd_namespace != NULL)
2659                 ldlm_cancel_locks_for_export(exp);
2661         lprocfs_exp_cleanup(exp);
2662         class_export_put(exp);
/*
 * filter_ping(): handle a client ping — expire stale filter_mod_data
 * entries for this export and, for delayed-recovery clients, refresh
 * their epoch in last_rcvd.
 * NOTE(review): return statement not visible in this extract.
 */
2666 static int filter_ping(struct obd_export *exp)
2668         filter_fmd_expire(exp);
2670         if (exp->exp_delayed)
2671                 filter_update_client_epoch(exp);
/*
 * __filter_oa2dentry(): resolve an obdo (group + object id) to a dentry
 * via filter_fid2dentry().  Returns an ERR_PTR on lookup failure, and
 * ERR_PTR(-ENOENT) (after releasing the dentry — release not visible in
 * this extract) when the object has no inode.  @quiet suppresses the
 * non-existent-object CERROR.
 * NOTE(review): extract omits intermediate lines; the body of the
 * missing-FLGROUP check and f_dput on the ENOENT path are not visible.
 */
2676 struct dentry *__filter_oa2dentry(struct obd_device *obd, struct obdo *oa,
2677                                   const char *what, int quiet)
2679         struct dentry *dchild = NULL;
2681         if (!(oa->o_valid & OBD_MD_FLGROUP))
2684         dchild = filter_fid2dentry(obd, NULL, oa->o_gr, oa->o_id);
2686         if (IS_ERR(dchild)) {
2687                 CERROR("%s error looking up object: "LPU64"\n",
2692         if (dchild->d_inode == NULL) {
2694                         CERROR("%s: %s on non-existent object: "LPU64"\n",
2695                                obd->obd_name, what, oa->o_id);
2697                 RETURN(ERR_PTR(-ENOENT));
/*
 * filter_getattr(): OBD getattr handler.  Looks up the object's dentry
 * from oinfo->oi_oa, then fills the obdo back in from the inode,
 * restricting o_valid to FILTER_VALID_FLAGS plus OBD_MD_FLID.
 * NOTE(review): dentry release (f_dput) and RETURN are in lines omitted
 * from this extract.
 */
2703 static int filter_getattr(struct obd_export *exp, struct obd_info *oinfo)
2705         struct dentry *dentry = NULL;
2706         struct obd_device *obd;
2710         obd = class_exp2obd(exp);
2712                 CDEBUG(D_IOCTL, "invalid client export %p\n", exp);
2716         dentry = filter_oa2dentry(obd, oinfo->oi_oa);
2718                 RETURN(PTR_ERR(dentry));
2720         /* Limit the valid bits in the return data to what we actually use */
2721         oinfo->oi_oa->o_valid = OBD_MD_FLID;
2722         obdo_from_inode(oinfo->oi_oa, dentry->d_inode, FILTER_VALID_FLAGS);
/* this should be enabled/disabled in condition to enabled/disabled large
 * inodes (fast EAs) in backing store FS. */
/*
 * filter_update_fidea(): store the MDS-side fid of this OST object in the
 * "fid" extended attribute so that e2fsck/lfsck can reconstruct the
 * object-to-file mapping.  All fields are converted to little-endian
 * before being written; o_stripe_idx is packed into ff_fid.f_type.
 * Requires an open transaction handle.  Objects created without fid info
 * (no OBD_MD_FLFID) are only logged at D_HA.
 */
2730 int filter_update_fidea(struct obd_export *exp, struct inode *inode,
2731                         void *handle, struct obdo *oa)
2733         struct obd_device *obd = exp->exp_obd;
2737         if (oa->o_valid & OBD_MD_FLFID) {
2738                 struct filter_fid ff;
2740                 if (!(oa->o_valid & OBD_MD_FLGROUP))
2743                 /* packing fid and converting it to LE for storing into EA.
2744                  * Here ->o_stripe_idx should be filled by LOV and rest of
2745                  * fields - by client. */
2746                 ff.ff_fid.id = cpu_to_le64(oa->o_fid);
2747                 ff.ff_fid.f_type = cpu_to_le32(oa->o_stripe_idx);
2748                 ff.ff_fid.generation = cpu_to_le32(oa->o_generation);
2749                 ff.ff_objid = cpu_to_le64(oa->o_id);
2750                 ff.ff_group = cpu_to_le64(oa->o_gr);
2752                 CDEBUG(D_INODE, "storing filter fid EA ("LPU64"/%u/%u"
2753                        LPU64"/"LPU64")\n", oa->o_fid, oa->o_stripe_idx,
2754                        oa->o_generation, oa->o_id, oa->o_gr);
2756                 rc = fsfilt_set_md(obd, inode, handle, &ff, sizeof(ff), "fid");
2758                         CERROR("store fid in object failed! rc: %d\n", rc);
2760                 CDEBUG(D_HA, "OSS object without fid info!\n");
/* this is called from filter_truncate() until we have filter_punch() */
/*
 * filter_setattr_internal(): apply attribute changes (size/uid/gid/mode/
 * times/flags) to an OST object inside a journal transaction.
 *
 * Locking (per the invariants at the top of this file): for size/owner
 * changes it takes i_alloc_sem (write) to serialize against bulk writes,
 * then the inode mutex, BEFORE starting the journal transaction.  For a
 * truncate it pins the last affected page so ldiskfs_truncate cannot
 * trigger a GFP_FS allocation (bug 20008).  SUID/SGID bits set by
 * precreate are consumed here on the first owner-setting write, and the
 * fid EA is written at that point.  A partial-page truncate is flushed
 * synchronously to avoid corruption with later direct I/O (bug 17397).
 * On size/owner changes, quota is re-adjusted at the end.
 * NOTE(review): extract omits intermediate lines (GOTO labels, several
 * closing braces, the fcc NULL-check after OBD_ALLOC); leading numbers
 * are original file line numbers.
 */
2767 int filter_setattr_internal(struct obd_export *exp, struct dentry *dentry,
2768                             struct obdo *oa, struct obd_trans_info *oti)
2770         unsigned int orig_ids[MAXQUOTAS] = {0, 0};
2771         struct llog_cookie *fcc = NULL;
2772         struct filter_obd *filter;
2773         int rc, err, sync = 0;
2774         loff_t old_size = 0;
2775         unsigned int ia_valid;
2776         struct inode *inode;
2777         struct page *page = NULL;
2782         LASSERT(dentry != NULL);
2783         LASSERT(!IS_ERR(dentry));
2785         inode = dentry->d_inode;
2786         LASSERT(inode != NULL);
2788         filter = &exp->exp_obd->u.filter;
2789         iattr_from_obdo(&iattr, oa, oa->o_valid);
2790         ia_valid = iattr.ia_valid;
             /* Copy the unlink llog cancel cookie for the commit callback. */
2792         if (oa->o_valid & OBD_MD_FLCOOKIE) {
2793                 OBD_ALLOC(fcc, sizeof(*fcc));
2795                         *fcc = oa->o_lcookie;
2797         if (ia_valid & (ATTR_SIZE | ATTR_UID | ATTR_GID)) {
2799                 /* Filter truncates and writes are serialized by
2800                  * i_alloc_sem, see the comment in
2801                  * filter_preprw_write.*/
2802                 if (ia_valid & ATTR_SIZE)
2803                         down_write(&inode->i_alloc_sem);
2804                 LOCK_INODE_MUTEX(inode);
2805                 old_size = i_size_read(inode);
2808         /* VBR: version recovery check */
2809         rc = filter_version_get_check(exp, oti, inode);
2811                 GOTO(out_unlock, rc);
2813         /* Let's pin the last page so that ldiskfs_truncate
2814          * should not start GFP_FS allocation. (20008) */
2815         if (ia_valid & ATTR_SIZE) {
2816                 page = grab_cache_page(inode->i_mapping,
2817                                        iattr.ia_size >> PAGE_CACHE_SHIFT);
2819                         GOTO(out_unlock, rc = -ENOMEM);
2824         /* If the inode still has SUID+SGID bits set (see filter_precreate())
2825          * then we will accept the UID+GID sent by the client during write for
2826          * initializing the ownership of this inode.  We only allow this to
2827          * happen once so clear these bits in setattr. In 2.6 kernels it is
2828          * possible to get ATTR_UID and ATTR_GID separately, so we only clear
2829          * the flags that are actually being set. */
2830         if (ia_valid & (ATTR_UID | ATTR_GID)) {
2831                 CDEBUG(D_INODE, "update UID/GID to %lu/%lu\n",
2832                        (unsigned long)oa->o_uid, (unsigned long)oa->o_gid);
2834                 if ((inode->i_mode & S_ISUID) && (ia_valid & ATTR_UID)) {
2835                         if (!(ia_valid & ATTR_MODE)) {
2836                                 iattr.ia_mode = inode->i_mode;
2837                                 iattr.ia_valid |= ATTR_MODE;
2839                         iattr.ia_mode &= ~S_ISUID;
2841                 if ((inode->i_mode & S_ISGID) && (ia_valid & ATTR_GID)) {
2842                         if (!(iattr.ia_valid & ATTR_MODE)) {
2843                                 iattr.ia_mode = inode->i_mode;
2844                                 iattr.ia_valid |= ATTR_MODE;
2846                         iattr.ia_mode &= ~S_ISGID;
                 /* Remember old ids for the quota adjustment below. */
2849                 orig_ids[USRQUOTA] = inode->i_uid;
2850                 orig_ids[GRPQUOTA] = inode->i_gid;
2851                 handle = fsfilt_start_log(exp->exp_obd, inode,
2852                                           FSFILT_OP_SETATTR, oti, 1);
2855                         GOTO(out_unlock, rc = PTR_ERR(handle));
2857                 /* update inode EA only once when inode is suid bit marked. As
2858                  * on 2.6.x UID and GID may be set separately, we check here
2859                  * only one of them to avoid double setting. */
2860                 if (inode->i_mode & S_ISUID)
2861                         filter_update_fidea(exp, inode, handle, oa);
                 /* Non-owner-changing setattr: plain transaction, no log. */
2863                 handle = fsfilt_start(exp->exp_obd, inode,
2864                                       FSFILT_OP_SETATTR, oti);
2867                         GOTO(out_unlock, rc = PTR_ERR(handle));
2870         if (oa->o_valid & OBD_MD_FLFLAGS) {
2871                 rc = fsfilt_iocontrol(exp->exp_obd, dentry,
2872                                       FSFILT_IOC_SETFLAGS, (long)&oa->o_flags);
2874                 rc = fsfilt_setattr(exp->exp_obd, dentry, handle, &iattr, 1);
2876                 /* set cancel cookie callback function */
2877                 sync = fsfilt_add_journal_cb(exp->exp_obd, 0, handle,
2878                                              filter_cancel_cookies_cb,
2882         if (OBD_FAIL_CHECK(OBD_FAIL_OST_SETATTR_CREDITS))
2883                 fsfilt_extend(exp->exp_obd, inode, 0, handle);
2885        /* The truncate might have used up our transaction credits.  Make
2886         * sure we have one left for the last_rcvd update. */
2887         err = fsfilt_extend(exp->exp_obd, inode, 1, handle);
2888         rc = filter_finish_transno(exp, inode, oti, rc, sync);
             /* sync commit: cancel the cookies right away ourselves. */
2890                 filter_cancel_cookies_cb(exp->exp_obd, 0, fcc, rc);
2894         err = fsfilt_commit(exp->exp_obd, inode, handle, 0);
2896                 CERROR("error on commit, err = %d\n", err);
2903         /* For a partial-page truncate flush the page to disk immediately
2904          * to avoid data corruption during direct disk write. b=17397 */
2905         if (!sync && (iattr.ia_valid & ATTR_SIZE) &&
2906             old_size != iattr.ia_size && (iattr.ia_size & ~CFS_PAGE_MASK)) {
2907                 err = filemap_fdatawrite_range(inode->i_mapping, iattr.ia_size,
                 /* Release the page pinned before the truncate. */
2917                 page_cache_release(page);
2919         if (ia_valid & (ATTR_SIZE | ATTR_UID | ATTR_GID))
2920                 UNLOCK_INODE_MUTEX(inode);
2921         if (ia_valid & ATTR_SIZE)
2922                 up_write(&inode->i_alloc_sem);
2924                 OBD_FREE(fcc, sizeof(*fcc));
2926         /* trigger quota release */
2927         if (ia_valid & (ATTR_SIZE | ATTR_UID | ATTR_GID)) {
2928                 unsigned int cur_ids[MAXQUOTAS] = {oa->o_uid, oa->o_gid};
2929                 int rc2 = lquota_adjust(filter_quota_interface_ref,exp->exp_obd,
2930                                         cur_ids, orig_ids,rc,FSFILT_OP_SETATTR);
2931                 CDEBUG(rc2 ? D_ERROR : D_QUOTA,
2932                        "filter adjust qunit. (rc:%d)\n", rc2);
/* this is called from filter_truncate() until we have filter_punch() */
/*
 * filter_setattr(): OBD setattr entry point.  Resolves the object, and —
 * for mtime/atime/ctime updates — records the request xid in the
 * filter_mod_data under i_alloc_sem so concurrent writes with older xids
 * cannot clobber newer timestamps (bug 21489 c.27).  Delegates the
 * actual change to filter_setattr_internal(), then refreshes the LDLM
 * resource LVB and reloads uid/gid into the obdo for quota release.
 * NOTE(review): extract omits intermediate lines (GOTO labels,
 * f_dput of the dentry, RETURN).
 */
2938 int filter_setattr(struct obd_export *exp, struct obd_info *oinfo,
2939                    struct obd_trans_info *oti)
2941         struct ldlm_res_id res_id = { .name = { oinfo->oi_oa->o_id } };
2942         struct filter_mod_data *fmd;
2943         struct lvfs_run_ctxt saved;
2944         struct filter_obd *filter;
2945         struct ldlm_resource *res;
2946         struct dentry *dentry;
2950         dentry = __filter_oa2dentry(exp->exp_obd, oinfo->oi_oa,
2953                 RETURN(PTR_ERR(dentry));
2955         filter = &exp->exp_obd->u.filter;
2956         push_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
2959          * We need to be atomic against a concurrent write
2960          * (which takes the semaphore for reading). fmd_mactime_xid
2961          * checks will have no effect if a write request with lower
2962          * xid starts just before a setattr and finishes later than
2963          * the setattr (see bug 21489, comment 27).
2965         if (oinfo->oi_oa->o_valid &
2966             (OBD_MD_FLMTIME | OBD_MD_FLATIME | OBD_MD_FLCTIME)) {
2967                 down_write(&dentry->d_inode->i_alloc_sem);
2968                 fmd = filter_fmd_get(exp,oinfo->oi_oa->o_id,oinfo->oi_oa->o_gr);
2969                 if (fmd && fmd->fmd_mactime_xid < oti->oti_xid)
2970                         fmd->fmd_mactime_xid = oti->oti_xid;
2971                 filter_fmd_put(exp, fmd);
2972                 up_write(&dentry->d_inode->i_alloc_sem);
2975         /* setting objects attributes (including owner/group) */
2976         rc = filter_setattr_internal(exp, dentry, oinfo->oi_oa, oti);
2978                 GOTO(out_unlock, rc);
             /* Update the extent-lock LVB so clients see new size/times. */
2980         res = ldlm_resource_get(exp->exp_obd->obd_namespace, NULL,
2981                                 res_id, LDLM_EXTENT, 0);
2984                 rc = ldlm_res_lvbo_update(res, NULL, 0, 0);
2985                 ldlm_resource_putref(res);
2988         oinfo->oi_oa->o_valid = OBD_MD_FLID;
2990         /* Quota release need uid/gid info */
2991         obdo_from_inode(oinfo->oi_oa, dentry->d_inode,
2992                         FILTER_VALID_FLAGS | OBD_MD_FLUID | OBD_MD_FLGID);
2997         pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
/* XXX identical to osc_unpackmd */
/*
 * filter_unpackmd(): unpack an on-wire lov_mds_md into an in-memory
 * single-stripe lov_stripe_md.  With lmm == NULL and *lsmp set, frees
 * the existing lsm instead.  Allocates *lsmp and its one lsm_oinfo on
 * demand.  Validates lmm size and a non-zero object id.
 * NOTE(review): extract omits intermediate lines (RETURN statements,
 * the ENOMEM checks after OBD_ALLOC); leading numbers are original
 * file line numbers.
 */
3002 static int filter_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
3003                            struct lov_mds_md *lmm, int lmm_bytes)
3009         if (lmm_bytes < sizeof (*lmm)) {
3010                 CERROR("lov_mds_md too small: %d, need %d\n",
3011                        lmm_bytes, (int)sizeof(*lmm));
3014         /* XXX LOV_MAGIC etc check? */
3016         if (lmm->lmm_object_id == cpu_to_le64(0)) {
3017                 CERROR("lov_mds_md: zero lmm_object_id\n");
             /* OST objects are always single-stripe from our viewpoint. */
3022         lsm_size = lov_stripe_md_size(1);
             /* Free-request form: existing lsm, no new metadata supplied. */
3026         if (*lsmp != NULL && lmm == NULL) {
3027                 OBD_FREE((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
3028                 OBD_FREE(*lsmp, lsm_size);
3033         if (*lsmp == NULL) {
3034                 OBD_ALLOC(*lsmp, lsm_size);
3037                 OBD_ALLOC((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
3038                 if ((*lsmp)->lsm_oinfo[0] == NULL) {
3039                         OBD_FREE(*lsmp, lsm_size);
3042                 loi_init((*lsmp)->lsm_oinfo[0]);
3046         /* XXX zero *lsmp? */
3047         (*lsmp)->lsm_object_id = le64_to_cpu (lmm->lmm_object_id);
3048         LASSERT((*lsmp)->lsm_object_id);
3051         (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
/* caller must hold fo_create_lock */
/*
 * filter_destroy_precreated(): delete orphan precreated objects from
 * last_id down to oa->o_id (exclusive), typically during MDS-driven
 * orphan cleanup after recovery.  With OBD_CONNECT_SKIP_ORPHAN the
 * orphan object ids are not reused, so last_id is left alone; otherwise
 * last_id is written back periodically (every 512 ids) and finally set
 * to the new end of the sequence.  The trylock LASSERT documents the
 * fo_create_lock precondition.
 * NOTE(review): extract omits intermediate lines; the loop body's
 * doa.o_id assignment and some braces are not visible here.
 */
3057 static int filter_destroy_precreated(struct obd_export *exp, struct obdo *oa,
3058                                      struct filter_obd *filter)
3060         struct obdo doa = { 0 }; /* XXX obdo on stack */
3067         LASSERT(down_trylock(&filter->fo_create_lock) != 0);
3069         memset(&doa, 0, sizeof(doa));
3070         if (oa->o_valid & OBD_MD_FLGROUP) {
3071                 doa.o_valid |= OBD_MD_FLGROUP;
3072                 doa.o_gr = oa->o_gr;
3076         doa.o_mode = S_IFREG;
3078         if (!filter->fo_destroy_in_progress) {
3079                 CERROR("%s: destroy_in_progress already cleared\n",
3080                        exp->exp_obd->obd_name);
3084         last = filter_last_id(filter, doa.o_gr);
3085         skip_orphan = !!(exp->exp_connect_flags & OBD_CONNECT_SKIP_ORPHAN);
3087         CWARN("%s: deleting orphan objects from "LPU64" to "LPU64"%s\n",
3088               exp->exp_obd->obd_name, oa->o_id + 1, last,
3089               skip_orphan ? ", orphan objids won't be reused any more." : ".");
3091         for (id = last; id > oa->o_id; id--) {
3093                 rc = filter_destroy(exp, &doa, NULL, NULL, NULL);
3094                 if (rc && rc != -ENOENT) /* this is pretty fatal... */
3095                         CEMERG("error destroying precreate objid "LPU64": %d\n",
3097                 /* update last_id on disk periodically so that if we restart
3098                  * we don't need to re-scan all of the just-deleted objects. */
3099                 if ((id & 511) == 0 && !skip_orphan) {
3100                         filter_set_last_id(filter, id - 1, doa.o_gr);
3101                         filter_update_last_objid(exp->exp_obd, doa.o_gr, 0);
3105         CDEBUG(D_HA, "%s: after destroy: set last_objids["LPU64"] = "LPU64"\n",
3106                exp->exp_obd->obd_name, doa.o_gr, oa->o_id);
3109                 filter_set_last_id(filter, id, doa.o_gr);
3110                 rc = filter_update_last_objid(exp->exp_obd, doa.o_gr, 1);
3112                 /* don't reuse orphan object, return last used objid */
3116         filter->fo_destroy_in_progress = 0;
/* Forward declaration: filter_precreate() is defined later in the file. */
3121 static int filter_precreate(struct obd_device *obd, struct obdo *oa,
3122                             obd_gr group, int *num);
3123 /* returns a negative error or a nonnegative number of files to create */
/*
 * filter_handle_precreate(): handle an MDS create/precreate request.
 * Two modes, both under fo_create_lock:
 *  - OBD_FL_DELORPHAN: orphan cleanup — destroy precreated objects above
 *    oa->o_id via filter_destroy_precreated(), rejecting stale requests
 *    (old conn_cnt) and bogus ranges (> OST_MAX_PRECREATE behind).
 *  - otherwise: precreate (oa->o_id - last_id) new objects in group 0.
 * NOTE(review): extract omits intermediate lines (some GOTOs, up() on
 * early-exit paths, RETURN); leading numbers are original line numbers.
 */
3124 static int filter_handle_precreate(struct obd_export *exp, struct obdo *oa,
3125                                    obd_gr group, struct obd_trans_info *oti)
3127         struct obd_device *obd = exp->exp_obd;
3128         struct filter_obd *filter = &obd->u.filter;
3132         /* delete orphans request */
3133         if ((oa->o_valid & OBD_MD_FLFLAGS) && (oa->o_flags & OBD_FL_DELORPHAN)){
3134                 if (oti->oti_conn_cnt < exp->exp_conn_cnt) {
3135                         CERROR("%s: dropping old orphan cleanup request\n",
3140                 /* This causes inflight precreates to abort and drop lock */
3141                 filter->fo_destroy_in_progress = 1;
3142                 down(&filter->fo_create_lock);
3143                 diff = oa->o_id - filter_last_id(filter, group);
3144                 CDEBUG(D_HA, "filter_last_id() = "LPU64" -> diff = %d\n",
3145                        filter_last_id(filter, group), diff);
3147                 if (-diff > OST_MAX_PRECREATE) {
3148                         CERROR("%s: ignoring bogus orphan destroy request: "
3149                                "obdid "LPU64" last_id "LPU64"\n", obd->obd_name,
3150                                oa->o_id, filter_last_id(filter, group));
3151                         /* FIXME: should reset precreate_next_id on MDS */
3152                         GOTO(out, rc = -EINVAL);
3155                         rc = filter_destroy_precreated(exp, oa, filter);
3157                                 CERROR("%s: unable to write lastobjid, but "
3158                                        "orphans were deleted\n", obd->obd_name);
3161                         /*XXX used by MDS for the first time! */
3162                         filter->fo_destroy_in_progress = 0;
3165                 down(&filter->fo_create_lock);
3166                 if (oti->oti_conn_cnt < exp->exp_conn_cnt) {
3167                         CERROR("%s: dropping old precreate request\n",
3171                 /* only precreate if group == 0 and o_id is specfied */
3172                 if (group != 0 || oa->o_id == 0)
3175                 diff = oa->o_id - filter_last_id(filter, group);
3176                 CDEBUG(D_RPCTRACE, "filter_last_id() = "LPU64" -> diff = %d\n",
3177                        filter_last_id(filter, group), diff);
3179                 LASSERTF(diff >= 0,"%s: "LPU64" - "LPU64" = %d\n",obd->obd_name,
3180                          oa->o_id, filter_last_id(filter, group), diff);
                         /* Report back the id actually reached after create. */
3184                         oa->o_id = filter_last_id(&obd->u.filter, group);
3185                         rc = filter_precreate(obd, oa, group, &diff);
3186                         oa->o_id = filter_last_id(&obd->u.filter, group);
3187                         oa->o_valid = OBD_MD_FLID;
3190                         /* else diff == 0 */
3193         up(&filter->fo_create_lock);
/*
 * filter_statfs(): report filesystem stats, adjusted for grant
 * accounting: os_bavail is reduced by cached dirty + pending data plus
 * llog reservation so clients cannot over-commit space.  Contains the
 * OBD_FAIL_OST_ENOSPC fault-injection hook (force near-zero free space
 * on a selected OST index) and marks os_state READONLY/DEGRADED.
 * NOTE(review): extract omits intermediate lines (RETURN and some
 * braces); leading numbers are original file line numbers.
 */
3197 static int filter_statfs(struct obd_device *obd, struct obd_statfs *osfs,
3198                          __u64 max_age, __u32 flags)
3200         struct filter_obd *filter = &obd->u.filter;
3201         int blockbits = obd->u.obt.obt_sb->s_blocksize_bits;
3205         /* at least try to account for cached pages.  its still racey and
3206          * might be under-reporting if clients haven't announced their
3207          * caches with brw recently */
3208         spin_lock(&obd->obd_osfs_lock);
3209         rc = fsfilt_statfs(obd, obd->u.obt.obt_sb, max_age);
3210         memcpy(osfs, &obd->obd_osfs, sizeof(*osfs));
3211         spin_unlock(&obd->obd_osfs_lock);
3213         CDEBUG(D_SUPER | D_CACHE, "blocks cached "LPU64" granted "LPU64
3214                " pending "LPU64" free "LPU64" avail "LPU64"\n",
3215                filter->fo_tot_dirty, filter->fo_tot_granted,
3216                filter->fo_tot_pending,
3217                osfs->os_bfree << blockbits, osfs->os_bavail << blockbits);
3219         filter_grant_sanity_check(obd, __FUNCTION__);
             /* Hide space already promised to clients or reserved for llogs. */
3221         osfs->os_bavail -= min(osfs->os_bavail, GRANT_FOR_LLOG(obd) +
3222                                ((filter->fo_tot_dirty + filter->fo_tot_pending +
3223                                  osfs->os_bsize - 1) >> blockbits));
3225         if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOSPC)) {
3226                 struct lr_server_data *lsd = filter->fo_fsd;
3227                 int index = le32_to_cpu(lsd->lsd_ost_index);
3229                 if (obd_fail_val == -1 ||
3230                     index == obd_fail_val)
3231                         osfs->os_bfree = osfs->os_bavail = 2;
3232                 else if (obd_fail_loc & OBD_FAIL_ONCE)
3233                         obd_fail_loc &= ~OBD_FAILED; /* reset flag */
3236         /* set EROFS to state field if FS is mounted as RDONLY. The goal is to
3237          * stop creating files on MDS if OST is not good shape to create
3241         if (filter->fo_obt.obt_sb->s_flags & MS_RDONLY)
3242                 osfs->os_state = OS_STATE_READONLY;
3244         if (filter->fo_raid_degraded)
3245                 osfs->os_state |= OS_STATE_DEGRADED;
/*
 * filter_use_existing_obj(): reclaim a zero-length object that already
 * exists on disk (e.g. after a bad lastobjid) for reuse as a precreated
 * object.  If the SUID+SGID marker bits are already set the object is
 * usable as-is; otherwise restore the precreate mode bits (SUID|SGID|
 * 0666) inside a new transaction, whose handle is returned to the
 * caller via *handle for later commit.
 * NOTE(review): extract omits intermediate lines (the early-return body
 * and the cleanup_phase update after fsfilt_setattr are not visible).
 */
3249 static int filter_use_existing_obj(struct obd_device *obd,
3250                                    struct dentry *dchild, void **handle,
3253         struct inode *inode = dchild->d_inode;
3257         if ((inode->i_mode & (S_ISUID | S_ISGID)) == (S_ISUID|S_ISGID))
3260         *handle = fsfilt_start_log(obd, inode, FSFILT_OP_SETATTR, NULL, 1);
3261         if (IS_ERR(*handle))
3262                 return PTR_ERR(*handle);
3264         iattr.ia_valid = ATTR_MODE;
3265         iattr.ia_mode = S_ISUID | S_ISGID |0666;
3266         rc = fsfilt_setattr(obd, dchild, *handle, &iattr, 1);
/* We rely on the fact that only one thread will be creating files in a given
 * group at a time, which is why we don't need an atomic filter_get_new_id.
 * Even if we had that atomic function, the following race would exist:
 *
 * thread 1: gets id x from filter_next_id
 * thread 2: gets id (x + 1) from filter_next_id
 * thread 2: creates object (x + 1)
 * thread 1: tries to create object x, gets -ENOSPC
 *
 * Caller must hold fo_create_lock
 */
/*
 * filter_precreate(): create up to *num new objects in @group, or — in
 * OBD_FL_RECREATE_OBJS mode — recreate specific missing objects at
 * oa->o_id.  Per iteration: lock the parent O/<group> dir, look up the
 * next object id, handle the already-exists cases (reuse a zero-length
 * object or fail -EEXIST), create the file with SUID+SGID+0666 marker
 * bits, bump last_id, write last_objid, commit and unlock.  Aborts if a
 * concurrent DELORPHAN sets fo_destroy_in_progress, and stops early
 * when it has run for more than DISK_TIMEOUT/2.  Before creating it
 * refuses when free space is below ~0.1% of the device.
 * NOTE(review): extract omits intermediate lines (cleanup switch cases,
 * some braces and GOTOs); leading numbers are original line numbers.
 */
3285 static int filter_precreate(struct obd_device *obd, struct obdo *oa,
3286                             obd_gr group, int *num)
3288         struct dentry *dchild = NULL, *dparent = NULL;
3289         struct filter_obd *filter;
3290         int err = 0, rc = 0, recreate_obj = 0, i;
3291         cfs_time_t enough_time = cfs_time_shift(DISK_TIMEOUT/2);
3293         void *handle = NULL;
3296         filter = &obd->u.filter;
3298         LASSERT(down_trylock(&filter->fo_create_lock) != 0);
3300         OBD_FAIL_TIMEOUT(OBD_FAIL_TGT_DELAY_PRECREATE, obd_timeout / 2);
3302         if ((oa->o_valid & OBD_MD_FLFLAGS) &&
3303             (oa->o_flags & OBD_FL_RECREATE_OBJS)) {
3306                 struct obd_statfs *osfs;
3308                 OBD_ALLOC(osfs, sizeof(*osfs));
3311                 rc = filter_statfs(obd, osfs,
3312                                    cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
                     /* Refuse precreate when free space < blocks/1024. */
3314                 if (rc == 0 && osfs->os_bavail < (osfs->os_blocks >> 10)) {
3315                         CDEBUG(D_RPCTRACE,"%s: not enough space for create "
3316                                LPU64"\n", obd->obd_name, osfs->os_bavail <<
3317                                filter->fo_vfsmnt->mnt_sb->s_blocksize_bits);
3321                 OBD_FREE(osfs, sizeof(*osfs));
3326         CDEBUG(D_RPCTRACE, "%s: precreating %d objects in group "LPU64
3327                " at "LPU64"\n", obd->obd_name, *num, group, oa->o_id);
3329         for (i = 0; i < *num && err == 0; i++) {
3330                 int cleanup_phase = 0;
                 /* Recreate mode: never go beyond the recorded last_id. */
3335                         last_id = filter_last_id(filter, group);
3336                         if (next_id > last_id) {
3337                                 CERROR("%s: trying to recreate obj greater"
3338                                        "than last id "LPD64" > "LPD64"\n",
3339                                        obd->obd_name, next_id, last_id);
3340                                 GOTO(cleanup, rc = -EINVAL);
3342                 } else if (filter->fo_destroy_in_progress) {
3343                         CWARN("%s: precreate aborted by destroy\n",
3348                         next_id = filter_last_id(filter, group) + 1;
3350                 dparent = filter_parent_lock(obd, group, next_id);
3351                 if (IS_ERR(dparent))
3352                         GOTO(cleanup, rc = PTR_ERR(dparent));
3353                 cleanup_phase = 1;      /* filter_parent_unlock(dparent) */
3355                 dchild = filter_fid2dentry(obd, dparent, group, next_id);
3357                         GOTO(cleanup, rc = PTR_ERR(dchild));
3358                 cleanup_phase = 2;      /* f_dput(dchild) */
3360                 if (dchild->d_inode != NULL) {
3361                         /* This would only happen if lastobjid was bad on disk*/
3362                         /* Could also happen if recreating missing obj but it
3363                          * already exists. */
3365                                 CERROR("%s: recreating existing object %.*s?\n",
3366                                        obd->obd_name, dchild->d_name.len,
3367                                        dchild->d_name.name);
3369                                 /* Use these existing objects if they are
3371                                 if (dchild->d_inode->i_size == 0) {
3372                                         rc =filter_use_existing_obj(obd,dchild,
3373                                                       &handle, &cleanup_phase);
3380                                 CERROR("%s: Serious error: objid %.*s already "
3381                                        "exists; is this filesystem corrupt?\n",
3382                                        obd->obd_name, dchild->d_name.len,
3383                                        dchild->d_name.name);
3386                         GOTO(cleanup, rc = -EEXIST);
3389                 handle = fsfilt_start_log(obd, dparent->d_inode,
3390                                           FSFILT_OP_CREATE, NULL, 1);
3392                         GOTO(cleanup, rc = PTR_ERR(handle));
3395                 CDEBUG(D_INODE, "%s: filter_precreate(od->o_gr="LPU64
3396                        ",od->o_id="LPU64")\n", obd->obd_name, group,
3399                 /* We mark object SUID+SGID to flag it for accepting UID+GID
3400                  * from client on first write.  Currently the permission bits
3401                  * on the OST are never used, so this is OK. */
3402                 rc = ll_vfs_create(dparent->d_inode, dchild,
3403                                    S_IFREG |  S_ISUID | S_ISGID | 0666, NULL);
3405                         CERROR("create failed rc = %d\n", rc);
3408                 if (dchild->d_inode)
3409                         CDEBUG(D_INFO, "objid "LPU64" got inum %lu\n", next_id,
3410                                dchild->d_inode->i_ino);
3413                 if (!recreate_obj) {
3414                         filter_set_last_id(filter, next_id, group);
3415                         err = filter_update_last_objid(obd, group, 0);
3417                                 CERROR("unable to write lastobjid "
3418                                        "but file created\n");
3422                 switch(cleanup_phase) {
3424                         err = fsfilt_commit(obd, dparent->d_inode, handle, 0);
3426                                 CERROR("error on commit, err = %d\n", err);
3433                         filter_parent_unlock(dparent);
                 /* Bail out early if precreate is taking too long. */
3440                 if (cfs_time_after(cfs_time_current(), enough_time)) {
3442                                "%s: precreate slow - want %d got %d \n",
3443                                obd->obd_name, *num, i);
3450                "%s: created %d objects for group "LPU64": "LPU64" rc %d\n",
3451                obd->obd_name, i, group, filter->fo_last_objids[group], rc);
/*
 * Recreate a missing OST object (oa->o_id in group oa->o_gr).
 *
 * Visible behavior: ids beyond the last allocated objid are only accepted
 * during recovery, and only within OST_MAX_PRECREATE of the last id.  The
 * obdo is flagged with OBD_FL_RECREATE_OBJS so filter_precreate() knows to
 * recreate rather than allocate fresh ids, the creation runs under
 * fo_create_lock, and the LDLM resource LVB is refreshed afterwards so
 * clients pick up the recreated object's metadata.  The caller's o_valid
 * and o_flags are restored before returning.
 *
 * NOTE(review): several lines of this function are not visible in this
 * chunk (error returns, closing braces); comments describe only what is
 * shown here.
 */
3456 int filter_recreate(struct obd_device *obd, struct obdo *oa)
3458         struct ldlm_res_id res_id = { .name = { oa->o_id } };
3459         struct ldlm_valblock_ops *ns_lvbo;
3460         struct ldlm_resource *res;
3461         obd_valid old_valid = oa->o_valid;
3462         obd_flag old_flags = oa->o_flags;
             /* Requested id is past what we have allocated: only legal while
              * recovering, and at most one precreate window ahead. */
3466         if (oa->o_id > filter_last_id(&obd->u.filter, oa->o_gr)) {
3467                 if (!obd->obd_recovering ||
3468                     oa->o_id > filter_last_id(&obd->u.filter, oa->o_gr) +
3469                     OST_MAX_PRECREATE) {
3470                         CERROR("recreate objid "LPU64" > last id "LPU64"\n",
3471                                oa->o_id, filter_last_id(&obd->u.filter,
             /* Number of objects to (re)create to reach oa->o_id. */
3475                 diff = oa->o_id - filter_last_id(&obd->u.filter, oa->o_gr);
             /* Mark the obdo so filter_precreate() treats this as a recreate,
              * preserving any flags the caller already set. */
3477         if ((oa->o_valid & OBD_MD_FLFLAGS) == 0) {
3478                 oa->o_valid |= OBD_MD_FLFLAGS;
3479                 oa->o_flags = OBD_FL_RECREATE_OBJS;
3481                 oa->o_flags |= OBD_FL_RECREATE_OBJS;
             /* Serialize with normal object precreation. */
3485         down(&obd->u.filter.fo_create_lock);
3486         rc = filter_precreate(obd, oa, oa->o_gr, &diff);
3487         up(&obd->u.filter.fo_create_lock);
3489         res = ldlm_resource_get(obd->obd_namespace, NULL,
3490                                 res_id, LDLM_EXTENT, 0);
3492                 /* Update lvb->lvb_blocks for the recreated object */
3493                 ns_lvbo = res->lr_namespace->ns_lvbo;
3494                 if (ns_lvbo && ns_lvbo->lvbo_update) {
3495                         rc = ns_lvbo->lvbo_update(res, NULL, 0, 1);
3499                 ldlm_resource_putref(res);
3503                 CWARN("%s: recreated missing object "LPU64"/"LPU64"\n",
3504                       obd->obd_name, oa->o_id, oa->o_gr);
             /* Restore the caller-visible fields we modified above. */
3506         oa->o_valid = old_valid;
3507         oa->o_flags = old_flags;
/*
 * OBD o_create method for the OST filter.
 *
 * Two paths are visible: if oa carries OBD_FL_RECREATE_OBJS, all
 * conflicting extent locks on the object are cancelled via a local LCK_PW
 * enqueue (so clients refresh the object's metadata) and filter_recreate()
 * is called; otherwise the request is ordinary object precreation via
 * filter_handle_precreate().  On success with a caller-supplied ea, the
 * lsm's object id is filled in from oa->o_id.
 *
 * NOTE(review): lines are elided in this chunk (RETURNs, some braces);
 * comments describe only the visible code.
 */
3511 static int filter_create(struct obd_export *exp, struct obdo *oa,
3512                          struct lov_stripe_md **ea, struct obd_trans_info *oti)
3514         struct obd_device *obd = exp->exp_obd;
3515         struct lvfs_run_ctxt saved;
3516         struct lov_stripe_md *lsm = NULL;
3517         struct ldlm_res_id res_id = { .name = { oa->o_id } };
3518         ldlm_policy_data_t policy = { .l_extent = { 0, OBD_OBJECT_EOF } };
3519         struct lustre_handle lockh;
3524         CDEBUG(D_INODE, "%s: filter_create(od->o_gr="LPU64",od->o_id="
3525                LPU64")\n", obd->obd_name, oa->o_gr, oa->o_id);
             /* Group must always be supplied by the caller. */
3527         if (!(oa->o_valid & OBD_MD_FLGROUP))
3530         CDEBUG(D_INFO, "object "LPU64"/"LPU64"\n", oa->o_id, oa->o_gr);
             /* Allocate a stripe MD for the caller; surrounding condition is
              * not visible here — presumably only when *ea == NULL. */
3534                 rc = obd_alloc_memmd(exp, &lsm);
3540         push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
3542         if ((oa->o_valid & OBD_MD_FLFLAGS) &&
3543             (oa->o_flags & OBD_FL_RECREATE_OBJS)) {
3544                 /* Cancel all conflicting extent locks on recreating object,
3545                  * thus object's metadata will be updated on the clients */
3546                 rc = ldlm_cli_enqueue_local(obd->obd_namespace, &res_id,
3547                                             LDLM_EXTENT, &policy, LCK_PW,
3548                                             &flags, ldlm_blocking_ast,
3549                                             ldlm_completion_ast,
3550                                             ldlm_glimpse_ast, NULL, 0,
3552                 rc = filter_recreate(obd, oa);
3553                 ldlm_lock_decref(&lockh, LCK_PW);
3555                 rc = filter_handle_precreate(exp, oa, oa->o_gr, oti);
3558         pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
             /* On failure free any lsm we allocated ourselves; on success
              * record the object id in the caller's ea. */
3559         if (rc && ea != NULL && *ea != lsm) {
3560                 obd_free_memmd(exp, &lsm);
3561         } else if (rc == 0 && ea != NULL) {
3562                 /* XXX LOV STACKING: the lsm that is passed to us from
3563                  * LOV does not have valid lsm_oinfo data structs, so
3564                  * don't go touching that. This needs to be fixed in a
3566                 lsm->lsm_object_id = oa->o_id;
/*
 * Destroy (unlink) a single OST object.
 *
 * Visible sequence: look up the object's dentry; if the object is already
 * gone, cancel the MDS unlink llog cookie immediately and return -ENOENT.
 * Otherwise take the destroy extent lock, copy the llog cookie, truncate
 * the object to zero in its own transaction (under i_alloc_sem + i_mutex,
 * see bug 4180 / bug 20321 comments below), then lock the parent, start
 * the unlink transaction and call filter_destroy_internal().  Teardown is
 * driven by cleanup_phase; on the commit path a journal callback cancels
 * the llog cookie, and quota is released for the object's uid/gid.
 *
 * NOTE(review): many lines are elided in this chunk (phase labels, error
 * branches, closing braces); comments describe only the visible code.
 */
3573 int filter_destroy(struct obd_export *exp, struct obdo *oa,
3574                    struct lov_stripe_md *md, struct obd_trans_info *oti,
3575                    struct obd_export *md_exp)
3577         unsigned int qcids[MAXQUOTAS] = {0, 0};
3578         struct obd_device *obd;
3579         struct filter_obd *filter;
3580         struct dentry *dchild = NULL, *dparent = NULL;
3581         struct lustre_handle lockh = { 0 };
3582         struct lvfs_run_ctxt saved;
3583         void *handle = NULL;
3584         struct llog_cookie *fcc = NULL;
3585         int rc, rc2, cleanup_phase = 0, sync = 0;
             /* Group must always be supplied by the caller. */
3589         if (!(oa->o_valid & OBD_MD_FLGROUP))
3593         filter = &obd->u.filter;
3595         push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
3598         CDEBUG(D_INODE, "%s: filter_destroy(od->o_gr="LPU64",od->o_id="
3599                LPU64")\n", obd->obd_name, oa->o_gr, oa->o_id);
3601         dchild = filter_fid2dentry(obd, NULL, oa->o_gr, oa->o_id);
3603                 GOTO(cleanup, rc = PTR_ERR(dchild));
             /* Object already gone: nothing to unlink, but the MDS still
              * expects its llog cookie to be cancelled. */
3606         if (dchild->d_inode == NULL) {
3607                 CDEBUG(D_INODE, "destroying non-existent object "LPU64"\n",
3609                 /* If object already gone, cancel cookie right now */
3610                 if (oa->o_valid & OBD_MD_FLCOOKIE) {
3611                         struct llog_ctxt *ctxt;
3612                         fcc = &oa->o_lcookie;
3613                         ctxt = llog_get_context(obd, fcc->lgc_subsys + 1);
3614                         llog_cancel(ctxt, NULL, 1, fcc, 0);
3615                         llog_ctxt_put(ctxt);
3616                         fcc = NULL; /* we didn't allocate fcc, don't free it */
3618                 GOTO(cleanup, rc = -ENOENT);
3621         rc = filter_prepare_destroy(obd, oa->o_id, &lockh);
3625         /* Our MDC connection is established by the MDS to us */
3626         if (oa->o_valid & OBD_MD_FLCOOKIE) {
3627                 OBD_ALLOC(fcc, sizeof(*fcc));
3629                         *fcc = oa->o_lcookie;
3631         DQUOT_INIT(dchild->d_inode);
3633         /* we're gonna truncate it first in order to avoid possible deadlock:
3635          *      open trasaction      open transaction
3636          *      down(i_zombie)       down(i_zombie)
3637          *                           restart transaction
3638          * (see BUG 4180) -bzzz
3640          * take i_alloc_sem too to prevent other threads from writing to the
3641          * file while we are truncating it. This can cause lock ordering issue
3642          * between page lock, i_mutex & starting new journal handle.
3643          * (see bug 20321) -johann
3645         down_write(&dchild->d_inode->i_alloc_sem);
3646         LOCK_INODE_MUTEX(dchild->d_inode);
3648         /* VBR: version recovery check */
3649         rc = filter_version_get_check(exp, oti, dchild->d_inode);
3651                 UNLOCK_INODE_MUTEX(dchild->d_inode);
3652                 up_write(&dchild->d_inode->i_alloc_sem);
             /* Truncate-to-zero runs in its own (SETATTR) transaction,
              * committed before the unlink transaction below. */
3656         handle = fsfilt_start_log(obd, dchild->d_inode, FSFILT_OP_SETATTR,
3658         if (IS_ERR(handle)) {
3659                 UNLOCK_INODE_MUTEX(dchild->d_inode);
3660                 up_write(&dchild->d_inode->i_alloc_sem);
3661                 GOTO(cleanup, rc = PTR_ERR(handle));
3664         iattr.ia_valid = ATTR_SIZE;
3666         rc = fsfilt_setattr(obd, dchild, handle, &iattr, 1);
3667         rc2 = fsfilt_commit(obd, dchild->d_inode, handle, 0);
3668         UNLOCK_INODE_MUTEX(dchild->d_inode);
3669         up_write(&dchild->d_inode->i_alloc_sem);
3673                 GOTO(cleanup, rc = rc2);
3675         /* We don't actually need to lock the parent until we are unlinking
3676          * here, and not while truncating above. That avoids holding the
3677          * parent lock for a long time during truncate, which can block other
3678          * threads from doing anything to objects in that directory. bug 7171 */
3679         dparent = filter_parent_lock(obd, oa->o_gr, oa->o_id);
3680         if (IS_ERR(dparent))
3681                 GOTO(cleanup, rc = PTR_ERR(dparent));
3682         cleanup_phase = 3; /* filter_parent_unlock */
3684         LOCK_INODE_MUTEX(dchild->d_inode);
3685         handle = fsfilt_start_log(obd, dparent->d_inode,FSFILT_OP_UNLINK,oti,1);
3686         if (IS_ERR(handle)) {
3687                 UNLOCK_INODE_MUTEX(dchild->d_inode);
3688                 GOTO(cleanup, rc = PTR_ERR(handle));
3690         cleanup_phase = 4; /* fsfilt_commit */
3692         /* Quota release need uid/gid of inode */
3693         obdo_from_inode(oa, dchild->d_inode, OBD_MD_FLUID|OBD_MD_FLGID);
3695         filter_fmd_drop(exp, oa->o_id, oa->o_gr);
3697         /* this drops dchild->d_inode->i_mutex unconditionally */
3698         rc = filter_destroy_internal(obd, oa->o_id, oa->o_gr, dparent, dchild);
             /* Phase-driven cleanup; higher phases fall through to lower
              * ones (intermediate case labels are elided in this chunk). */
3702         switch(cleanup_phase) {
             /* Register a commit callback to cancel the llog cookie once
              * the unlink is durable; fall back to direct cancellation if
              * registration fails. */
3705                 sync = fsfilt_add_journal_cb(obd, 0, oti ?
3706                                              oti->oti_handle : handle,
3707                                              filter_cancel_cookies_cb,
3709                 /* If add_journal_cb failed, then filter_finish_transno
3710                  * will commit the handle and we will do a sync
3711                  * on commit. then we call callback directly to free
3714                 rc = filter_finish_transno(exp, NULL, oti, rc, sync);
3716                         filter_cancel_cookies_cb(obd, 0, fcc, rc);
3719                 rc2 = fsfilt_commit(obd, dparent->d_inode, handle, 0);
3721                         CERROR("error on commit, err = %d\n", rc2);
3728                 filter_parent_unlock(dparent);
3730                 filter_fini_destroy(obd, &lockh);
3734                         OBD_FREE(fcc, sizeof(*fcc));
3736                 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
3739                 CERROR("invalid cleanup_phase %d\n", cleanup_phase);
3743         /* trigger quota release */
3744         qcids[USRQUOTA] = oa->o_uid;
3745         qcids[GRPQUOTA] = oa->o_gid;
3746         rc2 = lquota_adjust(filter_quota_interface_ref, obd, qcids, NULL, rc,
3749                 CERROR("filter adjust qunit! (rc:%d)\n", rc2);
3753 /* NB start and end are used for punch, but not truncate */
/*
 * OBD o_punch method, restricted to full truncate: rejects any request
 * whose extent end is not OBD_OBJECT_EOF, then implements truncate by
 * copying the extent start into o_size and delegating to filter_setattr().
 */
3754 static int filter_truncate(struct obd_export *exp, struct obd_info *oinfo,
3755                            struct obd_trans_info *oti,
3756                            struct ptlrpc_request_set *rqset)
             /* Only truncate (end == EOF) is supported; real punch is not. */
3761         if (oinfo->oi_policy.l_extent.end != OBD_OBJECT_EOF) {
3762                 CERROR("PUNCH not supported, only truncate: end = "LPX64"\n",
3763                        oinfo->oi_policy.l_extent.end);
3767         CDEBUG(D_INODE, "calling truncate for object "LPU64", valid = "LPX64
3768                ", o_size = "LPD64"\n", oinfo->oi_oa->o_id,
3769                oinfo->oi_oa->o_valid, oinfo->oi_policy.l_extent.start);
             /* Truncate == setattr of size to the extent start. */
3771         oinfo->oi_oa->o_size = oinfo->oi_policy.l_extent.start;
3772         rc = filter_setattr(exp, oinfo, oti);
/*
 * OBD o_sync method.
 *
 * With no valid object id, syncs the whole backing filesystem and flushes
 * pending llog cancel messages to the MDS.  Otherwise syncs the single
 * object: filemap_fdatawrite + a per-inode fsync (method borrowed from
 * fo_rcvd_filp's f_op, the file argument itself is unused) +
 * filemap_fdatawait, all under the inode mutex, then refreshes the obdo
 * from the inode.
 */
3777 static int filter_sync(struct obd_export *exp, struct obd_info *oinfo,
3778                        obd_off start, obd_off end,
3779                        struct ptlrpc_request_set *set)
3781         struct lvfs_run_ctxt saved;
3782         struct filter_obd *filter;
3783         struct dentry *dentry;
3784         struct llog_ctxt *ctxt;
3788         filter = &exp->exp_obd->u.filter;
3790         /* An objid of zero is taken to mean "sync whole filesystem" */
3791         if (!oinfo->oi_oa || !(oinfo->oi_oa->o_valid & OBD_MD_FLID)) {
3792                 rc = fsfilt_sync(exp->exp_obd, filter->fo_obt.obt_sb);
3794                 /* Flush any remaining cancel messages out to the target */
3795                 ctxt = llog_get_context(exp->exp_obd, LLOG_MDS_OST_REPL_CTXT);
3797                         llog_sync(ctxt, exp);
3798                         llog_ctxt_put(ctxt);
3800                         CERROR("No LLOG_MDS_OST_REPL_CTXT found in obd %p\n",
3806         dentry = filter_oa2dentry(exp->exp_obd, oinfo->oi_oa);
3808                 RETURN(PTR_ERR(dentry));
3810         push_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
3812         LOCK_INODE_MUTEX(dentry->d_inode);
3814         rc = filemap_fdatawrite(dentry->d_inode->i_mapping);
3816                 /* just any file to grab fsync method - "file" arg unused */
3817                 struct file *file = filter->fo_rcvd_filp;
3819                 if (file->f_op && file->f_op->fsync)
3820                         rc = file->f_op->fsync(NULL, dentry, 1);
3822                 rc2 = filemap_fdatawait(dentry->d_inode->i_mapping);
3826         UNLOCK_INODE_MUTEX(dentry->d_inode);
             /* Return fresh attributes for the synced object. */
3828         oinfo->oi_oa->o_valid = OBD_MD_FLID;
3829         obdo_from_inode(oinfo->oi_oa, dentry->d_inode, FILTER_VALID_FLAGS);
3831         pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
/*
 * OBD o_get_info method: key/value query interface.
 *
 * Visible keys: KEY_BLOCKSIZE and KEY_BLOCKSIZE_BITS (from the backing
 * superblock), KEY_LAST_ID (last allocated objid, group 0 only — see
 * FIXME), and KEY_FIEMAP (extent map of an object via fsfilt_iocontrol;
 * with a NULL val buffer it only reports the required size in *vallen).
 * Unknown keys log and fall through to an error return (elided here).
 */
3837 static int filter_get_info(struct obd_export *exp, __u32 keylen,
3838                            void *key, __u32 *vallen, void *val,
3839                            struct lov_stripe_md *lsm)
3841         struct obd_device *obd;
3844         obd = class_exp2obd(exp);
3846                 CDEBUG(D_IOCTL, "invalid client export %p\n", exp);
3850         if (KEY_IS(KEY_BLOCKSIZE)) {
3851                 __u32 *blocksize = val;
                     /* Caller's buffer must be large enough. */
3853                         if (*vallen < sizeof(*blocksize))
3855                         *blocksize = obd->u.obt.obt_sb->s_blocksize;
3857                 *vallen = sizeof(*blocksize);
3861         if (KEY_IS(KEY_BLOCKSIZE_BITS)) {
3862                 __u32 *blocksize_bits = val;
3863                 if (blocksize_bits) {
3864                         if (*vallen < sizeof(*blocksize_bits))
3866                         *blocksize_bits = obd->u.obt.obt_sb->s_blocksize_bits;
3868                 *vallen = sizeof(*blocksize_bits);
3872         if (KEY_IS(KEY_LAST_ID)) {
3873                 obd_id *last_id = val;
3874                 /* FIXME: object groups */
3876                         if (*vallen < sizeof(*last_id))
3878                         *last_id = filter_last_id(&obd->u.filter, 0);
3880                 *vallen = sizeof(*last_id);
3884         if (KEY_IS(KEY_FIEMAP)) {
3885                 struct ll_fiemap_info_key *fm_key = key;
3886                 struct dentry *dentry;
3887                 struct ll_user_fiemap *fiemap = val;
3888                 struct lvfs_run_ctxt saved;
                     /* Size-probe only: report required buffer size. */
3891                 if (fiemap == NULL) {
3892                         *vallen = fiemap_count_to_size(
3893                                                 fm_key->fiemap.fm_extent_count);
3897                 dentry = __filter_oa2dentry(exp->exp_obd, &fm_key->oa,
3900                         RETURN(PTR_ERR(dentry));
3902                 memcpy(fiemap, &fm_key->fiemap, sizeof(*fiemap));
3903                 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
3905                 rc = fsfilt_iocontrol(obd, dentry, FSFILT_IOC_FIEMAP,
3907                 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
3913         CDEBUG(D_IOCTL, "invalid key\n");
/*
 * OBD o_set_info_async method: key/value update interface.
 *
 * Visible keys: KEY_GRANT_SHRINK (client returning grant space, handled
 * under obd_osfs_lock), KEY_CAPA_KEY (body elided here), and the default
 * KEY_MDS_CONN path, which records the MDS connection cookie, wires the
 * reverse import into the MDS->OST replication llog, and initializes
 * quota for the export.
 */
3917 static int filter_set_info_async(struct obd_export *exp, __u32 keylen,
3918                                  void *key, __u32 vallen, void *val,
3919                                  struct ptlrpc_request_set *set)
3921         struct obd_device *obd;
3922         struct llog_ctxt *ctxt;
3928                 CDEBUG(D_IOCTL, "invalid export %p\n", exp);
3932         if (KEY_IS(KEY_GRANT_SHRINK)) {
3933                 struct ost_body *body = (struct ost_body *)val;
3934                 /* handle shrink grant */
3935                 spin_lock(&exp->exp_obd->obd_osfs_lock);
3936                 filter_grant_incoming(exp, &body->oa);
3937                 spin_unlock(&exp->exp_obd->obd_osfs_lock);
3941         if (KEY_IS(KEY_CAPA_KEY)) {
             /* Everything below handles KEY_MDS_CONN only. */
3945         if (!KEY_IS(KEY_MDS_CONN))
3948         LCONSOLE_WARN("%s: received MDS connection from %s\n", obd->obd_name,
3949                       obd_export_nid2str(exp));
3950         obd->u.filter.fo_mdc_conn.cookie = exp->exp_handle.h_cookie;
3952         /* setup llog imports */
3953         ctxt = llog_get_context(obd, LLOG_MDS_OST_REPL_CTXT);
3954         rc = llog_receptor_accept(ctxt, exp->exp_imp_reverse);
3955         llog_ctxt_put(ctxt);
3957         lquota_setinfo(filter_quota_interface_ref, exp, obd);
/*
 * OBD o_iocontrol method: ioctl dispatcher for the filter device.
 *
 * Visible commands: abort recovery, sync the backing filesystem, set the
 * device read-only (after forcing a journal commit and sync), list llog
 * catalogs, and a group of llog commands that are currently stubbed out
 * with -EOPNOTSUPP (the code after the RETURN is dead — note ctxt is
 * NULL there, so re-enabling it as-is would dereference NULL).
 */
3962 int filter_iocontrol(unsigned int cmd, struct obd_export *exp,
3963                      int len, void *karg, void *uarg)
3965         struct obd_device *obd = exp->exp_obd;
3966         struct obd_ioctl_data *data = karg;
3970         case OBD_IOC_ABORT_RECOVERY: {
3971                 CERROR("aborting recovery for device %s\n", obd->obd_name);
3972                 target_abort_recovery(obd);
3976         case OBD_IOC_SYNC: {
3977                 CDEBUG(D_RPCTRACE, "syncing ost %s\n", obd->obd_name);
3978                 rc = fsfilt_sync(obd, obd->u.obt.obt_sb);
3982         case OBD_IOC_SET_READONLY: {
3984                 struct super_block *sb = obd->u.obt.obt_sb;
3985                 struct inode *inode = sb->s_root->d_inode;
3986                 LCONSOLE_WARN("*** setting obd %s device '%s' read-only ***\n",
3987                               obd->obd_name, sb->s_id);
                     /* Empty transaction forces a journal commit before
                      * flipping read-only. */
3989                 handle = fsfilt_start(obd, inode, FSFILT_OP_MKNOD, NULL);
3990                 if (!IS_ERR(handle))
3991                         rc = fsfilt_commit(obd, inode, handle, 1);
3993                 CDEBUG(D_HA, "syncing ost %s\n", obd->obd_name);
3994                 rc = fsfilt_sync(obd, obd->u.obt.obt_sb);
3996                 lvfs_set_rdonly(obd, obd->u.obt.obt_sb);
4000         case OBD_IOC_CATLOGLIST: {
4001                 rc = llog_catalog_list(obd, 1, data);
4005         case OBD_IOC_LLOG_CANCEL:
4006         case OBD_IOC_LLOG_REMOVE:
4007         case OBD_IOC_LLOG_INFO:
4008         case OBD_IOC_LLOG_PRINT: {
4009                 /* FIXME to be finished */
4010                 RETURN(-EOPNOTSUPP);
                     /* Dead code below the RETURN; ctxt is never assigned. */
4012                 struct llog_ctxt *ctxt = NULL;
4014                 push_ctxt(&saved, &ctxt->loc_exp->exp_obd->obd_lvfs_ctxt, NULL);
4015                 rc = llog_ioctl(ctxt, cmd, data);
4016                 pop_ctxt(&saved, &ctxt->loc_exp->exp_obd->obd_lvfs_ctxt, NULL);
/*
 * OBD o_health_check method: returns 0 when healthy, 1 when unhealthy.
 * A read-only backing filesystem counts as unhealthy; when compiled with
 * USE_HEALTH_CHECK_WRITE it additionally verifies the device accepts I/O
 * via the dedicated health-check file.
 */
4027 static int filter_health_check(struct obd_device *obd)
4029 #ifdef USE_HEALTH_CHECK_WRITE
4030         struct filter_obd *filter = &obd->u.filter;
4035          * health_check to return 0 on healthy
4036          * and 1 on unhealthy.
4038         if (obd->u.obt.obt_sb->s_flags & MS_RDONLY)
4041 #ifdef USE_HEALTH_CHECK_WRITE
4042         LASSERT(filter->fo_obt.obt_health_check_filp != NULL);
4043         rc |= !!lvfs_check_io_health(obd, filter->fo_obt.obt_health_check_filp);
/*
 * lvfs callback adapter: maps an (id, group) pair to a dentry via
 * filter_fid2dentry().  The generation argument is unused here; 'data'
 * is the obd_device (per the l_fid2dentry callback convention).
 */
4049 static struct dentry *filter_lvfs_fid2dentry(__u64 id, __u32 gen, __u64 gr,
4052         return filter_fid2dentry(data, NULL, gr, id);
/*
 * OBD o_process_config method: applies a lustre_cfg record by routing it
 * through the OST proc-parameter table (PARAM_OST namespace).
 */
4055 static int filter_process_config(struct obd_device *obd,obd_count len,void *buf)
4057         struct lustre_cfg *lcfg = buf;
4058         struct lprocfs_static_vars lvars;
4061         lprocfs_filter_init_vars(&lvars);
4063         rc = class_process_proc_param(PARAM_OST, lvars.obd_vars, lcfg, obd);
/* lvfs callback table: only fid-to-dentry resolution is provided. */
4068 static struct lvfs_callback_ops filter_lvfs_ops = {
4069         l_fid2dentry:     filter_lvfs_fid2dentry,
/*
 * OBD o_notify method.  Visible handling: on OBD_NOTIFY_CONFIG (i.e. once
 * configuration, including stale_export_age, has been processed) expired
 * client exports are disconnected.  Other events are ignored here.
 */
4072 static int filter_notify(struct obd_device *obd, struct obd_device *watched,
4073                          enum obd_notify_event ev, void *data)
4077         CDEBUG(D_CONFIG, "notify %s ev=%d\n", watched->obd_name, ev);
4080         case OBD_NOTIFY_CONFIG:
4081                 /* call this only when config is processed and stale_export_age
4082                  * value is configured */
4083                 class_disconnect_expired_exports(obd);
/*
 * Method table registered for the obdfilter (OST backend) device type.
 * Note o_punch maps to filter_truncate, which only supports full truncate.
 */
4089 static struct obd_ops filter_obd_ops = {
4090         .o_owner          = THIS_MODULE,
4091         .o_get_info       = filter_get_info,
4092         .o_set_info_async = filter_set_info_async,
4093         .o_setup          = filter_setup,
4094         .o_precleanup     = filter_precleanup,
4095         .o_cleanup        = filter_cleanup,
4096         .o_connect        = filter_connect,
4097         .o_reconnect      = filter_reconnect,
4098         .o_disconnect     = filter_disconnect,
4099         .o_ping           = filter_ping,
4100         .o_init_export    = filter_init_export,
4101         .o_destroy_export = filter_destroy_export,
4102         .o_statfs         = filter_statfs,
4103         .o_getattr        = filter_getattr,
4104         .o_unpackmd       = filter_unpackmd,
4105         .o_create         = filter_create,
4106         .o_setattr        = filter_setattr,
4107         .o_destroy        = filter_destroy,
4108         .o_brw            = filter_brw,
4109         .o_punch          = filter_truncate,
4110         .o_sync           = filter_sync,
4111         .o_preprw         = filter_preprw,
4112         .o_commitrw       = filter_commitrw,
4113         .o_llog_init      = filter_llog_init,
4114         .o_llog_finish    = filter_llog_finish,
4115         .o_iocontrol      = filter_iocontrol,
4116         .o_health_check   = filter_health_check,
4117         .o_process_config = filter_process_config,
4118         .o_postrecov      = filter_postrecov,
4119         .o_notify         = filter_notify,
/* Reference to the lquota module's interface, resolved at module init via
 * PORTAL_SYMBOL_GET; NULL when the quota module is unavailable. */
4122 quota_interface_t *filter_quota_interface_ref;
4123 extern quota_interface_t filter_quota_interface;
/*
 * Module init: allocates the created-objects scratchpad, creates the
 * filter_mod_data slab cache, binds the optional lquota interface into
 * the method table, and registers the obdfilter device type.  Error
 * unwinding (visible at the bottom) destroys the cache, drops the quota
 * symbol, and frees the scratchpad; some labels are elided in this chunk.
 */
4125 static int __init obdfilter_init(void)
4127         struct lprocfs_static_vars lvars;
4130         printk(KERN_INFO "Lustre: Filtering OBD driver; http://www.lustre.org/\n");
4132         lprocfs_filter_init_vars(&lvars);
             /* Quota support lives in a separate module; load it first. */
4134         request_module("lquota");
4135         OBD_ALLOC(obdfilter_created_scratchpad,
4136                   OBDFILTER_CREATED_SCRATCHPAD_ENTRIES *
4137                   sizeof(*obdfilter_created_scratchpad));
4138         if (obdfilter_created_scratchpad == NULL)
4141         ll_fmd_cachep = cfs_mem_cache_create("ll_fmd_cache",
4142                                              sizeof(struct filter_mod_data),
4145                 GOTO(out, rc = -ENOMEM);
             /* May legitimately fail (NULL) if lquota is not present. */
4147         filter_quota_interface_ref = PORTAL_SYMBOL_GET(filter_quota_interface);
4148         init_obd_quota_ops(filter_quota_interface_ref, &filter_obd_ops);
4150         rc = class_register_type(&filter_obd_ops, lvars.module_vars,
             /* --- error unwinding below --- */
4155         err = cfs_mem_cache_destroy(ll_fmd_cachep);
4156         LASSERTF(err == 0, "Cannot destroy ll_fmd_cachep: rc %d\n",err);
4157         ll_fmd_cachep = NULL;
4159         if (filter_quota_interface_ref)
4160                 PORTAL_SYMBOL_PUT(filter_quota_interface);
4162         OBD_FREE(obdfilter_created_scratchpad,
4163                  OBDFILTER_CREATED_SCRATCHPAD_ENTRIES *
4164                  sizeof(*obdfilter_created_scratchpad));
/*
 * Module exit: releases everything obdfilter_init() acquired, in reverse —
 * quota symbol reference, fmd slab cache, device-type registration, and
 * the created-objects scratchpad.
 */
4170 static void __exit obdfilter_exit(void)
4172         if (filter_quota_interface_ref)
4173                 PORTAL_SYMBOL_PUT(filter_quota_interface);
4175         if (ll_fmd_cachep) {
4176                 int rc = cfs_mem_cache_destroy(ll_fmd_cachep);
4177                 LASSERTF(rc == 0, "Cannot destroy ll_fmd_cachep: rc %d\n", rc);
4178                 ll_fmd_cachep = NULL;
4181         class_unregister_type(LUSTRE_OST_NAME);
4182         OBD_FREE(obdfilter_created_scratchpad,
4183                  OBDFILTER_CREATED_SCRATCHPAD_ENTRIES *
4184                  sizeof(*obdfilter_created_scratchpad));
4187 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
4188 MODULE_DESCRIPTION("Lustre Filtering OBD driver");
4189 MODULE_LICENSE("GPL");
4191 module_init(obdfilter_init);
4192 module_exit(obdfilter_exit);