4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2012, Whamcloud, Inc.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/obdfilter/filter.c
38 * Author: Peter Braam <braam@clusterfs.com>
39 * Author: Andreas Dilger <adilger@clusterfs.com>
43 * Invariant: Get O/R i_mutex for lookup, if needed, before any journal ops
44 * (which need to get journal_lock, may block if journal full).
46 * Invariant: Call filter_start_transno() before any journal ops to avoid the
47 * same deadlock problem. We can (and want) to get rid of the
48 * transno sem in favour of the dir/inode i_mutex to avoid single
49 * threaded operation on the OST.
52 #define DEBUG_SUBSYSTEM S_FILTER
54 #include <linux/module.h>
56 #include <linux/dcache.h>
57 #include <linux/init.h>
58 #include <linux/version.h>
59 #include <linux/sched.h>
60 #include <linux/mount.h>
61 #include <linux/buffer_head.h>
63 #include <obd_cksum.h>
64 #include <obd_class.h>
66 #include <lustre_dlm.h>
67 #include <lustre_fsfilt.h>
68 #include <lprocfs_status.h>
69 #include <lustre_log.h>
70 #include <libcfs/list.h>
71 #include <lustre_disk.h>
72 #include <linux/slab.h>
73 #include <lustre_param.h>
74 #include <lustre/ll_fiemap.h>
76 #include "filter_internal.h"
78 static struct lvfs_callback_ops filter_lvfs_ops;
79 cfs_mem_cache_t *ll_fmd_cachep;
/* Journal commit callback: invoked once the transaction carrying @transno
 * has committed to disk.  Forwards the notification to the export via
 * obd_transno_commit_cb() and drops the export reference that was taken
 * (class_export_cb_get) when the callback was registered.
 * NOTE(review): this listing is missing lines (braces, EXIT macros);
 * consult the original file before editing. */
81 static void filter_commit_cb(struct obd_device *obd, __u64 transno,
82 void *cb_data, int error)
84 struct obd_export *exp = cb_data;
85 LASSERT(exp->exp_obd == obd);
86 obd_transno_commit_cb(obd, transno, exp, error);
87 class_export_cb_put(exp);
/* Version Based Recovery (VBR) check: compare the inode's current version
 * against the version the client observed before the operation
 * (oti->oti_pre_version).  On mismatch, mark the export as VBR-failed so
 * the client will be evicted during recovery; on success, record the
 * current version as the new pre-version.
 * NOTE(review): declarations, RETURN paths and some braces are missing
 * from this listing. */
90 int filter_version_get_check(struct obd_export *exp,
91 struct obd_trans_info *oti, struct inode *inode)
/* Nothing to check without an inode or transaction info. */
95 if (inode == NULL || oti == NULL)
98 curr_version = fsfilt_get_version(exp->exp_obd, inode);
/* Backing filesystem may not support inode versions at all. */
99 if ((__s64)curr_version == -EOPNOTSUPP)
101 /* VBR: version is checked always because costs nothing */
102 if (oti->oti_pre_version != 0 &&
103 oti->oti_pre_version != curr_version) {
104 CDEBUG(D_INODE, "Version mismatch "LPX64" != "LPX64"\n",
105 oti->oti_pre_version, curr_version);
/* exp_vbr_failed is protected by exp_lock; recovery logic reads it. */
106 cfs_spin_lock(&exp->exp_lock);
107 exp->exp_vbr_failed = 1;
108 cfs_spin_unlock(&exp->exp_lock);
111 oti->oti_pre_version = curr_version;
115 /* Assumes caller has already pushed us into the kernel context. */
/* Assign (or validate) a transaction number for the operation described by
 * @oti, record it in the per-client slot of the last_rcvd file, stamp the
 * inode version for VBR, and register a journal commit callback so the
 * client learns when the transno is durable.
 * NOTE(review): many lines (ENTRY/RETURN, declarations, some braces and
 * error paths) are missing from this listing — do not edit from here. */
116 int filter_finish_transno(struct obd_export *exp, struct inode *inode,
117 struct obd_trans_info *oti, int rc, int force_sync)
119 struct obd_device_target *obt = &exp->exp_obd->u.obt;
120 struct tg_export_data *ted = &exp->exp_target_data;
121 struct lr_server_data *lsd = class_server_data(exp->exp_obd);
122 struct lsd_client_data *lcd;
125 int err, log_pri = D_RPCTRACE;
127 /* Propagate error code. */
/* Non-replayable targets keep no per-client state at all. */
131 if (!exp->exp_obd->obd_replayable || oti == NULL)
134 cfs_mutex_lock(&ted->ted_lcd_lock);
136 /* if the export has already been disconnected, we have no last_rcvd slot,
137 * update server data with latest transno then */
139 cfs_mutex_unlock(&ted->ted_lcd_lock);
140 CWARN("commit transaction for disconnected client %s: rc %d\n",
141 exp->exp_client_uuid.uuid, rc);
142 err = filter_update_server_data(exp->exp_obd);
146 /* we don't allocate new transnos for replayed requests */
/* lut_translock serializes lsd_last_transno updates across threads. */
147 cfs_spin_lock(&obt->obt_lut->lut_translock);
148 if (oti->oti_transno == 0) {
/* New request: hand out the next transno after the server's last one. */
149 last_rcvd = le64_to_cpu(lsd->lsd_last_transno) + 1;
150 lsd->lsd_last_transno = cpu_to_le64(last_rcvd);
151 LASSERT(last_rcvd >= le64_to_cpu(lcd->lcd_last_transno));
/* Replay: reuse the client-supplied transno, bumping server max if needed. */
153 last_rcvd = oti->oti_transno;
154 if (last_rcvd > le64_to_cpu(lsd->lsd_last_transno))
155 lsd->lsd_last_transno = cpu_to_le64(last_rcvd);
/* A replayed transno smaller than the on-disk one means the client's
 * view has diverged; fail VBR for this export rather than corrupt state. */
156 if (unlikely(last_rcvd < le64_to_cpu(lcd->lcd_last_transno))) {
157 CERROR("Trying to overwrite bigger transno, on-disk: "
158 LPU64", new: "LPU64"\n",
159 le64_to_cpu(lcd->lcd_last_transno), last_rcvd);
160 cfs_spin_lock(&exp->exp_lock);
161 exp->exp_vbr_failed = 1;
162 cfs_spin_unlock(&exp->exp_lock);
163 cfs_spin_unlock(&obt->obt_lut->lut_translock);
164 cfs_mutex_unlock(&ted->ted_lcd_lock);
168 oti->oti_transno = last_rcvd;
/* Record the committed state in the in-memory client slot (little-endian
 * on-disk format). */
170 lcd->lcd_last_transno = cpu_to_le64(last_rcvd);
171 lcd->lcd_pre_versions[0] = cpu_to_le64(oti->oti_pre_version);
172 lcd->lcd_last_xid = cpu_to_le64(oti->oti_xid);
173 cfs_spin_unlock(&obt->obt_lut->lut_translock);
/* Stamp the object's inode version for Version Based Recovery. */
176 fsfilt_set_version(exp->exp_obd, inode, last_rcvd);
178 off = ted->ted_lr_off;
180 CERROR("%s: client idx %d is %lld\n", exp->exp_obd->obd_name,
181 ted->ted_lr_idx, ted->ted_lr_off);
184 class_export_cb_get(exp); /* released when the cb is called */
/* If the journal callback cannot be registered, fall back to a
 * synchronous write of the record (force_sync picks up the failure). */
186 force_sync = fsfilt_add_journal_cb(exp->exp_obd,
192 err = fsfilt_write_record(exp->exp_obd, obt->obt_rcvd_filp,
193 lcd, sizeof(*lcd), &off,
194 force_sync | exp->exp_need_sync);
/* Synchronous path: the record is already durable, fire the cb now. */
196 filter_commit_cb(exp->exp_obd, last_rcvd, exp, err);
204 CDEBUG(log_pri, "wrote trans "LPU64" for client %s at #%d: err = %d\n",
205 last_rcvd, lcd->lcd_uuid, ted->ted_lr_idx, err);
206 cfs_mutex_unlock(&ted->ted_lcd_lock);
/* Drop a dentry reference with debug tracing.
 * NOTE(review): the actual dput() call is missing from this listing. */
210 void f_dput(struct dentry *dentry)
212 /* Can't go inside filter_ddelete because it can block */
213 CDEBUG(D_INODE, "putting %s: %p, count = %d\n",
214 dentry->d_name.name, dentry, atomic_read(&dentry->d_count) - 1);
215 LASSERT(atomic_read(&dentry->d_count) > 0);
/* Initialize the per-histogram spinlocks of a brw_stats structure.
 * NOTE(review): the loop variable declaration is missing from this listing. */
220 static void init_brw_stats(struct brw_stats *brw_stats)
223 for (i = 0; i < BRW_LAST; i++)
224 cfs_spin_lock_init(&brw_stats->hist[i].oh_lock);
/* Allocate and initialize a per-export lprocfs stats block covering all
 * obd_ops entry points plus the filter-specific read/write byte counters.
 * Returns 0 on success; error handling lines are missing from this listing. */
227 static int lprocfs_init_rw_stats(struct obd_device *obd,
228 struct lprocfs_stats **stats)
/* One counter per obd_ops method, plus the filter-private counters
 * (LPROC_FILTER_LAST - 1 beyond the ops table). */
232 num_stats = (sizeof(*obd->obd_type->typ_dt_ops) / sizeof(void *)) +
233 LPROC_FILTER_LAST - 1;
234 *stats = lprocfs_alloc_stats(num_stats, LPROCFS_STATS_FLAG_NOPERCPU);
238 lprocfs_init_ops_stats(LPROC_FILTER_LAST, *stats);
239 lprocfs_counter_init(*stats, LPROC_FILTER_READ_BYTES,
240 LPROCFS_CNTR_AVGMINMAX, "read_bytes", "bytes");
241 lprocfs_counter_init(*stats, LPROC_FILTER_WRITE_BYTES,
242 LPROCFS_CNTR_AVGMINMAX, "write_bytes", "bytes");
247 /* brw_stats are 2128, ops are 3916, ldlm are 204, so 6248 bytes per client,
248 plus the procfs overhead :( */
/* Create the per-NID procfs statistics for a newly connected export:
 * brw_stats histogram file, rw op stats, and ldlm stats.  The self-export
 * is skipped entirely.
 * NOTE(review): several lines (declarations, GOTO/RETURN paths, cleanup
 * label) are missing from this listing. */
249 static int filter_export_stats_init(struct obd_device *obd,
250 struct obd_export *exp,
/* Self-export: same UUID as the obd device itself. */
256 if (obd_uuid_equals(&exp->exp_client_uuid, &obd->obd_uuid))
257 /* Self-export gets no proc entry */
260 rc = lprocfs_exp_setup(exp, client_nid, &newnid);
262 /* Mask error for already created
270 struct nid_stat *tmp = exp->exp_nid_stats;
271 LASSERT(tmp != NULL);
273 OBD_ALLOC(tmp->nid_brw_stats, sizeof(struct brw_stats));
274 if (tmp->nid_brw_stats == NULL)
275 GOTO(clean, rc = -ENOMEM);
277 init_brw_stats(tmp->nid_brw_stats);
278 rc = lprocfs_seq_create(exp->exp_nid_stats->nid_proc, "brw_stats",
279 0644, &filter_per_nid_stats_fops,
/* Non-fatal: a missing brw_stats file only loses statistics. */
282 CWARN("Error adding the brw_stats file\n");
284 rc = lprocfs_init_rw_stats(obd, &exp->exp_nid_stats->nid_stats);
288 rc = lprocfs_register_stats(tmp->nid_proc, "stats",
292 rc = lprocfs_nid_ldlm_stats_init(tmp);
302 /* Add client data to the FILTER. We use a bitmap to locate a free space
303 * in the last_rcvd file if cl_idx is -1 (i.e. a new client).
304 * Otherwise, we have just read the data from the last_rcvd file and
305 * we know its offset. */
/* NOTE(review): this listing is missing lines throughout (ENTRY/RETURN,
 * some braces, error paths); treat it as a partial view. */
306 static int filter_client_add(struct obd_device *obd, struct obd_export *exp,
309 struct obd_device_target *obt = &obd->u.obt;
310 struct tg_export_data *ted = &exp->exp_target_data;
311 struct lr_server_data *lsd = class_server_data(obd);
312 unsigned long *bitmap = obt->obt_lut->lut_client_bitmap;
313 int new_client = (cl_idx == -1);
317 LASSERT(bitmap != NULL);
318 LASSERTF(cl_idx > -2, "%d\n", cl_idx);
/* The self-export does not get a last_rcvd slot. */
321 if (strcmp(ted->ted_lcd->lcd_uuid, obd->obd_uuid.uuid) == 0)
324 /* the bitmap operations can handle cl_idx > sizeof(long) * 8, so
325 * there's no need for extra complication here
/* New client: claim the first free slot in the bitmap, racing with
 * other connects via test_and_set_bit and retrying on collision. */
328 cl_idx = cfs_find_first_zero_bit(bitmap, LR_MAX_CLIENTS);
330 if (cl_idx >= LR_MAX_CLIENTS) {
331 CERROR("no room for %u client - fix LR_MAX_CLIENTS\n",
335 if (cfs_test_and_set_bit(cl_idx, bitmap)) {
336 cl_idx = cfs_find_next_zero_bit(bitmap, LR_MAX_CLIENTS,
/* Existing client (read back from last_rcvd): its bit must be free. */
341 if (cfs_test_and_set_bit(cl_idx, bitmap)) {
342 CERROR("FILTER client %d: bit already set in bitmap!\n",
/* Compute the client's fixed offset within the last_rcvd file. */
348 ted->ted_lr_idx = cl_idx;
349 ted->ted_lr_off = le32_to_cpu(lsd->lsd_client_start) +
350 cl_idx * le16_to_cpu(lsd->lsd_client_size);
351 cfs_mutex_init(&ted->ted_lcd_lock);
352 LASSERTF(ted->ted_lr_off > 0, "ted_lr_off = %llu\n", ted->ted_lr_off);
354 CDEBUG(D_INFO, "client at index %d (%llu) with UUID '%s' added\n",
355 ted->ted_lr_idx, ted->ted_lr_off, ted->ted_lcd->lcd_uuid);
/* New clients are written to disk immediately so a crash cannot lose
 * the slot assignment. */
358 struct lvfs_run_ctxt saved;
359 loff_t off = ted->ted_lr_off;
363 CDEBUG(D_INFO, "writing client lcd at idx %u (%llu) (len %u)\n",
364 ted->ted_lr_idx,off,(unsigned int)sizeof(*ted->ted_lcd));
/* Fault-injection hook for testing client-add failures. */
366 if (OBD_FAIL_CHECK(OBD_FAIL_TGT_CLIENT_ADD))
369 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
370 /* Transaction needed to fix bug 1403 */
371 handle = fsfilt_start(obd,
372 obt->obt_rcvd_filp->f_dentry->d_inode,
373 FSFILT_OP_SETATTR, NULL);
374 if (IS_ERR(handle)) {
375 rc = PTR_ERR(handle);
376 CERROR("unable to start transaction: rc %d\n", rc);
378 ted->ted_lcd->lcd_last_epoch = lsd->lsd_start_epoch;
379 exp->exp_last_request_time = cfs_time_current_sec();
/* Commit callback clears exp_need_sync once the record is durable;
 * the export ref is released by the callback. */
380 rc = fsfilt_add_journal_cb(obd, 0, handle,
381 target_client_add_cb,
382 class_export_cb_get(exp));
384 cfs_spin_lock(&exp->exp_lock);
385 exp->exp_need_sync = 1;
386 cfs_spin_unlock(&exp->exp_lock);
388 rc = fsfilt_write_record(obd, obt->obt_rcvd_filp,
390 sizeof(*ted->ted_lcd),
391 &off, rc /* sync if no cb */);
393 obt->obt_rcvd_filp->f_dentry->d_inode,
396 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
399 CERROR("error writing %s client idx %u: rc %d\n",
400 LAST_RCVD, ted->ted_lr_idx, rc);
/* Remove a client's slot from the last_rcvd file on final disconnect:
 * zero the UUID in its record, write it out, and clear the bitmap bit.
 * The self-export and exports without client data are skipped.
 * NOTE(review): lines (ENTRY/RETURN, bitmap-clear, 'free' label) are
 * missing from this listing. */
407 static int filter_client_del(struct obd_export *exp)
409 struct tg_export_data *ted = &exp->exp_target_data;
410 struct obd_device_target *obt = &exp->exp_obd->u.obt;
411 struct lvfs_run_ctxt saved;
416 if (ted->ted_lcd == NULL)
419 /* XXX if lcd_uuid were a real obd_uuid, I could use obd_uuid_equals */
420 if (strcmp(ted->ted_lcd->lcd_uuid, exp->exp_obd->obd_uuid.uuid ) == 0)
423 LASSERT(obt->obt_lut->lut_client_bitmap != NULL);
425 off = ted->ted_lr_off;
427 CDEBUG(D_INFO, "freeing client at idx %u, offset %lld with UUID '%s'\n",
428 ted->ted_lr_idx, ted->ted_lr_off, ted->ted_lcd->lcd_uuid);
430 /* Don't clear ted_lr_idx here as it is likely also unset. At worst
431 * we leak a client slot that will be cleaned on the next recovery. */
433 CERROR("%s: client idx %d has med_off %lld\n",
434 exp->exp_obd->obd_name, ted->ted_lr_idx, off);
435 GOTO(free, rc = -EINVAL);
438 /* Clear the bit _after_ zeroing out the client so we don't
439 race with filter_client_add and zero out new clients.*/
440 if (!cfs_test_bit(ted->ted_lr_idx, obt->obt_lut->lut_client_bitmap)) {
441 CERROR("FILTER client %u: bit already clear in bitmap!!\n",
446 push_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
447 /* Make sure the server's last_transno is up to date.
448 * This should be done before zeroing client slot so last_transno will
449 * be in server data or in client data in case of failure */
450 filter_update_server_data(exp->exp_obd);
/* ted_lcd_lock serializes against filter_finish_transno writing the
 * same slot. */
452 cfs_mutex_lock(&ted->ted_lcd_lock);
453 memset(ted->ted_lcd->lcd_uuid, 0, sizeof ted->ted_lcd->lcd_uuid);
454 rc = fsfilt_write_record(exp->exp_obd, obt->obt_rcvd_filp,
456 sizeof(*ted->ted_lcd), &off, 0);
457 cfs_mutex_unlock(&ted->ted_lcd_lock);
458 pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
460 CDEBUG(rc == 0 ? D_INFO : D_ERROR,
461 "zero out client %s at idx %u/%llu in %s, rc %d\n",
462 ted->ted_lcd->lcd_uuid, ted->ted_lr_idx, ted->ted_lr_off,
469 /* drop fmd reference, free it if last ref. must be called with fed_lock held.*/
470 static inline void filter_fmd_put_nolock(struct filter_export_data *fed,
471 struct filter_mod_data *fmd)
473 LASSERT_SPIN_LOCKED(&fed->fed_lock);
474 if (--fmd->fmd_refcount == 0) {
475 /* XXX when we have persistent reservations and the handle
476 * is stored herein we need to drop it here. */
/* Last reference: unlink from the export's mod list and return the
 * entry to the slab cache. */
477 fed->fed_mod_count--;
478 cfs_list_del(&fmd->fmd_list);
479 OBD_SLAB_FREE(fmd, ll_fmd_cachep, sizeof(*fmd));
483 /* drop fmd reference, free it if last ref */
/* Locked wrapper around filter_fmd_put_nolock(): takes fed_lock and drops
 * the caller's reference on @fmd. */
484 void filter_fmd_put(struct obd_export *exp, struct filter_mod_data *fmd)
486 struct filter_export_data *fed;
491 fed = &exp->exp_filter_data;
492 cfs_spin_lock(&fed->fed_lock);
493 filter_fmd_put_nolock(fed, fmd); /* caller reference */
494 cfs_spin_unlock(&fed->fed_lock);
497 /* expire entries from the end of the list if there are too many
498 * or they are too old */
/* Must be called with fed_lock held.  Walks the mod list from the oldest
 * end, dropping the list reference of entries that are past fmd_expire or
 * in excess of fo_fmd_max_num; stops at the first entry that is still
 * fresh and within the count limit.  @keep (if non-NULL) is presumably
 * skipped — the skip line is missing from this listing. */
499 static void filter_fmd_expire_nolock(struct filter_obd *filter,
500 struct filter_export_data *fed,
501 struct filter_mod_data *keep)
503 struct filter_mod_data *fmd, *tmp;
505 cfs_list_for_each_entry_safe(fmd, tmp, &fed->fed_mod_list, fmd_list) {
/* List is ordered oldest-first; the first unexpired entry under the
 * count limit ends the scan. */
509 if (cfs_time_before(jiffies, fmd->fmd_expire) &&
510 fed->fed_mod_count < filter->fo_fmd_max_num)
513 cfs_list_del_init(&fmd->fmd_list);
514 filter_fmd_put_nolock(fed, fmd); /* list reference */
/* Locked wrapper: expire stale filter_mod_data entries on @exp's list. */
518 void filter_fmd_expire(struct obd_export *exp)
520 cfs_spin_lock(&exp->exp_filter_data.fed_lock);
521 filter_fmd_expire_nolock(&exp->exp_obd->u.filter,
522 &exp->exp_filter_data, NULL);
523 cfs_spin_unlock(&exp->exp_filter_data.fed_lock);
526 /* find specified objid, group in export fmd list.
527 * caller must hold fed_lock and take fmd reference itself */
/* On a hit the entry is moved to the tail (MRU position) and its expiry
 * is refreshed; expired entries are then pruned around the found one. */
528 static struct filter_mod_data *filter_fmd_find_nolock(struct filter_obd *filter,
529 struct filter_export_data *fed,
530 obd_id objid, obd_seq group)
532 struct filter_mod_data *found = NULL, *fmd;
534 LASSERT_SPIN_LOCKED(&fed->fed_lock);
/* Reverse walk: recently-used entries live at the tail, so a hot hit
 * terminates quickly. */
536 cfs_list_for_each_entry_reverse(fmd, &fed->fed_mod_list, fmd_list) {
537 if (fmd->fmd_id == objid && fmd->fmd_gr == group) {
539 cfs_list_del(&fmd->fmd_list);
540 cfs_list_add_tail(&fmd->fmd_list, &fed->fed_mod_list);
541 fmd->fmd_expire = jiffies + filter->fo_fmd_max_age;
546 filter_fmd_expire_nolock(filter, fed, found);
551 /* Find fmd based on objid and group, or return NULL if not found. */
/* Locked lookup wrapper; on a hit also takes a caller reference that the
 * caller must drop with filter_fmd_put(). */
552 struct filter_mod_data *filter_fmd_find(struct obd_export *exp,
553 obd_id objid, obd_seq group)
555 struct filter_mod_data *fmd;
557 cfs_spin_lock(&exp->exp_filter_data.fed_lock);
558 fmd = filter_fmd_find_nolock(&exp->exp_obd->u.filter,
559 &exp->exp_filter_data, objid, group);
561 fmd->fmd_refcount++; /* caller reference */
562 cfs_spin_unlock(&exp->exp_filter_data.fed_lock);
567 /* Find fmd based on objid and group, or create a new one if none is found.
568 * It is possible for this function to return NULL under memory pressure,
569 * or if objid = 0 is passed (which will only cause old entries to expire).
570 * Currently this is not fatal because any fmd state is transient and
571 * may also be freed when it gets sufficiently old. */
/* NOTE(review): the found/not-found branch lines are partially missing
 * from this listing; the allocate-then-check-under-lock pattern avoids
 * allocating inside the spinlock. */
572 struct filter_mod_data *filter_fmd_get(struct obd_export *exp,
573 obd_id objid, obd_seq group)
575 struct filter_export_data *fed = &exp->exp_filter_data;
576 struct filter_mod_data *found = NULL, *fmd_new = NULL;
/* Speculatively allocate outside the lock; freed below if the entry
 * already exists. */
578 OBD_SLAB_ALLOC_PTR_GFP(fmd_new, ll_fmd_cachep, CFS_ALLOC_IO);
580 cfs_spin_lock(&fed->fed_lock);
581 found = filter_fmd_find_nolock(&exp->exp_obd->u.filter,fed,objid,group);
/* Miss: insert the new entry at the MRU tail with a list reference. */
584 cfs_list_add_tail(&fmd_new->fmd_list,
586 fmd_new->fmd_id = objid;
587 fmd_new->fmd_gr = group;
588 fmd_new->fmd_refcount++; /* list reference */
590 fed->fed_mod_count++;
/* Hit (or lost race): discard the speculative allocation. */
592 OBD_SLAB_FREE(fmd_new, ll_fmd_cachep, sizeof(*fmd_new));
596 found->fmd_refcount++; /* caller reference */
597 found->fmd_expire = jiffies +
598 exp->exp_obd->u.filter.fo_fmd_max_age;
601 cfs_spin_unlock(&fed->fed_lock);
607 /* drop fmd list reference so it will disappear when last reference is put.
608 * This isn't so critical because it would in fact only affect the one client
609 * that is doing the unlink and at worst we have an stale entry referencing
610 * an object that should never be used again. */
611 static void filter_fmd_drop(struct obd_export *exp, obd_id objid, obd_seq group)
613 struct filter_mod_data *found = NULL;
615 cfs_spin_lock(&exp->exp_filter_data.fed_lock);
/* NOTE(review): filter_fmd_find_nolock normally takes (filter, fed, id,
 * group); this listing shows only three args — likely a dropped line. */
616 found = filter_fmd_find_nolock(&exp->exp_filter_data, objid, group);
618 cfs_list_del_init(&found->fmd_list);
619 filter_fmd_put_nolock(&exp->exp_filter_data, found);
621 cfs_spin_unlock(&exp->exp_filter_data.fed_lock);
/* No-op stub when the fmd-drop feature is compiled out (#else branch;
 * the surrounding #ifdef lines are missing from this listing). */
624 #define filter_fmd_drop(exp, objid, group)
627 /* remove all entries from fmd list */
/* Called at export teardown: drops the list reference of every entry so
 * each is freed once its last caller reference is put. */
628 static void filter_fmd_cleanup(struct obd_export *exp)
630 struct filter_export_data *fed = &exp->exp_filter_data;
631 struct filter_mod_data *fmd = NULL, *tmp;
633 cfs_spin_lock(&fed->fed_lock);
634 cfs_list_for_each_entry_safe(fmd, tmp, &fed->fed_mod_list, fmd_list) {
635 cfs_list_del_init(&fmd->fmd_list);
636 filter_fmd_put_nolock(fed, fmd);
638 cfs_spin_unlock(&fed->fed_lock);
/* obd_ops init_export hook: set up per-export filter state (fmd list and
 * lock), mark the export as connecting, and for real clients allocate the
 * last_rcvd client slot and initialize ldlm export state.
 * NOTE(review): RETURN lines are missing from this listing. */
641 static int filter_init_export(struct obd_export *exp)
646 cfs_spin_lock_init(&exp->exp_filter_data.fed_lock);
647 CFS_INIT_LIST_HEAD(&exp->exp_filter_data.fed_mod_list);
649 cfs_spin_lock(&exp->exp_lock);
650 exp->exp_connecting = 1;
651 cfs_spin_unlock(&exp->exp_lock);
653 /* self-export doesn't need client data and ldlm initialization */
654 if (unlikely(obd_uuid_equals(&exp->exp_obd->obd_uuid,
655 &exp->exp_client_uuid)))
658 rc = lut_client_alloc(exp);
660 rc = ldlm_init_export(exp);
662 CERROR("%s: Can't initialize export: rc %d\n",
663 exp->exp_obd->obd_name, rc);
/* Tear down and free the lu_target allocated by filter_init_server_data(). */
668 static int filter_free_server_data(struct obd_device_target *obt)
670 lut_fini(NULL, obt->obt_lut);
671 OBD_FREE_PTR(obt->obt_lut);
675 /* assumes caller is already in kernel ctxt */
/* Write the lr_server_data header (offset 0 of last_rcvd) to disk so the
 * server's last_transno and mount count survive a crash.  The write is
 * asynchronous (force_sync = 0). */
676 int filter_update_server_data(struct obd_device *obd)
678 struct file *filp = obd->u.obt.obt_rcvd_filp;
679 struct lr_server_data *lsd = class_server_data(obd);
684 CDEBUG(D_INODE, "server uuid : %s\n", lsd->lsd_uuid);
685 CDEBUG(D_INODE, "server last_rcvd : "LPU64"\n",
686 le64_to_cpu(lsd->lsd_last_transno));
687 CDEBUG(D_INODE, "server last_mount: "LPU64"\n",
688 le64_to_cpu(lsd->lsd_mount_count));
690 rc = fsfilt_write_record(obd, filp, lsd, sizeof(*lsd), &off, 0);
692 CERROR("error writing lr_server_data: rc = %d\n", rc);
/* Persist the highest allocated object id for @group to that group's
 * LAST_ID file (little-endian on disk).  @force_sync makes the write
 * synchronous; callers use this on first initialization. */
697 int filter_update_last_objid(struct obd_device *obd, obd_seq group,
700 struct filter_obd *filter = &obd->u.filter;
/* Group's LAST_ID file not opened yet — nothing to update. */
706 if (filter->fo_last_objid_files[group] == NULL) {
707 CERROR("Object seq "LPU64" not fully setup; not updating "
708 "last_objid\n", group);
712 CDEBUG(D_INODE, "%s: server last_objid for "POSTID"\n",
713 obd->obd_name, filter->fo_last_objids[group], group);
715 tmp = cpu_to_le64(filter->fo_last_objids[group]);
716 rc = fsfilt_write_record(obd, filter->fo_last_objid_files[group],
717 &tmp, sizeof(tmp), &off, force_sync);
719 CERROR("error writing seq "LPU64" last objid: rc = %d\n",
723 extern int ost_handle(struct ptlrpc_request *req);
724 /* assumes caller has already in kernel ctxt */
/* Read (or initialize, on a fresh disk) the last_rcvd file: validate the
 * server header, then walk every per-client slot, recreating an export
 * for each client that was connected at crash time so recovery can wait
 * for them.  Finally bump the mount count and write the header back.
 * NOTE(review): many lines (declarations, braces, GOTO targets, lcd
 * allocation) are missing from this listing — this is a partial view. */
725 static int filter_init_server_data(struct obd_device *obd, struct file * filp)
727 struct filter_obd *filter = &obd->u.filter;
728 struct lr_server_data *lsd;
729 struct lsd_client_data *lcd = NULL;
730 struct inode *inode = filp->f_dentry->d_inode;
731 unsigned long last_rcvd_size = i_size_read(inode);
732 struct lu_target *lut;
739 /* ensure padding in the struct is the correct size */
740 CLASSERT (offsetof(struct lr_server_data, lsd_padding) +
741 sizeof(lsd->lsd_padding) == LR_SERVER_SIZE);
742 CLASSERT (offsetof(struct lsd_client_data, lcd_padding) +
743 sizeof(lcd->lcd_padding) == LR_CLIENT_SIZE);
745 /* allocate and initialize lu_target */
749 rc = lut_init(NULL, lut, obd, NULL);
752 lsd = class_server_data(obd);
/* Empty last_rcvd => brand-new target: fill in default header values. */
753 if (last_rcvd_size == 0) {
754 LCONSOLE_WARN("%s: new disk, initializing\n", obd->obd_name);
756 memcpy(lsd->lsd_uuid, obd->obd_uuid.uuid,sizeof(lsd->lsd_uuid));
757 lsd->lsd_last_transno = 0;
758 mount_count = lsd->lsd_mount_count = 0;
759 lsd->lsd_server_size = cpu_to_le32(LR_SERVER_SIZE);
760 lsd->lsd_client_start = cpu_to_le32(LR_CLIENT_START);
761 lsd->lsd_client_size = cpu_to_le16(LR_CLIENT_SIZE);
762 lsd->lsd_subdir_count = cpu_to_le16(FILTER_SUBDIR_COUNT);
763 filter->fo_subdir_count = FILTER_SUBDIR_COUNT;
764 /* OBD_COMPAT_OST is set in filter_connect_internal when the
765 * MDS first connects and assigns the OST index number. */
766 lsd->lsd_feature_incompat = cpu_to_le32(OBD_INCOMPAT_COMMON_LR|
/* Existing target: read and sanity-check the stored header. */
769 rc = fsfilt_read_record(obd, filp, lsd, sizeof(*lsd), &off);
771 CDEBUG(D_INODE,"OBD filter: error reading %s: rc %d\n",
/* UUID mismatch means the wrong block device was mounted. */
775 if (strcmp(lsd->lsd_uuid, obd->obd_uuid.uuid) != 0) {
776 LCONSOLE_ERROR_MSG(0x134, "Trying to start OBD %s "
777 "using the wrong disk %s. Were the "
778 "/dev/ assignments rearranged?\n",
779 obd->obd_uuid.uuid, lsd->lsd_uuid);
780 GOTO(err_lut, rc = -EINVAL);
782 mount_count = le64_to_cpu(lsd->lsd_mount_count);
783 filter->fo_subdir_count = le16_to_cpu(lsd->lsd_subdir_count);
785 /* Assume old last_rcvd format unless I_C_LR is set */
786 if (!(lsd->lsd_feature_incompat &
787 cpu_to_le32(OBD_INCOMPAT_COMMON_LR)))
788 lsd->lsd_last_transno = lsd->lsd_compat14;
790 /* OBD_COMPAT_OST is set in filter_connect_internal when the
791 * MDS first connects and assigns the OST index number. */
792 lsd->lsd_feature_incompat |= cpu_to_le32(OBD_INCOMPAT_COMMON_LR|
/* Refuse to mount a filesystem carrying incompat features we don't know. */
796 if (lsd->lsd_feature_incompat & ~cpu_to_le32(FILTER_INCOMPAT_SUPP)) {
797 CERROR("%s: unsupported incompat filesystem feature(s) %x\n",
798 obd->obd_name, le32_to_cpu(lsd->lsd_feature_incompat) &
799 ~FILTER_INCOMPAT_SUPP);
800 GOTO(err_lut, rc = -EINVAL);
802 if (lsd->lsd_feature_rocompat & ~cpu_to_le32(FILTER_ROCOMPAT_SUPP)) {
803 CERROR("%s: unsupported read-only filesystem feature(s) %x\n",
804 obd->obd_name, le32_to_cpu(lsd->lsd_feature_rocompat) &
805 ~FILTER_ROCOMPAT_SUPP);
806 /* Do something like remount filesystem read-only */
807 GOTO(err_lut, rc = -EINVAL);
810 start_epoch = le32_to_cpu(lsd->lsd_start_epoch);
812 CDEBUG(D_INODE, "%s: server start_epoch : %#x\n",
813 obd->obd_name, start_epoch);
814 CDEBUG(D_INODE, "%s: server last_transno : "LPX64"\n",
815 obd->obd_name, le64_to_cpu(lsd->lsd_last_transno));
816 CDEBUG(D_INODE, "%s: server mount_count: "LPU64"\n",
817 obd->obd_name, mount_count + 1);
818 CDEBUG(D_INODE, "%s: server data size: %u\n",
819 obd->obd_name, le32_to_cpu(lsd->lsd_server_size));
820 CDEBUG(D_INODE, "%s: per-client data start: %u\n",
821 obd->obd_name, le32_to_cpu(lsd->lsd_client_start));
822 CDEBUG(D_INODE, "%s: per-client data size: %u\n",
823 obd->obd_name, le32_to_cpu(lsd->lsd_client_size));
824 CDEBUG(D_INODE, "%s: server subdir_count: %u\n",
825 obd->obd_name, le16_to_cpu(lsd->lsd_subdir_count));
826 CDEBUG(D_INODE, "%s: last_rcvd clients: %lu\n", obd->obd_name,
827 last_rcvd_size <= le32_to_cpu(lsd->lsd_client_start) ? 0 :
828 (last_rcvd_size - le32_to_cpu(lsd->lsd_client_start)) /
829 le16_to_cpu(lsd->lsd_client_size));
831 if (!obd->obd_replayable) {
832 CWARN("%s: recovery support OFF\n", obd->obd_name);
838 GOTO(err_client, rc = -ENOMEM);
/* Scan each on-disk client slot; rebuild an export per live client. */
840 for (cl_idx = 0, off = le32_to_cpu(lsd->lsd_client_start);
841 off < last_rcvd_size; cl_idx++) {
843 struct obd_export *exp;
844 struct filter_export_data *fed;
846 /* Don't assume off is incremented properly by
847 * fsfilt_read_record(), in case sizeof(*lcd)
848 * isn't the same as lsd->lsd_client_size. */
849 off = le32_to_cpu(lsd->lsd_client_start) +
850 cl_idx * le16_to_cpu(lsd->lsd_client_size);
851 rc = fsfilt_read_record(obd, filp, lcd, sizeof(*lcd), &off);
853 CERROR("error reading FILT %s idx %d off %llu: rc %d\n",
854 LAST_RCVD, cl_idx, off, rc);
855 break; /* read error shouldn't cause startup to fail */
/* Empty UUID marks a freed slot (see filter_client_del). */
858 if (lcd->lcd_uuid[0] == '\0') {
859 CDEBUG(D_INFO, "skipping zeroed client at offset %d\n",
864 check_lcd(obd->obd_name, cl_idx, lcd);
866 last_rcvd = le64_to_cpu(lcd->lcd_last_transno);
868 CDEBUG(D_HA, "RCVRNG CLIENT uuid: %s idx: %d lr: "LPU64
869 " srv lr: "LPU64"\n", lcd->lcd_uuid, cl_idx,
870 last_rcvd, le64_to_cpu(lsd->lsd_last_transno));
872 /* These exports are cleaned up by filter_disconnect(), so they
873 * need to be set up like real exports as filter_connect() does.
875 exp = class_new_export(obd, (struct obd_uuid *)lcd->lcd_uuid);
877 if (PTR_ERR(exp) == -EALREADY) {
878 /* export already exists, zero out this one */
879 CERROR("Duplicate export %s!\n", lcd->lcd_uuid);
883 GOTO(err_client, rc = PTR_ERR(exp));
886 fed = &exp->exp_filter_data;
887 *fed->fed_ted.ted_lcd = *lcd;
888 fed->fed_group = 0; /* will be assigned at connect */
889 filter_export_stats_init(obd, exp, NULL);
890 rc = filter_client_add(obd, exp, cl_idx);
891 /* can't fail for existing client */
892 LASSERTF(rc == 0, "rc = %d\n", rc);
894 /* VBR: set export last committed */
895 exp->exp_last_committed = last_rcvd;
896 cfs_spin_lock(&exp->exp_lock);
897 exp->exp_connecting = 0;
898 exp->exp_in_recovery = 0;
899 cfs_spin_unlock(&exp->exp_lock);
900 obd->obd_max_recoverable_clients++;
901 class_export_put(exp);
/* Track the highest client transno as the server's last transno. */
903 if (last_rcvd > le64_to_cpu(lsd->lsd_last_transno))
904 lsd->lsd_last_transno = cpu_to_le64(last_rcvd);
908 obd->obd_last_committed = le64_to_cpu(lsd->lsd_last_transno);
910 obd->u.obt.obt_mount_count = mount_count + 1;
911 obd->u.obt.obt_instance = (__u32)obd->u.obt.obt_mount_count;
912 lsd->lsd_mount_count = cpu_to_le64(obd->u.obt.obt_mount_count);
914 /* save it, so mount count and last_transno is current */
915 rc = filter_update_server_data(obd);
917 GOTO(err_client, rc);
/* Error paths: disconnect any exports we created, free the lu_target. */
922 class_disconnect_exports(obd);
924 filter_free_server_data(&obd->u.obt);
/* Release all per-group state built by filter_read_group_internal():
 * group dentries, LAST_ID file handles, subdir dentries, the last-objid
 * array, and finally the O/ directory dentry itself.
 * NOTE(review): dput/filp_close calls inside the loops are missing from
 * this listing. */
928 static int filter_cleanup_groups(struct obd_device *obd)
930 struct filter_obd *filter = &obd->u.filter;
932 struct dentry *dentry;
936 if (filter->fo_dentry_O_groups != NULL) {
937 for (i = 0; i < filter->fo_group_count; i++) {
938 dentry = filter->fo_dentry_O_groups[i];
942 OBD_FREE(filter->fo_dentry_O_groups,
943 filter->fo_group_count *
944 sizeof(*filter->fo_dentry_O_groups));
945 filter->fo_dentry_O_groups = NULL;
947 if (filter->fo_last_objid_files != NULL) {
948 for (i = 0; i < filter->fo_group_count; i++) {
949 filp = filter->fo_last_objid_files[i];
953 OBD_FREE(filter->fo_last_objid_files,
954 filter->fo_group_count *
955 sizeof(*filter->fo_last_objid_files));
956 filter->fo_last_objid_files = NULL;
958 if (filter->fo_dentry_O_sub != NULL) {
959 for (i = 0; i < filter->fo_group_count; i++) {
960 for (j = 0; j < filter->fo_subdir_count; j++) {
961 dentry = filter->fo_dentry_O_sub[i].dentry[j];
966 OBD_FREE(filter->fo_dentry_O_sub,
967 filter->fo_group_count *
968 sizeof(*filter->fo_dentry_O_sub));
969 filter->fo_dentry_O_sub = NULL;
971 if (filter->fo_last_objids != NULL) {
972 OBD_FREE(filter->fo_last_objids,
973 filter->fo_group_count *
974 sizeof(*filter->fo_last_objids));
975 filter->fo_last_objids = NULL;
977 if (filter->fo_dentry_O != NULL) {
978 f_dput(filter->fo_dentry_O);
979 filter->fo_dentry_O = NULL;
/* Persist the highest-seen object group number in the LAST_GROUP file,
 * but only if @group exceeds what has already been committed.  The write
 * is synchronous so group creation survives a crash. */
984 static int filter_update_last_group(struct obd_device *obd, int group)
986 struct filter_obd *filter = &obd->u.filter;
987 struct file *filp = NULL;
988 int last_group = 0, rc;
/* Fast path: already committed a group >= this one. */
992 if (group <= filter->fo_committed_group)
995 filp = filp_open("LAST_GROUP", O_RDWR, 0700);
999 CERROR("cannot open LAST_GROUP: rc = %d\n", rc);
1003 rc = fsfilt_read_record(obd, filp, &last_group, sizeof(__u32), &off);
1005 CDEBUG(D_INODE, "error reading LAST_GROUP: rc %d\n",rc);
1009 CDEBUG(D_INODE, "%s: previous %d, new %d\n",
1010 obd->obd_name, last_group, group);
1014 /* must be sync: bXXXX */
1015 rc = fsfilt_write_record(obd, filp, &last_group, sizeof(__u32), &off, 1);
1017 CDEBUG(D_INODE, "error updating LAST_GROUP: rc %d\n", rc);
1021 filter->fo_committed_group = group;
1024 filp_close(filp, 0);
/* Set up one object group: look up (or create) the O/<group> directory
 * and its LAST_ID file, optionally create per-group subdirectories, grow
 * the per-group arrays to length group+1, and load the group's last
 * object id.  Runs under fo_init_lock (see filter_read_groups()).
 * NOTE(review): numerous lines (declarations, braces, cleanup stages,
 * RETURN) are missing from this listing — partial view only. */
1028 static int filter_read_group_internal(struct obd_device *obd, int group,
1031 struct filter_obd *filter = &obd->u.filter;
1032 __u64 *new_objids = NULL;
1033 struct filter_subdirs *new_subdirs = NULL, *tmp_subdirs = NULL;
1034 struct dentry **new_groups = NULL;
1035 struct file **new_files = NULL;
1036 struct dentry *dentry;
1038 int old_count = filter->fo_group_count, rc, stage = 0, i;
1042 int len = group + 1;
1044 snprintf(name, 24, "%d", group);
/* Lookup-only path (no create): the group directory must already exist. */
1048 dentry = ll_lookup_one_len(name, filter->fo_dentry_O,
1050 if (IS_ERR(dentry)) {
1051 CERROR("Cannot lookup expected object group %d: %ld\n",
1052 group, PTR_ERR(dentry));
1053 RETURN(PTR_ERR(dentry));
/* Create path: make O/<group> if needed. */
1056 dentry = simple_mkdir(filter->fo_dentry_O,
1057 obd->u.obt.obt_vfsmnt, name, 0700, 1);
1058 if (IS_ERR(dentry)) {
1059 CERROR("cannot lookup/create O/%s: rc = %ld\n", name,
1061 RETURN(PTR_ERR(dentry));
1066 snprintf(name, 24, "O/%d/LAST_ID", group);
1068 filp = filp_open(name, O_CREAT | O_RDWR, 0700);
1070 CERROR("cannot create %s: rc = %ld\n", name, PTR_ERR(filp));
1071 GOTO(cleanup, rc = PTR_ERR(filp));
1075 rc = fsfilt_read_record(obd, filp, &last_objid, sizeof(__u64), &off);
1077 CDEBUG(D_INODE, "error reading %s: rc %d\n", name, rc);
/* MDT-owned sequences get d0..dN hash subdirectories. */
1081 if (filter->fo_subdir_count && fid_seq_is_mdt(group)) {
1082 OBD_ALLOC(tmp_subdirs, sizeof(*tmp_subdirs));
1083 if (tmp_subdirs == NULL)
1084 GOTO(cleanup, rc = -ENOMEM);
1087 for (i = 0; i < filter->fo_subdir_count; i++) {
1089 snprintf(dir, sizeof(dir), "d%u", i);
1091 tmp_subdirs->dentry[i] = simple_mkdir(dentry,
1092 obd->u.obt.obt_vfsmnt,
1094 if (IS_ERR(tmp_subdirs->dentry[i])) {
1095 rc = PTR_ERR(tmp_subdirs->dentry[i]);
1096 CERROR("can't lookup/create O/%d/%s: rc = %d\n",
1101 CDEBUG(D_INODE, "got/created O/%d/%s: %p\n", group, dir,
1102 tmp_subdirs->dentry[i]);
1106 /* 'group' is an index; we need an array of length 'group + 1' */
1107 if (group + 1 > old_count) {
/* Grow all four parallel arrays together: alloc new, copy old,
 * free old, then swap the pointers in. */
1108 OBD_ALLOC(new_objids, len * sizeof(*new_objids));
1109 OBD_ALLOC(new_subdirs, len * sizeof(*new_subdirs));
1110 OBD_ALLOC(new_groups, len * sizeof(*new_groups));
1111 OBD_ALLOC(new_files, len * sizeof(*new_files));
1113 if (new_objids == NULL || new_subdirs == NULL ||
1114 new_groups == NULL || new_files == NULL)
1115 GOTO(cleanup, rc = -ENOMEM);
1118 memcpy(new_objids, filter->fo_last_objids,
1119 old_count * sizeof(*new_objids));
1120 memcpy(new_subdirs, filter->fo_dentry_O_sub,
1121 old_count * sizeof(*new_subdirs));
1122 memcpy(new_groups, filter->fo_dentry_O_groups,
1123 old_count * sizeof(*new_groups));
1124 memcpy(new_files, filter->fo_last_objid_files,
1125 old_count * sizeof(*new_files));
1127 OBD_FREE(filter->fo_last_objids,
1128 old_count * sizeof(*new_objids));
1129 OBD_FREE(filter->fo_dentry_O_sub,
1130 old_count * sizeof(*new_subdirs));
1131 OBD_FREE(filter->fo_dentry_O_groups,
1132 old_count * sizeof(*new_groups));
1133 OBD_FREE(filter->fo_last_objid_files,
1134 old_count * sizeof(*new_files));
1136 filter->fo_last_objids = new_objids;
1137 filter->fo_dentry_O_sub = new_subdirs;
1138 filter->fo_dentry_O_groups = new_groups;
1139 filter->fo_last_objid_files = new_files;
1140 filter->fo_group_count = len;
/* Publish this group's dentry/file into the (possibly grown) arrays. */
1143 filter->fo_dentry_O_groups[group] = dentry;
1144 filter->fo_last_objid_files[group] = filp;
1145 if (filter->fo_subdir_count && fid_seq_is_mdt(group)) {
1146 filter->fo_dentry_O_sub[group] = *tmp_subdirs;
1147 OBD_FREE(tmp_subdirs, sizeof(*tmp_subdirs));
1150 filter_update_last_group(obd, group);
/* Empty LAST_ID file: seed with the initial objid and sync it. */
1152 if (i_size_read(filp->f_dentry->d_inode) == 0) {
1153 filter->fo_last_objids[group] = FILTER_INIT_OBJID;
1154 rc = filter_update_last_objid(obd, group, 1);
1158 filter->fo_last_objids[group] = le64_to_cpu(last_objid);
1159 CDEBUG(D_INODE, "%s: server last_objid group %d: "LPU64"\n",
1160 obd->obd_name, group, last_objid);
/* Cleanup path: free whichever grown arrays / subdirs / file handle
 * were created before the failure (staged unwind). */
1165 if (new_objids != NULL)
1166 OBD_FREE(new_objids, len * sizeof(*new_objids));
1167 if (new_subdirs != NULL)
1168 OBD_FREE(new_subdirs, len * sizeof(*new_subdirs));
1169 if (new_groups != NULL)
1170 OBD_FREE(new_groups, len * sizeof(*new_groups));
1171 if (new_files != NULL)
1172 OBD_FREE(new_files, len * sizeof(*new_files));
1174 if (filter->fo_subdir_count && fid_seq_is_mdt(group)) {
1175 for (i = 0; i < filter->fo_subdir_count; i++) {
1176 if (tmp_subdirs->dentry[i] != NULL)
1177 dput(tmp_subdirs->dentry[i]);
1179 OBD_FREE(tmp_subdirs, sizeof(*tmp_subdirs));
1182 filp_close(filp, 0);
/* Bring per-group state (last objid files, O/<seq> dentries) up to date for
 * every group from the current fo_group_count through @last_group, each via
 * filter_read_group_internal().  Serialized by fo_init_lock so concurrent
 * callers cannot initialize the same group twice.
 * NOTE(review): loop-exit handling on rc is not visible in this excerpt. */
1189 static int filter_read_groups(struct obd_device *obd, int last_group,
1192 struct filter_obd *filter = &obd->u.filter;
1193 int old_count, group, rc = 0;
1195 cfs_mutex_lock(&filter->fo_init_lock);
1196 old_count = filter->fo_group_count;
1197 for (group = old_count; group <= last_group; group++) {
1198 rc = filter_read_group_internal(obd, group, create);
1202 cfs_mutex_unlock(&filter->fo_init_lock);
1206 /* FIXME: object groups */
/* Create/open the top-level "O" object directory and the LAST_GROUP file,
 * read the highest committed group number from it, then initialize all
 * groups up to that number.  On error, cleanup_phase unwinds in reverse:
 * phase 2 closes the LAST_GROUP filp, phase 1 drops the O dentry. */
1207 static int filter_prep_groups(struct obd_device *obd)
1209 struct filter_obd *filter = &obd->u.filter;
1210 struct dentry *O_dentry;
1212 int last_group, rc = 0, cleanup_phase = 0;
1216 O_dentry = simple_mkdir(cfs_fs_pwd(current->fs), obd->u.obt.obt_vfsmnt,
1218 CDEBUG(D_INODE, "%s: got/created O: %p\n", obd->obd_name, O_dentry);
1219 if (IS_ERR(O_dentry)) {
1220 rc = PTR_ERR(O_dentry);
1221 CERROR("%s: cannot open/create O: rc = %d\n", obd->obd_name,rc);
1224 filter->fo_dentry_O = O_dentry;
1225 cleanup_phase = 1; /* O_dentry */
1227 /* we have to initialize all groups before first connections from
1228 * clients because they may send create/destroy for any group -bzzz */
1229 filp = filp_open("LAST_GROUP", O_CREAT | O_RDWR, 0700);
1231 CERROR("%s: cannot create LAST_GROUP: rc = %ld\n",
1232 obd->obd_name, PTR_ERR(filp));
1233 GOTO(cleanup, rc = PTR_ERR(filp));
1235 cleanup_phase = 2; /* filp */
/* Read the persisted last group number; an empty/short file falls back to
 * FID_SEQ_OST_MDT0 (the default initial group). */
1237 rc = fsfilt_read_record(obd, filp, &last_group, sizeof(__u32), &off);
1239 CERROR("%s: error reading LAST_GROUP: rc %d\n",
1245 last_group = FID_SEQ_OST_MDT0;
1247 CDEBUG(D_INODE, "%s: initialize group %u (max %u)\n", obd->obd_name,
1248 FID_SEQ_OST_MDT0, last_group);
1249 filter->fo_committed_group = last_group;
1250 rc = filter_read_groups(obd, last_group, 1);
1254 filp_close(filp, 0);
1258 switch (cleanup_phase) {
1260 filp_close(filp, 0);
1262 filter_cleanup_groups(obd);
1263 f_dput(filter->fo_dentry_O);
1264 filter->fo_dentry_O = NULL;
1272 /* setup the object store with correct subdirectories */
/* Prepare the on-disk object store under the obd's lvfs context:
 *  - open/create LAST_RCVD and verify it is a regular file;
 *  - verify the backing fs supports i_op->create/unlink (needed because
 *    filter_vfs_unlink() calls i_op->unlink directly);
 *  - read server data and start target recovery;
 *  - open/create the HEALTH_CHECK file and run an I/O health check;
 *  - initialize the object groups via filter_prep_groups().
 * Error paths unwind health-check filp, recovery/server data, and the
 * LAST_RCVD filp in reverse order. */
1273 static int filter_prep(struct obd_device *obd)
1275 struct lvfs_run_ctxt saved;
1276 struct filter_obd *filter = &obd->u.filter;
1278 struct inode *inode;
1282 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1283 file = filp_open(LAST_RCVD, O_RDWR | O_CREAT | O_LARGEFILE, 0700);
1284 if (!file || IS_ERR(file)) {
1286 CERROR("OBD filter: cannot open/create %s: rc = %d\n",
1290 obd->u.obt.obt_rcvd_filp = file;
1291 if (!S_ISREG(file->f_dentry->d_inode->i_mode)) {
1292 CERROR("%s is not a regular file!: mode = %o\n", LAST_RCVD,
1293 file->f_dentry->d_inode->i_mode);
1294 GOTO(err_filp, rc = -ENOENT);
1297 inode = file->f_dentry->d_parent->d_inode;
1298 /* We use i_op->unlink directly in filter_vfs_unlink() */
1299 if (!inode->i_op || !inode->i_op->create || !inode->i_op->unlink) {
1300 CERROR("%s: filesystem does not support create/unlink ops\n",
1302 GOTO(err_filp, rc = -EOPNOTSUPP);
1305 rc = filter_init_server_data(obd, file);
1307 CERROR("cannot read %s: rc = %d\n", LAST_RCVD, rc);
1310 LASSERT(obd->u.obt.obt_lut);
1311 target_recovery_init(obd->u.obt.obt_lut, ost_handle);
1313 /* open and create health check io file*/
1314 file = filp_open(HEALTH_CHECK, O_RDWR | O_CREAT, 0644);
1317 CERROR("OBD filter: cannot open/create %s rc = %d\n",
1319 GOTO(err_server_data, rc);
1321 filter->fo_obt.obt_health_check_filp = file;
1322 if (!S_ISREG(file->f_dentry->d_inode->i_mode)) {
1323 CERROR("%s is not a regular file!: mode = %o\n", HEALTH_CHECK,
1324 file->f_dentry->d_inode->i_mode);
1325 GOTO(err_health_check, rc = -ENOENT);
1327 rc = lvfs_check_io_health(obd, file);
1329 GOTO(err_health_check, rc);
1331 rc = filter_prep_groups(obd);
1333 GOTO(err_health_check, rc);
1335 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
/* --- error unwinding below --- */
1340 if (filp_close(filter->fo_obt.obt_health_check_filp, 0))
1341 CERROR("can't close %s after error\n", HEALTH_CHECK);
1342 filter->fo_obt.obt_health_check_filp = NULL;
1344 target_recovery_fini(obd);
1345 filter_free_server_data(&obd->u.obt);
1347 if (filp_close(obd->u.obt.obt_rcvd_filp, 0))
1348 CERROR("can't close %s after error\n", LAST_RCVD);
1349 obd->u.obt.obt_rcvd_filp = NULL;
1353 /* cleanup the filter: write last used object id to status file */
/* Teardown counterpart of filter_prep(): flush server data and every
 * group's last objid to disk (syncing only on the final group), close
 * LAST_RCVD and HEALTH_CHECK, release group state, and free capability
 * keys/hash.  Errors are logged but teardown continues. */
1354 static void filter_post(struct obd_device *obd)
1356 struct lvfs_run_ctxt saved;
1357 struct filter_obd *filter = &obd->u.filter;
1360 /* XXX: filter_update_lastobjid used to call fsync_dev. It might be
1361 * best to start a transaction with h_sync, because we removed this
1364 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1365 rc = filter_update_server_data(obd);
1367 CERROR("error writing server data: rc = %d\n", rc);
1369 for (i = 0; i < filter->fo_group_count; i++) {
/* sync to disk only for the last group to avoid per-group fsyncs */
1370 rc = filter_update_last_objid(obd, i,
1371 (i == filter->fo_group_count - 1));
1373 CERROR("error writing group %d lastobjid: rc = %d\n",
1377 rc = filp_close(obd->u.obt.obt_rcvd_filp, 0);
1378 obd->u.obt.obt_rcvd_filp = NULL;
1380 CERROR("error closing %s: rc = %d\n", LAST_RCVD, rc);
1382 rc = filp_close(filter->fo_obt.obt_health_check_filp, 0);
1383 filter->fo_obt.obt_health_check_filp = NULL;
1385 CERROR("error closing %s: rc = %d\n", HEALTH_CHECK, rc);
1387 filter_cleanup_groups(obd);
1388 filter_free_server_data(&obd->u.obt);
1389 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1391 filter_free_capa_keys(filter);
1392 cleanup_capa_hash(filter->fo_capa_hash);
/* Record @id as the last allocated object id for @group, under the
 * fo_objidlock spinlock so readers (filter_last_id) see a consistent value. */
1395 static void filter_set_last_id(struct filter_obd *filter,
1396 obd_id id, obd_seq group)
1398 LASSERT(group <= filter->fo_group_count);
1400 cfs_spin_lock(&filter->fo_objidlock);
1401 filter->fo_last_objids[group] = id;
1402 cfs_spin_unlock(&filter->fo_objidlock);
/* Return the last allocated object id for @group; paired reader of
 * filter_set_last_id(), protected by the same fo_objidlock spinlock. */
1405 obd_id filter_last_id(struct filter_obd *filter, obd_seq group)
1408 LASSERT(group <= filter->fo_group_count);
1409 LASSERT(filter->fo_last_objids != NULL);
1411 /* FIXME: object groups */
1412 cfs_spin_lock(&filter->fo_objidlock);
1413 id = filter->fo_last_objids[group];
1414 cfs_spin_unlock(&filter->fo_objidlock);
/* Take the object parent directory's i_mutex with the I_MUTEX_PARENT
 * lockdep class, per the parent-before-child ordering in the header
 * invariants (i_mutex before any journal ops). */
1418 static int filter_lock_dentry(struct obd_device *obd, struct dentry *dparent)
1420 mutex_lock_nested(&dparent->d_inode->i_mutex, I_MUTEX_PARENT);
1424 /* We never dget the object parent, so DON'T dput it either */
/* Map (group, objid) to the parent directory dentry that holds the object:
 * the per-group O/<seq> dentry, or for MDT groups with subdirs the subdir
 * selected by objid masked against fo_subdir_count - 1 (the mask arithmetic
 * presumes fo_subdir_count is a power of two — consistent with its use as a
 * hash elsewhere, but confirm at the definition site). */
1425 struct dentry *filter_parent(struct obd_device *obd, obd_seq group, obd_id objid)
1427 struct filter_obd *filter = &obd->u.filter;
1428 struct filter_subdirs *subdirs;
1430 if (group >= filter->fo_group_count) /* FIXME: object groups */
1431 return ERR_PTR(-EBADF);
1433 if (!fid_seq_is_mdt(group) || filter->fo_subdir_count == 0)
1434 return filter->fo_dentry_O_groups[group];
1436 subdirs = &filter->fo_dentry_O_sub[group];
1437 return subdirs->dentry[objid & (filter->fo_subdir_count - 1)];
1440 /* We never dget the object parent, so DON'T dput it either */
/* Like filter_parent() but returns with the parent's i_mutex held; caller
 * must release via filter_parent_unlock().  Logs via fsfilt_check_slow()
 * if acquiring the lock took unusually long. */
1441 struct dentry *filter_parent_lock(struct obd_device *obd, obd_seq group,
1444 unsigned long now = jiffies;
1445 struct dentry *dparent = filter_parent(obd, group, objid);
1448 if (IS_ERR(dparent))
1450 if (dparent == NULL)
1451 return ERR_PTR(-ENOENT);
1453 rc = filter_lock_dentry(obd, dparent);
1454 fsfilt_check_slow(obd, now, "parent lock");
1455 return rc ? ERR_PTR(rc) : dparent;
1458 /* We never dget the object parent, so DON'T dput it either */
/* Release the parent i_mutex taken by filter_parent_lock(). */
1459 static void filter_parent_unlock(struct dentry *dparent)
1461 mutex_unlock(&dparent->d_inode->i_mutex);
1464 /* How to get files, dentries, inodes from object id's.
1466 * If dir_dentry is passed, the caller has already locked the parent
1467 * appropriately for this operation (normally a write lock). If
1468 * dir_dentry is NULL, we do a read lock while we do the lookup to
1469 * avoid races with create/destroy and such changing the directory
1470 * internal to the filesystem code. */
/* Returns the child dentry for object @id in @group (caller dputs it).
 * Object id 0 is rejected as invalid; OBD_FAIL_OST_ENOENT fault injection
 * is honored unless orphan destroys are in progress. */
1471 struct dentry *filter_fid2dentry(struct obd_device *obd,
1472 struct dentry *dir_dentry,
1473 obd_seq group, obd_id id)
1475 struct dentry *dparent = dir_dentry;
1476 struct dentry *dchild;
1481 if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOENT) &&
1482 obd->u.filter.fo_destroys_in_progress == 0) {
1483 /* don't fail lookups for orphan recovery, it causes
1484 * later LBUGs when objects still exist during precreate */
1485 CDEBUG(D_INFO, "*** cfs_fail_loc=%x ***\n",OBD_FAIL_OST_ENOENT);
1486 RETURN(ERR_PTR(-ENOENT));
1489 CERROR("fatal: invalid object id 0\n");
1490 RETURN(ERR_PTR(-ESTALE));
/* object names on disk are the decimal object id */
1493 len = sprintf(name, LPU64, id);
1494 if (dir_dentry == NULL) {
1495 dparent = filter_parent_lock(obd, group, id);
1496 if (IS_ERR(dparent)) {
1497 CERROR("%s: error getting object "POSTID
1498 " parent: rc %ld\n", obd->obd_name,
1499 id, group, PTR_ERR(dparent));
1503 CDEBUG(D_INODE, "looking up object O/%.*s/%s\n",
1504 dparent->d_name.len, dparent->d_name.name, name);
1505 /* dparent is already locked here, so we cannot use ll_lookup_one_len() */
1506 dchild = lookup_one_len(name, dparent, len);
/* only unlock if we took the lock ourselves above */
1507 if (dir_dentry == NULL)
1508 filter_parent_unlock(dparent);
1509 if (IS_ERR(dchild)) {
1510 CERROR("%s: object "LPU64":"LPU64" lookup error: rc %ld\n",
1511 obd->obd_name, id, group, PTR_ERR(dchild));
1515 if (dchild->d_inode != NULL && is_bad_inode(dchild->d_inode)) {
1516 CERROR("%s: got bad object "LPU64" inode %lu\n",
1517 obd->obd_name, id, dchild->d_inode->i_ino);
1519 RETURN(ERR_PTR(-ENOENT));
1522 CDEBUG(D_INODE, "got child objid %s: %p, count = %d\n",
1523 name, dchild, atomic_read(&dchild->d_count));
1525 LASSERT(atomic_read(&dchild->d_count) > 0);
/* Before destroying an object, take a local server-side PW extent lock over
 * the whole object [0, EOF] with LDLM_AST_DISCARD_DATA so clients holding
 * conflicting locks are told to drop their cached pages rather than flush
 * them.  @lockh receives the lock handle for filter_fini_destroy(). */
1530 static int filter_prepare_destroy(struct obd_device *obd, obd_id objid,
1531 obd_id group, struct lustre_handle *lockh)
1533 int flags = LDLM_AST_DISCARD_DATA, rc;
1534 struct ldlm_res_id res_id;
1535 ldlm_policy_data_t policy = { .l_extent = { 0, OBD_OBJECT_EOF } };
1538 osc_build_res_name(objid, group, &res_id);
1539 /* Tell the clients that the object is gone now and that they should
1540 * throw away any cached pages. */
1541 rc = ldlm_cli_enqueue_local(obd->obd_namespace, &res_id, LDLM_EXTENT,
1542 &policy, LCK_PW, &flags, ldlm_blocking_ast,
1543 ldlm_completion_ast, NULL, NULL, 0, NULL,
/* Drop the PW lock taken by filter_prepare_destroy(), if it was granted. */
1550 static void filter_fini_destroy(struct obd_device *obd,
1551 struct lustre_handle *lockh)
1553 if (lustre_handle_is_used(lockh))
1554 ldlm_lock_decref(lockh, LCK_PW);
1557 /* This is vfs_unlink() without down(i_sem). If we call regular vfs_unlink()
1558 * we have 2.6 lock ordering issues with filter_commitrw_write() as it takes
1559 * i_sem before starting a handle, while filter_destroy() + vfs_unlink do the
1560 * reverse. Caller must take i_sem before starting the transaction and we
1561 * drop it here before the inode is removed from the dentry. bug 4180/6984 */
1562 int filter_vfs_unlink(struct inode *dir, struct dentry *dentry,
1563 struct vfsmount *mnt)
1568 /* don't need dir->i_zombie for 2.4, it is for rename/unlink of dir
1569 * itself we already hold dir->i_mutex for child create/unlink ops */
1570 LASSERT(dentry->d_inode != NULL);
/* mutex_trylock() failing (== 0) proves the caller already holds both
 * the parent and child i_mutex, as the contract above requires */
1571 LASSERT(mutex_trylock(&dir->i_mutex) == 0);
1572 LASSERT(mutex_trylock(&dentry->d_inode->i_mutex) == 0);
/* guard against the dentry having been moved out of @dir under us */
1576 if (/*!dentry->d_inode ||*/dentry->d_parent->d_inode != dir)
1577 GOTO(out, rc = -ENOENT);
1579 rc = ll_permission(dir, MAY_WRITE | MAY_EXEC, NULL);
1584 GOTO(out, rc = -EPERM);
1586 /* check_sticky() */
1587 if ((dentry->d_inode->i_uid != cfs_curproc_fsuid() &&
1588 !cfs_capable(CFS_CAP_FOWNER)) || IS_APPEND(dentry->d_inode) ||
1589 IS_IMMUTABLE(dentry->d_inode))
1590 GOTO(out, rc = -EPERM);
1592 /* Locking order: i_mutex -> journal_lock -> dqptr_sem. LU-952 */
1593 ll_vfs_dq_init(dir);
1595 rc = ll_security_inode_unlink(dir, dentry, mnt);
/* call the fs unlink op directly, bypassing vfs_unlink()'s own locking */
1599 rc = dir->i_op->unlink(dir, dentry);
1601 /* need to drop i_mutex before we lose inode reference */
1602 mutex_unlock(&dentry->d_inode->i_mutex);
1609 /* Caller must hold LCK_PW on parent and push us into kernel context.
1610 * Caller must hold child i_mutex, we drop it always.
1611 * Caller is also required to ensure that dchild->d_inode exists. */
/* Sanity-check the refcounts on the object inode, then unlink it via
 * filter_vfs_unlink().  An unexpected nlink/i_count is logged but does
 * not abort the destroy. */
1612 static int filter_destroy_internal(struct obd_device *obd, obd_id objid,
1613 obd_seq group, struct dentry *dparent,
1614 struct dentry *dchild)
1616 struct inode *inode = dchild->d_inode;
1619 /* There should be 2 references to the inode:
1620 * 1) taken by filter_prepare_destroy
1621 * 2) taken by filter_destroy */
1622 if (inode->i_nlink != 1 || atomic_read(&inode->i_count) != 2) {
1623 CERROR("destroying objid %.*s ino %lu nlink %lu count %d\n",
1624 dchild->d_name.len, dchild->d_name.name, inode->i_ino,
1625 (unsigned long)inode->i_nlink,
1626 atomic_read(&inode->i_count));
1629 rc = filter_vfs_unlink(dparent->d_inode, dchild, obd->u.obt.obt_vfsmnt);
1631 CERROR("error unlinking objid %.*s: rc %d\n",
1632 dchild->d_name.len, dchild->d_name.name, rc);
/* Per-iteration state handed to filter_intent_cb() while walking a
 * resource's interval tree: the chosen victim lock plus (further fields
 * referenced by the callback: size, liblustre flag). */
1636 struct filter_intent_args {
1637 struct ldlm_lock **victim;
/* Interval-tree callback (iterated in reverse, i.e. from high extents down):
 * among PW locks whose extent reaches beyond the known file size, select a
 * victim lock to glimpse — keeping the candidate with the highest extent
 * start.  Stops the iteration once intervals fall at/below the size. */
1642 static enum interval_iter filter_intent_cb(struct interval_node *n,
1645 struct ldlm_interval *node = (struct ldlm_interval *)n;
1646 struct filter_intent_args *arg = (struct filter_intent_args*)args;
1647 __u64 size = arg->size;
1648 struct ldlm_lock **v = arg->victim;
1649 struct ldlm_lock *lck;
1651 /* If the interval is lower than the current file size,
1653 if (interval_high(n) <= size)
1654 return INTERVAL_ITER_STOP;
1656 cfs_list_for_each_entry(lck, &node->li_group, l_sl_policy) {
1657 /* Don't send glimpse ASTs to liblustre clients.
1658 * They aren't listening for them, and they do
1659 * entirely synchronous I/O anyways. */
1660 if (lck->l_export == NULL ||
1661 lck->l_export->exp_libclient == 1)
/* found a non-liblustre lock holder */
1664 if (*arg->liblustre)
1665 *arg->liblustre = 0;
1668 *v = LDLM_LOCK_GET(lck);
1669 } else if ((*v)->l_policy_data.l_extent.start <
1670 lck->l_policy_data.l_extent.start) {
/* prefer the lock whose extent starts highest */
1671 LDLM_LOCK_RELEASE(*v);
1672 *v = LDLM_LOCK_GET(lck);
1675 /* the same policy group - every lock has the
1676 * same extent, so needn't do it any more */
1680 return INTERVAL_ITER_CONT;
/* LDLM intent policy for glimpse requests on OST extent locks.
 * Pack the DLM reply (with an LVB), run the extent policy to see if the
 * request would be granted; if granted, handle liblustre clients specially
 * (abort with the LVB instead of granting).  If blocked, do not grant —
 * instead pick the highest PW lock above the cached LVB size (via
 * filter_intent_cb) and send it a glimpse AST to refresh the size, then
 * return ELDLM_LOCK_ABORTED with the updated LVB.  A victim lock with no
 * l_glimpse_ast means filter_prepare_destroy() owns it: the object is
 * being unlinked, so answer -ENOENT. */
1683 static int filter_intent_policy(struct ldlm_namespace *ns,
1684 struct ldlm_lock **lockp, void *req_cookie,
1685 ldlm_mode_t mode, int flags, void *data)
1687 struct ptlrpc_request *req = req_cookie;
1688 struct ldlm_lock *lock = *lockp, *l = NULL;
1689 struct ldlm_resource *res = lock->l_resource;
1690 ldlm_processing_policy policy;
1691 struct ost_lvb *res_lvb, *reply_lvb;
1692 struct ldlm_reply *rep;
1694 int idx, rc, tmpflags = 0, only_liblustre = 1;
1695 struct ldlm_interval_tree *tree;
1696 struct filter_intent_args arg;
1697 __u32 repsize[3] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
1698 [DLM_LOCKREPLY_OFF] = sizeof(*rep),
1699 [DLM_REPLY_REC_OFF] = sizeof(*reply_lvb) };
1700 struct ldlm_glimpse_work gl_work;
1701 CFS_LIST_HEAD(gl_list);
1704 policy = ldlm_get_processing_policy(res);
1705 LASSERT(policy != NULL);
1706 LASSERT(req != NULL);
1708 rc = lustre_pack_reply(req, 3, repsize, NULL);
1710 RETURN(req->rq_status = rc);
1712 rep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF, sizeof(*rep));
1713 LASSERT(rep != NULL);
1715 reply_lvb = lustre_msg_buf(req->rq_repmsg, DLM_REPLY_REC_OFF,
1716 sizeof(*reply_lvb));
1717 LASSERT(reply_lvb != NULL);
1719 //fixup_handle_for_resent_req(req, lock, &lockh);
1721 /* Call the extent policy function to see if our request can be
1722 * granted, or is blocked.
1723 * If the OST lock has LDLM_FL_HAS_INTENT set, it means a glimpse
1724 * lock, and should not be granted if the lock will be blocked.
1727 if (flags & LDLM_FL_BLOCK_NOWAIT) {
1728 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_AGL_DELAY, 5);
1730 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_AGL_NOLOCK))
1731 RETURN(ELDLM_LOCK_ABORTED);
1734 LASSERT(ns == ldlm_res_to_ns(res));
1736 rc = policy(lock, &tmpflags, 0, &err, NULL);
1737 check_res_locked(res);
1739 /* The lock met with no resistance; we're finished. */
1740 if (rc == LDLM_ITER_CONTINUE) {
1741 /* do not grant locks to the liblustre clients: they cannot
1742 * handle ASTs robustly. We need to do this while still
1743 * holding lr_lock to avoid the lock remaining on the res_link
1744 * list (and potentially being added to l_pending_list by an
1745 * AST) when we are going to drop this lock ASAP. */
1746 if (lock->l_export->exp_libclient ||
1747 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_GLIMPSE, 2)) {
1748 ldlm_resource_unlink_lock(lock);
1749 err = ELDLM_LOCK_ABORTED;
1751 err = ELDLM_LOCK_REPLACED;
1755 } else if (flags & LDLM_FL_BLOCK_NOWAIT) {
1756 /* LDLM_FL_BLOCK_NOWAIT means it is for AGL. Do not send glimpse
1757 * callback for glimpse size. The real size user will trigger
1758 * the glimpse callback when necessary. */
1760 RETURN(ELDLM_LOCK_ABORTED);
1763 /* Do not grant any lock, but instead send GL callbacks. The extent
1764 * policy nicely created a list of all PW locks for us. We will choose
1765 * the highest of those which are larger than the size in the LVB, if
1766 * any, and perform a glimpse callback. */
1767 res_lvb = res->lr_lvb_data;
1768 LASSERT(res_lvb != NULL);
1769 *reply_lvb = *res_lvb;
1772 * lr_lock guarantees that no new locks are granted, and,
1773 * therefore, that res->lr_lvb_data cannot increase beyond the
1774 * end of already granted lock. As a result, it is safe to
1775 * check against "stale" reply_lvb->lvb_size value without
1776 * res->lr_lvb_mutex.
1778 arg.size = reply_lvb->lvb_size;
1780 arg.liblustre = &only_liblustre;
/* walk each mode's interval tree; skip PR trees — only PW locks can
 * have grown the file beyond the cached LVB size */
1781 for (idx = 0; idx < LCK_MODE_NUM; idx++) {
1782 tree = &res->lr_itree[idx];
1783 if (tree->lit_mode == LCK_PR)
1786 interval_iterate_reverse(tree->lit_root,
1787 filter_intent_cb, &arg);
1791 /* There were no PW locks beyond the size in the LVB; finished. */
1793 if (only_liblustre) {
1794 /* If we discovered a liblustre client with a PW lock,
1795 * however, the LVB may be out of date! The LVB is
1796 * updated only on glimpse (which we don't do for
1797 * liblustre clients) and cancel (which the client
1798 * obviously has not yet done). So if it has written
1799 * data but kept the lock, the LVB is stale and needs
1800 * to be updated from disk.
1802 * Of course, this will all disappear when we switch to
1803 * taking liblustre locks on the OST. */
1804 ldlm_res_lvbo_update(res, NULL, 1);
1806 RETURN(ELDLM_LOCK_ABORTED);
1810 * This check is for lock taken in filter_prepare_destroy() that does
1811 * not have l_glimpse_ast set. So the logic is: if there is a lock
1812 * with no l_glimpse_ast set, this object is being destroyed already.
1814 * Hence, if you are grabbing DLM locks on the server, always set
1815 * non-NULL glimpse_ast (e.g., ldlm_request.c:ldlm_glimpse_ast()).
1817 if (l->l_glimpse_ast == NULL) {
1818 /* We are racing with unlink(); just return -ENOENT */
1819 rep->lock_policy_res1 = -ENOENT;
1823 LASSERTF(l->l_glimpse_ast != NULL, "l == %p", l);
1825 /* Populate the gl_work structure.
1826 * Grab additional reference on the lock which will be released in
1827 * ldlm_work_gl_ast_lock() */
1828 gl_work.gl_lock = LDLM_LOCK_GET(l);
1829 /* The glimpse callback is sent to one single extent lock. As a result,
1830 * the gl_work list is just composed of one element */
1831 cfs_list_add_tail(&gl_work.gl_list, &gl_list);
1832 /* There is actually no need for a glimpse descriptor when glimpsing
1834 gl_work.gl_desc = NULL;
1835 /* the ldlm_glimpse_work structure is allocated on the stack */
1836 gl_work.gl_flags = LDLM_GL_WORK_NOFREE;
1838 rc = ldlm_glimpse_locks(res, &gl_list); /* this will update the LVB */
/* if the work item was consumed, the reference was not released for us */
1840 if (!cfs_list_empty(&gl_list))
1841 LDLM_LOCK_RELEASE(l);
1844 *reply_lvb = *res_lvb;
1848 LDLM_LOCK_RELEASE(l);
1850 RETURN(ELDLM_LOCK_ABORTED);
1854 * per-obd_device iobuf pool.
1856 * To avoid memory deadlocks in low-memory setups, amount of dynamic
1857 * allocations in write-path has to be minimized (see bug 5137).
1859 * Pages, niobuf_local's and niobuf_remote's are pre-allocated and attached to
1860 * OST threads (see ost_thread_{init,done}()).
1862 * "iobuf's" used by filter cannot be attached to OST thread, however, because
1863 * at the OST layer there are only (potentially) multiple obd_device of type
1864 * unknown at the time of OST thread creation.
1866 * We create a cfs_hash for struct filter_obd (->fo_iobuf_hash field) on
1867 * initializing, each OST thread will create it's own iobuf on the first
1868 * access and insert it into ->fo_iobuf_hash with thread ID as key,
1869 * so the iobuf can be found again by thread ID.
1873 * filter_iobuf_pool_init()
1875 * filter_iobuf_pool_done()
1877 * filter_iobuf_get()
1879 * operate on this array. They are "generic" in a sense that they don't depend
1880 * on actual type of iobuf's (the latter depending on Linux kernel version).
1884 * destroy pool created by filter_iobuf_pool_init
1886 static void filter_iobuf_pool_done(struct filter_obd *filter)
1890 if (filter->fo_iobuf_hash != NULL) {
1891 cfs_hash_putref(filter->fo_iobuf_hash);
1892 filter->fo_iobuf_hash = NULL;
/* Refresh this target's sptlrpc (RPC security flavor) rules: fetch the
 * current rule set, update the expected flavor on all exports, then swap
 * the new rule set in under fo_sptlrpc_lock, freeing the old one. */
1897 static int filter_adapt_sptlrpc_conf(struct obd_device *obd, int initial)
1899 struct filter_obd *filter = &obd->u.filter;
1900 struct sptlrpc_rule_set tmp_rset;
1903 sptlrpc_rule_set_init(&tmp_rset);
1904 rc = sptlrpc_conf_target_get_rules(obd, &tmp_rset, initial);
1906 CERROR("obd %s: failed get sptlrpc rules: %d\n",
1911 sptlrpc_target_update_exp_flavor(obd, &tmp_rset);
1913 cfs_write_lock(&filter->fo_sptlrpc_lock);
1914 sptlrpc_rule_set_free(&filter->fo_sptlrpc_rset);
1915 filter->fo_sptlrpc_rset = tmp_rset;
1916 cfs_write_unlock(&filter->fo_sptlrpc_lock);
/* cfs_hash hop: hash the 64-bit iobuf key into the table's bit range. */
1922 filter_iobuf_hop_hash(cfs_hash_t *hs, const void *key, unsigned mask)
1924 __u64 val = *((__u64 *)key);
1926 return cfs_hash_long(val, hs->hs_cur_bits);
/* cfs_hash hop: return a pointer to the iobuf's key (dr_hkey). */
1930 filter_iobuf_hop_key(cfs_hlist_node_t *hnode)
1932 struct filter_iobuf *pool;
1934 pool = cfs_hlist_entry(hnode, struct filter_iobuf, dr_hlist);
1935 return &pool->dr_hkey;
/* cfs_hash hop: compare a lookup key against a stored iobuf's key. */
1939 filter_iobuf_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
1941 struct filter_iobuf *pool;
1943 pool = cfs_hlist_entry(hnode, struct filter_iobuf, dr_hlist);
1944 return pool->dr_hkey == *((__u64 *)key);
/* cfs_hash hop: recover the filter_iobuf from its hash-list node. */
1948 filter_iobuf_hop_object(cfs_hlist_node_t *hnode)
1950 return cfs_hlist_entry(hnode, struct filter_iobuf, dr_hlist);
/* cfs_hash hop: no per-item refcounting for iobufs (CFS_HASH_NO_ITEMREF). */
1954 filter_iobuf_hop_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
1956 /* dummy, required by cfs_hash */
/* cfs_hash hop: no-op put, matching the no-op get above. */
1960 filter_iobuf_hop_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
1962 /* dummy, required by cfs_hash */
/* cfs_hash hop: free an iobuf when it is evicted at hash teardown. */
1966 filter_iobuf_hop_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
1968 struct filter_iobuf *pool;
1970 pool = cfs_hlist_entry(hnode, struct filter_iobuf, dr_hlist);
1971 filter_free_iobuf(pool);
/* Operations table wiring the hop callbacks above into fo_iobuf_hash. */
1974 static struct cfs_hash_ops filter_iobuf_hops = {
1975 .hs_hash = filter_iobuf_hop_hash,
1976 .hs_key = filter_iobuf_hop_key,
1977 .hs_keycmp = filter_iobuf_hop_keycmp,
1978 .hs_object = filter_iobuf_hop_object,
1979 .hs_get = filter_iobuf_hop_get,
1980 .hs_put_locked = filter_iobuf_hop_put_locked,
1981 .hs_exit = filter_iobuf_hop_exit
1984 #define FILTER_IOBUF_HASH_BITS 9
1985 #define FILTER_IOBUF_HBKT_BITS 4
1988 * pre-allocate pool of iobuf's to be used by filter_{prep,commit}rw_write().
/* Create fo_iobuf_hash (keyed by thread id, see filter_iobuf_get).
 * Returns 0 on success, -ENOMEM if the hash could not be created. */
1990 static int filter_iobuf_pool_init(struct filter_obd *filter)
1992 filter->fo_iobuf_hash = cfs_hash_create("filter_iobuf",
1993 FILTER_IOBUF_HASH_BITS,
1994 FILTER_IOBUF_HASH_BITS,
1995 FILTER_IOBUF_HBKT_BITS, 0,
1999 CFS_HASH_RW_BKTLOCK |
2000 CFS_HASH_NO_ITEMREF);
2002 return filter->fo_iobuf_hash != NULL ? 0 : -ENOMEM;
2005 /* Return iobuf allocated for @thread_id.
2006 * If we haven't allocated a pool entry for this thread before, do so now and
2007 * insert it into fo_iobuf_hash, otherwise we can find it from fo_iobuf_hash */
2008 void *filter_iobuf_get(struct filter_obd *filter, struct obd_trans_info *oti)
2010 struct filter_iobuf *pool = NULL;
2015 thread_id = (oti && oti->oti_thread) ? oti->oti_thread->t_id : -1;
2016 if (thread_id >= 0) {
2017 struct ptlrpc_service_part *svcpt;
2019 svcpt = oti->oti_thread->t_svcpt;
2020 LASSERT(svcpt != NULL);
/* key combines the CPU partition index (high 32 bits) with the
 * thread id so ids are unique across service partitions */
2022 key = (__u64)(svcpt->scp_cpt) << 32 | thread_id;
2023 pool = cfs_hash_lookup(filter->fo_iobuf_hash, &key);
/* first use by this thread (or anonymous caller): allocate a write iobuf
 * sized for the maximum bulk transfer */
2028 pool = filter_alloc_iobuf(filter, OBD_BRW_WRITE, PTLRPC_MAX_BRW_PAGES);
2032 if (thread_id >= 0) {
2033 pool->dr_hkey = key;
2034 rc = cfs_hash_add_unique(filter->fo_iobuf_hash,
2035 &key, &pool->dr_hlist);
2036 /* ptlrpc service thould guarantee thread ID is unique */
2037 LASSERT(rc != -EALREADY);
2043 /* mount the file system (secretly). lustre_cfg parameters are:
2046 * 3 = flags: failover=f, failout=n
/* Core obdfilter setup: obtain (or mount) the backing filesystem, set up
 * fsfilt ops and the iobuf pool, initialize filter_obd locks/defaults,
 * run filter_prep(), then create the LDLM namespace with the glimpse
 * intent policy, llog and sptlrpc state.  Error labels unwind fsfilt ops,
 * the iobuf pool and the mount in reverse order. */
2049 int filter_common_setup(struct obd_device *obd, struct lustre_cfg* lcfg,
2052 struct filter_obd *filter = &obd->u.filter;
2053 struct vfsmount *mnt;
2054 struct file_system_type *type;
2055 struct lustre_mount_info *lmi;
2056 struct obd_uuid uuid;
2060 struct request_queue *q;
2064 if (lcfg->lcfg_bufcount < 3 ||
2065 LUSTRE_CFG_BUFLEN(lcfg, 1) < 1 ||
2066 LUSTRE_CFG_BUFLEN(lcfg, 2) < 1)
2069 lmi = server_get_mount(obd->obd_name);
2071 /* We already mounted in lustre_fill_super.
2072 lcfg bufs 1, 2, 4 (device, fstype, mount opts) are ignored.*/
2073 struct lustre_sb_info *lsi = s2lsi(lmi->lmi_sb);
2075 obd->obd_fsops = fsfilt_get_ops(lsi->lsi_fstype);
2077 /* old path - used by lctl */
2078 CERROR("Using old MDS mount method\n");
2079 type = get_fs_type(lustre_cfg_string(lcfg, 2));
2081 CERROR("get_fs_type failed\n");
2084 mnt = vfs_kern_mount(type, MS_NOATIME|MS_NODIRATIME,
2085 lustre_cfg_string(lcfg, 1), option);
2086 cfs_module_put(type->owner);
2089 LCONSOLE_ERROR_MSG(0x135, "Can't mount disk %s (%d)\n",
2090 lustre_cfg_string(lcfg, 1), rc);
2094 obd->obd_fsops = fsfilt_get_ops(lustre_cfg_string(lcfg, 2));
2096 if (IS_ERR(obd->obd_fsops))
2097 GOTO(err_mntput, rc = PTR_ERR(obd->obd_fsops));
2099 rc = filter_iobuf_pool_init(filter);
2103 if (lvfs_check_rdonly(lvfs_sbdev(mnt->mnt_sb))) {
2104 CERROR("%s: Underlying device is marked as read-only. "
2105 "Setup failed\n", obd->obd_name);
2106 GOTO(err_ops, rc = -EROFS);
2109 /* failover is the default */
2110 obd->obd_replayable = 1;
2112 /* disable connection until configuration finishes */
2113 obd->obd_no_conn = 1;
/* lcfg buf 3 carries the failover/failout flag string; 'n' = failout,
 * which disables recovery/replay */
2115 if (lcfg->lcfg_bufcount > 3 && LUSTRE_CFG_BUFLEN(lcfg, 3) > 0) {
2116 str = lustre_cfg_string(lcfg, 3);
2117 if (strchr(str, 'n')) {
2118 CWARN("%s: recovery disabled\n", obd->obd_name);
2119 obd->obd_replayable = 0;
2123 obd->u.obt.obt_magic = OBT_MAGIC;
2124 obd->u.obt.obt_vfsmnt = mnt;
2125 obd->u.obt.obt_sb = mnt->mnt_sb;
2126 filter->fo_fstype = mnt->mnt_sb->s_type->name;
2127 CDEBUG(D_SUPER, "%s: mnt = %p\n", filter->fo_fstype, mnt);
2129 rc = fsfilt_setup(obd, obd->u.obt.obt_sb);
2133 OBD_SET_CTXT_MAGIC(&obd->obd_lvfs_ctxt);
2134 obd->obd_lvfs_ctxt.pwdmnt = mnt;
2135 obd->obd_lvfs_ctxt.pwd = mnt->mnt_root;
2136 obd->obd_lvfs_ctxt.fs = get_ds();
2137 obd->obd_lvfs_ctxt.cb_ops = filter_lvfs_ops;
/* initialize filter-private locks, lists and tunables */
2139 cfs_mutex_init(&filter->fo_init_lock);
2140 filter->fo_committed_group = 0;
2141 filter->fo_destroys_in_progress = 0;
2142 for (i = 0; i < 32; i++)
2143 cfs_mutex_init(&filter->fo_create_locks[i]);
2145 cfs_spin_lock_init(&filter->fo_objidlock);
2146 CFS_INIT_LIST_HEAD(&filter->fo_export_list);
2147 cfs_mutex_init(&filter->fo_alloc_lock);
2148 init_brw_stats(&filter->fo_filter_stats);
2149 cfs_spin_lock_init(&filter->fo_flags_lock);
2150 filter->fo_read_cache = 1; /* enable read-only cache by default */
2151 filter->fo_writethrough_cache = 1; /* enable writethrough cache */
2152 filter->fo_readcache_max_filesize = FILTER_MAX_CACHE_SIZE;
2153 filter->fo_fmd_max_num = FILTER_FMD_MAX_NUM_DEFAULT;
2154 filter->fo_fmd_max_age = FILTER_FMD_MAX_AGE_DEFAULT;
2155 filter->fo_syncjournal = 0; /* Don't sync journals on i/o by default */
2156 filter_slc_set(filter); /* initialize sync on lock cancel */
2158 rc = filter_prep(obd);
2162 CFS_INIT_LIST_HEAD(&filter->fo_llog_list);
2163 cfs_spin_lock_init(&filter->fo_llog_list_lock);
2165 filter->fo_fl_oss_capa = 1;
2167 CFS_INIT_LIST_HEAD(&filter->fo_capa_keys);
2168 filter->fo_capa_hash = init_capa_hash();
2169 if (filter->fo_capa_hash == NULL)
2170 GOTO(err_post, rc = -ENOMEM);
/* server-side DLM namespace with our glimpse intent policy and LVB ops */
2172 sprintf(ns_name, "filter-%s", obd->obd_uuid.uuid);
2173 obd->obd_namespace = ldlm_namespace_new(obd, ns_name,
2174 LDLM_NAMESPACE_SERVER,
2175 LDLM_NAMESPACE_GREEDY,
2177 if (obd->obd_namespace == NULL)
2178 GOTO(err_post, rc = -ENOMEM);
2179 obd->obd_namespace->ns_lvbp = obd;
2180 obd->obd_namespace->ns_lvbo = &filter_lvbo;
2181 ldlm_register_intent(obd->obd_namespace, filter_intent_policy);
2183 ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
2184 "filter_ldlm_cb_client", &obd->obd_ldlm_client);
2186 rc = obd_llog_init(obd, &obd->obd_olg, obd, NULL);
2188 CERROR("failed to setup llogging subsystems\n");
2192 cfs_rwlock_init(&filter->fo_sptlrpc_lock);
2193 sptlrpc_rule_set_init(&filter->fo_sptlrpc_rset);
2194 /* do this after llog being initialized */
2195 filter_adapt_sptlrpc_conf(obd, 1);
/* advisory check: warn if the block queue's max_sectors limits bulk I/O */
2197 q = bdev_get_queue(mnt->mnt_sb->s_bdev);
2198 if (queue_max_sectors(q) < queue_max_hw_sectors(q) &&
2199 queue_max_sectors(q) < PTLRPC_MAX_BRW_SIZE >> 9)
2200 LCONSOLE_INFO("%s: underlying device %s should be tuned "
2201 "for larger I/O requests: max_sectors = %u "
2202 "could be up to max_hw_sectors=%u\n",
2203 obd->obd_name, mnt->mnt_sb->s_id,
2204 queue_max_sectors(q), queue_max_hw_sectors(q));
2206 uuid_ptr = fsfilt_uuid(obd, obd->u.obt.obt_sb);
2207 if (uuid_ptr != NULL) {
2208 class_uuid_unparse(uuid_ptr, &uuid);
2214 label = fsfilt_get_label(obd, obd->u.obt.obt_sb);
2215 LCONSOLE_INFO("%s: Now serving %s %s%s with recovery %s\n",
2216 obd->obd_name, label ?: str, lmi ? "on " : "",
2217 lmi ? s2lsi(lmi->lmi_sb)->lsi_lmd->lmd_dev : "",
2218 obd->obd_replayable ? "enabled" : "disabled");
/* --- error unwinding below --- */
2225 fsfilt_put_ops(obd->obd_fsops);
2226 filter_iobuf_pool_done(filter);
2228 server_put_mount(obd->obd_name, mnt);
2229 obd->u.obt.obt_sb = 0;
/* OBD setup entry point for the obdfilter device type: registers lprocfs
 * state (obd stats counters, seqstat, exports dir, nid-stats "clear" entry,
 * job stats) before calling filter_common_setup() with the mount options
 * copied into a full page (selinux requires a page for do_kern_mount).
 * Each error label unwinds the lprocfs state registered before it. */
2233 static int filter_setup(struct obd_device *obd, struct lustre_cfg* lcfg)
2235 struct lprocfs_static_vars lvars;
2236 cfs_proc_dir_entry_t *entry;
2242 CLASSERT(offsetof(struct obd_device, u.obt) ==
2243 offsetof(struct obd_device, u.filter.fo_obt));
2245 if (!LUSTRE_CFG_BUFLEN(lcfg, 1) || !LUSTRE_CFG_BUFLEN(lcfg, 2))
2248 /* lprocfs must be setup before the filter so state can be safely added
2249 * to /proc incrementally as the filter is setup */
2250 lprocfs_filter_init_vars(&lvars);
2251 rc = lprocfs_obd_setup(obd, lvars.obd_vars);
2253 CERROR("%s: lprocfs_obd_setup failed: %d.\n",
2258 rc = lprocfs_alloc_obd_stats(obd, LPROC_FILTER_LAST);
2260 CERROR("%s: lprocfs_alloc_obd_stats failed: %d.\n",
2262 GOTO(obd_cleanup, rc);
2265 /* Init obdfilter private stats here */
2266 lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_READ_BYTES,
2267 LPROCFS_CNTR_AVGMINMAX, "read_bytes", "bytes");
2268 lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_WRITE_BYTES,
2269 LPROCFS_CNTR_AVGMINMAX, "write_bytes", "bytes");
2270 lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_GET_PAGE,
2271 LPROCFS_CNTR_AVGMINMAX|LPROCFS_CNTR_STDDEV,
2272 "get_page", "usec");
2273 lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_NO_PAGE,
2274 LPROCFS_CNTR_AVGMINMAX, "get_page_failures", "num");
2275 lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_CACHE_ACCESS,
2276 LPROCFS_CNTR_AVGMINMAX, "cache_access", "pages");
2277 lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_CACHE_HIT,
2278 LPROCFS_CNTR_AVGMINMAX, "cache_hit", "pages");
2279 lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_CACHE_MISS,
2280 LPROCFS_CNTR_AVGMINMAX, "cache_miss", "pages");
2282 rc = lproc_filter_attach_seqstat(obd);
2284 CERROR("%s: create seqstat failed: %d.\n", obd->obd_name, rc);
2285 GOTO(free_obd_stats, rc);
2288 entry = lprocfs_register("exports", obd->obd_proc_entry, NULL, NULL);
2289 if (IS_ERR(entry)) {
2290 rc = PTR_ERR(entry);
2291 CERROR("%s: error %d setting up lprocfs for %s\n",
2292 obd->obd_name, rc, "exports");
2293 GOTO(free_obd_stats, rc);
2295 obd->obd_proc_exports_entry = entry;
2297 entry = lprocfs_add_simple(obd->obd_proc_exports_entry, "clear",
2298 lprocfs_nid_stats_clear_read,
2299 lprocfs_nid_stats_clear_write, obd, NULL);
2300 if (IS_ERR(entry)) {
2301 rc = PTR_ERR(entry);
2302 CERROR("%s: add proc entry 'clear' failed: %d.\n",
2304 GOTO(free_obd_stats, rc);
2307 rc = lprocfs_job_stats_init(obd, LPROC_FILTER_STATS_LAST,
2308 filter_stats_counter_init);
2310 GOTO(remove_entry_clear, rc);
2312 /* 2.6.9 selinux wants a full option page for do_kern_mount (bug6471) */
2313 OBD_PAGE_ALLOC(page, CFS_ALLOC_STD);
2315 GOTO(job_stats_fini, rc = -ENOMEM);
2316 addr = (unsigned long)cfs_page_address(page);
2317 clear_page((void *)addr);
2318 memcpy((void *)addr, lustre_cfg_buf(lcfg, 4),
2319 LUSTRE_CFG_BUFLEN(lcfg, 4));
2320 rc = filter_common_setup(obd, lcfg, (void *)addr);
2321 OBD_PAGE_FREE(page);
2323 CERROR("%s: filter_common_setup failed: %d.\n",
2325 GOTO(job_stats_fini, rc);
/* --- error unwinding below --- */
2331 lprocfs_job_stats_fini(obd);
2333 lprocfs_remove_proc_entry("clear", obd->obd_proc_exports_entry);
2335 lprocfs_free_obd_stats(obd);
2337 lprocfs_obd_cleanup(obd);
/* MDS->OST replicator llog ops; populated at filter_olg_init() time from
 * llog_client_ops with the repl cancel/connect/sync callbacks patched in. */
2341 static struct llog_operations filter_mds_ost_repl_logops;
/* size-orig context uses empty (default) llog operations */
2343 static struct llog_operations filter_size_orig_logops = {};
/* Tear down the llog contexts held by @olg, if present: the MDS-OST
 * replicator, size-orig and config-orig contexts. */
2345 static int filter_olg_fini(struct obd_llog_group *olg)
2347 struct llog_ctxt *ctxt;
2351 ctxt = llog_group_get_ctxt(olg, LLOG_MDS_OST_REPL_CTXT);
2353 llog_cleanup(NULL, ctxt);
2355 ctxt = llog_group_get_ctxt(olg, LLOG_SIZE_ORIG_CTXT);
2357 llog_cleanup(NULL, ctxt);
2359 ctxt = llog_group_get_ctxt(olg, LLOG_CONFIG_ORIG_CTXT);
2361 llog_cleanup(NULL, ctxt);
/* Set up the MDS-OST replicator and size-orig llog contexts for @olg.
 * The replicator ops are llog_client_ops with the repl
 * cancel/connect/sync callbacks substituted in. */
2367 filter_olg_init(struct obd_device *obd, struct obd_llog_group *olg,
2368 struct obd_device *tgt)
2370 struct llog_ctxt *ctxt = NULL;
2375 filter_mds_ost_repl_logops = llog_client_ops;
2376 filter_mds_ost_repl_logops.lop_cancel = llog_obd_repl_cancel;
2377 filter_mds_ost_repl_logops.lop_connect = llog_obd_repl_connect;
2378 filter_mds_ost_repl_logops.lop_sync = llog_obd_repl_sync;
2380 rc = llog_setup(NULL, obd, olg, LLOG_MDS_OST_REPL_CTXT, tgt,
2381 &filter_mds_ost_repl_logops);
2385 rc = llog_setup(NULL, obd, olg, LLOG_SIZE_ORIG_CTXT, tgt,
2386 &filter_size_orig_logops);
/* second setup failed: undo the replicator context */
2392 ctxt = llog_group_get_ctxt(olg, LLOG_MDS_OST_REPL_CTXT);
2393 llog_cleanup(NULL, ctxt);
2398 * Init the default olg, which is embedded in the obd_device, for filter.
2401 filter_default_olg_init(struct obd_device *obd, struct obd_llog_group *olg,
2402 struct obd_device *tgt)
2404 struct filter_obd *filter = &obd->u.filter;
2405 struct llog_ctxt *ctxt;
/* start the log-commit recovery thread shared by all groups */
2409 filter->fo_lcm = llog_recov_thread_init(obd->obd_name);
2410 if (!filter->fo_lcm)
2413 rc = filter_olg_init(obd, olg, tgt);
2415 GOTO(cleanup_lcm, rc);
2417 rc = llog_setup(NULL, obd, olg, LLOG_CONFIG_ORIG_CTXT, tgt,
2420 GOTO(cleanup_olg, rc);
2422 ctxt = llog_group_get_ctxt(olg, LLOG_MDS_OST_REPL_CTXT);
2424 CERROR("Can't get ctxt for %p:%x\n", olg,
2425 LLOG_MDS_OST_REPL_CTXT);
2426 GOTO(cleanup_olg, rc = -ENODEV);
/* replicator ctxt pins the commit-master and gets the recovery callback */
2428 ctxt->loc_lcm = lcm_get(filter->fo_lcm);
2429 ctxt->llog_proc_cb = filter_recov_log_mds_ost_cb;
2430 llog_ctxt_put(ctxt);
/* error unwind: drop contexts, then the recovery thread */
2434 filter_olg_fini(olg);
2436 llog_recov_thread_fini(filter->fo_lcm, 1);
2437 filter->fo_lcm = NULL;
/* obd_llog_init hook: the default (embedded) olg goes through
 * filter_default_olg_init(); any other group gets the plain context setup
 * plus the recovery callback and a reference on the shared fo_lcm. */
2442 filter_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
2443 struct obd_device *tgt, int *index)
2445 struct filter_obd *filter = &obd->u.filter;
2446 struct llog_ctxt *ctxt;
2450 LASSERT(olg != NULL);
2451 if (olg == &obd->obd_olg)
2452 return filter_default_olg_init(obd, olg, tgt);
/* non-default groups require the commit-master thread to exist already */
2454 LASSERT(filter->fo_lcm != NULL);
2455 rc = filter_olg_init(obd, olg, tgt);
2458 ctxt = llog_group_get_ctxt(olg, LLOG_MDS_OST_REPL_CTXT);
2460 CERROR("Can't get ctxt for %p:%x\n", olg,
2461 LLOG_MDS_OST_REPL_CTXT);
2462 filter_olg_fini(olg);
2465 ctxt->llog_proc_cb = filter_recov_log_mds_ost_cb;
2466 ctxt->loc_lcm = lcm_get(filter->fo_lcm);
2467 llog_ctxt_put(ctxt);
/* Shut down llog machinery for the default olg: sync the replicator
 * context, drop its cached import, stop the recovery thread, then clean
 * up all contexts via filter_olg_fini(). */
2471 static int filter_llog_finish(struct obd_device *obd, int count)
2473 struct filter_obd *filter = &obd->u.filter;
2474 struct llog_ctxt *ctxt;
2477 ctxt = llog_group_get_ctxt(&obd->obd_olg, LLOG_MDS_OST_REPL_CTXT);
2480 * Make sure that no cached llcds left in recov_thread.
2481 * We actually do sync in disconnect time, but disconnect
2482 * may not come, being marked rq_no_resend = 1.
2484 llog_sync(ctxt, NULL, OBD_LLOG_FL_EXIT);
2487 * Balance class_import_get() in llog_receptor_accept().
2488 * This is safe to do, as llog is already synchronized
2489 * and its import may go.
2491 cfs_mutex_lock(&ctxt->loc_mutex);
2492 if (ctxt->loc_imp) {
2493 class_import_put(ctxt->loc_imp);
2494 ctxt->loc_imp = NULL;
2497 if (filter->fo_lcm) {
2498 llog_recov_thread_fini(filter->fo_lcm, obd->obd_force);
2499 filter->fo_lcm = NULL;
2502 cfs_mutex_unlock(&ctxt->loc_mutex);
2503 llog_ctxt_put(ctxt);
2506 RETURN(filter_olg_fini(&obd->obd_olg));
2510 * Find the group llog according to group index in the llog group list.
/* Walk filter->fo_llog_list for the group with sequence @group.
 * Caller must hold fo_llog_list_lock (asserted below). */
2512 static struct obd_llog_group *
2513 filter_find_olg_internal(struct filter_obd *filter, int group)
2515 struct obd_llog_group *olg;
2517 LASSERT_SPIN_LOCKED(&filter->fo_llog_list_lock);
2518 cfs_list_for_each_entry(olg, &filter->fo_llog_list, olg_list) {
2519 if (olg->olg_seq == group)
2526 * Find the group llog according to group index on the filter
/* Locked wrapper around filter_find_olg_internal(); FID_SEQ_LLOG maps to
 * the default olg embedded in the obd_device. */
2528 struct obd_llog_group *filter_find_olg(struct obd_device *obd, int group)
2530 struct obd_llog_group *olg = NULL;
2531 struct filter_obd *filter;
2533 filter = &obd->u.filter;
2535 if (group == FID_SEQ_LLOG)
2536 RETURN(&obd->obd_olg);
2538 cfs_spin_lock(&filter->fo_llog_list_lock);
2539 olg = filter_find_olg_internal(filter, group);
2540 cfs_spin_unlock(&filter->fo_llog_list_lock);
2545 * Find the llog_group of the filter according to the group. If it can not
2546 * find, create the llog_group, which only happens when mds is being synced
/* Find the llog group for @group, creating and initializing one if it is
 * missing.  A freshly inserted group carries olg_initializing so racing
 * lookups see -EBUSY until obd_llog_init() completes. */
2549 struct obd_llog_group *filter_find_create_olg(struct obd_device *obd, int group)
2551 struct obd_llog_group *olg = NULL, *olg_new = NULL;
2552 struct filter_obd *filter;
2555 filter = &obd->u.filter;
2557 if (group == FID_SEQ_LLOG)
2558 RETURN(&obd->obd_olg);
/* allocate outside the spinlock in case we need to insert */
2560 OBD_ALLOC_PTR(olg_new);
2561 if (olg_new == NULL)
2562 RETURN(ERR_PTR(-ENOMEM));
2564 cfs_spin_lock(&filter->fo_llog_list_lock);
2565 olg = filter_find_olg_internal(filter, group);
2567 if (olg->olg_initializing) {
2568 GOTO(out_unlock, olg = ERR_PTR(-EBUSY));
2570 GOTO(out_unlock, olg);
2573 /* set as the newly allocated one */
2578 llog_group_init(olg, group);
2579 cfs_list_add(&olg->olg_list, &filter->fo_llog_list);
2580 olg->olg_initializing = 1;
2581 cfs_spin_unlock(&filter->fo_llog_list_lock);
2583 rc = obd_llog_init(obd, olg, obd, NULL);
/* NOTE(review): init failure is reported as -ENOMEM regardless of rc
 * below -- confirm this is intended */
2585 cfs_spin_lock(&filter->fo_llog_list_lock);
2586 cfs_list_del(&olg->olg_list);
2587 cfs_spin_unlock(&filter->fo_llog_list_lock);
2589 GOTO(out, olg = ERR_PTR(-ENOMEM));
2591 cfs_spin_lock(&filter->fo_llog_list_lock);
2592 olg->olg_initializing = 0;
2593 cfs_spin_unlock(&filter->fo_llog_list_lock);
2594 CDEBUG(D_OTHER, "%s: new llog group %u (0x%p)\n",
2595 obd->obd_name, group, olg);
2600 cfs_spin_unlock(&filter->fo_llog_list_lock);
2602 OBD_FREE_PTR(olg_new);
/* LLog connect handler: locate the llog group for the log's sequence,
 * bind it to this export, mark MDS-OST sync in progress, and connect the
 * requested llog context to the remote log id. */
2606 static int filter_llog_connect(struct obd_export *exp,
2607 struct llogd_conn_body *body)
2609 struct obd_device *obd = exp->exp_obd;
2610 struct llog_ctxt *ctxt;
2611 struct obd_llog_group *olg;
2615 CDEBUG(D_OTHER, "%s: LLog connect for: "LPX64"/"LPX64":%x\n",
2616 obd->obd_name, body->lgdc_logid.lgl_oid,
2617 body->lgdc_logid.lgl_oseq, body->lgdc_logid.lgl_ogen);
2619 olg = filter_find_olg(obd, body->lgdc_logid.lgl_oseq);
2621 CERROR(" %s: can not find olg of group %d\n",
2622 obd->obd_name, (int)body->lgdc_logid.lgl_oseq);
2625 llog_group_set_export(olg, exp);
2627 ctxt = llog_group_get_ctxt(olg, body->lgdc_ctxt_idx);
2628 LASSERTF(ctxt != NULL, "ctxt is not null, ctxt idx %d \n",
2629 body->lgdc_ctxt_idx);
2631 CDEBUG(D_HA, "%s: Recovery from log "LPX64"/"LPX64":%x\n",
2632 obd->obd_name, body->lgdc_logid.lgl_oid,
2633 body->lgdc_logid.lgl_oseq, body->lgdc_logid.lgl_ogen);
/* flag that an MDS->OST llog sync is now in progress */
2635 cfs_spin_lock(&obd->u.filter.fo_flags_lock);
2636 obd->u.filter.fo_mds_ost_sync = 1;
2637 cfs_spin_unlock(&obd->u.filter.fo_flags_lock);
2638 rc = llog_connect(ctxt, &body->lgdc_logid,
2639 &body->lgdc_gen, NULL);
2640 llog_ctxt_put(ctxt);
2642 CERROR("failed to connect rc %d idx %d\n", rc,
2643 body->lgdc_ctxt_idx);
/* Pre-cleanup of llog state: finish the default olg via obd_llog_finish(),
 * then detach every group from fo_llog_list under the spinlock and
 * finalize them on a private list outside the lock. */
2648 static int filter_llog_preclean(struct obd_device *obd)
2650 struct obd_llog_group *olg, *tmp;
2651 struct filter_obd *filter;
2652 cfs_list_t remove_list;
2656 rc = obd_llog_finish(obd, 0);
2658 CERROR("failed to cleanup llogging subsystem\n");
2660 filter = &obd->u.filter;
2661 CFS_INIT_LIST_HEAD(&remove_list);
/* move all groups onto remove_list while holding the lock ... */
2663 cfs_spin_lock(&filter->fo_llog_list_lock);
2664 while (!cfs_list_empty(&filter->fo_llog_list)) {
2665 olg = cfs_list_entry(filter->fo_llog_list.next,
2666 struct obd_llog_group, olg_list);
2667 cfs_list_del(&olg->olg_list);
2668 cfs_list_add(&olg->olg_list, &remove_list);
2670 cfs_spin_unlock(&filter->fo_llog_list_lock);
/* ... then finalize them lock-free */
2672 cfs_list_for_each_entry_safe(olg, tmp, &remove_list, olg_list) {
2673 cfs_list_del_init(&olg->olg_list);
2674 rc = filter_olg_fini(olg);
2676 CERROR("failed to cleanup llogging subsystem for %u\n",
/* obd pre-cleanup hook: at the EXPORTS stage stop recovery, drain exports
 * and zombies, pre-clean the llogs and tear down the /proc state that
 * filter_setup() registered. */
2684 static int filter_precleanup(struct obd_device *obd,
2685 enum obd_cleanup_stage stage)
2691 case OBD_CLEANUP_EARLY:
2693 case OBD_CLEANUP_EXPORTS:
2694 /* Stop recovery before namespace cleanup. */
2695 target_recovery_fini(obd);
2697 obd_exports_barrier(obd);
2698 obd_zombie_barrier();
2700 rc = filter_llog_preclean(obd);
2701 lprocfs_job_stats_fini(obd);
2702 lprocfs_remove_proc_entry("clear", obd->obd_proc_exports_entry);
2703 lprocfs_free_per_client_stats(obd);
2704 lprocfs_obd_cleanup(obd);
2705 lprocfs_free_obd_stats(obd);
/* Final teardown of the filter device: free the LDLM namespace and
 * sptlrpc rules, turn quota off, shrink the dcache, release the server
 * mount and drop fsfilt/iobuf state. */
2711 static int filter_cleanup(struct obd_device *obd)
2713 struct filter_obd *filter = &obd->u.filter;
2717 LCONSOLE_WARN("%s: shutting down for failover; client state "
2718 "will be preserved.\n", obd->obd_name);
2720 ldlm_namespace_free(obd->obd_namespace, NULL, obd->obd_force);
2721 obd->obd_namespace = NULL;
2723 sptlrpc_rule_set_free(&filter->fo_sptlrpc_rset);
/* nothing mounted: setup failed earlier or already cleaned up */
2725 if (obd->u.obt.obt_sb == NULL)
2730 ll_vfs_dq_off(obd->u.obt.obt_sb, 0);
2731 shrink_dcache_sb(obd->u.obt.obt_sb);
2733 server_put_mount(obd->obd_name, obd->u.obt.obt_vfsmnt);
2734 obd->u.obt.obt_sb = NULL;
2736 fsfilt_put_ops(obd->obd_fsops);
2738 filter_iobuf_pool_done(filter);
2740 LCONSOLE_INFO("OST %s has stopped.\n", obd->obd_name);
/* Negotiate obd_connect_data with a connecting or reconnecting client:
 * record the object group, mask unsupported connect flags, account grant
 * space, validate the OST index against last_rcvd, clamp the bulk brw
 * size, and intersect checksum types.  @reconnect (third parameter, on an
 * elided line) selects reconnect-time grant accounting. */
2745 static int filter_connect_internal(struct obd_export *exp,
2746 struct obd_connect_data *data,
2749 struct filter_export_data *fed = &exp->exp_filter_data;
2754 CDEBUG(D_RPCTRACE, "%s: cli %s/%p ocd_connect_flags: "LPX64
2755 " ocd_version: %x ocd_grant: %d ocd_index: %u\n",
2756 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
2757 data->ocd_connect_flags, data->ocd_version,
2758 data->ocd_grant, data->ocd_index);
/* a given export must always use the same object group */
2760 if (fed->fed_group != 0 && fed->fed_group != data->ocd_group) {
2761 CWARN("!!! This export (nid %s) used object group %d "
2762 "earlier; now it's trying to use group %d! This could "
2763 "be a bug in the MDS. Please report to "
2764 "http://bugs.whamcloud.com/\n",
2765 obd_export_nid2str(exp), fed->fed_group,data->ocd_group);
2768 fed->fed_group = data->ocd_group;
/* keep only the flags this OST supports and echo the result back */
2770 data->ocd_connect_flags &= OST_CONNECT_SUPPORTED;
2771 exp->exp_connect_flags = data->ocd_connect_flags;
2772 data->ocd_version = LUSTRE_VERSION_CODE;
2774 /* Kindly make sure the SKIP_ORPHAN flag is from MDS. */
2775 if (data->ocd_connect_flags & OBD_CONNECT_MDS)
2776 CDEBUG(D_HA, "%s: Received MDS connection for group %u\n",
2777 exp->exp_obd->obd_name, data->ocd_group);
2778 else if (data->ocd_connect_flags & OBD_CONNECT_SKIP_ORPHAN)
/* grant accounting: give the client its share under obd_osfs_lock and
 * report the resulting grant back in ocd_grant */
2781 if (exp->exp_connect_flags & OBD_CONNECT_GRANT) {
2782 struct filter_obd *filter = &exp->exp_obd->u.filter;
2783 obd_size left, want;
2785 cfs_spin_lock(&exp->exp_obd->obd_osfs_lock);
2786 left = filter_grant_space_left(exp);
2787 want = data->ocd_grant;
2788 filter_grant(exp, fed->fed_grant, want, left, (reconnect == 0));
2789 data->ocd_grant = fed->fed_grant;
2790 cfs_spin_unlock(&exp->exp_obd->obd_osfs_lock);
2792 CDEBUG(D_CACHE, "%s: cli %s/%p ocd_grant: %d want: "
2793 LPU64" left: "LPU64"\n", exp->exp_obd->obd_name,
2794 exp->exp_client_uuid.uuid, exp,
2795 data->ocd_grant, want, left);
2797 filter->fo_tot_granted_clients ++;
2800 if (data->ocd_connect_flags & OBD_CONNECT_INDEX) {
2801 struct lr_server_data *lsd = class_server_data(exp->exp_obd);
2802 int index = le32_to_cpu(lsd->lsd_ost_index);
2804 if (!(lsd->lsd_feature_compat &
2805 cpu_to_le32(OBD_COMPAT_OST))) {
2806 /* this will only happen on the first connect */
2807 lsd->lsd_ost_index = cpu_to_le32(data->ocd_index);
2808 lsd->lsd_feature_compat |= cpu_to_le32(OBD_COMPAT_OST);
2809 /* sync is not needed here as filter_client_add will
2810 * set exp_need_sync flag */
2811 filter_update_server_data(exp->exp_obd);
2812 } else if (index != data->ocd_index) {
2813 LCONSOLE_ERROR_MSG(0x136, "Connection from %s to index"
2814 " %u doesn't match actual OST index"
2815 " %u in last_rcvd file, bad "
2817 obd_export_nid2str(exp), index,
2821 /* FIXME: Do the same with the MDS UUID and lsd_peeruuid.
2822 * FIXME: We don't strictly need the COMPAT flag for that,
2823 * FIXME: as lsd_peeruuid[0] will tell us if that is set.
2824 * FIXME: We needed it for the index, as index 0 is valid. */
/* bulk I/O size negotiation (fault-injectable for testing) */
2827 if (OBD_FAIL_CHECK(OBD_FAIL_OST_BRW_SIZE)) {
2828 data->ocd_brw_size = 65536;
2829 } else if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) {
2830 data->ocd_brw_size = min(data->ocd_brw_size,
2831 (__u32)(PTLRPC_MAX_BRW_PAGES << CFS_PAGE_SHIFT));
2832 if (data->ocd_brw_size == 0) {
2833 CERROR("%s: cli %s/%p ocd_connect_flags: "LPX64
2834 " ocd_version: %x ocd_grant: %d ocd_index: %u "
2835 "ocd_brw_size is unexpectedly zero, "
2836 "network data corruption?"
2837 "Refusing connection of this client\n",
2838 exp->exp_obd->obd_name,
2839 exp->exp_client_uuid.uuid,
2840 exp, data->ocd_connect_flags, data->ocd_version,
2841 data->ocd_grant, data->ocd_index);
2846 if (data->ocd_connect_flags & OBD_CONNECT_CKSUM) {
2847 __u32 cksum_types = data->ocd_cksum_types;
2849 /* The client set in ocd_cksum_types the checksum types it
2850 * supports. We have to mask off the algorithms that we don't
2852 data->ocd_cksum_types &= cksum_types_supported_server();
2854 /* 1.6.4 clients are not supported any more */
2856 CDEBUG(D_RPCTRACE, "%s: cli %s supports cksum type %x, return "
2857 "%x\n", exp->exp_obd->obd_name,
2858 obd_export_nid2str(exp), cksum_types,
2859 data->ocd_cksum_types);
2861 /* This client does not support OBD_CONNECT_CKSUM
2862 * fall back to CRC32 */
2863 CDEBUG(D_RPCTRACE, "%s: cli %s does not support "
2864 "OBD_CONNECT_CKSUM, CRC32 will be used\n",
2865 exp->exp_obd->obd_name,
2866 obd_export_nid2str(exp));
2869 if (data->ocd_connect_flags & OBD_CONNECT_MAXBYTES)
2870 data->ocd_maxbytes = exp->exp_obd->u.obt.obt_sb->s_maxbytes;
/* Reconnect handler: re-run connect-data negotiation for an existing
 * export (reconnect = 1) and refresh per-export stats. */
2875 static int filter_reconnect(const struct lu_env *env,
2876 struct obd_export *exp, struct obd_device *obd,
2877 struct obd_uuid *cluuid,
2878 struct obd_connect_data *data,
2884 if (exp == NULL || obd == NULL || cluuid == NULL)
2887 rc = filter_connect_internal(exp, data, 1);
2889 filter_export_stats_init(obd, exp, localdata)
/* Connect a new client: create the export, negotiate connect data, and
 * for replayable targets record the client UUID in last_rcvd via
 * filter_client_add(); then read the object group directories.  On error
 * the export is disconnected. */
2894 static int filter_connect(const struct lu_env *env,
2895 struct obd_export **exp, struct obd_device *obd,
2896 struct obd_uuid *cluuid,
2897 struct obd_connect_data *data, void *localdata)
2899 struct lvfs_run_ctxt saved;
2900 struct lustre_handle conn = { 0 };
2901 struct obd_export *lexp;
2905 if (exp == NULL || obd == NULL || cluuid == NULL)
2908 rc = class_connect(&conn, obd, cluuid);
2911 lexp = class_conn2export(&conn);
2912 LASSERT(lexp != NULL);
2914 rc = filter_connect_internal(lexp, data, 0);
2918 filter_export_stats_init(obd, lexp, localdata);
2919 if (obd->obd_replayable) {
2920 struct lsd_client_data *lcd = lexp->exp_target_data.ted_lcd;
2922 memcpy(lcd->lcd_uuid, cluuid, sizeof(lcd->lcd_uuid));
2923 rc = filter_client_add(obd, lexp, -1);
/* read the group directories under the server's lvfs context */
2928 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
2929 rc = filter_read_groups(obd, data->ocd_group, 1);
2930 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
2932 CERROR("can't read group %u\n", data->ocd_group);
2940 class_disconnect(lexp);
2949 /* Do extra sanity checks for grant accounting. We do this at connect,
2950 * disconnect, and statfs RPC time, so it shouldn't be too bad. We can
2951 * always get rid of it or turn it off when we know accounting is good. */
2952 static void filter_grant_sanity_check(struct obd_device *obd, const char *func)
2954 struct filter_export_data *fed;
2955 struct obd_export *exp;
2956 obd_size maxsize = obd->obd_osfs.os_blocks * obd->obd_osfs.os_bsize;
2957 obd_size tot_dirty = 0, tot_pending = 0, tot_granted = 0;
2958 obd_size fo_tot_dirty, fo_tot_pending, fo_tot_granted;
2960 if (cfs_list_empty(&obd->obd_exports))
2963 /* We don't want to do this for large machines that do lots of
2964 mounts or unmounts. It burns... */
2965 if (obd->obd_num_exports > 100)
/* sum per-export grant/pending/dirty under both locks, then compare
 * against the filter-wide totals outside the locks */
2968 cfs_spin_lock(&obd->obd_osfs_lock);
2969 cfs_spin_lock(&obd->obd_dev_lock);
2970 cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
2972 fed = &exp->exp_filter_data;
2973 if (fed->fed_grant < 0 || fed->fed_pending < 0 ||
2976 if (maxsize > 0) { /* we may not have done a statfs yet */
2977 LASSERTF(fed->fed_grant + fed->fed_pending <= maxsize,
2978 "%s: cli %s/%p %ld+%ld > "LPU64"\n", func,
2979 exp->exp_client_uuid.uuid, exp,
2980 fed->fed_grant, fed->fed_pending, maxsize);
2981 LASSERTF(fed->fed_dirty <= maxsize,
2982 "%s: cli %s/%p %ld > "LPU64"\n", func,
2983 exp->exp_client_uuid.uuid, exp,
2984 fed->fed_dirty, maxsize);
2987 CERROR("%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
2988 obd->obd_name, exp->exp_client_uuid.uuid, exp,
2989 fed->fed_dirty, fed->fed_pending,fed->fed_grant);
2991 CDEBUG(D_CACHE, "%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
2992 obd->obd_name, exp->exp_client_uuid.uuid, exp,
2993 fed->fed_dirty, fed->fed_pending,fed->fed_grant);
2994 tot_granted += fed->fed_grant + fed->fed_pending;
2995 tot_pending += fed->fed_pending;
2996 tot_dirty += fed->fed_dirty;
2998 fo_tot_granted = obd->u.filter.fo_tot_granted;
2999 fo_tot_pending = obd->u.filter.fo_tot_pending;
3000 fo_tot_dirty = obd->u.filter.fo_tot_dirty;
3001 cfs_spin_unlock(&obd->obd_dev_lock);
3002 cfs_spin_unlock(&obd->obd_osfs_lock);
3004 /* Do these assertions outside the spinlocks so we don't kill system */
3005 if (tot_granted != fo_tot_granted)
3006 CERROR("%s: tot_granted "LPU64" != fo_tot_granted "LPU64"\n",
3007 func, tot_granted, fo_tot_granted);
3008 if (tot_pending != fo_tot_pending)
3009 CERROR("%s: tot_pending "LPU64" != fo_tot_pending "LPU64"\n",
3010 func, tot_pending, fo_tot_pending);
3011 if (tot_dirty != fo_tot_dirty)
3012 CERROR("%s: tot_dirty "LPU64" != fo_tot_dirty "LPU64"\n",
3013 func, tot_dirty, fo_tot_dirty);
3014 if (tot_pending > tot_granted)
3015 CERROR("%s: tot_pending "LPU64" > tot_granted "LPU64"\n",
3016 func, tot_pending, tot_granted);
3017 if (tot_granted > maxsize)
3018 CERROR("%s: tot_granted "LPU64" > maxsize "LPU64"\n",
3019 func, tot_granted, maxsize);
3020 if (tot_dirty > maxsize)
3021 CERROR("%s: tot_dirty "LPU64" > maxsize "LPU64"\n",
3022 func, tot_dirty, maxsize);
3025 /* Remove this client from the grant accounting totals. We also remove
3026 * the export from the obd device under the osfs and dev locks to ensure
3027 * that the filter_grant_sanity_check() calculations are always valid.
3028 * The client should do something similar when it invalidates its import. */
3029 static void filter_grant_discard(struct obd_export *exp)
3031 struct obd_device *obd = exp->exp_obd;
3032 struct filter_obd *filter = &obd->u.filter;
3033 struct filter_export_data *fed = &exp->exp_filter_data;
3035 cfs_spin_lock(&obd->obd_osfs_lock);
/* subtract this export's grant and dirty from the filter-wide totals;
 * the LASSERTFs guard against accounting underflow */
3036 LASSERTF(filter->fo_tot_granted >= fed->fed_grant,
3037 "%s: tot_granted "LPU64" cli %s/%p fed_grant %ld\n",
3038 obd->obd_name, filter->fo_tot_granted,
3039 exp->exp_client_uuid.uuid, exp, fed->fed_grant);
3040 filter->fo_tot_granted -= fed->fed_grant;
3041 LASSERTF(filter->fo_tot_pending >= fed->fed_pending,
3042 "%s: tot_pending "LPU64" cli %s/%p fed_pending %ld\n",
3043 obd->obd_name, filter->fo_tot_pending,
3044 exp->exp_client_uuid.uuid, exp, fed->fed_pending);
3045 /* fo_tot_pending is handled in filter_grant_commit as bulk finishes */
3046 LASSERTF(filter->fo_tot_dirty >= fed->fed_dirty,
3047 "%s: tot_dirty "LPU64" cli %s/%p fed_dirty %ld\n",
3048 obd->obd_name, filter->fo_tot_dirty,
3049 exp->exp_client_uuid.uuid, exp, fed->fed_dirty);
3050 filter->fo_tot_dirty -= fed->fed_dirty;
3054 cfs_spin_unlock(&obd->obd_osfs_lock);
/* Export destructor: report leaked pending grant, tear down target and
 * LDLM state, sync to disk for non-replayable targets, and discard this
 * export's grant and fmd caches. */
3057 static int filter_destroy_export(struct obd_export *exp)
3059 struct filter_export_data *fed = &exp->exp_filter_data;
3062 if (fed->fed_pending)
3063 CERROR("%s: cli %s/%p has %lu pending on destroyed export\n",
3064 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid,
3065 exp, fed->fed_pending);
3067 target_destroy_export(exp);
/* the self-export (obd's own uuid) carries no client state */
3069 if (unlikely(obd_uuid_equals(&exp->exp_obd->obd_uuid,
3070 &exp->exp_client_uuid)))
3073 ldlm_destroy_export(exp);
3074 lut_client_free(exp);
3076 if (!exp->exp_obd->obd_replayable)
3077 fsfilt_sync(exp->exp_obd, exp->exp_obd->u.obt.obt_sb);
3079 filter_grant_discard(exp);
3080 filter_fmd_cleanup(exp);
3082 if (exp->exp_connect_flags & OBD_CONNECT_GRANT_SHRINK) {
3083 struct filter_obd *filter = &exp->exp_obd->u.filter;
3084 if (filter->fo_tot_granted_clients > 0)
3085 filter->fo_tot_granted_clients --;
3088 if (!(exp->exp_flags & OBD_OPT_FORCE))
3089 filter_grant_sanity_check(exp->exp_obd, __func__);
/* Flush replicator llogs for one MDS export (@dexp) or all of them
 * (@dexp == NULL).  Groups are visited in increasing olg_seq order,
 * releasing fo_llog_list_lock before each llog_sync() so we never sleep
 * under the spinlock. */
3094 static void filter_sync_llogs(struct obd_device *obd, struct obd_export *dexp)
3096 struct obd_llog_group *olg_min, *olg;
3097 struct filter_obd *filter;
3098 int worked = -1, group;
3099 struct llog_ctxt *ctxt;
3102 filter = &obd->u.filter;
3104 /* we can't sync log holding spinlock. also, we do not want to get
3105 * into livelock. so we do the following: loop over MDS's exports in
3106 * group order and skip already synced llogs -bzzz */
3108 /* look for group with min. number, but > worked */
3111 cfs_spin_lock(&filter->fo_llog_list_lock);
3112 cfs_list_for_each_entry(olg, &filter->fo_llog_list, olg_list) {
3113 if (olg->olg_seq <= worked) {
3114 /* this group is already synced */
3117 if (group < olg->olg_seq) {
3118 /* we have group with smaller number to sync */
3121 /* store current minimal group */
3123 group = olg->olg_seq;
3125 cfs_spin_unlock(&filter->fo_llog_list_lock);
3127 if (olg_min == NULL)
3130 worked = olg_min->olg_seq;
/* sync only if the group belongs to the requested export (or all) */
3131 if (olg_min->olg_exp &&
3132 (dexp == olg_min->olg_exp || dexp == NULL)) {
3134 ctxt = llog_group_get_ctxt(olg_min,
3135 LLOG_MDS_OST_REPL_CTXT);
3137 err = llog_sync(ctxt, olg_min->olg_exp, 0);
3138 llog_ctxt_put(ctxt);
3140 CERROR("error flushing logs to MDS: "
3145 } while (olg_min != NULL);
3148 /* Also incredibly similar to mds_disconnect */
/* Disconnect an export: sanity-check and discard its grant, flush pending
 * llog cancels, disconnect at the server level and, for non-recoverable
 * clients, drop the last_rcvd record; finally sync the filesystem. */
3149 static int filter_disconnect(struct obd_export *exp)
3151 struct obd_device *obd = exp->exp_obd;
3156 class_export_get(exp);
3158 if (!(exp->exp_flags & OBD_OPT_FORCE))
3159 filter_grant_sanity_check(obd, __func__);
3160 filter_grant_discard(exp);
3162 /* Flush any remaining cancel messages out to the target */
3163 filter_sync_llogs(obd, exp);
3165 rc = server_disconnect_export(exp);
3167 /* Do not erase record for recoverable client. */
3168 if (obd->obd_replayable && (!obd->obd_fail || exp->exp_failed))
3169 filter_client_del(exp);
/* flush filesystem state to disk before the export goes away */
3171 fsfilt_sync(obd, obd->u.obt.obt_sb);
3173 class_export_put(exp);
3177 /* reverse import is changed, sync all cancels */
/* Reverse import changed: hold the export while syncing its pending
 * llog cancels out to the MDS. */
3178 static void filter_revimp_update(struct obd_export *exp)
3183 class_export_get(exp);
3185 /* flush any remaining cancel messages out to the target */
3186 filter_sync_llogs(exp->exp_obd, exp);
3187 class_export_put(exp);
/* Ping handler: expire stale per-export filter_mod_data entries. */
3191 static int filter_ping(const struct lu_env *env, struct obd_export *exp)
3193 filter_fmd_expire(exp);
/* Resolve an ost_id to a dentry via filter_fid2dentry().  Returns an
 * ERR_PTR on lookup failure and ERR_PTR(-ENOENT) when the object has no
 * inode (logged unless @quiet).  Also scrubs bogus LU-221 timestamps on
 * pre-2.7.50 code. */
3197 struct dentry *__filter_oa2dentry(struct obd_device *obd, struct ost_id *ostid,
3198 const char *what, int quiet)
3200 struct dentry *dchild = NULL;
3202 dchild = filter_fid2dentry(obd, NULL, ostid->oi_seq, ostid->oi_id);
3204 if (IS_ERR(dchild)) {
3205 CERROR("%s error looking up object: "POSTID"\n",
3206 what, ostid->oi_id, ostid->oi_seq);
3210 if (dchild->d_inode == NULL) {
3212 CERROR("%s: %s on non-existent object: "POSTID" \n",
3213 obd->obd_name, what, ostid->oi_id,ostid->oi_seq);
3215 RETURN(ERR_PTR(-ENOENT));
3218 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
3219 /* Try to correct for a bug in 2.1.0 (LU-221) that caused negative
3220 * timestamps to appear to be in the far future, due to the old timestamp
3221 * being stored on disk as an unsigned value. This fixes up any
3222 * bad values stored on disk before returning them to the client,
3223 * and ensures any timestamp updates are correct. LU-1042 */
3224 if (unlikely(LTIME_S(dchild->d_inode->i_atime) == LU221_BAD_TIME))
3225 LTIME_S(dchild->d_inode->i_atime) = 0;
3226 if (unlikely(LTIME_S(dchild->d_inode->i_mtime) == LU221_BAD_TIME))
3227 LTIME_S(dchild->d_inode->i_mtime) = 0;
3228 if (unlikely(LTIME_S(dchild->d_inode->i_ctime) == LU221_BAD_TIME))
3229 LTIME_S(dchild->d_inode->i_ctime) = 0;
3231 #warning "remove old LU-221/LU-1042 workaround code"
/* OST getattr: authorize via capa, look up the object dentry, fill the
 * obdo from the inode, and report the inode data version when the
 * backing fs supports it. */
3237 static int filter_getattr(const struct lu_env *env, struct obd_export *exp,
3238 struct obd_info *oinfo)
3240 struct dentry *dentry = NULL;
3241 struct obd_device *obd;
3246 rc = filter_auth_capa(exp, NULL, oinfo->oi_oa->o_seq,
3247 oinfo_capa(oinfo), CAPA_OPC_META_READ);
3251 obd = class_exp2obd(exp);
3253 CDEBUG(D_IOCTL, "invalid client export %p\n", exp);
3257 dentry = filter_oa2dentry(obd, &oinfo->oi_oa->o_oi);
3259 RETURN(PTR_ERR(dentry));
3261 /* Limit the valid bits in the return data to what we actually use */
3262 oinfo->oi_oa->o_valid = OBD_MD_FLID;
3263 obdo_from_inode(oinfo->oi_oa, dentry->d_inode, FILTER_VALID_FLAGS);
3265 /* Store inode version in reply */
3266 curr_version = fsfilt_get_version(exp->exp_obd, dentry->d_inode);
3267 if ((__s64)curr_version != -EOPNOTSUPP) {
3268 oinfo->oi_oa->o_valid |= OBD_MD_FLDATAVERSION;
3269 oinfo->oi_oa->o_data_version = curr_version;
3276 /* this should be enabled/disabled in condition to enabled/disabled large
3277 * inodes (fast EAs) in backing store FS. */
/* Store the object's parent FID (taken from the obdo) into its "fid"
 * extended attribute under the open transaction @handle, so the MDS
 * linkage can be reconstructed from the OST object. */
3278 int filter_update_fidea(struct obd_export *exp, struct inode *inode,
3279 void *handle, struct obdo *oa)
3281 struct obd_device *obd = exp->exp_obd;
3285 if (oa->o_valid & OBD_MD_FLFID) {
3286 struct filter_fid ff;
3288 if (!(oa->o_valid & OBD_MD_FLGROUP))
3290 /* packing fid and converting it to LE for storing into EA.
3291 * Here ->o_stripe_idx should be filled by LOV and rest of
3292 * fields - by client. */
3293 ff.ff_parent.f_seq = cpu_to_le64(oa->o_parent_seq);
3294 ff.ff_parent.f_oid = cpu_to_le32(oa->o_parent_oid);
3295 /* XXX: we are ignoring o_parent_ver here, since this should
3296 * be the same for all objects in this fileset. */
3297 ff.ff_parent.f_ver = cpu_to_le32(oa->o_stripe_idx);
3298 ff.ff_objid = cpu_to_le64(oa->o_id);
3299 ff.ff_seq = cpu_to_le64(oa->o_seq);
3301 CDEBUG(D_INODE, "storing filter fid EA (parent "DFID" "
3302 LPU64"/"LPU64")\n", PFID(&ff.ff_parent), oa->o_id,
3305 rc = fsfilt_set_md(obd, inode, handle, &ff, sizeof(ff), "fid");
3307 CERROR("store fid in object failed! rc: %d\n", rc);
3309 CDEBUG(D_HA, "OSS object without fid info!\n");
3315 /* this is called from filter_truncate() until we have filter_punch() */
/* Apply a setattr to @dentry inside a journal transaction: VBR version
 * check, truncate serialization via i_alloc_sem + i_mutex, SUID/SGID
 * clearing when ownership is first set, fid EA update, the attribute
 * write itself, llog cancel-cookie journal callback, and a partial-page
 * flush after truncate (b=17397). */
3316 int filter_setattr_internal(struct obd_export *exp, struct dentry *dentry,
3317 struct obdo *oa, struct obd_trans_info *oti)
3319 struct llog_cookie *fcc = NULL;
3320 struct filter_obd *filter;
3321 int rc, err, sync = 0;
3322 loff_t old_size = 0;
3323 unsigned int ia_valid;
3324 struct inode *inode;
3325 struct page *page = NULL;
3330 LASSERT(dentry != NULL);
3331 LASSERT(!IS_ERR(dentry));
3333 inode = dentry->d_inode;
3334 LASSERT(inode != NULL);
3336 filter = &exp->exp_obd->u.filter;
3337 iattr_from_obdo(&iattr, oa, oa->o_valid);
3338 ia_valid = iattr.ia_valid;
/* keep a copy of the llog cancel cookie for the commit callback */
3340 if (oa->o_valid & OBD_MD_FLCOOKIE) {
3341 OBD_ALLOC(fcc, sizeof(*fcc));
3343 *fcc = oa->o_lcookie;
3345 if (ia_valid & (ATTR_SIZE | ATTR_UID | ATTR_GID)) {
3346 unsigned long now = jiffies;
3347 /* Filter truncates and writes are serialized by
3348 * i_alloc_sem, see the comment in
3349 * filter_preprw_write.*/
3350 if (ia_valid & ATTR_SIZE)
3351 down_write(&inode->i_alloc_sem);
3352 mutex_lock(&inode->i_mutex);
3353 fsfilt_check_slow(exp->exp_obd, now, "i_alloc_sem and i_mutex");
3354 old_size = i_size_read(inode);
3357 /* VBR: version recovery check */
3358 rc = filter_version_get_check(exp, oti, inode);
3360 GOTO(out_unlock, rc);
3362 /* Let's pin the last page so that ldiskfs_truncate
3363 * should not start GFP_FS allocation. */
3364 if (ia_valid & ATTR_SIZE) {
3365 page = grab_cache_page(inode->i_mapping,
3366 iattr.ia_size >> PAGE_CACHE_SHIFT);
3368 GOTO(out_unlock, rc = -ENOMEM);
3373 /* If the inode still has SUID+SGID bits set (see filter_precreate())
3374 * then we will accept the UID+GID sent by the client during write for
3375 * initializing the ownership of this inode. We only allow this to
3376 * happen once so clear these bits in setattr. In 2.6 kernels it is
3377 * possible to get ATTR_UID and ATTR_GID separately, so we only clear
3378 * the flags that are actually being set. */
3379 if (ia_valid & (ATTR_UID | ATTR_GID)) {
3380 CDEBUG(D_INODE, "update UID/GID to %lu/%lu\n",
3381 (unsigned long)oa->o_uid, (unsigned long)oa->o_gid);
3383 if ((inode->i_mode & S_ISUID) && (ia_valid & ATTR_UID)) {
3384 if (!(ia_valid & ATTR_MODE)) {
3385 iattr.ia_mode = inode->i_mode;
3386 iattr.ia_valid |= ATTR_MODE;
3388 iattr.ia_mode &= ~S_ISUID;
3390 if ((inode->i_mode & S_ISGID) && (ia_valid & ATTR_GID)) {
3391 if (!(iattr.ia_valid & ATTR_MODE)) {
3392 iattr.ia_mode = inode->i_mode;
3393 iattr.ia_valid |= ATTR_MODE;
3395 iattr.ia_mode &= ~S_ISGID;
3398 handle = fsfilt_start_log(exp->exp_obd, inode,
3399 FSFILT_OP_SETATTR, oti, 1);
3401 GOTO(out_unlock, rc = PTR_ERR(handle));
3403 /* update inode EA only once when inode is suid bit marked. As
3404 * on 2.6.x UID and GID may be set separately, we check here
3405 * only one of them to avoid double setting. */
3406 if (inode->i_mode & S_ISUID)
3407 filter_update_fidea(exp, inode, handle, oa);
3409 handle = fsfilt_start(exp->exp_obd, inode,
3410 FSFILT_OP_SETATTR, oti);
3412 GOTO(out_unlock, rc = PTR_ERR(handle));
3415 /* Locking order: i_mutex -> journal_lock -> dqptr_sem. LU-952 */
3416 if (ia_valid & (ATTR_SIZE | ATTR_UID | ATTR_GID))
3417 ll_vfs_dq_init(inode);
3419 if (oa->o_valid & OBD_MD_FLFLAGS) {
3420 rc = fsfilt_iocontrol(exp->exp_obd, dentry,
3421 FSFILT_IOC_SETFLAGS, (long)&oa->o_flags);
3423 rc = fsfilt_setattr(exp->exp_obd, dentry, handle, &iattr, 1);
3425 /* set cancel cookie callback function */
3426 sync = fsfilt_add_journal_cb(exp->exp_obd, 0, handle,
3427 filter_cancel_cookies_cb,
3431 if (OBD_FAIL_CHECK(OBD_FAIL_OST_SETATTR_CREDITS))
3432 fsfilt_extend(exp->exp_obd, inode, 0, handle);
3434 /* The truncate might have used up our transaction credits. Make sure
3435 * we have two left for the last_rcvd and VBR inode version updates. */
3436 err = fsfilt_extend(exp->exp_obd, inode, 2, handle);
3438 /* Update inode version only if data has changed => size has changed */
3439 rc = filter_finish_transno(exp, ia_valid & ATTR_SIZE ? inode : NULL,
3443 filter_cancel_cookies_cb(exp->exp_obd, 0, fcc, rc);
3447 err = fsfilt_commit(exp->exp_obd, inode, handle, 0);
3449 CERROR("error on commit, err = %d\n", err);
3456 /* For a partial-page truncate flush the page to disk immediately
3457 * to avoid data corruption during direct disk write. b=17397 */
3458 if (!sync && (iattr.ia_valid & ATTR_SIZE) &&
3459 old_size != iattr.ia_size && (iattr.ia_size & ~CFS_PAGE_MASK)) {
3460 err = filemap_fdatawrite_range(inode->i_mapping, iattr.ia_size,
/* release the pinned page and the truncate/attr locks in reverse order */
3470 page_cache_release(page);
3472 if (ia_valid & (ATTR_SIZE | ATTR_UID | ATTR_GID))
3473 mutex_unlock(&inode->i_mutex);
3474 if (ia_valid & ATTR_SIZE)
3475 up_write(&inode->i_alloc_sem);
3477 OBD_FREE(fcc, sizeof(*fcc));
/*
 * filter_setattr() - apply attribute changes (owner, times, size) to one
 * OST object. Authorizes via capa, refuses a size-change request whose
 * extent end is not OBD_OBJECT_EOF (bug 12203: a plain setattr must never
 * truncate), serializes mtime/atime/ctime updates against concurrent
 * writes via i_alloc_sem + the fmd xid check, then delegates the actual
 * attribute write to filter_setattr_internal() and refreshes the LVB on
 * the object's LDLM resource.
 * NOTE(review): several source lines are elided in this excerpt (ENTRY,
 * RETURN paths, some conditionals); confirm details against the full file.
 */
3481 /* this is called from filter_truncate() until we have filter_punch() */
3482 int filter_setattr(const struct lu_env *env, struct obd_export *exp,
3483 struct obd_info *oinfo, struct obd_trans_info *oti)
3485 struct obdo *oa = oinfo->oi_oa;
3486 struct lustre_capa *capa = oinfo_capa(oinfo);
3487 struct ldlm_res_id res_id;
3488 struct filter_mod_data *fmd;
3489 struct lvfs_run_ctxt saved;
3490 struct filter_obd *filter;
3491 struct ldlm_resource *res;
3492 struct dentry *dentry;
3493 __u64 opc = CAPA_OPC_META_WRITE;
3497 if (oinfo->oi_flags & OBD_FL_PUNCH)
3498 opc |= CAPA_OPC_OSS_TRUNC;
3500 rc = filter_auth_capa(exp, NULL, oa->o_seq, capa, opc);
3504 if (oa->o_valid & (OBD_MD_FLUID | OBD_MD_FLGID)) {
3505 rc = filter_capa_fixoa(exp, oa, oa->o_seq, capa);
3510 osc_build_res_name(oa->o_id, oa->o_seq, &res_id);
3511 /* This would be very bad - accidentally truncating a file when
3512 * changing the time or similar - bug 12203. */
3513 if (oa->o_valid & OBD_MD_FLSIZE &&
3514 oinfo->oi_policy.l_extent.end != OBD_OBJECT_EOF) {
3515 static char mdsinum[48];
3517 if (oa->o_valid & OBD_MD_FLFID)
3518 snprintf(mdsinum, sizeof(mdsinum) - 1, " of inode "DFID,
3519 oa->o_parent_seq, oa->o_parent_oid,
3524 CERROR("%s: setattr from %s trying to truncate objid "POSTID
3525 "%s\n", exp->exp_obd->obd_name, obd_export_nid2str(exp),
3526 oa->o_id, oa->o_seq, mdsinum);
3530 dentry = __filter_oa2dentry(exp->exp_obd, &oinfo->oi_oa->o_oi, __func__, 1);
3532 RETURN(PTR_ERR(dentry));
3534 filter = &exp->exp_obd->u.filter;
3535 push_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
3538 * We need to be atomic against a concurrent write
3539 * (which takes the semaphore for reading). fmd_mactime_xid
3540 * checks will have no effect if a write request with lower
3541 * xid starts just before a setattr and finishes later than
3542 * the setattr (see bug 21489, comment 27).
3545 (OBD_MD_FLMTIME | OBD_MD_FLATIME | OBD_MD_FLCTIME)) {
3546 unsigned long now = jiffies;
3547 down_write(&dentry->d_inode->i_alloc_sem);
3548 fsfilt_check_slow(exp->exp_obd, now, "i_alloc_sem");
3549 fmd = filter_fmd_get(exp, oa->o_id, oa->o_seq);
3550 if (fmd && fmd->fmd_mactime_xid < oti->oti_xid)
3551 fmd->fmd_mactime_xid = oti->oti_xid;
3552 filter_fmd_put(exp, fmd);
3553 up_write(&dentry->d_inode->i_alloc_sem);
3556 /* setting objects attributes (including owner/group) */
3557 rc = filter_setattr_internal(exp, dentry, oa, oti);
3559 GOTO(out_unlock, rc);
3561 res = ldlm_resource_get(exp->exp_obd->obd_namespace, NULL,
3562 &res_id, LDLM_EXTENT, 0);
3565 LDLM_RESOURCE_ADDREF(res);
3566 rc = ldlm_res_lvbo_update(res, NULL, 0);
3567 LDLM_RESOURCE_DELREF(res);
3568 ldlm_resource_putref(res);
3571 oa->o_valid = OBD_MD_FLID;
3573 /* Quota release need uid/gid info */
3574 obdo_from_inode(oa, dentry->d_inode,
3575 FILTER_VALID_FLAGS | OBD_MD_FLUID | OBD_MD_FLGID);
3577 filter_counter_incr(exp, LPROC_FILTER_STATS_SETATTR,
3578 oti ? oti->oti_jobid : NULL, 1);
3582 pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
/*
 * filter_unpackmd() - unpack an on-wire lov_mds_md into an in-memory
 * single-stripe lov_stripe_md. Validates the buffer size and a non-zero
 * object id, allocates *lsmp (and its single lsm_oinfo) on demand, frees
 * it when called with lmm == NULL, and caps lsm_maxbytes at the backing
 * filesystem's s_maxbytes. Mirrors osc_unpackmd (see comment below).
 * NOTE(review): error-return lines are elided in this excerpt.
 */
3586 /* XXX identical to osc_unpackmd */
3587 static int filter_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
3588 struct lov_mds_md *lmm, int lmm_bytes)
3594 if (lmm_bytes < sizeof (*lmm)) {
3595 CERROR("lov_mds_md too small: %d, need %d\n",
3596 lmm_bytes, (int)sizeof(*lmm));
3599 /* XXX LOV_MAGIC etc check? */
3601 if (lmm->lmm_object_id == cpu_to_le64(0)) {
3602 CERROR("lov_mds_md: zero lmm_object_id\n");
3607 lsm_size = lov_stripe_md_size(1);
3611 if (*lsmp != NULL && lmm == NULL) {
3612 OBD_FREE((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
3613 OBD_FREE(*lsmp, lsm_size);
3618 if (*lsmp == NULL) {
3619 OBD_ALLOC(*lsmp, lsm_size);
3623 OBD_ALLOC((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
3624 if ((*lsmp)->lsm_oinfo[0] == NULL) {
3625 OBD_FREE(*lsmp, lsm_size);
3628 loi_init((*lsmp)->lsm_oinfo[0]);
3632 /* XXX zero *lsmp? */
3633 (*lsmp)->lsm_object_id = le64_to_cpu (lmm->lmm_object_id);
3634 LASSERT((*lsmp)->lsm_object_id);
3637 (*lsmp)->lsm_maxbytes = exp->exp_obd->u.obt.obt_sb->s_maxbytes;
/*
 * filter_destroy_precreated() - delete orphan precreated objects with ids
 * in (oa->o_id, last_id], walking downward from the current last id.
 * Persists last_id every 512 deletions so a crash does not force a full
 * re-scan. If the client connected with OBD_CONNECT_SKIP_ORPHAN, last_id
 * is left at the last orphan so those object ids are never reused (the
 * MDS restarts allocation at id + 1). Caller holds fo_create_locks[seq]
 * and must have set the fo_destroys_in_progress bit.
 */
3642 /* caller must hold fo_create_locks[oa->o_seq] */
3643 static int filter_destroy_precreated(struct obd_export *exp, struct obdo *oa,
3644 struct filter_obd *filter)
3646 struct obdo doa = { 0 }; /* XXX obdo on stack */
3652 LASSERT_MUTEX_LOCKED(&filter->fo_create_locks[oa->o_seq]);
3654 memset(&doa, 0, sizeof(doa));
3656 doa.o_valid |= OBD_MD_FLGROUP;
3657 doa.o_seq = oa->o_seq;
3658 doa.o_mode = S_IFREG;
3660 if (!cfs_test_bit(doa.o_seq, &filter->fo_destroys_in_progress)) {
3661 CERROR("%s:["LPU64"] destroys_in_progress already cleared\n",
3662 exp->exp_obd->obd_name, doa.o_seq);
3666 last = filter_last_id(filter, doa.o_seq);
3668 skip_orphan = !!(exp->exp_connect_flags & OBD_CONNECT_SKIP_ORPHAN);
3670 CDEBUG(D_HA, "%s: deleting orphan objects from "LPU64" to "LPU64"%s\n",
3671 exp->exp_obd->obd_name, oa->o_id + 1, last,
3672 skip_orphan ? ", orphan objids won't be reused any more." : ".");
3674 for (id = last; id > oa->o_id; id--) {
3676 rc = filter_destroy(NULL, exp, &doa, NULL, NULL, NULL, NULL);
3677 if (rc && rc != -ENOENT) /* this is pretty fatal... */
3678 CEMERG("error destroying precreate objid "LPU64": %d\n",
3681 /* update last_id on disk periodically so that if we restart
3682 * we don't need to re-scan all of the just-deleted objects. */
3683 if ((id & 511) == 0 && !skip_orphan) {
3684 filter_set_last_id(filter, id - 1, doa.o_seq);
3685 filter_update_last_objid(exp->exp_obd, doa.o_seq, 0);
3689 CDEBUG(D_HA, "%s: after destroy: set last_objids["LPU64"] = "LPU64"\n",
3690 exp->exp_obd->obd_name, doa.o_seq, oa->o_id);
3693 filter_set_last_id(filter, id, doa.o_seq);
3694 rc = filter_update_last_objid(exp->exp_obd, doa.o_seq, 1);
3697 * We have destroyed orphan objects, but don't want to reuse
3698 * them. Therefore we don't reset last_id to the last created
3699 * objects. Instead, we report back to the MDS the object id
3700 * of the last orphan, so that the MDS can restart allocating
3701 * objects from this id + 1 and thus skip the whole orphan
3707 cfs_clear_bit(doa.o_seq, &filter->fo_destroys_in_progress);
3712 static int filter_precreate(struct obd_device *obd, struct obdo *oa,
3713 obd_seq group, int *num);
/*
 * filter_handle_precreate() - dispatch an MDS create request for a group:
 * either an orphan-cleanup pass (OBD_FL_DELORPHAN: set the
 * destroys_in_progress bit to abort in-flight precreates, take the create
 * lock, and call filter_destroy_precreated()), or a normal precreate
 * (compute diff = requested id - last id and call filter_precreate()).
 * Stale requests from an older connection generation are dropped, and a
 * bogus orphan-destroy reaching back more than 1.5 * OST_MAX_PRECREATE
 * is rejected with -EINVAL. Runs under fo_create_locks[group].
 */
3714 /* returns a negative error or a nonnegative number of files to create */
3715 static int filter_handle_precreate(struct obd_export *exp, struct obdo *oa,
3716 obd_seq group, struct obd_trans_info *oti)
3718 struct obd_device *obd = exp->exp_obd;
3719 struct filter_obd *filter = &obd->u.filter;
3723 /* delete orphans request */
3724 if ((oa->o_valid & OBD_MD_FLFLAGS) && (oa->o_flags & OBD_FL_DELORPHAN)){
3725 obd_id last = filter_last_id(filter, group);
3727 if (oti->oti_conn_cnt < exp->exp_conn_cnt) {
3728 CERROR("%s: dropping old orphan cleanup request\n",
3732 /* This causes inflight precreates to abort and drop lock */
3733 cfs_set_bit(group, &filter->fo_destroys_in_progress);
3734 cfs_mutex_lock(&filter->fo_create_locks[group]);
3735 if (!cfs_test_bit(group, &filter->fo_destroys_in_progress)) {
3736 CERROR("%s:["LPU64"] destroys_in_progress already cleared\n",
3737 exp->exp_obd->obd_name, group);
3738 cfs_mutex_unlock(&filter->fo_create_locks[group]);
3741 diff = oa->o_id - last;
3742 CDEBUG(D_HA, "filter_last_id() = "LPU64" -> diff = %d\n",
3745 if (-diff > (OST_MAX_PRECREATE * 3) / 2) {
3746 CERROR("%s: ignoring bogus orphan destroy request: "
3747 "obdid "LPU64" last_id "LPU64"\n", obd->obd_name,
3749 /* FIXME: should reset precreate_next_id on MDS */
3750 GOTO(out, rc = -EINVAL);
3753 rc = filter_destroy_precreated(exp, oa, filter);
3755 CERROR("%s: unable to write lastobjid, but "
3756 "orphans were deleted\n", obd->obd_name);
3759 /* XXX: Used by MDS for the first time! */
3760 cfs_clear_bit(group, &filter->fo_destroys_in_progress);
3763 cfs_mutex_lock(&filter->fo_create_locks[group]);
3764 if (oti->oti_conn_cnt < exp->exp_conn_cnt) {
3765 CERROR("%s: dropping old precreate request\n",
3769 /* only precreate if group == 0 and o_id is specfied */
3770 if (!fid_seq_is_mdt(group) || oa->o_id == 0)
3773 diff = oa->o_id - filter_last_id(filter, group);
3774 CDEBUG(D_RPCTRACE, "filter_last_id() = "LPU64" -> diff = %d\n",
3775 filter_last_id(filter, group), diff);
3778 * Check obd->obd_recovering to handle the race condition
3779 * while recreating missing precreated objects through
3780 * filter_preprw_write() and mds_lov_clear_orphans()
3783 LASSERTF(ergo(!obd->obd_recovering, diff >= 0),
3784 "%s: "LPU64" - "LPU64" = %d\n", obd->obd_name,
3785 oa->o_id, filter_last_id(filter, group), diff);
3789 oa->o_id = filter_last_id(&obd->u.filter, group);
3790 rc = filter_precreate(obd, oa, group, &diff);
3791 oa->o_id = filter_last_id(&obd->u.filter, group);
3793 oa->o_valid |= (OBD_MD_FLID | OBD_MD_FLGROUP);
3796 /* else diff == 0 */
3799 cfs_mutex_unlock(&filter->fo_create_locks[group]);
/*
 * filter_statfs() - report filesystem usage for this OST. Copies the
 * cached obd_osfs under obd_osfs_lock (refreshed by fsfilt_statfs with
 * max_age), then reduces os_bavail by space already committed to clients
 * (tot_dirty + tot_pending, rounded up to blocks) plus llog reservation.
 * Fault-injection hooks can force ENOSPC/ENOINO for this OST index, and
 * os_state reflects read-only / RAID-degraded backing storage so the MDS
 * can avoid allocating objects here.
 */
3803 static int filter_statfs(const struct lu_env *env, struct obd_export *exp,
3804 struct obd_statfs *osfs, __u64 max_age, __u32 flags)
3806 struct obd_device *obd = class_exp2obd(exp);
3807 struct filter_obd *filter = &obd->u.filter;
3808 int blockbits = obd->u.obt.obt_sb->s_blocksize_bits;
3809 struct lr_server_data *lsd = class_server_data(obd);
3813 /* at least try to account for cached pages. its still racey and
3814 * might be under-reporting if clients haven't announced their
3815 * caches with brw recently */
3816 cfs_spin_lock(&obd->obd_osfs_lock);
3817 rc = fsfilt_statfs(obd, obd->u.obt.obt_sb, max_age);
3818 memcpy(osfs, &obd->obd_osfs, sizeof(*osfs));
3819 cfs_spin_unlock(&obd->obd_osfs_lock);
3821 CDEBUG(D_SUPER | D_CACHE, "blocks cached "LPU64" granted "LPU64
3822 " pending "LPU64" free "LPU64" avail "LPU64"\n",
3823 filter->fo_tot_dirty, filter->fo_tot_granted,
3824 filter->fo_tot_pending,
3825 osfs->os_bfree << blockbits, osfs->os_bavail << blockbits);
3827 filter_grant_sanity_check(obd, __func__);
3829 osfs->os_bavail -= min(osfs->os_bavail, GRANT_FOR_LLOG(obd) +
3830 ((filter->fo_tot_dirty + filter->fo_tot_pending +
3831 osfs->os_bsize - 1) >> blockbits));
3833 if (OBD_FAIL_CHECK_VALUE(OBD_FAIL_OST_ENOSPC,
3834 le32_to_cpu(lsd->lsd_ost_index)))
3835 osfs->os_bfree = osfs->os_bavail = 2;
3837 if (OBD_FAIL_CHECK_VALUE(OBD_FAIL_OST_ENOINO,
3838 le32_to_cpu(lsd->lsd_ost_index)))
3841 /* set EROFS to state field if FS is mounted as RDONLY. The goal is to
3842 * stop creating files on MDS if OST is not good shape to create
3846 if (filter->fo_obt.obt_sb->s_flags & MS_RDONLY)
3847 osfs->os_state = OS_STATE_READONLY;
3849 if (filter->fo_raid_degraded)
3850 osfs->os_state |= OS_STATE_DEGRADED;
/*
 * filter_use_existing_obj() - reclaim an already-existing (leftover)
 * object for reuse during precreate. If the inode still carries both
 * SUID+SGID (the "fresh precreated object" marker), nothing to do;
 * otherwise start a setattr transaction and restore mode to
 * S_ISUID|S_ISGID|0666 so the object behaves like a newly created one.
 * NOTE(review): the commit/cleanup tail of this function is elided in
 * this excerpt.
 */
3854 static int filter_use_existing_obj(struct obd_device *obd,
3855 struct dentry *dchild, void **handle,
3858 struct inode *inode = dchild->d_inode;
3862 if ((inode->i_mode & (S_ISUID | S_ISGID)) == (S_ISUID|S_ISGID))
3865 *handle = fsfilt_start_log(obd, inode, FSFILT_OP_SETATTR, NULL, 1);
3866 if (IS_ERR(*handle))
3867 return PTR_ERR(*handle);
3869 iattr.ia_valid = ATTR_MODE;
3870 iattr.ia_mode = S_ISUID | S_ISGID |0666;
3871 rc = fsfilt_setattr(obd, dchild, *handle, &iattr, 1);
/*
 * filter_calc_free_inodes() - return the current free-inode count of the
 * backing filesystem, refreshing the cached statfs (1-second max age)
 * under obd_osfs_lock. Returns (__u64)-1 if the statfs refresh fails.
 */
3878 static __u64 filter_calc_free_inodes(struct obd_device *obd)
3881 __u64 os_ffree = -1;
3883 cfs_spin_lock(&obd->obd_osfs_lock);
3884 rc = fsfilt_statfs(obd, obd->u.obt.obt_sb, cfs_time_shift_64(1));
3886 os_ffree = obd->obd_osfs.os_ffree;
3887 cfs_spin_unlock(&obd->obd_osfs_lock);
/*
 * filter_precreate() - create up to *num objects in the given sequence
 * (group), either as normal precreation (ids continue from last_id) or to
 * recreate specific missing objects (OBD_FL_RECREATE_OBJS). Checks free
 * space first and flags OBD_FL_NOSPC_BLK when below ~0.1% available.
 * Each iteration: pick next_id, refuse ids past IDIF_MAX_OID/OBIF_MAX_OID
 * for the sequence type, lock the parent dir, create the child with
 * SUID+SGID+0666 (marker for "take UID/GID from first client write"),
 * zero a/c/m times (so any client timestamp wins; see LU-221/LU-1042),
 * and persist the new last_id. Aborts early if an orphan destroy starts
 * (fo_destroys_in_progress) or if precreation is running slow
 * (DISK_TIMEOUT/2). Caller holds fo_create_locks[group].
 * NOTE(review): some branch/cleanup lines are elided in this excerpt.
 */
3892 /* We rely on the fact that only one thread will be creating files in a given
3893 * group at a time, which is why we don't need an atomic filter_get_new_id.
3894 * Even if we had that atomic function, the following race would exist:
3896 * thread 1: gets id x from filter_next_id
3897 * thread 2: gets id (x + 1) from filter_next_id
3898 * thread 2: creates object (x + 1)
3899 * thread 1: tries to create object x, gets -ENOSPC
3901 * Caller must hold fo_create_locks[group]
3903 static int filter_precreate(struct obd_device *obd, struct obdo *oa,
3904 obd_seq group, int *num)
3906 struct dentry *dchild = NULL, *dparent = NULL;
3907 struct filter_obd *filter;
3908 struct obd_statfs *osfs;
3910 int err = 0, rc = 0, recreate_obj = 0, i;
3911 cfs_time_t enough_time = cfs_time_shift(DISK_TIMEOUT/2);
3914 void *handle = NULL;
3917 filter = &obd->u.filter;
3919 LASSERT_MUTEX_LOCKED(&filter->fo_create_locks[group]);
3921 OBD_FAIL_TIMEOUT(OBD_FAIL_TGT_DELAY_PRECREATE, obd_timeout / 2);
3923 if ((oa->o_valid & OBD_MD_FLFLAGS) &&
3924 (oa->o_flags & OBD_FL_RECREATE_OBJS)) {
3927 OBD_ALLOC(osfs, sizeof(*osfs));
3930 rc = filter_statfs(NULL, obd->obd_self_export, osfs,
3931 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
3933 if (rc == 0 && osfs->os_bavail < (osfs->os_blocks >> 10)) {
3934 CDEBUG(D_RPCTRACE,"%s: not enough space for create "
3935 LPU64"\n", obd->obd_name, osfs->os_bavail <<
3936 obd->u.obt.obt_vfsmnt->mnt_sb->s_blocksize_bits);
3938 if (oa->o_valid & OBD_MD_FLFLAGS)
3939 oa->o_flags |= OBD_FL_NOSPC_BLK;
3941 oa->o_valid |= OBD_MD_FLFLAGS;
3942 oa->o_flags = OBD_FL_NOSPC_BLK;
3947 OBD_FREE(osfs, sizeof(*osfs));
3952 CDEBUG(D_RPCTRACE, "%s: precreating %d objects in group "LPU64
3953 " at "LPU64"\n", obd->obd_name, *num, group, oa->o_id);
3955 for (i = 0; i < *num && err == 0; i++) {
3956 int cleanup_phase = 0;
3958 if (cfs_test_bit(group, &filter->fo_destroys_in_progress)) {
3959 CWARN("%s: create aborted by destroy\n",
3968 last_id = filter_last_id(filter, group);
3969 if (next_id > last_id) {
3970 CERROR("Error: Trying to recreate obj greater"
3971 "than last id "LPD64" > "LPD64"\n",
3973 GOTO(cleanup, rc = -EINVAL);
3976 next_id = filter_last_id(filter, group) + 1;
3978 /* Don't create objects beyond the valid range for this SEQ */
3979 if (unlikely(fid_seq_is_mdt0(group) &&
3980 next_id >= IDIF_MAX_OID)) {
3981 CERROR("%s:"POSTID" hit the IDIF_MAX_OID (1<<48)!\n",
3982 obd->obd_name, next_id, group);
3983 GOTO(cleanup, rc = -ENOSPC);
3984 } else if (unlikely(!fid_seq_is_mdt0(group) &&
3985 next_id >= OBIF_MAX_OID)) {
3986 CERROR("%s:"POSTID" hit the OBIF_MAX_OID (1<<32)!\n",
3987 obd->obd_name, next_id, group);
3988 GOTO(cleanup, rc = -ENOSPC);
3991 dparent = filter_parent_lock(obd, group, next_id);
3992 if (IS_ERR(dparent))
3993 GOTO(cleanup, rc = PTR_ERR(dparent));
3994 cleanup_phase = 1; /* filter_parent_unlock(dparent) */
3996 dchild = filter_fid2dentry(obd, dparent, group, next_id);
3998 GOTO(cleanup, rc = PTR_ERR(dchild));
3999 cleanup_phase = 2; /* f_dput(dchild) */
4001 if (dchild->d_inode != NULL) {
4002 /* This would only happen if lastobjid was bad on disk*/
4003 /* Could also happen if recreating missing obj but it
4004 * already exists. */
4006 CERROR("%s: recreating existing object %.*s?\n",
4007 obd->obd_name, dchild->d_name.len,
4008 dchild->d_name.name);
4010 /* Use these existing objects if they are
4012 if (i_size_read(dchild->d_inode) == 0) {
4013 rc = filter_use_existing_obj(obd,dchild,
4014 &handle, &cleanup_phase);
4021 CERROR("%s: Serious error: objid %.*s already "
4022 "exists; is this filesystem corrupt?\n",
4023 obd->obd_name, dchild->d_name.len,
4024 dchild->d_name.name);
4027 GOTO(cleanup, rc = -EEXIST);
4030 handle = fsfilt_start_log(obd, dparent->d_inode,
4031 FSFILT_OP_CREATE, NULL, 1);
4033 GOTO(cleanup, rc = PTR_ERR(handle));
4036 CDEBUG(D_INODE, "%s: filter_precreate(od->o_seq="LPU64
4037 ",od->o_id="LPU64")\n", obd->obd_name, group,
4040 /* We mark object SUID+SGID to flag it for accepting UID+GID
4041 * from client on first write. Currently the permission bits
4042 * on the OST are never used, so this is OK. */
4043 rc = ll_vfs_create(dparent->d_inode, dchild,
4044 S_IFREG | S_ISUID | S_ISGID | 0666, NULL);
4046 CWARN("%s: create failed: rc = %d\n", obd->obd_name,rc);
4047 if (rc == -ENOSPC) {
4048 os_ffree = filter_calc_free_inodes(obd);
4052 if (obd->obd_osfs.os_bavail <
4053 (obd->obd_osfs.os_blocks >> 10)) {
4054 if (oa->o_valid & OBD_MD_FLFLAGS) {
4055 oa->o_flags |= OBD_FL_NOSPC_BLK;
4057 oa->o_valid |= OBD_MD_FLFLAGS;
4058 oa->o_flags = OBD_FL_NOSPC_BLK;
4061 CWARN("%s: free inode "LPU64"\n",
4062 obd->obd_name, os_ffree);
4068 if (dchild->d_inode)
4069 CDEBUG(D_INFO, "objid "LPU64" got inum %lu\n", next_id,
4070 dchild->d_inode->i_ino);
4073 /* Initialize a/c/m time so any client timestamp will always
4074 * be newer and update the inode. ctime = 0 is also handled
4075 * specially in fsfilt_ext3_setattr(). See LU-221, LU-1042 */
4076 iattr.ia_valid = ATTR_ATIME | ATTR_MTIME | ATTR_CTIME;
4077 LTIME_S(iattr.ia_atime) = 0;
4078 LTIME_S(iattr.ia_mtime) = 0;
4079 LTIME_S(iattr.ia_ctime) = 0;
4080 err = fsfilt_setattr(obd, dchild, handle, &iattr, 0);
4082 CWARN("%s: unable to initialize a/c/m time of newly "
4083 "created object %.*s: rc = %d\n",
4084 obd->obd_name, dchild->d_name.len,
4085 dchild->d_name.name, err);
4087 if (!recreate_obj) {
4088 filter_set_last_id(filter, next_id, group);
4089 err = filter_update_last_objid(obd, group, 0);
4091 CERROR("%s: unable to write lastobjid "
4092 "but file created: rc = %d\n",
4093 obd->obd_name, err);
4097 switch(cleanup_phase) {
4099 err = fsfilt_commit(obd, dparent->d_inode, handle, 0);
4101 CERROR("error on commit, err = %d\n", err);
4108 filter_parent_unlock(dparent);
4115 if (cfs_time_after(jiffies, enough_time)) {
4118 "%s: precreate slow - want %d got %d \n",
4119 obd->obd_name, *num, i);
4126 "%s: created %d objects for group "POSTID" rc %d\n",
4127 obd->obd_name, i, filter->fo_last_objids[group], group, rc);
/*
 * filter_create() - OBD create entry point for the filter. Validates the
 * request's object sequence against the export (1.8 clients without
 * OBD_CONNECT_FULL20 may only use MDT0/LLOG/ECHO sequences; 2.0+ clients
 * must match their connect-time fed_group), then either recreates
 * specific missing objects (OBD_FL_RECREATE_OBJS, only valid during
 * recovery and for ids <= last_id) under fo_create_locks, or hands off to
 * filter_handle_precreate(). On success the caller-supplied *ea (if any)
 * gets lsm_object_id set from the resulting oa->o_id.
 * NOTE(review): allocation of lsm and some early-return lines are elided
 * in this excerpt.
 */
4132 int filter_create(const struct lu_env *env, struct obd_export *exp,
4133 struct obdo *oa, struct lov_stripe_md **ea,
4134 struct obd_trans_info *oti)
4136 struct obd_device *obd = exp->exp_obd;
4137 struct filter_export_data *fed;
4138 struct filter_obd *filter;
4139 struct lvfs_run_ctxt saved;
4140 struct lov_stripe_md *lsm = NULL;
4144 CDEBUG(D_INODE, "%s: filter_create(group="LPU64",id="
4145 LPU64")\n", obd->obd_name, oa->o_seq, oa->o_id);
4147 fed = &exp->exp_filter_data;
4148 filter = &obd->u.filter;
4150 /* 1.8 client doesn't carry the ocd_group with connect request,
4151 * so the fed_group will always be zero for 1.8 client. */
4152 if (!(exp->exp_connect_flags & OBD_CONNECT_FULL20)) {
4153 if (oa->o_seq != FID_SEQ_OST_MDT0 &&
4154 oa->o_seq != FID_SEQ_LLOG &&
4155 oa->o_seq != FID_SEQ_ECHO) {
4156 CERROR("The request from older client has invalid"
4157 " group "LPU64"!\n", oa->o_seq);
4160 } else if (fed->fed_group != oa->o_seq) {
4161 CERROR("%s: this export (nid %s) used object group %d "
4162 "earlier; now it's trying to use group "LPU64"!"
4163 " This could be a bug in the MDS. Please report to "
4164 "http://bugzilla.lustre.org/\n", obd->obd_name,
4165 obd_export_nid2str(exp), fed->fed_group, oa->o_seq);
4172 rc = obd_alloc_memmd(exp, &lsm);
4179 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
4181 if ((oa->o_valid & OBD_MD_FLFLAGS) &&
4182 (oa->o_flags & OBD_FL_RECREATE_OBJS)) {
4183 if (!obd->obd_recovering ||
4184 oa->o_id > filter_last_id(filter, oa->o_seq)) {
4185 CERROR("recreate objid "LPU64" > last id "LPU64"\n",
4186 oa->o_id, filter_last_id(filter, oa->o_seq));
4190 cfs_mutex_lock(&filter->fo_create_locks[oa->o_seq]);
4191 rc = filter_precreate(obd, oa, oa->o_seq, &diff);
4192 cfs_mutex_unlock(&filter->fo_create_locks[oa->o_seq]);
4195 rc = filter_handle_precreate(exp, oa, oa->o_seq, oti);
4198 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
4199 if (rc && ea != NULL && *ea != lsm) {
4200 obd_free_memmd(exp, &lsm);
4201 } else if (rc == 0 && ea != NULL) {
4202 /* XXX LOV STACKING: the lsm that is passed to us from
4203 * LOV does not have valid lsm_oinfo data structs, so
4204 * don't go touching that. This needs to be fixed in a
4206 lsm->lsm_object_id = oa->o_id;
/*
 * filter_destroy() - destroy one OST object. Flow: authorize via capa,
 * look up the child dentry (if the inode is already gone, cancel the
 * MDS's unlink llog cookie immediately and return -ENOENT), then
 * truncate the object to 0 in its own transaction first — under
 * i_alloc_sem + i_mutex to avoid the journal/i_zombie deadlock (bug
 * 4180) and concurrent-writer races (bug 20321) — before taking the
 * parent lock only for the actual unlink (bug 7171: keeps the parent
 * unlocked during the potentially long truncate). The unlink transaction
 * registers filter_cancel_cookies_cb as a commit callback so the llog
 * cookie is cancelled once the unlink is durable; if registration fails,
 * filter_finish_transno forces sync commit and the callback is invoked
 * directly. Cleanup phases: 3 = unlock parent, 4 = commit handle.
 * NOTE(review): several lines (ENTRY, some conditionals, phase labels)
 * are elided in this excerpt.
 */
4213 int filter_destroy(const struct lu_env *env, struct obd_export *exp,
4214 struct obdo *oa, struct lov_stripe_md *md,
4215 struct obd_trans_info *oti, struct obd_export *md_exp,
4218 struct obd_device *obd;
4219 struct filter_obd *filter;
4220 struct dentry *dchild = NULL, *dparent = NULL;
4221 struct lustre_handle lockh = { 0 };
4222 struct lvfs_run_ctxt saved;
4223 void *handle = NULL;
4224 struct llog_cookie *fcc = NULL;
4225 int rc, rc2, cleanup_phase = 0, sync = 0;
4230 rc = filter_auth_capa(exp, NULL, oa->o_seq,
4231 (struct lustre_capa *)capa, CAPA_OPC_OSS_DESTROY);
4236 filter = &obd->u.filter;
4238 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
4241 CDEBUG(D_INODE, "%s: filter_destroy(group="LPU64",oid="
4242 LPU64")\n", obd->obd_name, oa->o_seq, oa->o_id);
4244 dchild = filter_fid2dentry(obd, NULL, oa->o_seq, oa->o_id);
4246 GOTO(cleanup, rc = PTR_ERR(dchild));
4249 if (dchild->d_inode == NULL) {
4250 CDEBUG(D_INODE, "destroying non-existent object "POSTID"\n",
4251 oa->o_id, oa->o_seq);
4252 /* If object already gone, cancel cookie right now */
4253 if (oa->o_valid & OBD_MD_FLCOOKIE) {
4254 struct llog_ctxt *ctxt;
4255 struct obd_llog_group *olg;
4257 olg = filter_find_olg(obd, oa->o_seq);
4259 CERROR(" %s: can not find olg of group %d\n",
4260 obd->obd_name, (int)oa->o_seq);
4261 GOTO(cleanup, rc = PTR_ERR(olg));
4263 fcc = &oa->o_lcookie;
4264 ctxt = llog_group_get_ctxt(olg, fcc->lgc_subsys + 1);
4265 llog_cancel(NULL, ctxt, NULL, 1, fcc, 0);
4266 llog_ctxt_put(ctxt);
4267 fcc = NULL; /* we didn't allocate fcc, don't free it */
4269 GOTO(cleanup, rc = -ENOENT);
4272 rc = filter_prepare_destroy(obd, oa->o_id, oa->o_seq, &lockh);
4276 /* Our MDC connection is established by the MDS to us */
4277 if (oa->o_valid & OBD_MD_FLCOOKIE) {
4278 OBD_ALLOC(fcc, sizeof(*fcc));
4280 *fcc = oa->o_lcookie;
4283 /* we're gonna truncate it first in order to avoid possible deadlock:
4285 * open trasaction open transaction
4286 * down(i_zombie) down(i_zombie)
4287 * restart transaction
4288 * (see BUG 4180) -bzzz
4290 * take i_alloc_sem too to prevent other threads from writing to the
4291 * file while we are truncating it. This can cause lock ordering issue
4292 * between page lock, i_mutex & starting new journal handle.
4293 * (see bug 20321) -johann
4296 down_write(&dchild->d_inode->i_alloc_sem);
4297 mutex_lock(&dchild->d_inode->i_mutex);
4298 fsfilt_check_slow(exp->exp_obd, now, "i_alloc_sem and i_mutex");
4300 /* VBR: version recovery check */
4301 rc = filter_version_get_check(exp, oti, dchild->d_inode);
4303 mutex_unlock(&dchild->d_inode->i_mutex);
4304 up_write(&dchild->d_inode->i_alloc_sem);
4308 handle = fsfilt_start_log(obd, dchild->d_inode, FSFILT_OP_SETATTR,
4310 if (IS_ERR(handle)) {
4311 mutex_unlock(&dchild->d_inode->i_mutex);
4312 up_write(&dchild->d_inode->i_alloc_sem);
4313 GOTO(cleanup, rc = PTR_ERR(handle));
4316 /* Locking order: i_mutex -> journal_lock -> dqptr_sem. LU-952 */
4317 ll_vfs_dq_init(dchild->d_inode);
4319 iattr.ia_valid = ATTR_SIZE;
4321 rc = fsfilt_setattr(obd, dchild, handle, &iattr, 1);
4322 rc2 = fsfilt_commit(obd, dchild->d_inode, handle, 0);
4323 mutex_unlock(&dchild->d_inode->i_mutex);
4324 up_write(&dchild->d_inode->i_alloc_sem);
4328 GOTO(cleanup, rc = rc2);
4330 /* We don't actually need to lock the parent until we are unlinking
4331 * here, and not while truncating above. That avoids holding the
4332 * parent lock for a long time during truncate, which can block other
4333 * threads from doing anything to objects in that directory. bug 7171 */
4334 dparent = filter_parent_lock(obd, oa->o_seq, oa->o_id);
4335 if (IS_ERR(dparent))
4336 GOTO(cleanup, rc = PTR_ERR(dparent));
4337 cleanup_phase = 3; /* filter_parent_unlock */
4339 mutex_lock(&dchild->d_inode->i_mutex);
4340 handle = fsfilt_start_log(obd, dparent->d_inode,
4341 FSFILT_OP_UNLINK, oti, 1);
4342 if (IS_ERR(handle)) {
4343 mutex_unlock(&dchild->d_inode->i_mutex);
4344 GOTO(cleanup, rc = PTR_ERR(handle));
4346 cleanup_phase = 4; /* fsfilt_commit */
4348 /* Quota release need uid/gid of inode */
4349 obdo_from_inode(oa, dchild->d_inode, OBD_MD_FLUID | OBD_MD_FLGID);
4351 filter_fmd_drop(exp, oa->o_id, oa->o_seq);
4353 /* this drops dchild->d_inode->i_mutex unconditionally */
4354 rc = filter_destroy_internal(obd, oa->o_id, oa->o_seq, dparent, dchild);
4358 switch(cleanup_phase) {
4361 sync = fsfilt_add_journal_cb(obd, 0, oti ?
4362 oti->oti_handle : handle,
4363 filter_cancel_cookies_cb,
4365 /* If add_journal_cb failed, then filter_finish_transno
4366 * will commit the handle and we will do a sync
4367 * on commit. then we call callback directly to free
4370 rc = filter_finish_transno(exp, NULL, oti, rc, sync);
4372 filter_cancel_cookies_cb(obd, 0, fcc, rc);
4375 rc2 = fsfilt_commit(obd, dparent->d_inode, handle, 0);
4377 CERROR("error on commit, err = %d\n", rc2);
4384 filter_parent_unlock(dparent);
4386 filter_fini_destroy(obd, &lockh);
4390 OBD_FREE(fcc, sizeof(*fcc));
4392 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
4395 CERROR("invalid cleanup_phase %d\n", cleanup_phase);
/*
 * filter_truncate() - truncate an object to the start of the requested
 * extent. Punch (extent end != OBD_OBJECT_EOF) is not supported and is
 * rejected; otherwise the extent start is copied into oa->o_size and the
 * operation is implemented as filter_setattr() (see the "until we have
 * filter_punch()" note there).
 */
4402 /* NB start and end are used for punch, but not truncate */
4403 static int filter_truncate(const struct lu_env *env, struct obd_export *exp,
4404 struct obd_info *oinfo, struct obd_trans_info *oti,
4405 struct ptlrpc_request_set *rqset)
4410 if (oinfo->oi_policy.l_extent.end != OBD_OBJECT_EOF) {
4411 CERROR("PUNCH not supported, only truncate: end = "LPX64"\n",
4412 oinfo->oi_policy.l_extent.end);
4416 CDEBUG(D_INODE, "calling truncate for object "LPU64", valid = "LPX64
4417 ", o_size = "LPD64"\n", oinfo->oi_oa->o_id,oinfo->oi_oa->o_valid,
4418 oinfo->oi_policy.l_extent.start);
4420 oinfo->oi_oa->o_size = oinfo->oi_policy.l_extent.start;
4421 rc = filter_setattr(env, exp, oinfo, oti);
/*
 * filter_sync() - flush an object (or the whole filesystem) to stable
 * storage. Without a valid object id, syncs the entire backing fs and
 * flushes pending llog cancels to the target. Otherwise: write out the
 * object's dirty pages (filemap_fdatawrite), invoke the backing fs fsync
 * method (borrowed from the last_rcvd file's f_op since the object has
 * no struct file), and wait for writeback — all under i_mutex. On return
 * oa carries refreshed attributes from the inode.
 * NOTE(review): start/end are accepted but the visible code flushes the
 * whole mapping, not just that range.
 */
4425 static int filter_sync(const struct lu_env *env, struct obd_export *exp,
4426 struct obd_info *oinfo, obd_off start, obd_off end,
4427 struct ptlrpc_request_set *set)
4429 struct lvfs_run_ctxt saved;
4430 struct obd_device_target *obt;
4431 struct dentry *dentry;
4435 rc = filter_auth_capa(exp, NULL, oinfo->oi_oa->o_seq,
4436 (struct lustre_capa *)oinfo->oi_capa,
4437 CAPA_OPC_OSS_WRITE);
4441 obt = &exp->exp_obd->u.obt;
4443 /* An objid of zero is taken to mean "sync whole filesystem" */
4444 if (!oinfo->oi_oa || !(oinfo->oi_oa->o_valid & OBD_MD_FLID)) {
4445 rc = fsfilt_sync(exp->exp_obd, obt->obt_sb);
4446 /* Flush any remaining cancel messages out to the target */
4447 filter_sync_llogs(exp->exp_obd, exp);
4451 dentry = filter_oa2dentry(exp->exp_obd, &oinfo->oi_oa->o_oi);
4453 RETURN(PTR_ERR(dentry));
4455 push_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
4457 mutex_lock(&dentry->d_inode->i_mutex);
4459 rc = filemap_fdatawrite(dentry->d_inode->i_mapping);
4461 /* just any file to grab fsync method - "file" arg unused */
4462 struct file *file = obt->obt_rcvd_filp;
4464 if (file->f_op && file->f_op->fsync)
4465 rc = file->f_op->fsync(NULL, dentry, 1);
4467 rc2 = filemap_fdatawait(dentry->d_inode->i_mapping);
4471 mutex_unlock(&dentry->d_inode->i_mutex);
4473 oinfo->oi_oa->o_valid = OBD_MD_FLID;
4474 obdo_from_inode(oinfo->oi_oa, dentry->d_inode, FILTER_VALID_FLAGS);
4476 pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
4478 filter_counter_incr(exp, LPROC_FILTER_STATS_SYNC, oinfo->oi_jobid, 1);
/*
 * filter_get_info() - generic key/value query interface. Supported keys:
 * KEY_BLOCKSIZE and KEY_BLOCKSIZE_BITS (from the backing superblock),
 * KEY_LAST_ID (last allocated object id for the export's group),
 * KEY_FIEMAP (run FSFILT_IOC_FIEMAP on the object named in the key; a
 * NULL val returns only the required buffer size), and
 * KEY_SYNC_LOCK_CANCEL (current sync-on-lock-cancel mode). Each branch
 * validates *vallen before writing and sets it to the actual size.
 * Unknown keys log and fall through to an error return (elided here).
 */
4483 static int filter_get_info(const struct lu_env *env, struct obd_export *exp,
4484 __u32 keylen, void *key, __u32 *vallen, void *val,
4485 struct lov_stripe_md *lsm)
4487 struct obd_device *obd;
4490 obd = class_exp2obd(exp);
4492 CDEBUG(D_IOCTL, "invalid client export %p\n", exp);
4496 if (KEY_IS(KEY_BLOCKSIZE)) {
4497 __u32 *blocksize = val;
4499 if (*vallen < sizeof(*blocksize))
4501 *blocksize = obd->u.obt.obt_sb->s_blocksize;
4503 *vallen = sizeof(*blocksize);
4507 if (KEY_IS(KEY_BLOCKSIZE_BITS)) {
4508 __u32 *blocksize_bits = val;
4509 if (blocksize_bits) {
4510 if (*vallen < sizeof(*blocksize_bits))
4512 *blocksize_bits = obd->u.obt.obt_sb->s_blocksize_bits;
4514 *vallen = sizeof(*blocksize_bits);
4518 if (KEY_IS(KEY_LAST_ID)) {
4519 obd_id *last_id = val;
4520 /* FIXME: object groups */
4522 if (*vallen < sizeof(*last_id))
4524 *last_id = filter_last_id(&obd->u.filter,
4525 exp->exp_filter_data.fed_group);
4527 *vallen = sizeof(*last_id);
4531 if (KEY_IS(KEY_FIEMAP)) {
4532 struct ll_fiemap_info_key *fm_key = key;
4533 struct dentry *dentry;
4534 struct ll_user_fiemap *fiemap = val;
4535 struct lvfs_run_ctxt saved;
4538 if (fiemap == NULL) {
4539 *vallen = fiemap_count_to_size(
4540 fm_key->fiemap.fm_extent_count);
4544 dentry = __filter_oa2dentry(exp->exp_obd, &fm_key->oa.o_oi,
4547 RETURN(PTR_ERR(dentry));
4549 memcpy(fiemap, &fm_key->fiemap, sizeof(*fiemap));
4550 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
4551 rc = fsfilt_iocontrol(obd, dentry, FSFILT_IOC_FIEMAP,
4553 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
4559 if (KEY_IS(KEY_SYNC_LOCK_CANCEL)) {
4560 *((__u32 *) val) = obd->u.filter.fo_sync_lock_cancel;
4561 *vallen = sizeof(__u32);
4565 CDEBUG(D_IOCTL, "invalid key\n");
/*
 * filter_setup_llog_group() - find or create the llog group for this
 * sequence, bind the (MDS) export to it, and point the MDS_OST
 * replicator context at the export's reverse import so llog cancel
 * traffic can flow back to the MDS.
 */
4569 static inline int filter_setup_llog_group(struct obd_export *exp,
4570 struct obd_device *obd,
4573 struct obd_llog_group *olg;
4574 struct llog_ctxt *ctxt;
4577 olg = filter_find_create_olg(obd, group);
4579 RETURN(PTR_ERR(olg));
4581 llog_group_set_export(olg, exp);
4583 ctxt = llog_group_get_ctxt(olg, LLOG_MDS_OST_REPL_CTXT);
4584 LASSERTF(ctxt != NULL, "ctxt is null\n");
4586 rc = llog_receptor_accept(ctxt, exp->exp_imp_reverse);
4587 llog_ctxt_put(ctxt);
/*
 * filter_set_grant_shrink() - process a client's grant-shrink request by
 * running the incoming-grant accounting under obd_osfs_lock.
 */
4591 static int filter_set_grant_shrink(struct obd_export *exp,
4592 struct ost_body *body)
4594 /* handle shrink grant */
4595 cfs_spin_lock(&exp->exp_obd->obd_osfs_lock);
4596 filter_grant_incoming(exp, &body->oa);
4597 cfs_spin_unlock(&exp->exp_obd->obd_osfs_lock);
/*
 * filter_set_mds_conn() - register the incoming MDS connection: remember
 * its export cookie in fo_mdc_conn and wire up the llog group for the
 * sequence passed in val (defaulting to 0 when val is absent). For
 * FID_SEQ_OST_MDT0 also sets up the FID_SEQ_LLOG group for interop with
 * older peers.
 */
4603 static int filter_set_mds_conn(struct obd_export *exp, void *val)
4605 struct obd_device *obd;
4611 CDEBUG(D_IOCTL, "invalid export %p\n", exp);
4615 LCONSOLE_WARN("%s: received MDS connection from %s\n", obd->obd_name,
4616 obd_export_nid2str(exp));
4617 obd->u.filter.fo_mdc_conn.cookie = exp->exp_handle.h_cookie;
4619 /* setup llog imports */
4621 group = (int)(*(__u32 *)val);
4623 group = 0; /* default value */
4625 LASSERT_SEQ_IS_MDT(group);
4626 rc = filter_setup_llog_group(exp, obd, group);
4630 if (group == FID_SEQ_OST_MDT0) {
4631 /* setup llog group 1 for interop */
4632 filter_setup_llog_group(exp, obd, FID_SEQ_LLOG);
/*
 * filter_set_info_async() - generic key/value set interface. Dispatches
 * on key: KEY_CAPA_KEY updates the capability key, KEY_REVIMP_UPD
 * refreshes the reverse import, KEY_SPTLRPC_CONF re-reads the sptlrpc
 * configuration, KEY_MDS_CONN registers an MDS connection, and
 * KEY_GRANT_SHRINK processes a grant-shrink request. The request set
 * argument is unused by the visible branches.
 */
4638 static int filter_set_info_async(const struct lu_env *env,
4639 struct obd_export *exp, __u32 keylen,
4640 void *key, __u32 vallen, void *val,
4641 struct ptlrpc_request_set *set)
4643 struct obd_device *obd;
4648 CDEBUG(D_IOCTL, "invalid export %p\n", exp);
4652 if (KEY_IS(KEY_CAPA_KEY)) {
4654 rc = filter_update_capa_key(obd, (struct lustre_capa_key *)val);
4656 CERROR("filter update capability key failed: %d\n", rc);
4660 if (KEY_IS(KEY_REVIMP_UPD)) {
4661 filter_revimp_update(exp);
4665 if (KEY_IS(KEY_SPTLRPC_CONF)) {
4666 filter_adapt_sptlrpc_conf(obd, 0);
4670 if (KEY_IS(KEY_MDS_CONN))
4671 RETURN(filter_set_mds_conn(exp, val));
4673 if (KEY_IS(KEY_GRANT_SHRINK))
4674 RETURN(filter_set_grant_shrink(exp, val));
/*
 * filter_iocontrol() - ioctl entry point for the filter device. Handles
 * OBD_IOC_ABORT_RECOVERY (stop the recovery thread), OBD_IOC_SYNC (sync
 * the backing fs), OBD_IOC_SET_READONLY (flush the journal via an empty
 * transaction, sync, then mark the block device read-only — used by
 * tests to simulate crashes), OBD_IOC_CATLOGLIST, and the llog
 * cancel/remove/info/print ioctls (currently stubbed to -EOPNOTSUPP;
 * the code after RETURN is dead pending a proper ctxt lookup).
 */
4679 int filter_iocontrol(unsigned int cmd, struct obd_export *exp,
4680 int len, void *karg, void *uarg)
4682 struct obd_device *obd = exp->exp_obd;
4683 struct obd_ioctl_data *data = karg;
4687 case OBD_IOC_ABORT_RECOVERY: {
4688 LCONSOLE_WARN("%s: Aborting recovery.\n", obd->obd_name);
4689 target_stop_recovery_thread(obd);
4693 case OBD_IOC_SYNC: {
4694 CDEBUG(D_RPCTRACE, "syncing ost %s\n", obd->obd_name);
4695 rc = fsfilt_sync(obd, obd->u.obt.obt_sb);
4699 case OBD_IOC_SET_READONLY: {
4701 struct super_block *sb = obd->u.obt.obt_sb;
4702 struct inode *inode = sb->s_root->d_inode;
4703 BDEVNAME_DECLARE_STORAGE(tmp);
4704 CERROR("*** setting device %s read-only ***\n",
4705 ll_bdevname(sb, tmp));
4707 handle = fsfilt_start(obd, inode, FSFILT_OP_MKNOD, NULL);
4708 if (!IS_ERR(handle))
4709 rc = fsfilt_commit(obd, inode, handle, 1);
4711 CDEBUG(D_HA, "syncing ost %s\n", obd->obd_name);
4712 rc = fsfilt_sync(obd, obd->u.obt.obt_sb);
4714 rc = lvfs_set_rdonly(obd, obd->u.obt.obt_sb);
4718 case OBD_IOC_CATLOGLIST: {
4719 rc = llog_catalog_list(obd, 1, data);
4723 case OBD_IOC_LLOG_CANCEL:
4724 case OBD_IOC_LLOG_REMOVE:
4725 case OBD_IOC_LLOG_INFO:
4726 case OBD_IOC_LLOG_PRINT: {
4727 /* FIXME to be finished */
4728 RETURN(-EOPNOTSUPP);
4730 struct llog_ctxt *ctxt = NULL;
4732 push_ctxt(&saved, &ctxt->loc_exp->exp_obd->obd_lvfs_ctxt, NULL);
4733 rc = llog_ioctl(ctxt, cmd, data);
4734 pop_ctxt(&saved, &ctxt->loc_exp->exp_obd->obd_lvfs_ctxt, NULL);
/*
 * o_health_check handler: return 0 when the OST is healthy, 1 when it is
 * not (per the comment preserved below).  A read-only backing filesystem
 * counts as unhealthy; with USE_HEALTH_CHECK_WRITE enabled it also issues
 * a test write through the health-check file.
 */
4747 static int filter_health_check(const struct lu_env *env, struct obd_device *obd)
4749 #ifdef USE_HEALTH_CHECK_WRITE
4750 struct filter_obd *filter = &obd->u.filter;
4755 * health_check to return 0 on healthy
4756 * and 1 on unhealthy.
/* Read-only superblock => report unhealthy (return path not visible here). */
4758 if (obd->u.obt.obt_sb->s_flags & MS_RDONLY)
4761 #ifdef USE_HEALTH_CHECK_WRITE
4762 LASSERT(filter->fo_obt.obt_health_check_filp != NULL);
/* OR in the I/O check result, normalized to 0/1 with !!. */
4763 rc |= !!lvfs_check_io_health(obd, filter->fo_obt.obt_health_check_filp);
/*
 * lvfs callback (l_fid2dentry in filter_lvfs_ops below): thin adapter that
 * forwards to filter_fid2dentry, reordering arguments into its
 * (data, dparent=NULL, group, objid) convention.  The trailing parameter
 * list (the `data` argument) is on a line missing from this chunk.
 */
4768 static struct dentry *filter_lvfs_fid2dentry(__u64 id, __u32 gen, __u64 gr,
4771 return filter_fid2dentry(data, NULL, gr, id);
/*
 * o_process_config handler: apply a lustre_cfg record to this obdfilter
 * device.  The only case visible in this chunk routes parameter records
 * through the PARAM_OST /proc variable table; other lcfg_command cases
 * (and the case label itself) are on lines missing from this chunk.
 */
4774 static int filter_process_config(struct obd_device *obd, obd_count len,
4777 struct lustre_cfg *lcfg = buf;
4778 struct lprocfs_static_vars lvars;
4781 switch (lcfg->lcfg_command) {
4783 lprocfs_filter_init_vars(&lvars);
/* Apply "ost.*" tunable parameters through the lprocfs variable table. */
4785 rc = class_process_proc_param(PARAM_OST, lvars.obd_vars,
/*
 * o_notify handler.  On OBD_NOTIFY_CONFIG (configuration complete) it
 * clears obd_no_conn under obd_dev_lock so that client connections are
 * accepted from then on; any other event is merely logged.
 */
4795 static int filter_notify(struct obd_device *obd,
4796 struct obd_device *unused,
4797 enum obd_notify_event ev, void *data)
4800 case OBD_NOTIFY_CONFIG:
/* Connections must still have been refused up to this point. */
4801 LASSERT(obd->obd_no_conn);
4802 cfs_spin_lock(&obd->obd_dev_lock);
4803 obd->obd_no_conn = 0;
4804 cfs_spin_unlock(&obd->obd_dev_lock);
4807 CDEBUG(D_INFO, "%s: Unhandled notification %#x\n",
/*
 * lvfs callback table for this OBD type.  Uses the legacy GCC
 * "label: value" initializer syntax (pre-C99 designated initializers).
 * The closing brace is on a line missing from this chunk.
 */
4813 static struct lvfs_callback_ops filter_lvfs_ops = {
4814 l_fid2dentry: filter_lvfs_fid2dentry,
/*
 * Method table registered for the LUSTRE_OST_NAME OBD type in
 * obdfilter_init() below.  Maps the generic obd_ops entry points to the
 * filter_* implementations (lifecycle, export management, object I/O,
 * llog hooks, ioctl, health check, config, and notification handlers).
 * The closing brace is on a line missing from this chunk.
 */
4817 static struct obd_ops filter_obd_ops = {
4818 .o_owner = THIS_MODULE,
4819 .o_get_info = filter_get_info,
4820 .o_set_info_async = filter_set_info_async,
4821 .o_setup = filter_setup,
4822 .o_precleanup = filter_precleanup,
4823 .o_cleanup = filter_cleanup,
4824 .o_connect = filter_connect,
4825 .o_reconnect = filter_reconnect,
4826 .o_disconnect = filter_disconnect,
4827 .o_ping = filter_ping,
4828 .o_init_export = filter_init_export,
4829 .o_destroy_export = filter_destroy_export,
4830 .o_statfs = filter_statfs,
4831 .o_getattr = filter_getattr,
4832 .o_unpackmd = filter_unpackmd,
4833 .o_create = filter_create,
4834 .o_setattr = filter_setattr,
4835 .o_destroy = filter_destroy,
4836 .o_punch = filter_truncate,
4837 .o_sync = filter_sync,
4838 .o_preprw = filter_preprw,
4839 .o_commitrw = filter_commitrw,
4840 .o_llog_init = filter_llog_init,
4841 .o_llog_connect = filter_llog_connect,
4842 .o_llog_finish = filter_llog_finish,
4843 .o_iocontrol = filter_iocontrol,
4844 .o_health_check = filter_health_check,
4845 .o_process_config = filter_process_config,
4846 .o_notify = filter_notify,
/*
 * Module init: sanity-check the seq<->mdsno mapping, allocate the
 * create-scratchpad array and the filter_mod_data slab cache, then
 * register the OST OBD type.  On registration failure the cache and
 * scratchpad are torn down again (the error-path labels/returns are on
 * lines missing from this chunk).
 */
4849 static int __init obdfilter_init(void)
4851 struct lprocfs_static_vars lvars;
4854 /** sanity check for group<->mdsno conversion */
4855 for (i = 0; i < MAX_MDT_COUNT; i++)
4856 LASSERT(objseq_to_mdsno(mdt_to_obd_objseq(i)) == i);
4858 lprocfs_filter_init_vars(&lvars);
4860 OBD_ALLOC(obdfilter_created_scratchpad,
4861 OBDFILTER_CREATED_SCRATCHPAD_ENTRIES *
4862 sizeof(*obdfilter_created_scratchpad));
4863 if (obdfilter_created_scratchpad == NULL)
/* Slab cache for per-object modification tracking data. */
4866 ll_fmd_cachep = cfs_mem_cache_create("ll_fmd_cache",
4867 sizeof(struct filter_mod_data),
4870 GOTO(out, rc = -ENOMEM);
4872 rc = class_register_type(&filter_obd_ops, NULL, lvars.module_vars,
4873 LUSTRE_OST_NAME, NULL);
/* Error path: undo the cache and scratchpad allocations. */
4877 err = cfs_mem_cache_destroy(ll_fmd_cachep);
4878 LASSERTF(err == 0, "Cannot destroy ll_fmd_cachep: rc %d\n",err);
4879 ll_fmd_cachep = NULL;
4881 OBD_FREE(obdfilter_created_scratchpad,
4882 OBDFILTER_CREATED_SCRATCHPAD_ENTRIES *
4883 sizeof(*obdfilter_created_scratchpad));
/*
 * Module exit: mirror of obdfilter_init() — destroy the filter_mod_data
 * slab cache (asserting no objects are still live), unregister the OST
 * OBD type, and free the create scratchpad.
 */
4889 static void __exit obdfilter_exit(void)
4891 if (ll_fmd_cachep) {
4892 int rc = cfs_mem_cache_destroy(ll_fmd_cachep);
4893 LASSERTF(rc == 0, "Cannot destroy ll_fmd_cachep: rc %d\n", rc);
4894 ll_fmd_cachep = NULL;
4897 class_unregister_type(LUSTRE_OST_NAME);
4898 OBD_FREE(obdfilter_created_scratchpad,
4899 OBDFILTER_CREATED_SCRATCHPAD_ENTRIES *
4900 sizeof(*obdfilter_created_scratchpad));
/* Kernel module metadata and entry/exit point registration. */
4903 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
4904 MODULE_DESCRIPTION("Lustre Filtering OBD driver");
4905 MODULE_LICENSE("GPL");
4907 module_init(obdfilter_init);
4908 module_exit(obdfilter_exit);