1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/obdfilter/filter.c
38 * Author: Peter Braam <braam@clusterfs.com>
39 * Author: Andreas Dilger <adilger@clusterfs.com>
43 * Invariant: Get O/R i_mutex for lookup, if needed, before any journal ops
44 * (which need to get journal_lock, may block if journal full).
46 * Invariant: Call filter_start_transno() before any journal ops to avoid the
47 * same deadlock problem. We can (and want) to get rid of the
48 * transno sem in favour of the dir/inode i_mutex to avoid single
49 * threaded operation on the OST.
52 #define DEBUG_SUBSYSTEM S_FILTER
54 #ifndef AUTOCONF_INCLUDED
55 #include <linux/config.h>
57 #include <linux/module.h>
59 #include <linux/dcache.h>
60 #include <linux/init.h>
61 #include <linux/version.h>
62 #include <linux/sched.h>
63 #include <linux/mount.h>
64 #include <linux/buffer_head.h>
66 #include <obd_cksum.h>
67 #include <obd_class.h>
69 #include <lustre_dlm.h>
70 #include <lustre_fsfilt.h>
71 #include <lprocfs_status.h>
72 #include <lustre_log.h>
73 #include <libcfs/list.h>
74 #include <lustre_disk.h>
75 #include <lustre_quota.h>
76 #include <linux/slab.h>
77 #include <lustre_param.h>
78 #include <lustre/ll_fiemap.h>
80 #include "filter_internal.h"
/* VFS-context callback table for this OBD; presumably defined later in the
 * full file — TODO confirm, the definition is not in this extract. */
82 static struct lvfs_callback_ops filter_lvfs_ops;
/* Slab cache for filter_mod_data entries used by the filter_fmd_*() helpers
 * below (allocated/freed via OBD_SLAB_ALLOC/OBD_SLAB_FREE). */
83 cfs_mem_cache_t *ll_fmd_cachep;
/* Journal commit callback: forwards the committed transno to the generic
 * target code and drops the export reference that was taken with
 * class_export_cb_get() when the callback was registered.
 * NOTE(review): this extract is missing lines (e.g. braces) — the original
 * line numbering jumps from 86 to 88. */
85 static void filter_commit_cb(struct obd_device *obd, __u64 transno,
86 void *cb_data, int error)
88 struct obd_export *exp = cb_data;
89 LASSERT(exp->exp_obd == obd);
90 obd_transno_commit_cb(obd, transno, exp, error);
91 class_export_cb_put(exp);
/* VBR (version based recovery) check: read the current on-disk version of
 * @inode and compare it against the pre-operation version carried in @oti.
 * A mismatch marks the export as VBR-failed (under exp_lock); otherwise the
 * current version is stored back into oti->oti_pre_version.
 * NOTE(review): local declarations (curr_version) and the return paths are
 * among the lines missing from this extract. */
94 int filter_version_get_check(struct obd_export *exp,
95 struct obd_trans_info *oti, struct inode *inode)
99 if (inode == NULL || oti == NULL)
102 curr_version = fsfilt_get_version(exp->exp_obd, inode);
/* -EOPNOTSUPP from the fs means versions are unsupported; skip the check. */
103 if ((__s64)curr_version == -EOPNOTSUPP)
105 /* VBR: version is checked always because costs nothing */
106 if (oti->oti_pre_version != 0 &&
107 oti->oti_pre_version != curr_version) {
108 CDEBUG(D_INODE, "Version mismatch "LPX64" != "LPX64"\n",
109 oti->oti_pre_version, curr_version);
110 cfs_spin_lock(&exp->exp_lock);
111 exp->exp_vbr_failed = 1;
112 cfs_spin_unlock(&exp->exp_lock);
115 oti->oti_pre_version = curr_version;
119 /* Assumes caller has already pushed us into the kernel context. */
/* Record the end of a filter transaction for recovery purposes: allocate a
 * new transno (or validate a replayed one) under lut_translock, stamp the
 * per-client lsd_client_data (last transno / pre-version / xid), optionally
 * tag the inode version, and write the client record to last_rcvd — either
 * via a journal commit callback or synchronously.
 * NOTE(review): many lines are missing from this extract (local declarations
 * of last_rcvd/off, RETURN paths, several conditionals), so the control flow
 * below is not complete as shown. */
120 int filter_finish_transno(struct obd_export *exp, struct inode *inode,
121 struct obd_trans_info *oti, int rc, int force_sync)
123 struct obd_device_target *obt = &exp->exp_obd->u.obt;
124 struct tg_export_data *ted = &exp->exp_target_data;
125 struct lr_server_data *lsd = class_server_data(exp->exp_obd);
126 struct lsd_client_data *lcd;
129 int err, log_pri = D_RPCTRACE;
131 /* Propagate error code. */
/* Nothing to record for non-replayable devices or when no trans info. */
135 if (!exp->exp_obd->obd_replayable || oti == NULL)
138 cfs_mutex_down(&ted->ted_lcd_lock);
140 /* if the export has already been disconnected, we have no last_rcvd slot,
141 * update server data with latest transno then */
143 cfs_mutex_up(&ted->ted_lcd_lock);
144 CWARN("commit transaction for disconnected client %s: rc %d\n",
145 exp->exp_client_uuid.uuid, rc);
146 err = filter_update_server_data(exp->exp_obd);
150 /* we don't allocate new transnos for replayed requests */
151 cfs_spin_lock(&obt->obt_lut->lut_translock);
152 if (oti->oti_transno == 0) {
/* New request: take the next server-wide transno. */
153 last_rcvd = le64_to_cpu(lsd->lsd_last_transno) + 1;
154 lsd->lsd_last_transno = cpu_to_le64(last_rcvd);
155 LASSERT(last_rcvd >= le64_to_cpu(lcd->lcd_last_transno));
/* Replay: reuse the client-supplied transno, advancing the server
 * high-water mark if needed. */
157 last_rcvd = oti->oti_transno;
158 if (last_rcvd > le64_to_cpu(lsd->lsd_last_transno))
159 lsd->lsd_last_transno = cpu_to_le64(last_rcvd);
/* A replayed transno below the on-disk one would corrupt recovery
 * state; fail VBR for this export instead of overwriting. */
160 if (unlikely(last_rcvd < le64_to_cpu(lcd->lcd_last_transno))) {
161 CERROR("Trying to overwrite bigger transno, on-disk: "
162 LPU64", new: "LPU64"\n",
163 le64_to_cpu(lcd->lcd_last_transno), last_rcvd);
164 cfs_spin_lock(&exp->exp_lock);
165 exp->exp_vbr_failed = 1;
166 cfs_spin_unlock(&exp->exp_lock);
167 cfs_spin_unlock(&obt->obt_lut->lut_translock);
168 cfs_mutex_up(&ted->ted_lcd_lock);
172 oti->oti_transno = last_rcvd;
174 lcd->lcd_last_transno = cpu_to_le64(last_rcvd);
175 lcd->lcd_pre_versions[0] = cpu_to_le64(oti->oti_pre_version);
176 lcd->lcd_last_xid = cpu_to_le64(oti->oti_xid);
177 cfs_spin_unlock(&obt->obt_lut->lut_translock);
/* Stamp the object inode with this transno for VBR. */
180 fsfilt_set_version(exp->exp_obd, inode, last_rcvd);
182 off = ted->ted_lr_off;
184 CERROR("%s: client idx %d is %lld\n", exp->exp_obd->obd_name,
185 ted->ted_lr_idx, ted->ted_lr_off);
188 class_export_cb_get(exp); /* released when the cb is called */
190 force_sync = fsfilt_add_journal_cb(exp->exp_obd,
/* Write this client's slot in last_rcvd; sync if the commit callback
 * could not be registered or the export requires sync writes. */
196 err = fsfilt_write_record(exp->exp_obd, obt->obt_rcvd_filp,
197 lcd, sizeof(*lcd), &off,
198 force_sync | exp->exp_need_sync);
200 filter_commit_cb(exp->exp_obd, last_rcvd, exp, err);
208 CDEBUG(log_pri, "wrote trans "LPU64" for client %s at #%d: err = %d\n",
209 last_rcvd, lcd->lcd_uuid, ted->ted_lr_idx, err);
210 cfs_mutex_up(&ted->ted_lcd_lock);
/* Debug-logging wrapper around dput(): logs the dentry name and the
 * post-put reference count, and asserts the count is still positive.
 * NOTE(review): the actual dput() call is among the lines missing from
 * this extract. */
214 void f_dput(struct dentry *dentry)
216 /* Can't go inside filter_ddelete because it can block */
217 CDEBUG(D_INODE, "putting %s: %p, count = %d\n",
218 dentry->d_name.name, dentry, atomic_read(&dentry->d_count) - 1);
219 LASSERT(atomic_read(&dentry->d_count) > 0);
/* Initialize the per-histogram spinlocks of a brw_stats structure. */
224 static void init_brw_stats(struct brw_stats *brw_stats)
227 for (i = 0; i < BRW_LAST; i++)
228 cfs_spin_lock_init(&brw_stats->hist[i].oh_lock);
/* Allocate and initialize a per-export lprocfs stats block sized for all
 * obd_ops entries plus the filter-specific counters, then register the
 * read_bytes/write_bytes counters.
 * NOTE(review): the allocation-failure check and return are missing from
 * this extract. */
231 static int lprocfs_init_rw_stats(struct obd_device *obd,
232 struct lprocfs_stats **stats)
236 num_stats = (sizeof(*obd->obd_type->typ_dt_ops) / sizeof(void *)) +
237 LPROC_FILTER_LAST - 1;
238 *stats = lprocfs_alloc_stats(num_stats, LPROCFS_STATS_FLAG_NOPERCPU);
242 lprocfs_init_ops_stats(LPROC_FILTER_LAST, *stats);
243 lprocfs_counter_init(*stats, LPROC_FILTER_READ_BYTES,
244 LPROCFS_CNTR_AVGMINMAX, "read_bytes", "bytes");
245 lprocfs_counter_init(*stats, LPROC_FILTER_WRITE_BYTES,
246 LPROCFS_CNTR_AVGMINMAX, "write_bytes", "bytes");
251 /* brw_stats are 2128, ops are 3916, ldlm are 204, so 6248 bytes per client,
252 plus the procfs overhead :( */
/* Set up per-NID procfs statistics for a newly connected export: brw_stats
 * histogram file, r/w op stats, and ldlm stats. The self-export is skipped.
 * NOTE(review): the "newnid" conditional, cleanup labels and return paths
 * are among the lines missing from this extract. */
253 static int filter_export_stats_init(struct obd_device *obd,
254 struct obd_export *exp,
260 if (obd_uuid_equals(&exp->exp_client_uuid, &obd->obd_uuid))
261 /* Self-export gets no proc entry */
264 rc = lprocfs_exp_setup(exp, client_nid, &newnid);
266 /* Mask error for already created
274 struct nid_stat *tmp = exp->exp_nid_stats;
275 LASSERT(tmp != NULL);
277 OBD_ALLOC(tmp->nid_brw_stats, sizeof(struct brw_stats));
278 if (tmp->nid_brw_stats == NULL)
279 GOTO(clean, rc = -ENOMEM);
281 init_brw_stats(tmp->nid_brw_stats);
282 rc = lprocfs_seq_create(exp->exp_nid_stats->nid_proc, "brw_stats",
283 0644, &filter_per_nid_stats_fops,
/* A failure to add brw_stats is non-fatal: warn and continue. */
286 CWARN("Error adding the brw_stats file\n");
288 rc = lprocfs_init_rw_stats(obd, &exp->exp_nid_stats->nid_stats);
292 rc = lprocfs_register_stats(tmp->nid_proc, "stats",
296 rc = lprocfs_nid_ldlm_stats_init(tmp);
306 /* Add client data to the FILTER. We use a bitmap to locate a free space
307 * in the last_rcvd file if cl_idx is -1 (i.e. a new client).
308 * Otherwise, we have just read the data from the last_rcvd file and
309 * we know its offset. */
/* NOTE(review): this extract is missing several lines (RETURN paths, parts
 * of the bitmap-retry loop, the new_client branch guards), so the flow
 * below is not complete as shown. */
310 static int filter_client_add(struct obd_device *obd, struct obd_export *exp,
313 struct obd_device_target *obt = &obd->u.obt;
314 struct tg_export_data *ted = &exp->exp_target_data;
315 struct lr_server_data *lsd = class_server_data(obd);
316 unsigned long *bitmap = obt->obt_lut->lut_client_bitmap;
317 int new_client = (cl_idx == -1);
321 LASSERT(bitmap != NULL);
322 LASSERTF(cl_idx > -2, "%d\n", cl_idx);
/* The self-export gets no last_rcvd slot. */
325 if (strcmp(ted->ted_lcd->lcd_uuid, obd->obd_uuid.uuid) == 0)
328 /* the bitmap operations can handle cl_idx > sizeof(long) * 8, so
329 * there's no need for extra complication here
332 cl_idx = cfs_find_first_zero_bit(bitmap, LR_MAX_CLIENTS);
334 if (cl_idx >= LR_MAX_CLIENTS) {
335 CERROR("no room for %u client - fix LR_MAX_CLIENTS\n",
/* Racing adders may take our bit; retry from the next free one. */
339 if (cfs_test_and_set_bit(cl_idx, bitmap)) {
340 cl_idx = cfs_find_next_zero_bit(bitmap, LR_MAX_CLIENTS,
/* For a known (recovered) index the bit must not already be set. */
345 if (cfs_test_and_set_bit(cl_idx, bitmap)) {
346 CERROR("FILTER client %d: bit already set in bitmap!\n",
352 ted->ted_lr_idx = cl_idx;
353 ted->ted_lr_off = le32_to_cpu(lsd->lsd_client_start) +
354 cl_idx * le16_to_cpu(lsd->lsd_client_size);
355 cfs_init_mutex(&ted->ted_lcd_lock);
356 LASSERTF(ted->ted_lr_off > 0, "ted_lr_off = %llu\n", ted->ted_lr_off);
358 CDEBUG(D_INFO, "client at index %d (%llu) with UUID '%s' added\n",
359 ted->ted_lr_idx, ted->ted_lr_off, ted->ted_lcd->lcd_uuid);
/* For brand-new clients, persist the slot to last_rcvd immediately. */
362 struct lvfs_run_ctxt saved;
363 loff_t off = ted->ted_lr_off;
367 CDEBUG(D_INFO, "writing client lcd at idx %u (%llu) (len %u)\n",
368 ted->ted_lr_idx,off,(unsigned int)sizeof(*ted->ted_lcd));
370 if (OBD_FAIL_CHECK(OBD_FAIL_TGT_CLIENT_ADD))
373 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
374 /* Transaction needed to fix bug 1403 */
375 handle = fsfilt_start(obd,
376 obt->obt_rcvd_filp->f_dentry->d_inode,
377 FSFILT_OP_SETATTR, NULL);
378 if (IS_ERR(handle)) {
379 rc = PTR_ERR(handle);
380 CERROR("unable to start transaction: rc %d\n", rc);
382 ted->ted_lcd->lcd_last_epoch = lsd->lsd_start_epoch;
383 exp->exp_last_request_time = cfs_time_current_sec();
384 rc = fsfilt_add_journal_cb(obd, 0, handle,
385 target_client_add_cb,
386 class_export_cb_get(exp));
/* Until the add commits, force synchronous ops on this export. */
388 cfs_spin_lock(&exp->exp_lock);
389 exp->exp_need_sync = 1;
390 cfs_spin_unlock(&exp->exp_lock);
392 rc = fsfilt_write_record(obd, obt->obt_rcvd_filp,
394 sizeof(*ted->ted_lcd),
395 &off, rc /* sync if no cb */);
397 obt->obt_rcvd_filp->f_dentry->d_inode,
400 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
403 CERROR("error writing %s client idx %u: rc %d\n",
404 LAST_RCVD, ted->ted_lr_idx, rc);
/* Remove a client's slot from last_rcvd on disconnect: flush the server
 * data (so last_transno survives), zero the client's UUID in its slot and
 * write it back, then (in code missing from this extract) clear the bitmap
 * bit. Self-exports and exports without lcd data are skipped.
 * NOTE(review): RETURN paths and the "free:" label body are among the
 * missing lines. */
411 static int filter_client_del(struct obd_export *exp)
413 struct tg_export_data *ted = &exp->exp_target_data;
414 struct obd_device_target *obt = &exp->exp_obd->u.obt;
415 struct lvfs_run_ctxt saved;
420 if (ted->ted_lcd == NULL)
423 /* XXX if lcd_uuid were a real obd_uuid, I could use obd_uuid_equals */
424 if (strcmp(ted->ted_lcd->lcd_uuid, exp->exp_obd->obd_uuid.uuid ) == 0)
427 LASSERT(obt->obt_lut->lut_client_bitmap != NULL);
429 off = ted->ted_lr_off;
431 CDEBUG(D_INFO, "freeing client at idx %u, offset %lld with UUID '%s'\n",
432 ted->ted_lr_idx, ted->ted_lr_off, ted->ted_lcd->lcd_uuid);
434 /* Don't clear ted_lr_idx here as it is likely also unset. At worst
435 * we leak a client slot that will be cleaned on the next recovery. */
437 CERROR("%s: client idx %d has med_off %lld\n",
438 exp->exp_obd->obd_name, ted->ted_lr_idx, off);
439 GOTO(free, rc = -EINVAL);
442 /* Clear the bit _after_ zeroing out the client so we don't
443 race with filter_client_add and zero out new clients.*/
444 if (!cfs_test_bit(ted->ted_lr_idx, obt->obt_lut->lut_client_bitmap)) {
445 CERROR("FILTER client %u: bit already clear in bitmap!!\n",
450 push_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
451 /* Make sure the server's last_transno is up to date.
452 * This should be done before zeroing client slot so last_transno will
453 * be in server data or in client data in case of failure */
454 filter_update_server_data(exp->exp_obd);
456 cfs_mutex_down(&ted->ted_lcd_lock);
457 memset(ted->ted_lcd->lcd_uuid, 0, sizeof ted->ted_lcd->lcd_uuid);
458 rc = fsfilt_write_record(exp->exp_obd, obt->obt_rcvd_filp,
460 sizeof(*ted->ted_lcd), &off, 0);
461 cfs_mutex_up(&ted->ted_lcd_lock);
462 pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
464 CDEBUG(rc == 0 ? D_INFO : D_ERROR,
465 "zero out client %s at idx %u/%llu in %s, rc %d\n",
466 ted->ted_lcd->lcd_uuid, ted->ted_lr_idx, ted->ted_lr_off,
473 /* drop fmd reference, free it if last ref. must be called with fed_lock held.*/
474 static inline void filter_fmd_put_nolock(struct filter_export_data *fed,
475 struct filter_mod_data *fmd)
477 LASSERT_SPIN_LOCKED(&fed->fed_lock);
/* On the last reference: unlink from the list, decrement the per-export
 * count and return the entry to the slab cache. */
478 if (--fmd->fmd_refcount == 0) {
479 /* XXX when we have persistent reservations and the handle
480 * is stored herein we need to drop it here. */
481 fed->fed_mod_count--;
482 cfs_list_del(&fmd->fmd_list);
483 OBD_SLAB_FREE(fmd, ll_fmd_cachep, sizeof(*fmd));
487 /* drop fmd reference, free it if last ref */
/* Locked wrapper: takes fed_lock, then drops the caller's fmd reference
 * via filter_fmd_put_nolock(). */
488 void filter_fmd_put(struct obd_export *exp, struct filter_mod_data *fmd)
490 struct filter_export_data *fed;
495 fed = &exp->exp_filter_data;
496 cfs_spin_lock(&fed->fed_lock);
497 filter_fmd_put_nolock(fed, fmd); /* caller reference */
498 cfs_spin_unlock(&fed->fed_lock);
501 /* expire entries from the end of the list if there are too many
502 * or they are too old */
/* Walks fed_mod_list (oldest first) dropping list references; stops once
 * the entries are young enough and the count is under fo_fmd_max_num.
 * @keep is skipped — presumably so a just-touched entry is not expired;
 * the skip check itself is among the lines missing from this extract. */
503 static void filter_fmd_expire_nolock(struct filter_obd *filter,
504 struct filter_export_data *fed,
505 struct filter_mod_data *keep)
507 struct filter_mod_data *fmd, *tmp;
509 cfs_list_for_each_entry_safe(fmd, tmp, &fed->fed_mod_list, fmd_list) {
513 if (cfs_time_before(jiffies, fmd->fmd_expire) &&
514 fed->fed_mod_count < filter->fo_fmd_max_num)
517 cfs_list_del_init(&fmd->fmd_list);
518 filter_fmd_put_nolock(fed, fmd); /* list reference */
/* Locked wrapper: expire stale fmd entries for this export. */
522 void filter_fmd_expire(struct obd_export *exp)
524 cfs_spin_lock(&exp->exp_filter_data.fed_lock);
525 filter_fmd_expire_nolock(&exp->exp_obd->u.filter,
526 &exp->exp_filter_data, NULL);
527 cfs_spin_unlock(&exp->exp_filter_data.fed_lock);
530 /* find specified objid, group in export fmd list.
531 * caller must hold fed_lock and take fmd reference itself */
/* Searches newest-first; on a hit the entry is moved to the list tail
 * (MRU position) and its expiry refreshed. Also opportunistically expires
 * old entries, keeping the found one. */
532 static struct filter_mod_data *filter_fmd_find_nolock(struct filter_obd *filter,
533 struct filter_export_data *fed,
534 obd_id objid, obd_seq group)
536 struct filter_mod_data *found = NULL, *fmd;
538 LASSERT_SPIN_LOCKED(&fed->fed_lock);
540 cfs_list_for_each_entry_reverse(fmd, &fed->fed_mod_list, fmd_list) {
541 if (fmd->fmd_id == objid && fmd->fmd_gr == group) {
543 cfs_list_del(&fmd->fmd_list);
544 cfs_list_add_tail(&fmd->fmd_list, &fed->fed_mod_list);
545 fmd->fmd_expire = jiffies + filter->fo_fmd_max_age;
550 filter_fmd_expire_nolock(filter, fed, found);
555 /* Find fmd based on objid and group, or return NULL if not found. */
/* Locked wrapper around filter_fmd_find_nolock(); takes a caller reference
 * on the found entry (the refcount bump is guarded by a NULL check that is
 * among the lines missing from this extract). */
556 struct filter_mod_data *filter_fmd_find(struct obd_export *exp,
557 obd_id objid, obd_seq group)
559 struct filter_mod_data *fmd;
561 cfs_spin_lock(&exp->exp_filter_data.fed_lock);
562 fmd = filter_fmd_find_nolock(&exp->exp_obd->u.filter,
563 &exp->exp_filter_data, objid, group);
565 fmd->fmd_refcount++; /* caller reference */
566 cfs_spin_unlock(&exp->exp_filter_data.fed_lock);
571 /* Find fmd based on objid and group, or create a new one if none is found.
572 * It is possible for this function to return NULL under memory pressure,
573 * or if objid = 0 is passed (which will only cause old entries to expire).
574 * Currently this is not fatal because any fmd state is transient and
575 * may also be freed when it gets sufficiently old. */
/* Allocation happens before taking fed_lock (slab alloc may sleep); if the
 * lookup then hits an existing entry, the preallocated one is freed. */
576 struct filter_mod_data *filter_fmd_get(struct obd_export *exp,
577 obd_id objid, obd_seq group)
579 struct filter_export_data *fed = &exp->exp_filter_data;
580 struct filter_mod_data *found = NULL, *fmd_new = NULL;
582 OBD_SLAB_ALLOC_PTR_GFP(fmd_new, ll_fmd_cachep, CFS_ALLOC_IO);
584 cfs_spin_lock(&fed->fed_lock);
585 found = filter_fmd_find_nolock(&exp->exp_obd->u.filter,fed,objid,group);
/* Not found: insert the preallocated entry at the MRU tail. */
588 cfs_list_add_tail(&fmd_new->fmd_list,
590 fmd_new->fmd_id = objid;
591 fmd_new->fmd_gr = group;
592 fmd_new->fmd_refcount++; /* list reference */
594 fed->fed_mod_count++;
/* Found (or alloc raced): discard the unused preallocation. */
596 OBD_SLAB_FREE(fmd_new, ll_fmd_cachep, sizeof(*fmd_new));
600 found->fmd_refcount++; /* caller reference */
601 found->fmd_expire = jiffies +
602 exp->exp_obd->u.filter.fo_fmd_max_age;
605 cfs_spin_unlock(&fed->fed_lock);
611 /* drop fmd list reference so it will disappear when last reference is put.
612 * This isn't so critical because it would in fact only affect the one client
613 * that is doing the unlink and at worst we have an stale entry referencing
614 * an object that should never be used again. */
/* NOTE(review): a function definition and a no-op #define for the same name
 * coexist below — the surrounding #ifdef/#else/#endif lines are evidently
 * among those missing from this extract. */
615 static void filter_fmd_drop(struct obd_export *exp, obd_id objid, obd_seq group)
617 struct filter_mod_data *found = NULL;
619 cfs_spin_lock(&exp->exp_filter_data.fed_lock);
620 found = filter_fmd_find_nolock(&exp->exp_filter_data, objid, group);
622 cfs_list_del_init(&found->fmd_list);
623 filter_fmd_put_nolock(&exp->exp_filter_data, found);
625 cfs_spin_unlock(&exp->exp_filter_data.fed_lock);
628 #define filter_fmd_drop(exp, objid, group)
631 /* remove all entries from fmd list */
/* Drops every list reference under fed_lock; entries with outstanding
 * caller references are freed later when those are put. */
632 static void filter_fmd_cleanup(struct obd_export *exp)
634 struct filter_export_data *fed = &exp->exp_filter_data;
635 struct filter_mod_data *fmd = NULL, *tmp;
637 cfs_spin_lock(&fed->fed_lock);
638 cfs_list_for_each_entry_safe(fmd, tmp, &fed->fed_mod_list, fmd_list) {
639 cfs_list_del_init(&fmd->fmd_list);
640 filter_fmd_put_nolock(fed, fmd);
642 cfs_spin_unlock(&fed->fed_lock);
/* Initialize per-export filter state on connect: fmd lock/list, the
 * "connecting" flag, the last_rcvd client slot (lut_client_alloc) and the
 * export's ldlm state.
 * NOTE(review): the error handling between the two allocations is among
 * the lines missing from this extract. */
645 static int filter_init_export(struct obd_export *exp)
648 cfs_spin_lock_init(&exp->exp_filter_data.fed_lock);
649 CFS_INIT_LIST_HEAD(&exp->exp_filter_data.fed_mod_list);
651 cfs_spin_lock(&exp->exp_lock);
652 exp->exp_connecting = 1;
653 cfs_spin_unlock(&exp->exp_lock);
654 rc = lut_client_alloc(exp);
656 rc = ldlm_init_export(exp);
/* Tear down and free the lu_target allocated by filter_init_server_data(). */
661 static int filter_free_server_data(struct obd_device_target *obt)
663 lut_fini(NULL, obt->obt_lut);
664 OBD_FREE_PTR(obt->obt_lut);
668 /* assumes caller is already in kernel ctxt */
/* Write the lr_server_data header (offset 0, async) to last_rcvd so the
 * on-disk last_transno and mount count are current. */
669 int filter_update_server_data(struct obd_device *obd)
671 struct file *filp = obd->u.obt.obt_rcvd_filp;
672 struct lr_server_data *lsd = class_server_data(obd);
677 CDEBUG(D_INODE, "server uuid : %s\n", lsd->lsd_uuid);
678 CDEBUG(D_INODE, "server last_rcvd : "LPU64"\n",
679 le64_to_cpu(lsd->lsd_last_transno));
680 CDEBUG(D_INODE, "server last_mount: "LPU64"\n",
681 le64_to_cpu(lsd->lsd_mount_count));
683 rc = fsfilt_write_record(obd, filp, lsd, sizeof(*lsd), &off, 0);
685 CERROR("error writing lr_server_data: rc = %d\n", rc);
/* Persist the in-memory last-allocated object id for @group to that
 * group's LAST_ID file (little-endian on disk), optionally synchronously.
 * Skipped with an error if the group's LAST_ID file was never set up. */
690 int filter_update_last_objid(struct obd_device *obd, obd_seq group,
693 struct filter_obd *filter = &obd->u.filter;
699 if (filter->fo_last_objid_files[group] == NULL) {
700 CERROR("Object seq "LPU64" not fully setup; not updating "
701 "last_objid\n", group);
705 CDEBUG(D_INODE, "%s: server last_objid for "POSTID"\n",
706 obd->obd_name, filter->fo_last_objids[group], group);
708 tmp = cpu_to_le64(filter->fo_last_objids[group]);
709 rc = fsfilt_write_record(obd, filter->fo_last_objid_files[group],
710 &tmp, sizeof(tmp), &off, force_sync);
712 CERROR("error writing seq "LPU64" last objid: rc = %d\n",
716 extern int ost_handle(struct ptlrpc_request *req);
717 /* assumes caller has already in kernel ctxt */
/* Read (or initialize, for a fresh disk) the last_rcvd file: validate the
 * server UUID and feature flags, then iterate the per-client slots creating
 * a "stale" export for each recorded client so recovery can proceed.
 * Finally write the updated header back (bumped mount count).
 * NOTE(review): this extract is missing many lines (allocations, RETURNs,
 * several conditionals and labels), so the flow below is not complete as
 * shown. */
718 static int filter_init_server_data(struct obd_device *obd, struct file * filp)
720 struct filter_obd *filter = &obd->u.filter;
721 struct lr_server_data *lsd;
722 struct lsd_client_data *lcd = NULL;
723 struct inode *inode = filp->f_dentry->d_inode;
724 unsigned long last_rcvd_size = i_size_read(inode);
725 struct lu_target *lut;
732 /* ensure padding in the struct is the correct size */
733 CLASSERT (offsetof(struct lr_server_data, lsd_padding) +
734 sizeof(lsd->lsd_padding) == LR_SERVER_SIZE);
735 CLASSERT (offsetof(struct lsd_client_data, lcd_padding) +
736 sizeof(lcd->lcd_padding) == LR_CLIENT_SIZE);
738 /* allocate and initialize lu_target */
742 rc = lut_init(NULL, lut, obd, NULL);
745 lsd = class_server_data(obd);
/* Empty last_rcvd: brand-new target, write a fresh header. */
746 if (last_rcvd_size == 0) {
747 LCONSOLE_WARN("%s: new disk, initializing\n", obd->obd_name);
749 memcpy(lsd->lsd_uuid, obd->obd_uuid.uuid,sizeof(lsd->lsd_uuid));
750 lsd->lsd_last_transno = 0;
751 mount_count = lsd->lsd_mount_count = 0;
752 lsd->lsd_server_size = cpu_to_le32(LR_SERVER_SIZE);
753 lsd->lsd_client_start = cpu_to_le32(LR_CLIENT_START);
754 lsd->lsd_client_size = cpu_to_le16(LR_CLIENT_SIZE);
755 lsd->lsd_subdir_count = cpu_to_le16(FILTER_SUBDIR_COUNT);
756 filter->fo_subdir_count = FILTER_SUBDIR_COUNT;
757 /* OBD_COMPAT_OST is set in filter_connect_internal when the
758 * MDS first connects and assigns the OST index number. */
759 lsd->lsd_feature_incompat = cpu_to_le32(OBD_INCOMPAT_COMMON_LR|
/* Existing last_rcvd: read and sanity-check the header. */
762 rc = fsfilt_read_record(obd, filp, lsd, sizeof(*lsd), &off);
764 CDEBUG(D_INODE,"OBD filter: error reading %s: rc %d\n",
/* UUID mismatch means the backing device was swapped. */
768 if (strcmp(lsd->lsd_uuid, obd->obd_uuid.uuid) != 0) {
769 LCONSOLE_ERROR_MSG(0x134, "Trying to start OBD %s "
770 "using the wrong disk %s. Were the "
771 "/dev/ assignments rearranged?\n",
772 obd->obd_uuid.uuid, lsd->lsd_uuid);
773 GOTO(err_lut, rc = -EINVAL);
775 mount_count = le64_to_cpu(lsd->lsd_mount_count);
776 filter->fo_subdir_count = le16_to_cpu(lsd->lsd_subdir_count);
778 /* Assume old last_rcvd format unless I_C_LR is set */
779 if (!(lsd->lsd_feature_incompat &
780 cpu_to_le32(OBD_INCOMPAT_COMMON_LR)))
781 lsd->lsd_last_transno = lsd->lsd_compat14;
783 /* OBD_COMPAT_OST is set in filter_connect_internal when the
784 * MDS first connects and assigns the OST index number. */
785 lsd->lsd_feature_incompat |= cpu_to_le32(OBD_INCOMPAT_COMMON_LR|
/* Refuse to mount over features we do not understand. */
789 if (lsd->lsd_feature_incompat & ~cpu_to_le32(FILTER_INCOMPAT_SUPP)) {
790 CERROR("%s: unsupported incompat filesystem feature(s) %x\n",
791 obd->obd_name, le32_to_cpu(lsd->lsd_feature_incompat) &
792 ~FILTER_INCOMPAT_SUPP);
793 GOTO(err_lut, rc = -EINVAL);
795 if (lsd->lsd_feature_rocompat & ~cpu_to_le32(FILTER_ROCOMPAT_SUPP)) {
796 CERROR("%s: unsupported read-only filesystem feature(s) %x\n",
797 obd->obd_name, le32_to_cpu(lsd->lsd_feature_rocompat) &
798 ~FILTER_ROCOMPAT_SUPP);
799 /* Do something like remount filesystem read-only */
800 GOTO(err_lut, rc = -EINVAL);
803 start_epoch = le32_to_cpu(lsd->lsd_start_epoch);
805 CDEBUG(D_INODE, "%s: server start_epoch : %#x\n",
806 obd->obd_name, start_epoch);
807 CDEBUG(D_INODE, "%s: server last_transno : "LPX64"\n",
808 obd->obd_name, le64_to_cpu(lsd->lsd_last_transno));
809 CDEBUG(D_INODE, "%s: server mount_count: "LPU64"\n",
810 obd->obd_name, mount_count + 1);
811 CDEBUG(D_INODE, "%s: server data size: %u\n",
812 obd->obd_name, le32_to_cpu(lsd->lsd_server_size));
813 CDEBUG(D_INODE, "%s: per-client data start: %u\n",
814 obd->obd_name, le32_to_cpu(lsd->lsd_client_start));
815 CDEBUG(D_INODE, "%s: per-client data size: %u\n",
816 obd->obd_name, le32_to_cpu(lsd->lsd_client_size));
817 CDEBUG(D_INODE, "%s: server subdir_count: %u\n",
818 obd->obd_name, le16_to_cpu(lsd->lsd_subdir_count));
819 CDEBUG(D_INODE, "%s: last_rcvd clients: %lu\n", obd->obd_name,
820 last_rcvd_size <= le32_to_cpu(lsd->lsd_client_start) ? 0 :
821 (last_rcvd_size - le32_to_cpu(lsd->lsd_client_start)) /
822 le16_to_cpu(lsd->lsd_client_size));
824 if (!obd->obd_replayable) {
825 CWARN("%s: recovery support OFF\n", obd->obd_name);
831 GOTO(err_client, rc = -ENOMEM);
/* Walk each recorded client slot in last_rcvd. */
833 for (cl_idx = 0, off = le32_to_cpu(lsd->lsd_client_start);
834 off < last_rcvd_size; cl_idx++) {
836 struct obd_export *exp;
837 struct filter_export_data *fed;
839 /* Don't assume off is incremented properly by
840 * fsfilt_read_record(), in case sizeof(*lcd)
841 * isn't the same as lsd->lsd_client_size. */
842 off = le32_to_cpu(lsd->lsd_client_start) +
843 cl_idx * le16_to_cpu(lsd->lsd_client_size);
844 rc = fsfilt_read_record(obd, filp, lcd, sizeof(*lcd), &off);
846 CERROR("error reading FILT %s idx %d off %llu: rc %d\n",
847 LAST_RCVD, cl_idx, off, rc);
848 break; /* read error shouldn't cause startup to fail */
851 if (lcd->lcd_uuid[0] == '\0') {
852 CDEBUG(D_INFO, "skipping zeroed client at offset %d\n",
857 check_lcd(obd->obd_name, cl_idx, lcd);
859 last_rcvd = le64_to_cpu(lcd->lcd_last_transno);
861 CDEBUG(D_HA, "RCVRNG CLIENT uuid: %s idx: %d lr: "LPU64
862 " srv lr: "LPU64"\n", lcd->lcd_uuid, cl_idx,
863 last_rcvd, le64_to_cpu(lsd->lsd_last_transno));
865 /* These exports are cleaned up by filter_disconnect(), so they
866 * need to be set up like real exports as filter_connect() does.
868 exp = class_new_export(obd, (struct obd_uuid *)lcd->lcd_uuid);
870 if (PTR_ERR(exp) == -EALREADY) {
871 /* export already exists, zero out this one */
872 CERROR("Duplicate export %s!\n", lcd->lcd_uuid);
876 GOTO(err_client, rc = PTR_ERR(exp));
879 fed = &exp->exp_filter_data;
880 *fed->fed_ted.ted_lcd = *lcd;
881 fed->fed_group = 0; /* will be assigned at connect */
882 filter_export_stats_init(obd, exp, NULL);
883 rc = filter_client_add(obd, exp, cl_idx);
884 /* can't fail for existing client */
885 LASSERTF(rc == 0, "rc = %d\n", rc);
887 /* VBR: set export last committed */
888 exp->exp_last_committed = last_rcvd;
889 cfs_spin_lock(&exp->exp_lock);
890 exp->exp_connecting = 0;
891 exp->exp_in_recovery = 0;
892 cfs_spin_unlock(&exp->exp_lock);
893 obd->obd_max_recoverable_clients++;
894 class_export_put(exp);
/* Track the highest client transno seen as the server's. */
896 if (last_rcvd > le64_to_cpu(lsd->lsd_last_transno))
897 lsd->lsd_last_transno = cpu_to_le64(last_rcvd);
901 obd->obd_last_committed = le64_to_cpu(lsd->lsd_last_transno);
903 obd->u.obt.obt_mount_count = mount_count + 1;
904 lsd->lsd_mount_count = cpu_to_le64(obd->u.obt.obt_mount_count);
906 /* save it, so mount count and last_transno is current */
907 rc = filter_update_server_data(obd);
909 GOTO(err_client, rc);
914 class_disconnect_exports(obd);
916 filter_free_server_data(&obd->u.obt);
/* Release all per-group resources built by filter_read_group_internal():
 * group dentries, LAST_ID files, per-group subdirectory dentries, the
 * last-objid array, and finally the O/ directory dentry itself.
 * NOTE(review): the dput()/filp_close() calls inside the loops are among
 * the lines missing from this extract. */
920 static int filter_cleanup_groups(struct obd_device *obd)
922 struct filter_obd *filter = &obd->u.filter;
924 struct dentry *dentry;
928 if (filter->fo_dentry_O_groups != NULL) {
929 for (i = 0; i < filter->fo_group_count; i++) {
930 dentry = filter->fo_dentry_O_groups[i];
934 OBD_FREE(filter->fo_dentry_O_groups,
935 filter->fo_group_count *
936 sizeof(*filter->fo_dentry_O_groups));
937 filter->fo_dentry_O_groups = NULL;
939 if (filter->fo_last_objid_files != NULL) {
940 for (i = 0; i < filter->fo_group_count; i++) {
941 filp = filter->fo_last_objid_files[i];
945 OBD_FREE(filter->fo_last_objid_files,
946 filter->fo_group_count *
947 sizeof(*filter->fo_last_objid_files));
948 filter->fo_last_objid_files = NULL;
950 if (filter->fo_dentry_O_sub != NULL) {
951 for (i = 0; i < filter->fo_group_count; i++) {
952 for (j = 0; j < filter->fo_subdir_count; j++) {
953 dentry = filter->fo_dentry_O_sub[i].dentry[j];
958 OBD_FREE(filter->fo_dentry_O_sub,
959 filter->fo_group_count *
960 sizeof(*filter->fo_dentry_O_sub));
961 filter->fo_dentry_O_sub = NULL;
963 if (filter->fo_last_objids != NULL) {
964 OBD_FREE(filter->fo_last_objids,
965 filter->fo_group_count *
966 sizeof(*filter->fo_last_objids));
967 filter->fo_last_objids = NULL;
969 if (filter->fo_dentry_O != NULL) {
970 f_dput(filter->fo_dentry_O);
971 filter->fo_dentry_O = NULL;
/* Persist the highest object group seen to the LAST_GROUP file if @group
 * exceeds the committed value; the write is synchronous. Caches the new
 * committed group in fo_committed_group to skip redundant writes. */
976 static int filter_update_last_group(struct obd_device *obd, int group)
978 struct filter_obd *filter = &obd->u.filter;
979 struct file *filp = NULL;
980 int last_group = 0, rc;
/* Fast path: already committed at least this group. */
984 if (group <= filter->fo_committed_group)
987 filp = filp_open("LAST_GROUP", O_RDWR, 0700);
991 CERROR("cannot open LAST_GROUP: rc = %d\n", rc);
995 rc = fsfilt_read_record(obd, filp, &last_group, sizeof(__u32), &off);
997 CDEBUG(D_INODE, "error reading LAST_GROUP: rc %d\n",rc);
1001 CDEBUG(D_INODE, "%s: previous %d, new %d\n",
1002 obd->obd_name, last_group, group);
1006 /* must be sync: bXXXX */
1007 rc = fsfilt_write_record(obd, filp, &last_group, sizeof(__u32), &off, 1);
1009 CDEBUG(D_INODE, "error updating LAST_GROUP: rc %d\n", rc);
1013 filter->fo_committed_group = group;
1016 filp_close(filp, 0);
/* Set up one object group: look up (or create) the O/<group> directory and
 * its LAST_ID file, create per-group subdirectories when configured, grow
 * the per-group arrays (last objids, subdirs, dentries, files) if this
 * group index exceeds the current count, and load/initialize the group's
 * last object id.
 * NOTE(review): several lines are missing from this extract (the create
 * branch guard, array-swap guards, parts of the cleanup ladder), so the
 * flow below is not complete as shown. */
1020 static int filter_read_group_internal(struct obd_device *obd, int group,
1023 struct filter_obd *filter = &obd->u.filter;
1024 __u64 *new_objids = NULL;
1025 struct filter_subdirs *new_subdirs = NULL, *tmp_subdirs = NULL;
1026 struct dentry **new_groups = NULL;
1027 struct file **new_files = NULL;
1028 struct dentry *dentry;
1030 int old_count = filter->fo_group_count, rc, stage = 0, i;
1034 int len = group + 1;
1036 snprintf(name, 24, "%d", group);
/* Lookup-only path: the group directory must already exist. */
1040 dentry = ll_lookup_one_len(name, filter->fo_dentry_O,
1042 if (IS_ERR(dentry)) {
1043 CERROR("Cannot lookup expected object group %d: %ld\n",
1044 group, PTR_ERR(dentry));
1045 RETURN(PTR_ERR(dentry));
/* Create path: make O/<group> if missing. */
1048 dentry = simple_mkdir(filter->fo_dentry_O,
1049 obd->u.obt.obt_vfsmnt, name, 0700, 1);
1050 if (IS_ERR(dentry)) {
1051 CERROR("cannot lookup/create O/%s: rc = %ld\n", name,
1053 RETURN(PTR_ERR(dentry));
1058 snprintf(name, 24, "O/%d/LAST_ID", group);
1060 filp = filp_open(name, O_CREAT | O_RDWR, 0700);
1062 CERROR("cannot create %s: rc = %ld\n", name, PTR_ERR(filp));
1063 GOTO(cleanup, rc = PTR_ERR(filp));
1067 rc = fsfilt_read_record(obd, filp, &last_objid, sizeof(__u64), &off);
1069 CDEBUG(D_INODE, "error reading %s: rc %d\n", name, rc);
/* MDT-owned groups get hashed "d<N>" subdirectories. */
1073 if (filter->fo_subdir_count && fid_seq_is_mdt(group)) {
1074 OBD_ALLOC(tmp_subdirs, sizeof(*tmp_subdirs));
1075 if (tmp_subdirs == NULL)
1076 GOTO(cleanup, rc = -ENOMEM);
1079 for (i = 0; i < filter->fo_subdir_count; i++) {
1081 snprintf(dir, sizeof(dir), "d%u", i);
1083 tmp_subdirs->dentry[i] = simple_mkdir(dentry,
1084 obd->u.obt.obt_vfsmnt,
1086 if (IS_ERR(tmp_subdirs->dentry[i])) {
1087 rc = PTR_ERR(tmp_subdirs->dentry[i]);
1088 CERROR("can't lookup/create O/%d/%s: rc = %d\n",
1093 CDEBUG(D_INODE, "got/created O/%d/%s: %p\n", group, dir,
1094 tmp_subdirs->dentry[i]);
1098 /* 'group' is an index; we need an array of length 'group + 1' */
1099 if (group + 1 > old_count) {
1100 OBD_ALLOC(new_objids, len * sizeof(*new_objids));
1101 OBD_ALLOC(new_subdirs, len * sizeof(*new_subdirs));
1102 OBD_ALLOC(new_groups, len * sizeof(*new_groups));
1103 OBD_ALLOC(new_files, len * sizeof(*new_files));
1105 if (new_objids == NULL || new_subdirs == NULL ||
1106 new_groups == NULL || new_files == NULL)
1107 GOTO(cleanup, rc = -ENOMEM);
/* Copy old array contents across, then free the old arrays. */
1110 memcpy(new_objids, filter->fo_last_objids,
1111 old_count * sizeof(*new_objids));
1112 memcpy(new_subdirs, filter->fo_dentry_O_sub,
1113 old_count * sizeof(*new_subdirs));
1114 memcpy(new_groups, filter->fo_dentry_O_groups,
1115 old_count * sizeof(*new_groups));
1116 memcpy(new_files, filter->fo_last_objid_files,
1117 old_count * sizeof(*new_files));
1119 OBD_FREE(filter->fo_last_objids,
1120 old_count * sizeof(*new_objids));
1121 OBD_FREE(filter->fo_dentry_O_sub,
1122 old_count * sizeof(*new_subdirs));
1123 OBD_FREE(filter->fo_dentry_O_groups,
1124 old_count * sizeof(*new_groups));
1125 OBD_FREE(filter->fo_last_objid_files,
1126 old_count * sizeof(*new_files));
1128 filter->fo_last_objids = new_objids;
1129 filter->fo_dentry_O_sub = new_subdirs;
1130 filter->fo_dentry_O_groups = new_groups;
1131 filter->fo_last_objid_files = new_files;
1132 filter->fo_group_count = len;
1135 filter->fo_dentry_O_groups[group] = dentry;
1136 filter->fo_last_objid_files[group] = filp;
1137 if (filter->fo_subdir_count && fid_seq_is_mdt(group)) {
1138 filter->fo_dentry_O_sub[group] = *tmp_subdirs;
1139 OBD_FREE(tmp_subdirs, sizeof(*tmp_subdirs));
1142 filter_update_last_group(obd, group);
/* Empty LAST_ID file: seed with the initial object id and sync it. */
1144 if (i_size_read(filp->f_dentry->d_inode) == 0) {
1145 filter->fo_last_objids[group] = FILTER_INIT_OBJID;
1146 rc = filter_update_last_objid(obd, group, 1);
1150 filter->fo_last_objids[group] = le64_to_cpu(last_objid);
1151 CDEBUG(D_INODE, "%s: server last_objid group %d: "LPU64"\n",
1152 obd->obd_name, group, last_objid);
/* Error cleanup ladder: free whatever was allocated before the failure. */
1157 if (new_objids != NULL)
1158 OBD_FREE(new_objids, len * sizeof(*new_objids));
1159 if (new_subdirs != NULL)
1160 OBD_FREE(new_subdirs, len * sizeof(*new_subdirs));
1161 if (new_groups != NULL)
1162 OBD_FREE(new_groups, len * sizeof(*new_groups));
1163 if (new_files != NULL)
1164 OBD_FREE(new_files, len * sizeof(*new_files));
1166 if (filter->fo_subdir_count && fid_seq_is_mdt(group)) {
1167 for (i = 0; i < filter->fo_subdir_count; i++) {
1168 if (tmp_subdirs->dentry[i] != NULL)
1169 dput(tmp_subdirs->dentry[i]);
1171 OBD_FREE(tmp_subdirs, sizeof(*tmp_subdirs));
1174 filp_close(filp, 0);
/* Initialize on-disk state for all object groups from the currently known
 * fo_group_count up to and including @last_group.  Serialized under
 * fo_init_lock so that concurrent initializers cannot interleave.
 * @create is passed through to filter_read_group_internal(), which also
 * advances fo_group_count as groups are set up. */
1181 static int filter_read_groups(struct obd_device *obd, int last_group,
1184 struct filter_obd *filter = &obd->u.filter;
1185 int old_count, group, rc = 0;
1187 cfs_down(&filter->fo_init_lock);
1188 old_count = filter->fo_group_count;
/* inclusive loop: group == last_group must also be initialized */
1189 for (group = old_count; group <= last_group; group++) {
1190 rc = filter_read_group_internal(obd, group, create);
1194 cfs_up(&filter->fo_init_lock);
1198 /* FIXME: object groups */
/* Create/open the "O" object directory and the LAST_GROUP file, read the
 * highest committed group number from it (defaulting to FID_SEQ_OST_MDT0
 * when the file is new/unreadable), then initialize every group up to it
 * via filter_read_groups().  cleanup_phase tracks how far setup got so the
 * error path can unwind exactly what was created. */
1199 static int filter_prep_groups(struct obd_device *obd)
1201 struct filter_obd *filter = &obd->u.filter;
1202 struct dentry *O_dentry;
1204 int last_group, rc = 0, cleanup_phase = 0;
1208 O_dentry = simple_mkdir(cfs_fs_pwd(current->fs), obd->u.obt.obt_vfsmnt,
1210 CDEBUG(D_INODE, "got/created O: %p\n", O_dentry);
1211 if (IS_ERR(O_dentry)) {
1212 rc = PTR_ERR(O_dentry);
1213 CERROR("cannot open/create O: rc = %d\n", rc);
1216 filter->fo_dentry_O = O_dentry;
1217 cleanup_phase = 1; /* O_dentry */
1219 /* we have to initialize all groups before first connections from
1220 * clients because they may send create/destroy for any group -bzzz */
1221 filp = filp_open("LAST_GROUP", O_CREAT | O_RDWR, 0700);
1223 CERROR("cannot create LAST_GROUP: rc = %ld\n", PTR_ERR(filp));
1224 GOTO(cleanup, rc = PTR_ERR(filp));
1226 cleanup_phase = 2; /* filp */
1228 rc = fsfilt_read_record(obd, filp, &last_group, sizeof(__u32), &off);
1230 CDEBUG(D_INODE, "error reading LAST_GROUP: rc %d\n", rc);
/* no/invalid record on disk: start from the base MDT0 group */
1235 last_group = FID_SEQ_OST_MDT0;
1237 CWARN("%s: initialize groups [%d,%d]\n", obd->obd_name,
1238 FID_SEQ_OST_MDT0, last_group);
1239 filter->fo_committed_group = last_group;
1240 rc = filter_read_groups(obd, last_group, 1);
1244 filp_close(filp, 0);
/* error unwind: fall-through cases undo each completed phase */
1248 switch (cleanup_phase) {
1250 filp_close(filp, 0);
1252 filter_cleanup_groups(obd);
1253 f_dput(filter->fo_dentry_O);
1254 filter->fo_dentry_O = NULL;
1262 /* setup the object store with correct subdirectories */
/* Top-level on-disk preparation, run inside the OBD's lvfs context:
 *  1) open/create LAST_RCVD and initialize server/recovery data from it,
 *  2) open/create the HEALTH_CHECK file and verify I/O health,
 *  3) set up all object groups (filter_prep_groups()).
 * Error labels unwind in reverse order of acquisition. */
1263 static int filter_prep(struct obd_device *obd)
1265 struct lvfs_run_ctxt saved;
1266 struct filter_obd *filter = &obd->u.filter;
1268 struct inode *inode;
1272 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1273 file = filp_open(LAST_RCVD, O_RDWR | O_CREAT | O_LARGEFILE, 0700);
1274 if (!file || IS_ERR(file)) {
1276 CERROR("OBD filter: cannot open/create %s: rc = %d\n",
1280 obd->u.obt.obt_rcvd_filp = file;
/* a dangling/odd LAST_RCVD (e.g. symlink or dir) is refused outright */
1281 if (!S_ISREG(file->f_dentry->d_inode->i_mode)) {
1282 CERROR("%s is not a regular file!: mode = %o\n", LAST_RCVD,
1283 file->f_dentry->d_inode->i_mode);
1284 GOTO(err_filp, rc = -ENOENT);
1287 inode = file->f_dentry->d_parent->d_inode;
1288 /* We use i_op->unlink directly in filter_vfs_unlink() */
1289 if (!inode->i_op || !inode->i_op->create || !inode->i_op->unlink) {
1290 CERROR("%s: filesystem does not support create/unlink ops\n",
1292 GOTO(err_filp, rc = -EOPNOTSUPP);
1295 rc = filter_init_server_data(obd, file);
1297 CERROR("cannot read %s: rc = %d\n", LAST_RCVD, rc);
1300 LASSERT(obd->u.obt.obt_lut);
1301 target_recovery_init(obd->u.obt.obt_lut, ost_handle);
1303 /* open and create health check io file*/
1304 file = filp_open(HEALTH_CHECK, O_RDWR | O_CREAT, 0644);
1307 CERROR("OBD filter: cannot open/create %s rc = %d\n",
1309 GOTO(err_server_data, rc);
1311 filter->fo_obt.obt_health_check_filp = file;
1312 if (!S_ISREG(file->f_dentry->d_inode->i_mode)) {
1313 CERROR("%s is not a regular file!: mode = %o\n", HEALTH_CHECK,
1314 file->f_dentry->d_inode->i_mode);
1315 GOTO(err_health_check, rc = -ENOENT);
1317 rc = lvfs_check_io_health(obd, file);
1319 GOTO(err_health_check, rc);
1321 rc = filter_prep_groups(obd);
1323 GOTO(err_health_check, rc);
1325 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
/* error unwind: close health-check file, tear down recovery/server
 * data, then close LAST_RCVD; pointers are NULLed to avoid reuse */
1330 if (filp_close(filter->fo_obt.obt_health_check_filp, 0))
1331 CERROR("can't close %s after error\n", HEALTH_CHECK);
1332 filter->fo_obt.obt_health_check_filp = NULL;
1334 target_recovery_fini(obd);
1335 filter_free_server_data(&obd->u.obt);
1337 if (filp_close(obd->u.obt.obt_rcvd_filp, 0))
1338 CERROR("can't close %s after error\n", LAST_RCVD);
1339 obd->u.obt.obt_rcvd_filp = NULL;
1343 /* cleanup the filter: write last used object id to status file */
/* Teardown counterpart of filter_prep(): flush server data and each
 * group's last objid to disk (syncing only on the final group), close
 * LAST_RCVD and HEALTH_CHECK, release group state and capability data.
 * Errors are logged but teardown continues regardless. */
1344 static void filter_post(struct obd_device *obd)
1346 struct lvfs_run_ctxt saved;
1347 struct filter_obd *filter = &obd->u.filter;
1350 /* XXX: filter_update_lastobjid used to call fsync_dev. It might be
1351 * best to start a transaction with h_sync, because we removed this
1354 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1355 rc = filter_update_server_data(obd);
1357 CERROR("error writing server data: rc = %d\n", rc);
1359 for (i = 0; i < filter->fo_group_count; i++) {
/* second arg forces a sync only for the last group's write */
1360 rc = filter_update_last_objid(obd, i,
1361 (i == filter->fo_group_count - 1));
1363 CERROR("error writing group %d lastobjid: rc = %d\n",
1367 rc = filp_close(obd->u.obt.obt_rcvd_filp, 0);
1368 obd->u.obt.obt_rcvd_filp = NULL;
1370 CERROR("error closing %s: rc = %d\n", LAST_RCVD, rc);
1372 rc = filp_close(filter->fo_obt.obt_health_check_filp, 0);
1373 filter->fo_obt.obt_health_check_filp = NULL;
1375 CERROR("error closing %s: rc = %d\n", HEALTH_CHECK, rc);
1377 filter_cleanup_groups(obd);
1378 filter_free_server_data(&obd->u.obt);
1379 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1381 filter_free_capa_keys(filter);
1382 cleanup_capa_hash(filter->fo_capa_hash);
/* Record @id as the last allocated object id for @group, under
 * fo_objidlock so readers (filter_last_id()) see a consistent value.
 * NOTE(review): the assertion allows group == fo_group_count, yet
 * fo_last_objids appears to hold fo_group_count entries (see the
 * allocation in filter_read_group_internal); confirm callers never pass
 * the boundary value, or tighten the bound to "<". */
1385 static void filter_set_last_id(struct filter_obd *filter,
1386 obd_id id, obd_seq group)
1388 LASSERT(group <= filter->fo_group_count);
1390 cfs_spin_lock(&filter->fo_objidlock);
1391 filter->fo_last_objids[group] = id;
1392 cfs_spin_unlock(&filter->fo_objidlock);
/* Return the last allocated object id for @group.  The spinlock gives a
 * consistent read against filter_set_last_id() writers.
 * NOTE(review): same "<=" bound as filter_set_last_id(); see the note
 * there about the array length. */
1395 obd_id filter_last_id(struct filter_obd *filter, obd_seq group)
1398 LASSERT(group <= filter->fo_group_count);
1399 LASSERT(filter->fo_last_objids != NULL);
1401 /* FIXME: object groups */
1402 cfs_spin_lock(&filter->fo_objidlock);
1403 id = filter->fo_last_objids[group];
1404 cfs_spin_unlock(&filter->fo_objidlock);
/* Take the parent directory's i_mutex (PARENT lockdep class) before any
 * lookup/journal operation -- see the ordering invariant at the top of
 * this file.  Released by filter_parent_unlock(). */
1408 static int filter_lock_dentry(struct obd_device *obd, struct dentry *dparent)
1410 LOCK_INODE_MUTEX_PARENT(dparent->d_inode);
1414 /* We never dget the object parent, so DON'T dput it either */
/* Map (group, objid) to the cached dentry of the directory the object
 * lives in: the per-group "O/<group>" dentry, or -- for MDT groups when
 * subdirectories are configured -- one of the hashed subdirectories.
 * NOTE(review): the "& (fo_subdir_count - 1)" mask is only a valid hash
 * when fo_subdir_count is a power of two -- presumably enforced at
 * setup; confirm. */
1415 struct dentry *filter_parent(struct obd_device *obd, obd_seq group, obd_id objid)
1417 struct filter_obd *filter = &obd->u.filter;
1418 struct filter_subdirs *subdirs;
1420 if (group >= filter->fo_group_count) /* FIXME: object groups */
1421 return ERR_PTR(-EBADF);
1423 if (!fid_seq_is_mdt(group) || filter->fo_subdir_count == 0)
1424 return filter->fo_dentry_O_groups[group];
1426 subdirs = &filter->fo_dentry_O_sub[group];
1427 return subdirs->dentry[objid & (filter->fo_subdir_count - 1)];
1430 /* We never dget the object parent, so DON'T dput it either */
/* Like filter_parent(), but additionally takes the parent's i_mutex via
 * filter_lock_dentry().  Reports via fsfilt_check_slow() if acquiring
 * the lock took unusually long.  Returns the locked dentry, or an
 * ERR_PTR; the caller must filter_parent_unlock() on success. */
1431 struct dentry *filter_parent_lock(struct obd_device *obd, obd_seq group,
1434 unsigned long now = jiffies;
1435 struct dentry *dparent = filter_parent(obd, group, objid);
1438 if (IS_ERR(dparent))
1440 if (dparent == NULL)
1441 return ERR_PTR(-ENOENT);
1443 rc = filter_lock_dentry(obd, dparent);
1444 fsfilt_check_slow(obd, now, "parent lock");
1445 return rc ? ERR_PTR(rc) : dparent;
1448 /* We never dget the object parent, so DON'T dput it either */
/* Release the parent i_mutex taken by filter_parent_lock(). */
1449 static void filter_parent_unlock(struct dentry *dparent)
1451 UNLOCK_INODE_MUTEX(dparent->d_inode);
1454 /* How to get files, dentries, inodes from object id's.
1456 * If dir_dentry is passed, the caller has already locked the parent
1457 * appropriately for this operation (normally a write lock). If
1458 * dir_dentry is NULL, we do a read lock while we do the lookup to
1459 * avoid races with create/destroy and such changing the directory
1460 * internal to the filesystem code. */
/* Look up the dentry for object (group, id).  Rejects id 0 (-ESTALE) and
 * honors the OBD_FAIL_OST_ENOENT fault-injection point (skipped while
 * destroys are in progress, to not break orphan recovery).  The returned
 * dentry holds a reference the caller must drop; a negative dentry (no
 * inode) is a valid result for a nonexistent object. */
1461 struct dentry *filter_fid2dentry(struct obd_device *obd,
1462 struct dentry *dir_dentry,
1463 obd_seq group, obd_id id)
1465 struct dentry *dparent = dir_dentry;
1466 struct dentry *dchild;
1471 if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOENT) &&
1472 obd->u.filter.fo_destroys_in_progress == 0) {
1473 /* don't fail lookups for orphan recovery, it causes
1474 * later LBUGs when objects still exist during precreate */
1475 CDEBUG(D_INFO, "*** obd_fail_loc=%x ***\n",OBD_FAIL_OST_ENOENT);
1476 RETURN(ERR_PTR(-ENOENT));
1479 CERROR("fatal: invalid object id 0\n");
1480 RETURN(ERR_PTR(-ESTALE));
/* object file name is simply the decimal object id */
1483 len = sprintf(name, LPU64, id);
1484 if (dir_dentry == NULL) {
1485 dparent = filter_parent_lock(obd, group, id);
1486 if (IS_ERR(dparent)) {
1487 CERROR("%s: error getting object "POSTID
1488 " parent: rc %ld\n", obd->obd_name,
1489 id, group, PTR_ERR(dparent));
1493 CDEBUG(D_INODE, "looking up object O/%.*s/%s\n",
1494 dparent->d_name.len, dparent->d_name.name, name);
1495 /* dparent is already locked here, so we cannot use ll_lookup_one_len() */
1496 dchild = lookup_one_len(name, dparent, len);
/* only unlock if we took the lock ourselves above */
1497 if (dir_dentry == NULL)
1498 filter_parent_unlock(dparent);
1499 if (IS_ERR(dchild)) {
1500 CERROR("%s: object "LPU64":"LPU64" lookup error: rc %ld\n",
1501 obd->obd_name, id, group, PTR_ERR(dchild));
1505 if (dchild->d_inode != NULL && is_bad_inode(dchild->d_inode)) {
1506 CERROR("%s: got bad object "LPU64" inode %lu\n",
1507 obd->obd_name, id, dchild->d_inode->i_ino);
1509 RETURN(ERR_PTR(-ENOENT));
1512 CDEBUG(D_INODE, "got child objid %s: %p, count = %d\n",
1513 name, dchild, atomic_read(&dchild->d_count));
1515 LASSERT(atomic_read(&dchild->d_count) > 0);
/* Before destroying an object, take a server-local PW extent lock over
 * the whole object [0, EOF] with LDLM_AST_DISCARD_DATA, which forces
 * clients holding conflicting locks to drop their cached pages rather
 * than flush them.  The lock handle is returned via @lockh and released
 * later by filter_fini_destroy(). */
1520 static int filter_prepare_destroy(struct obd_device *obd, obd_id objid,
1521 obd_id group, struct lustre_handle *lockh)
1523 int flags = LDLM_AST_DISCARD_DATA, rc;
1524 struct ldlm_res_id res_id;
1525 ldlm_policy_data_t policy = { .l_extent = { 0, OBD_OBJECT_EOF } };
1528 osc_build_res_name(objid, group, &res_id);
1529 /* Tell the clients that the object is gone now and that they should
1530 * throw away any cached pages. */
1531 rc = ldlm_cli_enqueue_local(obd->obd_namespace, &res_id, LDLM_EXTENT,
1532 &policy, LCK_PW, &flags, ldlm_blocking_ast,
1533 ldlm_completion_ast, NULL, NULL, 0, NULL,
/* Drop the PW destroy lock taken by filter_prepare_destroy(), if it was
 * actually granted (handle may be unused on the error path). */
1540 static void filter_fini_destroy(struct obd_device *obd,
1541 struct lustre_handle *lockh)
1543 if (lustre_handle_is_used(lockh))
1544 ldlm_lock_decref(lockh, LCK_PW);
1547 /* This is vfs_unlink() without down(i_sem). If we call regular vfs_unlink()
1548 * we have 2.6 lock ordering issues with filter_commitrw_write() as it takes
1549 * i_sem before starting a handle, while filter_destroy() + vfs_unlink do the
1550 * reverse. Caller must take i_sem before starting the transaction and we
1551 * drop it here before the inode is removed from the dentry. bug 4180/6984 */
/* Open-coded unlink: performs the permission / sticky-bit / immutable
 * checks that vfs_unlink() would, then calls dir->i_op->unlink()
 * directly.  Both @dir's and the child's i_mutex must already be held by
 * the caller (asserted below via TRYLOCK); the child's i_mutex is
 * dropped here before the inode reference goes away. */
1552 int filter_vfs_unlink(struct inode *dir, struct dentry *dentry,
1553 struct vfsmount *mnt)
1558 /* don't need dir->i_zombie for 2.4, it is for rename/unlink of dir
1559 * itself we already hold dir->i_mutex for child create/unlink ops */
1560 LASSERT(dentry->d_inode != NULL);
/* TRYLOCK failing (== 0) proves the mutexes are already held */
1561 LASSERT(TRYLOCK_INODE_MUTEX(dir) == 0);
1562 LASSERT(TRYLOCK_INODE_MUTEX(dentry->d_inode) == 0);
1566 if (/*!dentry->d_inode ||*/dentry->d_parent->d_inode != dir)
1567 GOTO(out, rc = -ENOENT);
1569 rc = ll_permission(dir, MAY_WRITE | MAY_EXEC, NULL);
1574 GOTO(out, rc = -EPERM);
1576 /* check_sticky() */
1577 if ((dentry->d_inode->i_uid != cfs_curproc_fsuid() &&
1578 !cfs_capable(CFS_CAP_FOWNER)) || IS_APPEND(dentry->d_inode) ||
1579 IS_IMMUTABLE(dentry->d_inode))
1580 GOTO(out, rc = -EPERM);
1582 /* NOTE: This might need to go outside i_mutex, though it isn't clear if
1583 * that was done because of journal_start (which is already done
1584 * here) or some other ordering issue. */
1585 ll_vfs_dq_init(dir);
1587 rc = ll_security_inode_unlink(dir, dentry, mnt);
1591 rc = dir->i_op->unlink(dir, dentry);
1593 /* need to drop i_mutex before we lose inode reference */
1594 UNLOCK_INODE_MUTEX(dentry->d_inode);
1601 /* Caller must hold LCK_PW on parent and push us into kernel context.
1602 * Caller must hold child i_mutex, we drop it always.
1603 * Caller is also required to ensure that dchild->d_inode exists. */
/* Unlink the object's backing file.  The nlink/i_count sanity check only
 * logs a warning -- destruction proceeds regardless, since the object
 * must go away either way. */
1604 static int filter_destroy_internal(struct obd_device *obd, obd_id objid,
1605 obd_seq group, struct dentry *dparent,
1606 struct dentry *dchild)
1608 struct inode *inode = dchild->d_inode;
1611 /* There should be 2 references to the inode:
1612 * 1) taken by filter_prepare_destroy
1613 * 2) taken by filter_destroy */
1614 if (inode->i_nlink != 1 || atomic_read(&inode->i_count) != 2) {
1615 CERROR("destroying objid %.*s ino %lu nlink %lu count %d\n",
1616 dchild->d_name.len, dchild->d_name.name, inode->i_ino,
1617 (unsigned long)inode->i_nlink,
1618 atomic_read(&inode->i_count));
1621 rc = filter_vfs_unlink(dparent->d_inode, dchild, obd->u.obt.obt_vfsmnt);
1623 CERROR("error unlinking objid %.*s: rc %d\n",
1624 dchild->d_name.len, dchild->d_name.name, rc);
/* Iteration state threaded through filter_intent_cb() by
 * filter_intent_policy(); "victim" accumulates the best glimpse target
 * (further fields used by the callback are not visible in this view). */
1628 struct filter_intent_args {
1629 struct ldlm_lock **victim;
/* Interval-tree callback (iterated in reverse, i.e. from the highest
 * extents down): among PW locks whose extent reaches beyond the current
 * LVB file size, pick as *arg->victim the lock with the highest extent
 * start, taking/releasing LDLM references as the candidate changes.
 * Stops iterating once intervals fall at or below the file size.
 * Clears *arg->liblustre when any non-liblustre holder is seen. */
1634 static enum interval_iter filter_intent_cb(struct interval_node *n,
1637 struct ldlm_interval *node = (struct ldlm_interval *)n;
1638 struct filter_intent_args *arg = (struct filter_intent_args*)args;
1639 __u64 size = arg->size;
1640 struct ldlm_lock **v = arg->victim;
1641 struct ldlm_lock *lck;
1643 /* If the interval is lower than the current file size,
1645 if (interval_high(n) <= size)
1646 return INTERVAL_ITER_STOP;
1648 cfs_list_for_each_entry(lck, &node->li_group, l_sl_policy) {
1649 /* Don't send glimpse ASTs to liblustre clients.
1650 * They aren't listening for them, and they do
1651 * entirely synchronous I/O anyways. */
1652 if (lck->l_export == NULL ||
1653 lck->l_export->exp_libclient == 1)
1656 if (*arg->liblustre)
1657 *arg->liblustre = 0;
1660 *v = LDLM_LOCK_GET(lck);
1661 } else if ((*v)->l_policy_data.l_extent.start <
1662 lck->l_policy_data.l_extent.start) {
/* better candidate found: swap the reference */
1663 LDLM_LOCK_RELEASE(*v);
1664 *v = LDLM_LOCK_GET(lck);
1667 /* the same policy group - every lock has the
1668 * same extent, so needn't do it any more */
1672 return INTERVAL_ITER_CONT;
/* LDLM intent policy for glimpse (size-probe) locks on the OST.
 * If the extent policy grants the lock outright, return it (or abort it
 * for liblustre clients, which cannot handle ASTs).  Otherwise do not
 * grant: instead find the PW lock holder with the highest extent beyond
 * the cached LVB size (via filter_intent_cb()), send it a glimpse AST to
 * refresh the LVB, copy the result into the reply, and abort the lock
 * request so the client just uses the returned size. */
1675 static int filter_intent_policy(struct ldlm_namespace *ns,
1676 struct ldlm_lock **lockp, void *req_cookie,
1677 ldlm_mode_t mode, int flags, void *data)
1679 CFS_LIST_HEAD(rpc_list);
1680 struct ptlrpc_request *req = req_cookie;
1681 struct ldlm_lock *lock = *lockp, *l = NULL;
1682 struct ldlm_resource *res = lock->l_resource;
1683 ldlm_processing_policy policy;
1684 struct ost_lvb *res_lvb, *reply_lvb;
1685 struct ldlm_reply *rep;
1687 int idx, rc, tmpflags = 0, only_liblustre = 1;
1688 struct ldlm_interval_tree *tree;
1689 struct filter_intent_args arg;
1690 __u32 repsize[3] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
1691 [DLM_LOCKREPLY_OFF] = sizeof(*rep),
1692 [DLM_REPLY_REC_OFF] = sizeof(*reply_lvb) };
1695 policy = ldlm_get_processing_policy(res);
1696 LASSERT(policy != NULL);
1697 LASSERT(req != NULL);
1699 rc = lustre_pack_reply(req, 3, repsize, NULL);
1701 RETURN(req->rq_status = rc);
1703 rep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF, sizeof(*rep));
1704 LASSERT(rep != NULL);
1706 reply_lvb = lustre_msg_buf(req->rq_repmsg, DLM_REPLY_REC_OFF,
1707 sizeof(*reply_lvb));
1708 LASSERT(reply_lvb != NULL);
1710 //fixup_handle_for_resent_req(req, lock, &lockh);
1712 /* Call the extent policy function to see if our request can be
1713 * granted, or is blocked.
1714 * If the OST lock has LDLM_FL_HAS_INTENT set, it means a glimpse
1715 * lock, and should not be granted if the lock will be blocked.
1718 LASSERT(ns == ldlm_res_to_ns(res));
1720 rc = policy(lock, &tmpflags, 0, &err, &rpc_list);
1721 check_res_locked(res);
1723 /* FIXME: we should change the policy function slightly, to not make
1724 * this list at all, since we just turn around and free it */
1725 while (!cfs_list_empty(&rpc_list)) {
1726 struct ldlm_lock *wlock =
1727 cfs_list_entry(rpc_list.next, struct ldlm_lock,
1729 LASSERT((lock->l_flags & LDLM_FL_AST_SENT) == 0);
1730 LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
1731 lock->l_flags &= ~LDLM_FL_CP_REQD;
1732 cfs_list_del_init(&wlock->l_cp_ast);
1733 LDLM_LOCK_RELEASE(wlock);
1736 /* The lock met with no resistance; we're finished. */
1737 if (rc == LDLM_ITER_CONTINUE) {
1738 /* do not grant locks to the liblustre clients: they cannot
1739 * handle ASTs robustly. We need to do this while still
1740 * holding lr_lock to avoid the lock remaining on the res_link
1741 * list (and potentially being added to l_pending_list by an
1742 * AST) when we are going to drop this lock ASAP. */
1743 if (lock->l_export->exp_libclient ||
1744 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_GLIMPSE, 2)) {
1745 ldlm_resource_unlink_lock(lock);
1746 err = ELDLM_LOCK_ABORTED;
1748 err = ELDLM_LOCK_REPLACED;
1754 /* Do not grant any lock, but instead send GL callbacks. The extent
1755 * policy nicely created a list of all PW locks for us. We will choose
1756 * the highest of those which are larger than the size in the LVB, if
1757 * any, and perform a glimpse callback. */
1758 res_lvb = res->lr_lvb_data;
1759 LASSERT(res_lvb != NULL);
1760 *reply_lvb = *res_lvb;
1763 * lr_lock guarantees that no new locks are granted, and,
1764 * therefore, that res->lr_lvb_data cannot increase beyond the
1765 * end of already granted lock. As a result, it is safe to
1766 * check against "stale" reply_lvb->lvb_size value without
1769 arg.size = reply_lvb->lvb_size;
1771 arg.liblustre = &only_liblustre;
/* scan the PW interval tree for the best glimpse victim */
1772 for (idx = 0; idx < LCK_MODE_NUM; idx++) {
1773 tree = &res->lr_itree[idx];
1774 if (tree->lit_mode == LCK_PR)
1777 interval_iterate_reverse(tree->lit_root,
1778 filter_intent_cb, &arg);
1782 /* There were no PW locks beyond the size in the LVB; finished. */
1784 if (only_liblustre) {
1785 /* If we discovered a liblustre client with a PW lock,
1786 * however, the LVB may be out of date! The LVB is
1787 * updated only on glimpse (which we don't do for
1788 * liblustre clients) and cancel (which the client
1789 * obviously has not yet done). So if it has written
1790 * data but kept the lock, the LVB is stale and needs
1791 * to be updated from disk.
1793 * Of course, this will all disappear when we switch to
1794 * taking liblustre locks on the OST. */
1795 ldlm_res_lvbo_update(res, NULL, 1);
1797 RETURN(ELDLM_LOCK_ABORTED);
1801 * This check is for lock taken in filter_prepare_destroy() that does
1802 * not have l_glimpse_ast set. So the logic is: if there is a lock
1803 * with no l_glimpse_ast set, this object is being destroyed already.
1805 * Hence, if you are grabbing DLM locks on the server, always set
1806 * non-NULL glimpse_ast (e.g., ldlm_request.c:ldlm_glimpse_ast()).
1808 if (l->l_glimpse_ast == NULL) {
1809 /* We are racing with unlink(); just return -ENOENT */
1810 rep->lock_policy_res1 = -ENOENT;
1814 LASSERTF(l->l_glimpse_ast != NULL, "l == %p", l);
1815 rc = l->l_glimpse_ast(l, NULL); /* this will update the LVB */
1818 *reply_lvb = *res_lvb;
1822 LDLM_LOCK_RELEASE(l);
1824 RETURN(ELDLM_LOCK_ABORTED);
1828 * per-obd_device iobuf pool.
1830 * To avoid memory deadlocks in low-memory setups, amount of dynamic
1831 * allocations in write-path has to be minimized (see bug 5137).
1833 * Pages, niobuf_local's and niobuf_remote's are pre-allocated and attached to
1834 * OST threads (see ost_thread_{init,done}()).
1836 * "iobuf's" used by filter cannot be attached to OST thread, however, because
1837 * at the OST layer there are only (potentially) multiple obd_device of type
1838 * unknown at the time of OST thread creation.
1840 * Instead array of iobuf's is attached to struct filter_obd (->fo_iobuf_pool
1841 * field). This array has size OST_MAX_THREADS, so that each OST thread uses
1842 * its very own iobuf.
1846 * filter_kiobuf_pool_init()
1848 * filter_kiobuf_pool_done()
1850 * filter_iobuf_get()
1852 * operate on this array. They are "generic" in a sense that they don't depend
1853 * on actual type of iobuf's (the latter depending on Linux kernel version).
1857 * destroy pool created by filter_iobuf_pool_init
/* Free every allocated iobuf slot, then the pool array itself, and NULL
 * the pointer so a double teardown is harmless. */
1859 static void filter_iobuf_pool_done(struct filter_obd *filter)
1861 struct filter_iobuf **pool;
1866 pool = filter->fo_iobuf_pool;
1868 for (i = 0; i < filter->fo_iobuf_count; ++ i) {
/* slots are filled lazily by filter_iobuf_get(); many may be NULL */
1869 if (pool[i] != NULL)
1870 filter_free_iobuf(pool[i]);
1872 OBD_FREE(pool, filter->fo_iobuf_count * sizeof pool[0]);
1873 filter->fo_iobuf_pool = NULL;
/* Re-read the sptlrpc (RPC security flavor) rules for this target into a
 * temporary set, push the new flavors to existing exports, then swap the
 * new rule set in under fo_sptlrpc_lock, freeing the old one.  @initial
 * is passed through to sptlrpc_conf_target_get_rules(). */
1878 static int filter_adapt_sptlrpc_conf(struct obd_device *obd, int initial)
1880 struct filter_obd *filter = &obd->u.filter;
1881 struct sptlrpc_rule_set tmp_rset;
1884 sptlrpc_rule_set_init(&tmp_rset);
1885 rc = sptlrpc_conf_target_get_rules(obd, &tmp_rset, initial);
1887 CERROR("obd %s: failed get sptlrpc rules: %d\n",
1892 sptlrpc_target_update_exp_flavor(obd, &tmp_rset);
/* publish the new rule set atomically w.r.t. readers */
1894 cfs_write_lock(&filter->fo_sptlrpc_lock);
1895 sptlrpc_rule_set_free(&filter->fo_sptlrpc_rset);
1896 filter->fo_sptlrpc_rset = tmp_rset;
1897 cfs_write_unlock(&filter->fo_sptlrpc_lock);
1903 * pre-allocate pool of iobuf's to be used by filter_{prep,commit}rw_write().
/* Allocate the (initially empty) per-thread iobuf pointer array, sized
 * for the maximum possible number of OSS threads; individual entries are
 * filled lazily in filter_iobuf_get(). */
1905 static int filter_iobuf_pool_init(struct filter_obd *filter)
1912 OBD_ALLOC_GFP(filter->fo_iobuf_pool, OSS_THREADS_MAX * sizeof(*pool),
1914 if (filter->fo_iobuf_pool == NULL)
1917 filter->fo_iobuf_count = OSS_THREADS_MAX;
1922 /* Return iobuf allocated for @thread_id. We don't know in advance how
1923 * many threads there will be so we allocate a large empty array and only
1924 * fill in those slots that are actually in use.
1925 * If we haven't allocated a pool entry for this thread before, do so now. */
/* thread_id == -1 (no OST thread context in @oti) means no pooled slot:
 * a fresh iobuf is allocated and not cached in the pool. */
1926 void *filter_iobuf_get(struct filter_obd *filter, struct obd_trans_info *oti)
1928 int thread_id = (oti && oti->oti_thread) ?
1929 oti->oti_thread->t_id : -1;
1930 struct filter_iobuf *pool = NULL;
1931 struct filter_iobuf **pool_place = NULL;
1933 if (thread_id >= 0) {
1934 LASSERT(thread_id < filter->fo_iobuf_count);
1935 pool = *(pool_place = &filter->fo_iobuf_pool[thread_id]);
1938 if (unlikely(pool == NULL)) {
1939 pool = filter_alloc_iobuf(filter, OBD_BRW_WRITE,
1940 PTLRPC_MAX_BRW_PAGES);
/* cache in the per-thread slot only if one exists */
1941 if (pool_place != NULL)
1948 /* mount the file system (secretly). lustre_cfg parameters are:
1951 * 3 = flags: failover=f, failout=n
/* Core setup for the obdfilter device: obtain the backing mount (either
 * the pre-existing one from lustre_fill_super, or -- old lctl path --
 * mount it here), initialize fsfilt ops, iobuf pool, locks/lists and
 * tunables, run filter_prep(), and set up the LDLM namespace with the
 * glimpse intent policy, llog, sptlrpc and quota subsystems.  Error
 * labels unwind mount/ops/pool in reverse order. */
1954 int filter_common_setup(struct obd_device *obd, struct lustre_cfg* lcfg,
1957 struct filter_obd *filter = &obd->u.filter;
1958 struct vfsmount *mnt;
1959 struct lustre_mount_info *lmi;
1960 struct obd_uuid uuid;
1964 struct request_queue *q;
1968 if (lcfg->lcfg_bufcount < 3 ||
1969 LUSTRE_CFG_BUFLEN(lcfg, 1) < 1 ||
1970 LUSTRE_CFG_BUFLEN(lcfg, 2) < 1)
1973 lmi = server_get_mount(obd->obd_name);
1975 /* We already mounted in lustre_fill_super.
1976 lcfg bufs 1, 2, 4 (device, fstype, mount opts) are ignored.*/
1977 struct lustre_sb_info *lsi = s2lsi(lmi->lmi_sb);
1979 obd->obd_fsops = fsfilt_get_ops(MT_STR(lsi->lsi_ldd));
1981 /* gets recovery timeouts from mount data */
1982 if (lsi->lsi_lmd && lsi->lsi_lmd->lmd_recovery_time_soft)
1983 obd->obd_recovery_timeout =
1984 lsi->lsi_lmd->lmd_recovery_time_soft;
1985 if (lsi->lsi_lmd && lsi->lsi_lmd->lmd_recovery_time_hard)
1986 obd->obd_recovery_time_hard =
1987 lsi->lsi_lmd->lmd_recovery_time_hard;
1989 /* old path - used by lctl */
1990 CERROR("Using old MDS mount method\n");
1991 mnt = ll_kern_mount(lustre_cfg_string(lcfg, 2),
1992 MS_NOATIME|MS_NODIRATIME,
1993 lustre_cfg_string(lcfg, 1), option);
1996 LCONSOLE_ERROR_MSG(0x135, "Can't mount disk %s (%d)\n",
1997 lustre_cfg_string(lcfg, 1), rc);
2001 obd->obd_fsops = fsfilt_get_ops(lustre_cfg_string(lcfg, 2));
2003 if (IS_ERR(obd->obd_fsops))
2004 GOTO(err_mntput, rc = PTR_ERR(obd->obd_fsops));
2006 rc = filter_iobuf_pool_init(filter);
2010 if (lvfs_check_rdonly(lvfs_sbdev(mnt->mnt_sb))) {
2011 CERROR("%s: Underlying device is marked as read-only. "
2012 "Setup failed\n", obd->obd_name);
2013 GOTO(err_ops, rc = -EROFS);
2016 /* failover is the default */
2017 obd->obd_replayable = 1;
/* lcfg buf 3: an 'n' flag (failout) disables recovery/replay */
2019 if (lcfg->lcfg_bufcount > 3 && LUSTRE_CFG_BUFLEN(lcfg, 3) > 0) {
2020 str = lustre_cfg_string(lcfg, 3);
2021 if (strchr(str, 'n')) {
2022 CWARN("%s: recovery disabled\n", obd->obd_name);
2023 obd->obd_replayable = 0;
2027 obd->u.obt.obt_vfsmnt = mnt;
2028 obd->u.obt.obt_sb = mnt->mnt_sb;
2029 obd->u.obt.obt_magic = OBT_MAGIC;
2030 filter->fo_fstype = mnt->mnt_sb->s_type->name;
2031 CDEBUG(D_SUPER, "%s: mnt = %p\n", filter->fo_fstype, mnt);
2033 fsfilt_setup(obd, obd->u.obt.obt_sb);
2035 OBD_SET_CTXT_MAGIC(&obd->obd_lvfs_ctxt);
2036 obd->obd_lvfs_ctxt.pwdmnt = mnt;
2037 obd->obd_lvfs_ctxt.pwd = mnt->mnt_root;
2038 obd->obd_lvfs_ctxt.fs = get_ds();
2039 obd->obd_lvfs_ctxt.cb_ops = filter_lvfs_ops;
/* initialize locks, lists and tunable defaults before filter_prep() */
2041 cfs_init_mutex(&filter->fo_init_lock);
2042 filter->fo_committed_group = 0;
2043 filter->fo_destroys_in_progress = 0;
2044 for (i = 0; i < 32; i++)
2045 cfs_sema_init(&filter->fo_create_locks[i], 1);
2047 cfs_spin_lock_init(&filter->fo_objidlock);
2048 CFS_INIT_LIST_HEAD(&filter->fo_export_list);
2049 cfs_sema_init(&filter->fo_alloc_lock, 1);
2050 init_brw_stats(&filter->fo_filter_stats);
2051 cfs_spin_lock_init(&filter->fo_flags_lock);
2052 filter->fo_read_cache = 1; /* enable read-only cache by default */
2053 filter->fo_writethrough_cache = 1; /* enable writethrough cache */
2054 filter->fo_readcache_max_filesize = FILTER_MAX_CACHE_SIZE;
2055 filter->fo_fmd_max_num = FILTER_FMD_MAX_NUM_DEFAULT;
2056 filter->fo_fmd_max_age = FILTER_FMD_MAX_AGE_DEFAULT;
2057 filter->fo_syncjournal = 0; /* Don't sync journals on i/o by default */
2058 filter_slc_set(filter); /* initialize sync on lock cancel */
2060 rc = filter_prep(obd);
2064 CFS_INIT_LIST_HEAD(&filter->fo_llog_list);
2065 cfs_spin_lock_init(&filter->fo_llog_list_lock);
2067 filter->fo_fl_oss_capa = 1;
2069 CFS_INIT_LIST_HEAD(&filter->fo_capa_keys);
2070 filter->fo_capa_hash = init_capa_hash();
2071 if (filter->fo_capa_hash == NULL)
2072 GOTO(err_post, rc = -ENOMEM);
2074 sprintf(ns_name, "filter-%s", obd->obd_uuid.uuid);
2075 obd->obd_namespace = ldlm_namespace_new(obd, ns_name,
2076 LDLM_NAMESPACE_SERVER,
2077 LDLM_NAMESPACE_GREEDY,
2079 if (obd->obd_namespace == NULL)
2080 GOTO(err_post, rc = -ENOMEM);
2081 obd->obd_namespace->ns_lvbp = obd;
2082 obd->obd_namespace->ns_lvbo = &filter_lvbo;
2083 ldlm_register_intent(obd->obd_namespace, filter_intent_policy);
2085 ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
2086 "filter_ldlm_cb_client", &obd->obd_ldlm_client);
2088 rc = obd_llog_init(obd, &obd->obd_olg, obd, NULL);
2090 CERROR("failed to setup llogging subsystems\n");
2094 cfs_rwlock_init(&filter->fo_sptlrpc_lock);
2095 sptlrpc_rule_set_init(&filter->fo_sptlrpc_rset);
2096 /* do this after llog being initialized */
2097 filter_adapt_sptlrpc_conf(obd, 1);
2099 rc = lquota_setup(filter_quota_interface_ref, obd);
/* advisory only: warn if the block device's request queue is tuned
 * smaller than the maximum Lustre bulk RPC size */
2103 q = bdev_get_queue(mnt->mnt_sb->s_bdev);
2104 if (queue_max_sectors(q) < queue_max_hw_sectors(q) &&
2105 queue_max_sectors(q) < PTLRPC_MAX_BRW_SIZE >> 9)
2106 LCONSOLE_INFO("%s: underlying device %s should be tuned "
2107 "for larger I/O requests: max_sectors = %u "
2108 "could be up to max_hw_sectors=%u\n",
2109 obd->obd_name, mnt->mnt_sb->s_id,
2110 queue_max_sectors(q), queue_max_hw_sectors(q));
2112 uuid_ptr = fsfilt_uuid(obd, obd->u.obt.obt_sb);
2113 if (uuid_ptr != NULL) {
2114 class_uuid_unparse(uuid_ptr, &uuid);
2120 label = fsfilt_get_label(obd, obd->u.obt.obt_sb);
2121 LCONSOLE_INFO("%s: Now serving %s %s%s with recovery %s\n",
2122 obd->obd_name, label ?: str, lmi ? "on " : "",
2123 lmi ? s2lsi(lmi->lmi_sb)->lsi_lmd->lmd_dev : "",
2124 obd->obd_replayable ? "enabled" : "disabled");
2126 if (obd->obd_recovering)
2127 LCONSOLE_WARN("%s: Will be in recovery for at least %d:%.02d, "
2128 "or until %d client%s reconnect%s\n",
2130 obd->obd_recovery_timeout / 60,
2131 obd->obd_recovery_timeout % 60,
2132 obd->obd_max_recoverable_clients,
2133 (obd->obd_max_recoverable_clients == 1) ? "" : "s",
2134 (obd->obd_max_recoverable_clients == 1) ? "s": "");
/* error unwind: release fsfilt ops, iobuf pool and the mount */
2142 fsfilt_put_ops(obd->obd_fsops);
2143 filter_iobuf_pool_done(filter);
2145 server_put_mount(obd->obd_name, mnt);
2146 obd->u.obt.obt_sb = 0;
/* OBD setup entry point: validate config buffers, allocate a zeroed page
 * to hold the mount-options string (2.6.9 selinux wants a full options
 * page for do_kern_mount, bug 6471), register lprocfs stats/entries, and
 * delegate the real work to filter_common_setup().  lprocfs state is
 * torn down if the common setup fails. */
2150 static int filter_setup(struct obd_device *obd, struct lustre_cfg* lcfg)
2152 struct lprocfs_static_vars lvars;
/* u.obt must alias u.filter.fo_obt for the obt helpers to work */
2157 CLASSERT(offsetof(struct obd_device, u.obt) ==
2158 offsetof(struct obd_device, u.filter.fo_obt));
2160 if (!LUSTRE_CFG_BUFLEN(lcfg, 1) || !LUSTRE_CFG_BUFLEN(lcfg, 2))
2163 /* 2.6.9 selinux wants a full option page for do_kern_mount (bug6471) */
2164 OBD_PAGE_ALLOC(page, CFS_ALLOC_STD);
2167 addr = (unsigned long)cfs_page_address(page);
2168 clear_page((void *)addr);
2170 /* lprocfs must be setup before the filter so state can be safely added
2171 * to /proc incrementally as the filter is setup */
2172 lprocfs_filter_init_vars(&lvars);
2173 if (lprocfs_obd_setup(obd, lvars.obd_vars) == 0 &&
2174 lprocfs_alloc_obd_stats(obd, LPROC_FILTER_LAST) == 0) {
2175 /* Init obdfilter private stats here */
2176 lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_READ_BYTES,
2177 LPROCFS_CNTR_AVGMINMAX,
2178 "read_bytes", "bytes");
2179 lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_WRITE_BYTES,
2180 LPROCFS_CNTR_AVGMINMAX,
2181 "write_bytes", "bytes");
2182 lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_GET_PAGE,
2183 LPROCFS_CNTR_AVGMINMAX|LPROCFS_CNTR_STDDEV,
2184 "get_page", "usec");
2185 lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_NO_PAGE,
2186 LPROCFS_CNTR_AVGMINMAX,
2187 "get_page_failures", "num");
2188 lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_CACHE_ACCESS,
2189 LPROCFS_CNTR_AVGMINMAX,
2190 "cache_access", "pages");
2191 lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_CACHE_HIT,
2192 LPROCFS_CNTR_AVGMINMAX,
2193 "cache_hit", "pages");
2194 lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_CACHE_MISS,
2195 LPROCFS_CNTR_AVGMINMAX,
2196 "cache_miss", "pages");
2198 lproc_filter_attach_seqstat(obd);
2199 obd->obd_proc_exports_entry = lprocfs_register("exports",
2200 obd->obd_proc_entry,
/* a failed "exports" dir is non-fatal: log and continue without it */
2202 if (IS_ERR(obd->obd_proc_exports_entry)) {
2203 rc = PTR_ERR(obd->obd_proc_exports_entry);
2204 CERROR("error %d setting up lprocfs for %s\n",
2206 obd->obd_proc_exports_entry = NULL;
2209 if (obd->obd_proc_exports_entry)
2210 lprocfs_add_simple(obd->obd_proc_exports_entry, "clear",
2211 lprocfs_nid_stats_clear_read,
2212 lprocfs_nid_stats_clear_write, obd, NULL);
/* copy the mount options (lcfg buf 4) into the zeroed page */
2214 memcpy((void *)addr, lustre_cfg_buf(lcfg, 4),
2215 LUSTRE_CFG_BUFLEN(lcfg, 4));
2216 rc = filter_common_setup(obd, lcfg, (void *)addr);
2217 OBD_PAGE_FREE(page);
/* error unwind: remove everything lprocfs-related set up above */
2220 lprocfs_remove_proc_entry("clear", obd->obd_proc_exports_entry);
2221 lprocfs_free_per_client_stats(obd);
2222 lprocfs_free_obd_stats(obd);
2223 lprocfs_obd_cleanup(obd);
/* Replicator log ops: populated at runtime from llog_client_ops in
 * filter_default_olg_init(). */
2229 static struct llog_operations filter_mds_ost_repl_logops;
/* Size-origin log ops: static origin setup/cleanup/add handlers. */
2231 static struct llog_operations filter_size_orig_logops = {
2232 .lop_setup = llog_obd_origin_setup,
2233 .lop_cleanup = llog_obd_origin_cleanup,
2234 .lop_add = llog_obd_origin_add
/* Tear down the llog contexts of an obd_llog_group: MDS_OST_REPL,
 * SIZE_ORIG and CONFIG_ORIG.  Each context is cleaned up if present;
 * later failures are kept in rc2 so that an earlier error is not lost. */
2237 static int filter_olg_fini(struct obd_llog_group *olg)
2239 struct llog_ctxt *ctxt;
2240 int rc = 0, rc2 = 0;
2243 ctxt = llog_group_get_ctxt(olg, LLOG_MDS_OST_REPL_CTXT);
2245 rc = llog_cleanup(ctxt);
2247 ctxt = llog_group_get_ctxt(olg, LLOG_SIZE_ORIG_CTXT);
2249 rc2 = llog_cleanup(ctxt);
2254 ctxt = llog_group_get_ctxt(olg, LLOG_CONFIG_ORIG_CTXT);
2256 rc2 = llog_cleanup(ctxt);
/* Set up the two llog contexts common to every olg on this filter:
 * MDS_OST_REPL (replication, client-side ops) and SIZE_ORIG.  On failure
 * of the second, the whole group is finalized so nothing is left
 * half-initialized. */
2265 filter_olg_init(struct obd_device *obd, struct obd_llog_group *olg,
2266 struct obd_device *tgt)
2271 rc = llog_setup(obd, olg, LLOG_MDS_OST_REPL_CTXT, tgt, 0, NULL,
2272 &filter_mds_ost_repl_logops);
2276 rc = llog_setup(obd, olg, LLOG_SIZE_ORIG_CTXT, tgt, 0, NULL,
2277 &filter_size_orig_logops);
2283 filter_olg_fini(olg);
2288 * Init the default olg, which is embedded in the obd_device, for filter.
/* One-time setup for the default llog group: start the recovery thread
 * (fo_lcm), build filter_mds_ost_repl_logops from llog_client_ops with
 * replicator cancel/connect/sync handlers, initialize the common
 * contexts plus CONFIG_ORIG, and attach the lcm and the MDS-OST recovery
 * callback to the replication context. */
2291 filter_default_olg_init(struct obd_device *obd, struct obd_llog_group *olg,
2292 struct obd_device *tgt)
2294 struct filter_obd *filter = &obd->u.filter;
2295 struct llog_ctxt *ctxt;
2299 filter->fo_lcm = llog_recov_thread_init(obd->obd_name);
2300 if (!filter->fo_lcm)
/* clone the client ops, then override the replicator-specific hooks */
2303 filter_mds_ost_repl_logops = llog_client_ops;
2304 filter_mds_ost_repl_logops.lop_cancel = llog_obd_repl_cancel;
2305 filter_mds_ost_repl_logops.lop_connect = llog_obd_repl_connect;
2306 filter_mds_ost_repl_logops.lop_sync = llog_obd_repl_sync;
2308 rc = filter_olg_init(obd, olg, tgt);
2310 GOTO(cleanup_lcm, rc);
2312 rc = llog_setup(obd, olg, LLOG_CONFIG_ORIG_CTXT, tgt, 0, NULL,
2315 GOTO(cleanup_olg, rc);
2317 ctxt = llog_group_get_ctxt(olg, LLOG_MDS_OST_REPL_CTXT);
2319 CERROR("Can't get ctxt for %p:%x\n", olg,
2320 LLOG_MDS_OST_REPL_CTXT);
2321 GOTO(cleanup_olg, rc = -ENODEV);
2323 ctxt->loc_lcm = lcm_get(filter->fo_lcm);
2324 ctxt->llog_proc_cb = filter_recov_log_mds_ost_cb;
2325 llog_ctxt_put(ctxt);
/* error unwind: drop the olg contexts, then stop the recovery thread */
2329 filter_olg_fini(olg);
2331 llog_recov_thread_fini(filter->fo_lcm, 1);
2332 filter->fo_lcm = NULL;
/* obd_llog_init() entry point for the filter.  The device-embedded
 * default olg gets the full setup (recovery thread etc.); any other
 * (per-MDS-group) olg only gets the context setup and inherits the
 * already-running fo_lcm recovery thread. */
2337 filter_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
2338 struct obd_device *tgt, int *index)
2340 struct filter_obd *filter = &obd->u.filter;
2341 struct llog_ctxt *ctxt;
2345 LASSERT(olg != NULL);
2346 if (olg == &obd->obd_olg)
2347 return filter_default_olg_init(obd, olg, tgt);
             /* non-default olg: the default one must already exist */
2349 LASSERT(filter->fo_lcm != NULL);
2350 rc = filter_olg_init(obd, olg, tgt);
2353 ctxt = llog_group_get_ctxt(olg, LLOG_MDS_OST_REPL_CTXT);
2355 CERROR("Can't get ctxt for %p:%x\n", olg,
2356 LLOG_MDS_OST_REPL_CTXT);
2357 filter_olg_fini(olg);
2360 ctxt->llog_proc_cb = filter_recov_log_mds_ost_cb;
2361 ctxt->loc_lcm = lcm_get(filter->fo_lcm);
2362 llog_ctxt_put(ctxt);
/* obd_llog_finish() entry point: flush the replicator context of the
 * default olg, drop its cached reverse import, stop the recovery
 * thread, then clean up the default olg's contexts. */
2366 static int filter_llog_finish(struct obd_device *obd, int count)
2368 struct filter_obd *filter = &obd->u.filter;
2369 struct llog_ctxt *ctxt;
2372 ctxt = llog_group_get_ctxt(&obd->obd_olg, LLOG_MDS_OST_REPL_CTXT);
             /*
2375 * Make sure that no cached llcds left in recov_thread.
2376 * We actually do sync in disconnect time, but disconnect
2377 * may not come being marked rq_no_resend = 1.
              */
2379 llog_sync(ctxt, NULL);
             /*
2382 * Balance class_import_get() in llog_receptor_accept().
2383 * This is safe to do, as llog is already synchronized
2384 * and its import may go.
              */
2386 cfs_mutex_down(&ctxt->loc_sem);
2387 if (ctxt->loc_imp) {
2388 class_import_put(ctxt->loc_imp);
2389 ctxt->loc_imp = NULL;
2391 cfs_mutex_up(&ctxt->loc_sem);
2392 llog_ctxt_put(ctxt);
             /* NOTE(review): loc_sem is taken again below AFTER the
              * llog_ctxt_put() above, and nothing visible here guarantees
              * ctxt != NULL on this path — looks like a use-after-put /
              * possible NULL deref; confirm against elided lines. */
2395 if (filter->fo_lcm) {
2396 cfs_mutex_down(&ctxt->loc_sem);
2397 llog_recov_thread_fini(filter->fo_lcm, obd->obd_force);
2398 filter->fo_lcm = NULL;
2399 cfs_mutex_up(&ctxt->loc_sem);
2401 RETURN(filter_olg_fini(&obd->obd_olg));
/*
2405 * Find the group llog according to group index in the llog group list.
 *
 * Caller must hold fo_llog_list_lock (asserted below); returns the
 * matching olg or (on an elided line) NULL when not found.
 */
2407 static struct obd_llog_group *
2408 filter_find_olg_internal(struct filter_obd *filter, int group)
2410 struct obd_llog_group *olg;
2412 LASSERT_SPIN_LOCKED(&filter->fo_llog_list_lock);
2413 cfs_list_for_each_entry(olg, &filter->fo_llog_list, olg_list) {
2414 if (olg->olg_seq == group)
/*
2421 * Find the group llog according to group index on the filter
 *
 * Locked wrapper around filter_find_olg_internal(); FID_SEQ_LLOG maps
 * to the device-embedded default olg.
 */
2423 struct obd_llog_group *filter_find_olg(struct obd_device *obd, int group)
2425 struct obd_llog_group *olg = NULL;
2426 struct filter_obd *filter;
2428 filter = &obd->u.filter;
2430 if (group == FID_SEQ_LLOG)
2431 RETURN(&obd->obd_olg);
2433 cfs_spin_lock(&filter->fo_llog_list_lock);
2434 olg = filter_find_olg_internal(filter, group);
2435 cfs_spin_unlock(&filter->fo_llog_list_lock);
/*
2440 * Find the llog_group of the filter according to the group. If it can not
2441 * find, create the llog_group, which only happens when mds is being synced
 *
 * A new olg is published on fo_llog_list with olg_initializing set so a
 * concurrent lookup gets -EBUSY instead of a half-initialized group.
 */
2444 struct obd_llog_group *filter_find_create_olg(struct obd_device *obd, int group)
2446 struct obd_llog_group *olg = NULL;
2447 struct filter_obd *filter;
2450 filter = &obd->u.filter;
2452 if (group == FID_SEQ_LLOG)
2453 RETURN(&obd->obd_olg);
2455 cfs_spin_lock(&filter->fo_llog_list_lock);
2456 olg = filter_find_olg_internal(filter, group);
2458 if (olg->olg_initializing) {
2459 GOTO(out_unlock, olg = ERR_PTR(-EBUSY));
2461 GOTO(out_unlock, olg);
             /* not found: allocate (allocation lines elided) and publish */
2466 GOTO(out_unlock, olg = ERR_PTR(-ENOMEM));
2468 llog_group_init(olg, group);
2469 cfs_list_add(&olg->olg_list, &filter->fo_llog_list);
2470 olg->olg_initializing = 1;
2471 cfs_spin_unlock(&filter->fo_llog_list_lock);
2473 rc = obd_llog_init(obd, olg, obd, NULL);
2475 cfs_spin_lock(&filter->fo_llog_list_lock);
2476 cfs_list_del(&olg->olg_list);
2477 cfs_spin_unlock(&filter->fo_llog_list_lock);
             /* NOTE(review): obd_llog_init() failure is reported as -ENOMEM
              * regardless of the actual rc — the real error code is lost. */
2479 GOTO(out, olg = ERR_PTR(-ENOMEM));
2481 cfs_spin_lock(&filter->fo_llog_list_lock);
2482 olg->olg_initializing = 0;
2483 cfs_spin_unlock(&filter->fo_llog_list_lock);
2484 CDEBUG(D_OTHER, "%s: new llog group %u (0x%p)\n",
2485 obd->obd_name, group, olg);
2490 cfs_spin_unlock(&filter->fo_llog_list_lock);
/* Handle an MDS llog-connect request: find the olg for the log's group,
 * remember the export on it, mark the filter as needing MDS-OST sync,
 * and establish the llog connection on the requested context. */
2494 static int filter_llog_connect(struct obd_export *exp,
2495 struct llogd_conn_body *body)
2497 struct obd_device *obd = exp->exp_obd;
2498 struct llog_ctxt *ctxt;
2499 struct obd_llog_group *olg;
2503 CDEBUG(D_OTHER, "%s: LLog connect for: "LPX64"/"LPX64":%x\n",
2504 obd->obd_name, body->lgdc_logid.lgl_oid,
2505 body->lgdc_logid.lgl_oseq, body->lgdc_logid.lgl_ogen);
2507 olg = filter_find_olg(obd, body->lgdc_logid.lgl_oseq);
2509 CERROR(" %s: can not find olg of group %d\n",
2510 obd->obd_name, (int)body->lgdc_logid.lgl_oseq);
2513 llog_group_set_export(olg, exp);
2515 ctxt = llog_group_get_ctxt(olg, body->lgdc_ctxt_idx);
2516 LASSERTF(ctxt != NULL, "ctxt is not null, ctxt idx %d \n",
2517 body->lgdc_ctxt_idx);
2519 CWARN("%s: Recovery from log "LPX64"/"LPX64":%x\n",
2520 obd->obd_name, body->lgdc_logid.lgl_oid,
2521 body->lgdc_logid.lgl_oseq, body->lgdc_logid.lgl_ogen);
             /* flag that MDS-OST llog synchronization is now in progress */
2523 cfs_spin_lock(&obd->u.filter.fo_flags_lock);
2524 obd->u.filter.fo_mds_ost_sync = 1;
2525 cfs_spin_unlock(&obd->u.filter.fo_flags_lock);
2526 rc = llog_connect(ctxt, &body->lgdc_logid,
2527 &body->lgdc_gen, NULL);
2528 llog_ctxt_put(ctxt);
2530 CERROR("failed to connect rc %d idx %d\n", rc,
2531 body->lgdc_ctxt_idx);
/* Pre-cleanup of all llog state: finish the default olg via
 * obd_llog_finish(), then drain fo_llog_list onto a private list under
 * the spinlock and fini each per-group olg outside the lock. */
2536 static int filter_llog_preclean(struct obd_device *obd)
2538 struct obd_llog_group *olg, *tmp;
2539 struct filter_obd *filter;
2540 cfs_list_t remove_list;
2544 rc = obd_llog_finish(obd, 0);
2546 CERROR("failed to cleanup llogging subsystem\n");
2548 filter = &obd->u.filter;
2549 CFS_INIT_LIST_HEAD(&remove_list);
             /* detach every group olg while holding the list lock ... */
2551 cfs_spin_lock(&filter->fo_llog_list_lock);
2552 while (!cfs_list_empty(&filter->fo_llog_list)) {
2553 olg = cfs_list_entry(filter->fo_llog_list.next,
2554 struct obd_llog_group, olg_list);
2555 cfs_list_del(&olg->olg_list);
2556 cfs_list_add(&olg->olg_list, &remove_list);
2558 cfs_spin_unlock(&filter->fo_llog_list_lock);
             /* ... then clean them up without the lock held */
2560 cfs_list_for_each_entry_safe(olg, tmp, &remove_list, olg_list) {
2561 cfs_list_del_init(&olg->olg_list);
2562 rc = filter_olg_fini(olg);
2564 CERROR("failed to cleanup llogging subsystem for %u\n",
/* obd pre-cleanup hook: at the EXPORTS stage stop recovery first, then
 * run the llog pre-clean; EARLY stage is a no-op here. */
2572 static int filter_precleanup(struct obd_device *obd,
2573 enum obd_cleanup_stage stage)
2579 case OBD_CLEANUP_EARLY:
2581 case OBD_CLEANUP_EXPORTS:
2582 /* Stop recovery before namespace cleanup. */
2583 target_recovery_fini(obd);
2584 rc = filter_llog_preclean(obd);
/* Final teardown of the filter device: procfs entries, quota, the LDLM
 * namespace, sptlrpc rules, quota-off + dcache shrink on the backing
 * sb, mount release, fsfilt ops and the iobuf pool. */
2590 static int filter_cleanup(struct obd_device *obd)
2592 struct filter_obd *filter = &obd->u.filter;
2596 LCONSOLE_WARN("%s: shutting down for failover; client state "
2597 "will be preserved.\n", obd->obd_name);
             /* wait for in-flight export users and zombie exports first */
2599 obd_exports_barrier(obd);
2600 obd_zombie_barrier();
2602 lprocfs_remove_proc_entry("clear", obd->obd_proc_exports_entry);
2603 lprocfs_free_per_client_stats(obd);
2604 lprocfs_free_obd_stats(obd);
2605 lprocfs_obd_cleanup(obd);
2606 lquota_cleanup(filter_quota_interface_ref, obd);
2608 ldlm_namespace_free(obd->obd_namespace, NULL, obd->obd_force);
2609 obd->obd_namespace = NULL;
2611 sptlrpc_rule_set_free(&filter->fo_sptlrpc_rset);
             /* nothing mounted — the rest only applies with a backing sb */
2613 if (obd->u.obt.obt_sb == NULL)
2618 ll_vfs_dq_off(obd->u.obt.obt_sb, 0);
2619 shrink_dcache_sb(obd->u.obt.obt_sb);
2621 server_put_mount(obd->obd_name, obd->u.obt.obt_vfsmnt);
2622 obd->u.obt.obt_sb = NULL;
2624 fsfilt_put_ops(obd->obd_fsops);
2626 filter_iobuf_pool_done(filter);
2628 LCONSOLE_INFO("OST %s has stopped.\n", obd->obd_name);
/* Negotiate connect data with a client (shared by connect and
 * reconnect, @reconnect distinguishes them): object group, connect
 * flags, grant, OST index validation, brw size and checksum types.
 * The negotiated values are written back into @data for the reply. */
2633 static int filter_connect_internal(struct obd_export *exp,
2634 struct obd_connect_data *data,
2637 struct filter_export_data *fed = &exp->exp_filter_data;
2642 CDEBUG(D_RPCTRACE, "%s: cli %s/%p ocd_connect_flags: "LPX64
2643 " ocd_version: %x ocd_grant: %d ocd_index: %u\n",
2644 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
2645 data->ocd_connect_flags, data->ocd_version,
2646 data->ocd_grant, data->ocd_index);
             /* a given export must stick to one object group */
2648 if (fed->fed_group != 0 && fed->fed_group != data->ocd_group) {
2649 CWARN("!!! This export (nid %s) used object group %d "
2650 "earlier; now it's trying to use group %d! This could "
2651 "be a bug in the MDS. Please report to "
2652 "http://bugzilla.lustre.org/\n",
2653 obd_export_nid2str(exp), fed->fed_group,data->ocd_group);
2656 fed->fed_group = data->ocd_group;
             /* keep only flags this OST supports; echo our version back */
2658 data->ocd_connect_flags &= OST_CONNECT_SUPPORTED;
2659 exp->exp_connect_flags = data->ocd_connect_flags;
2660 data->ocd_version = LUSTRE_VERSION_CODE;
2662 /* Kindly make sure the SKIP_ORPHAN flag is from MDS. */
2663 if (!ergo(data->ocd_connect_flags & OBD_CONNECT_SKIP_ORPHAN,
2664 data->ocd_connect_flags & OBD_CONNECT_MDS))
2667 if (exp->exp_connect_flags & OBD_CONNECT_GRANT) {
2668 struct filter_obd *filter = &exp->exp_obd->u.filter;
2669 obd_size left, want;
2671 cfs_spin_lock(&exp->exp_obd->obd_osfs_lock);
2672 left = filter_grant_space_left(exp);
2673 want = data->ocd_grant;
2674 filter_grant(exp, fed->fed_grant, want, left, (reconnect == 0));
2675 data->ocd_grant = fed->fed_grant;
2676 cfs_spin_unlock(&exp->exp_obd->obd_osfs_lock);
2678 CDEBUG(D_CACHE, "%s: cli %s/%p ocd_grant: %d want: "
2679 LPU64" left: "LPU64"\n", exp->exp_obd->obd_name,
2680 exp->exp_client_uuid.uuid, exp,
2681 data->ocd_grant, want, left);
             /* NOTE(review): incremented here under OBD_CONNECT_GRANT, but
              * filter_destroy_export() decrements it under
              * OBD_CONNECT_GRANT_SHRINK — confirm the flags are meant to
              * differ (counter could drift otherwise). */
2683 filter->fo_tot_granted_clients ++;
2686 if (data->ocd_connect_flags & OBD_CONNECT_INDEX) {
2687 struct lr_server_data *lsd = class_server_data(exp->exp_obd);
2688 int index = le32_to_cpu(lsd->lsd_ost_index);
2690 if (!(lsd->lsd_feature_compat &
2691 cpu_to_le32(OBD_COMPAT_OST))) {
2692 /* this will only happen on the first connect */
2693 lsd->lsd_ost_index = cpu_to_le32(data->ocd_index);
2694 lsd->lsd_feature_compat |= cpu_to_le32(OBD_COMPAT_OST);
2695 /* sync is not needed here as filter_client_add will
2696 * set exp_need_sync flag */
2697 filter_update_server_data(exp->exp_obd);
2698 } else if (index != data->ocd_index) {
2699 LCONSOLE_ERROR_MSG(0x136, "Connection from %s to index"
2700 " %u doesn't match actual OST index"
2701 " %u in last_rcvd file, bad "
2703 obd_export_nid2str(exp), index,
2707 /* FIXME: Do the same with the MDS UUID and lsd_peeruuid.
2708 * FIXME: We don't strictly need the COMPAT flag for that,
2709 * FIXME: as lsd_peeruuid[0] will tell us if that is set.
2710 * FIXME: We needed it for the index, as index 0 is valid. */
2713 if (OBD_FAIL_CHECK(OBD_FAIL_OST_BRW_SIZE)) {
2714 data->ocd_brw_size = 65536;
2715 } else if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) {
2716 data->ocd_brw_size = min(data->ocd_brw_size,
2717 (__u32)(PTLRPC_MAX_BRW_PAGES << CFS_PAGE_SHIFT));
2718 if (data->ocd_brw_size == 0) {
2719 CERROR("%s: cli %s/%p ocd_connect_flags: "LPX64
2720 " ocd_version: %x ocd_grant: %d ocd_index: %u "
2721 "ocd_brw_size is unexpectedly zero, "
2722 "network data corruption?"
2723 "Refusing connection of this client\n",
2724 exp->exp_obd->obd_name,
2725 exp->exp_client_uuid.uuid,
2726 exp, data->ocd_connect_flags, data->ocd_version,
2727 data->ocd_grant, data->ocd_index);
2732 if (data->ocd_connect_flags & OBD_CONNECT_CKSUM) {
2733 __u32 cksum_types = data->ocd_cksum_types;
2735 /* The client set in ocd_cksum_types the checksum types it
2736 * supports. We have to mask off the algorithms that we don't
             * support (elided) and reply with the intersection. */
2738 if (cksum_types & OBD_CKSUM_ALL)
2739 data->ocd_cksum_types &= OBD_CKSUM_ALL;
2741 data->ocd_cksum_types = OBD_CKSUM_CRC32;
2743 CDEBUG(D_RPCTRACE, "%s: cli %s supports cksum type %x, return "
2744 "%x\n", exp->exp_obd->obd_name,
2745 obd_export_nid2str(exp), cksum_types,
2746 data->ocd_cksum_types);
2748 /* This client does not support OBD_CONNECT_CKSUM
2749 * fall back to CRC32 */
2750 CDEBUG(D_RPCTRACE, "%s: cli %s does not support "
2751 "OBD_CONNECT_CKSUM, CRC32 will be used\n",
2752 exp->exp_obd->obd_name,
2753 obd_export_nid2str(exp));
/* Reconnect an existing export: re-run connect-data negotiation with
 * reconnect=1 and refresh per-export stats.  No new export is created. */
2759 static int filter_reconnect(const struct lu_env *env,
2760 struct obd_export *exp, struct obd_device *obd,
2761 struct obd_uuid *cluuid,
2762 struct obd_connect_data *data,
2768 if (exp == NULL || obd == NULL || cluuid == NULL)
2771 rc = filter_connect_internal(exp, data, 1);
2773 filter_export_stats_init(obd, exp, localdata);
2778 /* nearly identical to mds_connect */
/* Create a new export for a connecting client: class_connect, connect
 * data negotiation, stats init, last_rcvd client slot (if replayable),
 * and — for MDS connections — read in the object group's state.
 * On error the export is disconnected again (cleanup label below). */
2779 static int filter_connect(const struct lu_env *env,
2780 struct obd_export **exp, struct obd_device *obd,
2781 struct obd_uuid *cluuid,
2782 struct obd_connect_data *data, void *localdata)
2784 struct lvfs_run_ctxt saved;
2785 struct lustre_handle conn = { 0 };
2786 struct obd_export *lexp;
2791 if (exp == NULL || obd == NULL || cluuid == NULL)
2794 rc = class_connect(&conn, obd, cluuid);
2797 lexp = class_conn2export(&conn);
2798 LASSERT(lexp != NULL);
2800 rc = filter_connect_internal(lexp, data, 0);
2804 filter_export_stats_init(obd, lexp, localdata);
2805 if (obd->obd_replayable) {
2806 struct lsd_client_data *lcd = lexp->exp_target_data.ted_lcd;
2808 memcpy(lcd->lcd_uuid, cluuid, sizeof(lcd->lcd_uuid));
2809 rc = filter_client_add(obd, lexp, -1);
2814 group = data->ocd_group;
2816 CWARN("%s: Received MDS connection ("LPX64"); group %d\n",
2817 obd->obd_name, lexp->exp_handle.h_cookie, group);
             /* read the group's objdir state under the server fs context */
2819 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
2820 rc = filter_read_groups(obd, group, 1);
2821 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
2823 CERROR("can't read group %u\n", group);
             /* error path: undo class_connect() */
2831 class_disconnect(lexp);
2840 /* Do extra sanity checks for grant accounting. We do this at connect,
2841 * disconnect, and statfs RPC time, so it shouldn't be too bad. We can
2842 * always get rid of it or turn it off when we know accounting is good. */
/* Walks all exports summing per-export dirty/pending/grant and compares
 * the totals against the filter-wide counters; mismatches are logged
 * (CERROR), per-export bounds are asserted. */
2843 static void filter_grant_sanity_check(struct obd_device *obd, const char *func)
2845 struct filter_export_data *fed;
2846 struct obd_export *exp;
2847 obd_size maxsize = obd->obd_osfs.os_blocks * obd->obd_osfs.os_bsize;
2848 obd_size tot_dirty = 0, tot_pending = 0, tot_granted = 0;
2849 obd_size fo_tot_dirty, fo_tot_pending, fo_tot_granted;
2851 if (cfs_list_empty(&obd->obd_exports))
2854 /* We don't want to do this for large machines that do lots of
2855 mounts or unmounts. It burns... */
2856 if (obd->obd_num_exports > 100)
2859 cfs_spin_lock(&obd->obd_osfs_lock);
2860 cfs_spin_lock(&obd->obd_dev_lock);
2861 cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
2863 fed = &exp->exp_filter_data;
2864 if (fed->fed_grant < 0 || fed->fed_pending < 0 ||
2867 if (maxsize > 0) { /* we may not have done a statfs yet */
2868 LASSERTF(fed->fed_grant + fed->fed_pending <= maxsize,
2869 "%s: cli %s/%p %ld+%ld > "LPU64"\n", func,
2870 exp->exp_client_uuid.uuid, exp,
2871 fed->fed_grant, fed->fed_pending, maxsize);
2872 LASSERTF(fed->fed_dirty <= maxsize,
2873 "%s: cli %s/%p %ld > "LPU64"\n", func,
2874 exp->exp_client_uuid.uuid, exp,
2875 fed->fed_dirty, maxsize);
2878 CERROR("%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
2879 obd->obd_name, exp->exp_client_uuid.uuid, exp,
2880 fed->fed_dirty, fed->fed_pending,fed->fed_grant);
2882 CDEBUG(D_CACHE, "%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
2883 obd->obd_name, exp->exp_client_uuid.uuid, exp,
2884 fed->fed_dirty, fed->fed_pending,fed->fed_grant);
2885 tot_granted += fed->fed_grant + fed->fed_pending;
2886 tot_pending += fed->fed_pending;
2887 tot_dirty += fed->fed_dirty;
             /* snapshot the filter-wide totals under the same locks */
2889 fo_tot_granted = obd->u.filter.fo_tot_granted;
2890 fo_tot_pending = obd->u.filter.fo_tot_pending;
2891 fo_tot_dirty = obd->u.filter.fo_tot_dirty;
2892 cfs_spin_unlock(&obd->obd_dev_lock);
2893 cfs_spin_unlock(&obd->obd_osfs_lock);
2895 /* Do these assertions outside the spinlocks so we don't kill system */
2896 if (tot_granted != fo_tot_granted)
2897 CERROR("%s: tot_granted "LPU64" != fo_tot_granted "LPU64"\n",
2898 func, tot_granted, fo_tot_granted);
2899 if (tot_pending != fo_tot_pending)
2900 CERROR("%s: tot_pending "LPU64" != fo_tot_pending "LPU64"\n",
2901 func, tot_pending, fo_tot_pending);
2902 if (tot_dirty != fo_tot_dirty)
2903 CERROR("%s: tot_dirty "LPU64" != fo_tot_dirty "LPU64"\n",
2904 func, tot_dirty, fo_tot_dirty);
2905 if (tot_pending > tot_granted)
2906 CERROR("%s: tot_pending "LPU64" > tot_granted "LPU64"\n",
2907 func, tot_pending, tot_granted);
2908 if (tot_granted > maxsize)
2909 CERROR("%s: tot_granted "LPU64" > maxsize "LPU64"\n",
2910 func, tot_granted, maxsize);
2911 if (tot_dirty > maxsize)
2912 CERROR("%s: tot_dirty "LPU64" > maxsize "LPU64"\n",
2913 func, tot_dirty, maxsize);
2916 /* Remove this client from the grant accounting totals. We also remove
2917 * the export from the obd device under the osfs and dev locks to ensure
2918 * that the filter_grant_sanity_check() calculations are always valid.
2919 * The client should do something similar when it invalidates its import. */
2920 static void filter_grant_discard(struct obd_export *exp)
2922 struct obd_device *obd = exp->exp_obd;
2923 struct filter_obd *filter = &obd->u.filter;
2924 struct filter_export_data *fed = &exp->exp_filter_data;
2926 cfs_spin_lock(&obd->obd_osfs_lock);
             /* each subtraction is guarded by an LASSERTF so underflow of the
              * unsigned totals is caught immediately */
2927 LASSERTF(filter->fo_tot_granted >= fed->fed_grant,
2928 "%s: tot_granted "LPU64" cli %s/%p fed_grant %ld\n",
2929 obd->obd_name, filter->fo_tot_granted,
2930 exp->exp_client_uuid.uuid, exp, fed->fed_grant);
2931 filter->fo_tot_granted -= fed->fed_grant;
2932 LASSERTF(filter->fo_tot_pending >= fed->fed_pending,
2933 "%s: tot_pending "LPU64" cli %s/%p fed_pending %ld\n",
2934 obd->obd_name, filter->fo_tot_pending,
2935 exp->exp_client_uuid.uuid, exp, fed->fed_pending);
2936 /* fo_tot_pending is handled in filter_grant_commit as bulk finishes */
2937 LASSERTF(filter->fo_tot_dirty >= fed->fed_dirty,
2938 "%s: tot_dirty "LPU64" cli %s/%p fed_dirty %ld\n",
2939 obd->obd_name, filter->fo_tot_dirty,
2940 exp->exp_client_uuid.uuid, exp, fed->fed_dirty);
2941 filter->fo_tot_dirty -= fed->fed_dirty;
2945 cfs_spin_unlock(&obd->obd_osfs_lock);
/* Final destruction of an export: release quota info, target/ldlm/lut
 * per-export state, discard its grant accounting and fmd cache, and run
 * the grant sanity check unless this is a forced teardown. */
2948 static int filter_destroy_export(struct obd_export *exp)
2950 struct filter_export_data *fed = &exp->exp_filter_data;
2953 if (fed->fed_pending)
2954 CERROR("%s: cli %s/%p has %lu pending on destroyed export\n",
2955 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid,
2956 exp, fed->fed_pending);
2958 lquota_clearinfo(filter_quota_interface_ref, exp, exp->exp_obd);
2960 target_destroy_export(exp);
2961 ldlm_destroy_export(exp);
2962 lut_client_free(exp);
             /* self-export (obd's own uuid): nothing more to do */
2964 if (obd_uuid_equals(&exp->exp_client_uuid, &exp->exp_obd->obd_uuid))
2967 if (!exp->exp_obd->obd_replayable)
2968 fsfilt_sync(exp->exp_obd, exp->exp_obd->u.obt.obt_sb);
2970 filter_grant_discard(exp);
2971 filter_fmd_cleanup(exp);
2973 if (exp->exp_connect_flags & OBD_CONNECT_GRANT_SHRINK) {
2974 struct filter_obd *filter = &exp->exp_obd->u.filter;
2975 if (filter->fo_tot_granted_clients > 0)
2976 filter->fo_tot_granted_clients --;
2979 if (!(exp->exp_flags & OBD_OPT_FORCE))
2980 filter_grant_sanity_check(exp->exp_obd, __func__);
/* Sync the replicator llogs of every group (or only those belonging to
 * @dexp when non-NULL).  Groups are visited in increasing olg_seq order
 * without holding the list lock across the sync — see the comment
 * below about livelock avoidance. */
2985 static void filter_sync_llogs(struct obd_device *obd, struct obd_export *dexp)
2987 struct obd_llog_group *olg_min, *olg;
2988 struct filter_obd *filter;
2989 int worked = -1, group;
2990 struct llog_ctxt *ctxt;
2993 filter = &obd->u.filter;
2995 /* we can't sync log holding spinlock. also, we do not want to get
2996 * into livelock. so we do following: loop over MDS's exports in
2997 * group order and skip already synced llogs -bzzz */
2999 /* look for group with min. number, but > worked */
3002 cfs_spin_lock(&filter->fo_llog_list_lock);
3003 cfs_list_for_each_entry(olg, &filter->fo_llog_list, olg_list) {
3004 if (olg->olg_seq <= worked) {
3005 /* this group is already synced */
3008 if (group < olg->olg_seq) {
3009 /* we have group with smaller number to sync */
3012 /* store current minimal group */
3014 group = olg->olg_seq;
3016 cfs_spin_unlock(&filter->fo_llog_list_lock);
3018 if (olg_min == NULL)
             /* remember the highest group synced so far */
3021 worked = olg_min->olg_seq;
3022 if (olg_min->olg_exp &&
3023 (dexp == olg_min->olg_exp || dexp == NULL)) {
3025 ctxt = llog_group_get_ctxt(olg_min,
3026 LLOG_MDS_OST_REPL_CTXT);
3028 err = llog_sync(ctxt, olg_min->olg_exp);
3029 llog_ctxt_put(ctxt);
3031 CERROR("error flushing logs to MDS: "
3036 } while (olg_min != NULL);
3039 /* Also incredibly similar to mds_disconnect */
/* Disconnect an export: sanity-check and discard its grants, flush
 * pending llog cancels, clear quota info, run the generic server
 * disconnect, drop its last_rcvd slot if appropriate, and sync. */
3040 static int filter_disconnect(struct obd_export *exp)
3042 struct obd_device *obd = exp->exp_obd;
             /* hold a ref so the export survives until we're done with it */
3047 class_export_get(exp);
3049 if (!(exp->exp_flags & OBD_OPT_FORCE))
3050 filter_grant_sanity_check(obd, __func__);
3051 filter_grant_discard(exp);
3053 /* Flush any remaining cancel messages out to the target */
3054 filter_sync_llogs(obd, exp);
3056 lquota_clearinfo(filter_quota_interface_ref, exp, exp->exp_obd);
3058 rc = server_disconnect_export(exp);
3060 /* Do not erase record for recoverable client. */
3061 if (obd->obd_replayable && (!obd->obd_fail || exp->exp_failed))
3062 filter_client_del(exp);
3064 fsfilt_sync(obd, obd->u.obt.obt_sb);
3066 class_export_put(exp);
3070 /* reverse import is changed, sync all cancels */
3071 static void filter_revimp_update(struct obd_export *exp)
3076 class_export_get(exp);
3078 /* flush any remaining cancel messages out to the target */
3079 filter_sync_llogs(exp->exp_obd, exp);
3080 class_export_put(exp);
/* OBD_PING handler: just expire stale filter-mod-data for the export. */
3084 static int filter_ping(struct obd_export *exp)
3086 filter_fmd_expire(exp);
/* Translate an ost_id (seq/id) into a dentry for the backing object.
 * @what names the caller for error messages; @quiet suppresses the
 * non-existent-object CERROR.  Returns the dentry, an ERR_PTR on
 * lookup failure, or ERR_PTR(-ENOENT) when the object has no inode. */
3090 struct dentry *__filter_oa2dentry(struct obd_device *obd, struct ost_id *ostid,
3091 const char *what, int quiet)
3093 struct dentry *dchild = NULL;
3095 dchild = filter_fid2dentry(obd, NULL, ostid->oi_seq, ostid->oi_id);
3097 if (IS_ERR(dchild)) {
3098 CERROR("%s error looking up object: "POSTID"\n",
3099 what, ostid->oi_id, ostid->oi_seq);
3103 if (dchild->d_inode == NULL) {
3105 CERROR("%s: %s on non-existent object: "POSTID" \n",
3106 obd->obd_name, what, ostid->oi_id,ostid->oi_seq);
3108 RETURN(ERR_PTR(-ENOENT));
/* obd getattr handler: verify the read capability, look up the object
 * dentry, and fill the obdo from the inode (valid bits limited to
 * FILTER_VALID_FLAGS plus the object id). */
3114 static int filter_getattr(struct obd_export *exp, struct obd_info *oinfo)
3116 struct dentry *dentry = NULL;
3117 struct obd_device *obd;
3121 rc = filter_auth_capa(exp, NULL, oinfo->oi_oa->o_seq,
3122 oinfo_capa(oinfo), CAPA_OPC_META_READ);
3126 obd = class_exp2obd(exp);
3128 CDEBUG(D_IOCTL, "invalid client export %p\n", exp);
3132 dentry = filter_oa2dentry(obd, &oinfo->oi_oa->o_oi);
3134 RETURN(PTR_ERR(dentry));
3136 /* Limit the valid bits in the return data to what we actually use */
3137 oinfo->oi_oa->o_valid = OBD_MD_FLID;
3138 obdo_from_inode(oinfo->oi_oa, dentry->d_inode, NULL, FILTER_VALID_FLAGS);
3144 /* this should be enabled/disabled in condition to enabled/disabled large
3145 * inodes (fast EAs) in backing store FS. */
/* Store the parent-FID EA ("fid" xattr) on an object's inode so the
 * MDS file the object belongs to can be reconstructed.  Only done when
 * the client supplied OBD_MD_FLFID (and the group bit, checked below). */
3146 int filter_update_fidea(struct obd_export *exp, struct inode *inode,
3147 void *handle, struct obdo *oa)
3149 struct obd_device *obd = exp->exp_obd;
3153 if (oa->o_valid & OBD_MD_FLFID) {
3154 struct filter_fid ff;
3156 if (!(oa->o_valid & OBD_MD_FLGROUP))
3158 /* packing fid and converting it to LE for storing into EA.
3159 * Here ->o_stripe_idx should be filled by LOV and rest of
3160 * fields - by client. */
3161 ff.ff_parent.f_seq = cpu_to_le64(oa->o_parent_seq);
3162 ff.ff_parent.f_oid = cpu_to_le32(oa->o_parent_oid);
3163 /* XXX: we are ignoring o_parent_ver here, since this should
3164 * be the same for all objects in this fileset. */
3165 ff.ff_parent.f_ver = cpu_to_le32(oa->o_stripe_idx);
3166 ff.ff_objid = cpu_to_le64(oa->o_id);
3167 ff.ff_seq = cpu_to_le64(oa->o_seq);
3169 CDEBUG(D_INODE, "storing filter fid EA (parent "DFID" "
3170 LPU64"/"LPU64")\n", PFID(&ff.ff_parent), oa->o_id,
3173 rc = fsfilt_set_md(obd, inode, handle, &ff, sizeof(ff), "fid");
3175 CERROR("store fid in object failed! rc: %d\n", rc);
3177 CDEBUG(D_HA, "OSS object without fid info!\n");
3183 /* this is called from filter_truncate() until we have filter_punch() */
/* Core setattr on an already-looked-up object dentry.
 *
 * Responsibilities, in order: take i_alloc_sem/i_mutex for size/owner
 * changes, VBR version check, pin the last page of a truncate, clear
 * SUID/SGID on first ownership change, start the journal transaction,
 * apply flags or attributes, register the llog cancel-cookie callback,
 * extend credits, record the transno, commit, flush a partial-page
 * truncate, and finally trigger quota adjustment. */
3184 int filter_setattr_internal(struct obd_export *exp, struct dentry *dentry,
3185 struct obdo *oa, struct obd_trans_info *oti)
3187 unsigned int orig_ids[MAXQUOTAS] = {0, 0};
3188 struct llog_cookie *fcc = NULL;
3189 struct filter_obd *filter;
3190 int rc, err, sync = 0;
3191 loff_t old_size = 0;
3192 unsigned int ia_valid;
3193 struct inode *inode;
3194 struct page *page = NULL;
3199 LASSERT(dentry != NULL);
3200 LASSERT(!IS_ERR(dentry));
3202 inode = dentry->d_inode;
3203 LASSERT(inode != NULL);
3205 filter = &exp->exp_obd->u.filter;
3206 iattr_from_obdo(&iattr, oa, oa->o_valid);
3207 ia_valid = iattr.ia_valid;
             /* copy the MDS cancel cookie; it is handed to the commit cb */
3209 if (oa->o_valid & OBD_MD_FLCOOKIE) {
3210 OBD_ALLOC(fcc, sizeof(*fcc));
3212 *fcc = oa->o_lcookie;
3214 if (ia_valid & (ATTR_SIZE | ATTR_UID | ATTR_GID)) {
3215 unsigned long now = jiffies;
3216 ll_vfs_dq_init(inode);
3217 /* Filter truncates and writes are serialized by
3218 * i_alloc_sem, see the comment in
3219 * filter_preprw_write.*/
3220 if (ia_valid & ATTR_SIZE)
3221 down_write(&inode->i_alloc_sem);
3222 LOCK_INODE_MUTEX(inode);
3223 fsfilt_check_slow(exp->exp_obd, now, "i_alloc_sem and i_mutex");
3224 old_size = i_size_read(inode);
3227 /* VBR: version recovery check */
3228 rc = filter_version_get_check(exp, oti, inode);
3230 GOTO(out_unlock, rc);
3232 /* Let's pin the last page so that ldiskfs_truncate
3233 * should not start GFP_FS allocation. */
3234 if (ia_valid & ATTR_SIZE) {
3235 page = grab_cache_page(inode->i_mapping,
3236 iattr.ia_size >> PAGE_CACHE_SHIFT);
3238 GOTO(out_unlock, rc = -ENOMEM);
3243 /* If the inode still has SUID+SGID bits set (see filter_precreate())
3244 * then we will accept the UID+GID sent by the client during write for
3245 * initializing the ownership of this inode. We only allow this to
3246 * happen once so clear these bits in setattr. In 2.6 kernels it is
3247 * possible to get ATTR_UID and ATTR_GID separately, so we only clear
3248 * the flags that are actually being set. */
3249 if (ia_valid & (ATTR_UID | ATTR_GID)) {
3250 CDEBUG(D_INODE, "update UID/GID to %lu/%lu\n",
3251 (unsigned long)oa->o_uid, (unsigned long)oa->o_gid);
3253 if ((inode->i_mode & S_ISUID) && (ia_valid & ATTR_UID)) {
3254 if (!(ia_valid & ATTR_MODE)) {
3255 iattr.ia_mode = inode->i_mode;
3256 iattr.ia_valid |= ATTR_MODE;
3258 iattr.ia_mode &= ~S_ISUID;
3260 if ((inode->i_mode & S_ISGID) && (ia_valid & ATTR_GID)) {
3261 if (!(iattr.ia_valid & ATTR_MODE)) {
3262 iattr.ia_mode = inode->i_mode;
3263 iattr.ia_valid |= ATTR_MODE;
3265 iattr.ia_mode &= ~S_ISGID;
                     /* remember old ids for the quota adjustment below */
3268 orig_ids[USRQUOTA] = inode->i_uid;
3269 orig_ids[GRPQUOTA] = inode->i_gid;
3270 handle = fsfilt_start_log(exp->exp_obd, inode,
3271 FSFILT_OP_SETATTR, oti, 1);
3273 GOTO(out_unlock, rc = PTR_ERR(handle));
3275 /* update inode EA only once when inode is suid bit marked. As
3276 * on 2.6.x UID and GID may be set separately, we check here
3277 * only one of them to avoid double setting. */
3278 if (inode->i_mode & S_ISUID)
3279 filter_update_fidea(exp, inode, handle, oa);
             /* non-ownership setattr: plain transaction, no EA logging */
3281 handle = fsfilt_start(exp->exp_obd, inode,
3282 FSFILT_OP_SETATTR, oti);
3284 GOTO(out_unlock, rc = PTR_ERR(handle));
3286 if (oa->o_valid & OBD_MD_FLFLAGS) {
3287 rc = fsfilt_iocontrol(exp->exp_obd, dentry,
3288 FSFILT_IOC_SETFLAGS, (long)&oa->o_flags);
3290 rc = fsfilt_setattr(exp->exp_obd, dentry, handle, &iattr, 1);
3292 /* set cancel cookie callback function */
3293 sync = fsfilt_add_journal_cb(exp->exp_obd, 0, handle,
3294 filter_cancel_cookies_cb,
3298 if (OBD_FAIL_CHECK(OBD_FAIL_OST_SETATTR_CREDITS))
3299 fsfilt_extend(exp->exp_obd, inode, 0, handle);
3301 /* The truncate might have used up our transaction credits. Make sure
3302 * we have two left for the last_rcvd and VBR inode version updates. */
3303 err = fsfilt_extend(exp->exp_obd, inode, 2, handle);
3305 /* Update inode version only if data has changed => size has changed */
3306 rc = filter_finish_transno(exp, ia_valid & ATTR_SIZE ? inode : NULL,
             /* sync commit: cancel the cookie immediately ourselves */
3310 filter_cancel_cookies_cb(exp->exp_obd, 0, fcc, rc);
3314 err = fsfilt_commit(exp->exp_obd, inode, handle, 0);
3316 CERROR("error on commit, err = %d\n", err);
3323 /* For a partial-page truncate flush the page to disk immediately
3324 * to avoid data corruption during direct disk write. b=17397 */
3325 if (!sync && (iattr.ia_valid & ATTR_SIZE) &&
3326 old_size != iattr.ia_size && (iattr.ia_size & ~CFS_PAGE_MASK)) {
3327 err = filemap_fdatawrite_range(inode->i_mapping, iattr.ia_size,
             /* release the pinned truncate page */
3337 page_cache_release(page);
3339 if (ia_valid & (ATTR_SIZE | ATTR_UID | ATTR_GID))
3340 UNLOCK_INODE_MUTEX(inode);
3341 if (ia_valid & ATTR_SIZE)
3342 up_write(&inode->i_alloc_sem);
3344 OBD_FREE(fcc, sizeof(*fcc));
3346 /* trigger quota release */
3347 if (ia_valid & (ATTR_SIZE | ATTR_UID | ATTR_GID)) {
3348 unsigned int cur_ids[MAXQUOTAS] = {oa->o_uid, oa->o_gid};
3349 int rc2 = lquota_adjust(filter_quota_interface_ref,
3350 exp->exp_obd, cur_ids,
3351 orig_ids, rc, FSFILT_OP_SETATTR);
3352 CDEBUG(rc2 ? D_ERROR : D_QUOTA,
3353 "filter adjust qunit. (rc:%d)\n", rc2);
3358 /* this is called from filter_truncate() until we have filter_punch() */
/* Public setattr entry point: capability checks, refusal of implicit
 * truncates (bug 12203), fmd xid bookkeeping for timestamp updates,
 * delegation of the real work to filter_setattr_internal(), then LVB
 * update on the object's LDLM resource and quota-relevant obdo fill. */
3359 int filter_setattr(struct obd_export *exp, struct obd_info *oinfo,
3360 struct obd_trans_info *oti)
3362 struct obdo *oa = oinfo->oi_oa;
3363 struct lustre_capa *capa = oinfo_capa(oinfo);
3364 struct ldlm_res_id res_id;
3365 struct filter_mod_data *fmd;
3366 struct lvfs_run_ctxt saved;
3367 struct filter_obd *filter;
3368 struct ldlm_resource *res;
3369 struct dentry *dentry;
3370 __u64 opc = CAPA_OPC_META_WRITE;
3374 if (oa->o_valid & OBD_FL_TRUNC)
3375 opc |= CAPA_OPC_OSS_TRUNC;
3377 rc = filter_auth_capa(exp, NULL, oa->o_seq, capa, opc);
3381 if (oa->o_valid & (OBD_MD_FLUID | OBD_MD_FLGID)) {
3382 rc = filter_capa_fixoa(exp, oa, oa->o_seq, capa);
3387 osc_build_res_name(oa->o_id, oa->o_seq, &res_id);
3388 /* This would be very bad - accidentally truncating a file when
3389 * changing the time or similar - bug 12203. */
3390 if (oa->o_valid & OBD_MD_FLSIZE &&
3391 oinfo->oi_policy.l_extent.end != OBD_OBJECT_EOF) {
3392 static char mdsinum[48];
3394 if (oa->o_valid & OBD_MD_FLFID)
3395 snprintf(mdsinum, sizeof(mdsinum) - 1, " of inode "DFID,
3396 oa->o_parent_seq, oa->o_parent_oid,
3401 CERROR("%s: setattr from %s trying to truncate objid "POSTID
3402 "%s\n", exp->exp_obd->obd_name, obd_export_nid2str(exp),
3403 oa->o_id, oa->o_seq, mdsinum);
3407 dentry = __filter_oa2dentry(exp->exp_obd, &oinfo->oi_oa->o_oi, __func__, 1);
3409 RETURN(PTR_ERR(dentry));
3411 filter = &exp->exp_obd->u.filter;
3412 push_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
             /*
3415 * We need to be atomic against a concurrent write
3416 * (which takes the semaphore for reading). fmd_mactime_xid
3417 * checks will have no effect if a write request with lower
3418 * xid starts just before a setattr and finishes later than
3419 * the setattr (see bug 21489, comment 27).
              */
3422 (OBD_MD_FLMTIME | OBD_MD_FLATIME | OBD_MD_FLCTIME)) {
3423 unsigned long now = jiffies;
3424 down_write(&dentry->d_inode->i_alloc_sem);
3425 fsfilt_check_slow(exp->exp_obd, now, "i_alloc_sem");
3426 fmd = filter_fmd_get(exp, oa->o_id, oa->o_seq);
3427 if (fmd && fmd->fmd_mactime_xid < oti->oti_xid)
3428 fmd->fmd_mactime_xid = oti->oti_xid;
3429 filter_fmd_put(exp, fmd);
3430 up_write(&dentry->d_inode->i_alloc_sem);
3433 /* setting objects attributes (including owner/group) */
3434 rc = filter_setattr_internal(exp, dentry, oa, oti);
3436 GOTO(out_unlock, rc);
             /* refresh the LVB (size/times) cached on the ldlm resource */
3438 res = ldlm_resource_get(exp->exp_obd->obd_namespace, NULL,
3439 &res_id, LDLM_EXTENT, 0);
3442 LDLM_RESOURCE_ADDREF(res);
3443 rc = ldlm_res_lvbo_update(res, NULL, 0);
3444 LDLM_RESOURCE_DELREF(res);
3445 ldlm_resource_putref(res);
3448 oa->o_valid = OBD_MD_FLID;
3450 /* Quota release need uid/gid info */
3451 obdo_from_inode(oa, dentry->d_inode, NULL,
3452 FILTER_VALID_FLAGS | OBD_MD_FLUID | OBD_MD_FLGID);
3457 pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
/*
 * filter_unpackmd(): unpack an on-wire lov_mds_md into an in-memory
 * single-stripe lov_stripe_md.  Validates the wire size and object id,
 * frees the lsm when called with (*lsmp != NULL, lmm == NULL), otherwise
 * allocates a one-stripe lsm and copies the object id in.
 *
 * NOTE(review): this listing is elided (gaps in the embedded line numbers);
 * error returns and closing braces are not visible here — confirm against
 * the full source before editing.
 */
3461 /* XXX identical to osc_unpackmd */
3462 static int filter_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
3463 struct lov_mds_md *lmm, int lmm_bytes)
/* wire buffer must at least hold the fixed lov_mds_md header */
3469 if (lmm_bytes < sizeof (*lmm)) {
3470 CERROR("lov_mds_md too small: %d, need %d\n",
3471 lmm_bytes, (int)sizeof(*lmm));
3474 /* XXX LOV_MAGIC etc check? */
/* a zero object id on the wire is never valid */
3476 if (lmm->lmm_object_id == cpu_to_le64(0)) {
3477 CERROR("lov_mds_md: zero lmm_object_id\n");
/* OSTs only ever see single-stripe metadata */
3482 lsm_size = lov_stripe_md_size(1);
/* free path: caller passes an existing lsm and a NULL wire buffer */
3486 if (*lsmp != NULL && lmm == NULL) {
3487 OBD_FREE((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
3488 OBD_FREE(*lsmp, lsm_size);
/* allocate path: build a fresh lsm plus its single lov_oinfo */
3493 if (*lsmp == NULL) {
3494 OBD_ALLOC(*lsmp, lsm_size);
3498 OBD_ALLOC((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
3499 if ((*lsmp)->lsm_oinfo[0] == NULL) {
3500 OBD_FREE(*lsmp, lsm_size);
3503 loi_init((*lsmp)->lsm_oinfo[0]);
3507 /* XXX zero *lsmp? */
3508 (*lsmp)->lsm_object_id = le64_to_cpu (lmm->lmm_object_id);
3509 LASSERT((*lsmp)->lsm_object_id);
3512 (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
/*
 * filter_destroy_precreated(): delete precreated objects above oa->o_id
 * (orphan cleanup after an MDS restart).  Walks ids downward from the
 * current last_id, destroying each object, and periodically (every 512
 * ids) persists the shrinking last_id so a crash mid-cleanup does not
 * force a full rescan.  With OBD_CONNECT_SKIP_ORPHAN the last_id is
 * reported back instead of reset, so orphan ids are never reused.
 *
 * NOTE(review): elided listing — declarations of last/id/rc/skip_orphan
 * and several braces are not visible here.
 */
3517 /* caller must hold fo_create_locks[oa->o_seq] */
3518 static int filter_destroy_precreated(struct obd_export *exp, struct obdo *oa,
3519 struct filter_obd *filter)
3521 struct obdo doa = { 0 }; /* XXX obdo on stack */
/* assert the create lock for this sequence is already held by the caller */
3527 LASSERT(down_trylock(&filter->fo_create_locks[oa->o_seq]) != 0);
3529 memset(&doa, 0, sizeof(doa));
3531 doa.o_valid |= OBD_MD_FLGROUP;
3532 doa.o_seq = oa->o_seq;
3533 doa.o_mode = S_IFREG;
/* destroys_in_progress must have been set by filter_handle_precreate */
3535 if (!cfs_test_bit(doa.o_seq, &filter->fo_destroys_in_progress)) {
3536 CERROR("%s:["LPU64"] destroys_in_progress already cleared\n",
3537 exp->exp_obd->obd_name, doa.o_seq);
3541 last = filter_last_id(filter, doa.o_seq);
3543 skip_orphan = !!(exp->exp_connect_flags & OBD_CONNECT_SKIP_ORPHAN);
3545 CDEBUG(D_HA, "%s: deleting orphan objects from "LPU64" to "LPU64"%s\n",
3546 exp->exp_obd->obd_name, oa->o_id + 1, last,
3547 skip_orphan ? ", orphan objids won't be reused any more." : ".");
/* destroy from the newest precreated object down to oa->o_id + 1 */
3549 for (id = last; id > oa->o_id; id--) {
3551 rc = filter_destroy(exp, &doa, NULL, NULL, NULL, NULL);
3552 if (rc && rc != -ENOENT) /* this is pretty fatal... */
3553 CEMERG("error destroying precreate objid "LPU64": %d\n",
3556 /* update last_id on disk periodically so that if we restart
3557 * we don't need to re-scan all of the just-deleted objects. */
3558 if ((id & 511) == 0 && !skip_orphan) {
3559 filter_set_last_id(filter, id - 1, doa.o_seq);
3560 filter_update_last_objid(exp->exp_obd, doa.o_seq, 0);
3564 CDEBUG(D_HA, "%s: after destroy: set last_objids["LPU64"] = "LPU64"\n",
3565 exp->exp_obd->obd_name, doa.o_seq, oa->o_id);
/* final last_id update, synced to disk (last arg 1) */
3568 filter_set_last_id(filter, id, doa.o_seq);
3569 rc = filter_update_last_objid(exp->exp_obd, doa.o_seq, 1);
3572 * We have destroyed orphan objects, but don't want to reuse
3573 * them. Therefore we don't reset last_id to the last created
3574 * objects. Instead, we report back to the MDS the object id
3575 * of the last orphan, so that the MDS can restart allocating
3576 * objects from this id + 1 and thus skip the whole orphan
3582 cfs_clear_bit(doa.o_seq, &filter->fo_destroys_in_progress);
/*
 * filter_handle_precreate(): entry point for OST_CREATE precreate /
 * orphan-cleanup requests from the MDS.  Two modes, selected by
 * OBD_FL_DELORPHAN in oa->o_flags:
 *  - orphan cleanup: abort in-flight precreates via the
 *    fo_destroys_in_progress bit, take the per-group create lock, and
 *    destroy objects above oa->o_id;
 *  - precreate: take the create lock and create (oa->o_id - last_id)
 *    new objects via filter_precreate().
 * Stale requests (oti_conn_cnt older than the export's current
 * connection count) are dropped in both modes.
 *
 * NOTE(review): elided listing — rc/diff declarations, RETURNs and
 * several braces are not visible here.
 */
3587 static int filter_precreate(struct obd_device *obd, struct obdo *oa,
3588 obd_seq group, int *num);
3589 /* returns a negative error or a nonnegative number of files to create */
3590 static int filter_handle_precreate(struct obd_export *exp, struct obdo *oa,
3591 obd_seq group, struct obd_trans_info *oti)
3593 struct obd_device *obd = exp->exp_obd;
3594 struct filter_obd *filter = &obd->u.filter;
3598 /* delete orphans request */
3599 if ((oa->o_valid & OBD_MD_FLFLAGS) && (oa->o_flags & OBD_FL_DELORPHAN)){
3600 obd_id last = filter_last_id(filter, group);
/* request predates the current connection — a newer cleanup is coming */
3602 if (oti->oti_conn_cnt < exp->exp_conn_cnt) {
3603 CERROR("%s: dropping old orphan cleanup request\n",
3607 /* This causes inflight precreates to abort and drop lock */
3608 cfs_set_bit(group, &filter->fo_destroys_in_progress);
3609 cfs_down(&filter->fo_create_locks[group]);
3610 if (!cfs_test_bit(group, &filter->fo_destroys_in_progress)) {
3611 CERROR("%s:["LPU64"] destroys_in_progress already cleared\n",
3612 exp->exp_obd->obd_name, group);
3613 cfs_up(&filter->fo_create_locks[group]);
3616 diff = oa->o_id - last;
3617 CDEBUG(D_HA, "filter_last_id() = "LPU64" -> diff = %d\n",
/* an MDS asking to destroy more than a full precreate window is bogus */
3620 if (-diff > OST_MAX_PRECREATE) {
3621 CERROR("%s: ignoring bogus orphan destroy request: "
3622 "obdid "LPU64" last_id "LPU64"\n", obd->obd_name,
3624 /* FIXME: should reset precreate_next_id on MDS */
3625 GOTO(out, rc = -EINVAL);
3628 rc = filter_destroy_precreated(exp, oa, filter);
3630 CERROR("%s: unable to write lastobjid, but "
3631 "orphans were deleted\n", obd->obd_name);
3634 /* XXX: Used by MDS for the first time! */
3635 cfs_clear_bit(group, &filter->fo_destroys_in_progress);
/* precreate path: serialize against other creators in this group */
3638 cfs_down(&filter->fo_create_locks[group]);
3639 if (oti->oti_conn_cnt < exp->exp_conn_cnt) {
3640 CERROR("%s: dropping old precreate request\n",
3644 /* only precreate if group == 0 and o_id is specfied */
3645 if (!fid_seq_is_mdt(group) || oa->o_id == 0)
3648 diff = oa->o_id - filter_last_id(filter, group);
3649 CDEBUG(D_RPCTRACE, "filter_last_id() = "LPU64" -> diff = %d\n",
3650 filter_last_id(filter, group), diff);
3652 LASSERTF(diff >= 0,"%s: "LPU64" - "LPU64" = %d\n",obd->obd_name,
3653 oa->o_id, filter_last_id(filter, group), diff);
/* report back the id actually reached, not the id requested */
3657 oa->o_id = filter_last_id(&obd->u.filter, group);
3658 rc = filter_precreate(obd, oa, group, &diff);
3659 oa->o_id = filter_last_id(&obd->u.filter, group);
3661 oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
3664 /* else diff == 0 */
3667 cfs_up(&filter->fo_create_locks[group]);
/*
 * filter_statfs(): report filesystem usage for this OST.  Refreshes the
 * cached obd_osfs under obd_osfs_lock, then shrinks os_bavail by space
 * already promised to clients (llog grant + tot_dirty + tot_pending) so
 * the MDS does not over-allocate.  Also honours the OBD_FAIL_OST_ENOSPC
 * fault-injection point and reports read-only / RAID-degraded state.
 *
 * NOTE(review): elided listing — rc declaration, ENTRY/RETURN and the
 * closing brace are not visible here.
 */
3671 static int filter_statfs(struct obd_device *obd, struct obd_statfs *osfs,
3672 __u64 max_age, __u32 flags)
3674 struct filter_obd *filter = &obd->u.filter;
3675 int blockbits = obd->u.obt.obt_sb->s_blocksize_bits;
3679 /* at least try to account for cached pages. its still racey and
3680 * might be under-reporting if clients haven't announced their
3681 * caches with brw recently */
3682 cfs_spin_lock(&obd->obd_osfs_lock);
3683 rc = fsfilt_statfs(obd, obd->u.obt.obt_sb, max_age);
3684 memcpy(osfs, &obd->obd_osfs, sizeof(*osfs));
3685 cfs_spin_unlock(&obd->obd_osfs_lock);
3687 CDEBUG(D_SUPER | D_CACHE, "blocks cached "LPU64" granted "LPU64
3688 " pending "LPU64" free "LPU64" avail "LPU64"\n",
3689 filter->fo_tot_dirty, filter->fo_tot_granted,
3690 filter->fo_tot_pending,
3691 osfs->os_bfree << blockbits, osfs->os_bavail << blockbits);
3693 filter_grant_sanity_check(obd, __func__);
/* subtract space already committed via grants, clamped at zero */
3695 osfs->os_bavail -= min(osfs->os_bavail, GRANT_FOR_LLOG(obd) +
3696 ((filter->fo_tot_dirty + filter->fo_tot_pending +
3697 osfs->os_bsize - 1) >> blockbits));
/* fault injection: pretend this OST (or all, with val == -1) is full */
3699 if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOSPC)) {
3700 struct lr_server_data *lsd = class_server_data(obd);
3701 int index = le32_to_cpu(lsd->lsd_ost_index);
3703 if (obd_fail_val == -1 ||
3704 index == obd_fail_val)
3705 osfs->os_bfree = osfs->os_bavail = 2;
3706 else if (obd_fail_loc & OBD_FAIL_ONCE)
3707 obd_fail_loc &= ~OBD_FAILED; /* reset flag */
3710 /* set EROFS to state field if FS is mounted as RDONLY. The goal is to
3711 * stop creating files on MDS if OST is not good shape to create
3715 if (filter->fo_obt.obt_sb->s_flags & MS_RDONLY)
3716 osfs->os_state = OS_STATE_READONLY;
3718 if (filter->fo_raid_degraded)
3719 osfs->os_state |= OS_STATE_DEGRADED;
/*
 * filter_use_existing_obj(): reuse an already-existing (empty) object
 * found during precreate instead of failing.  If the object lacks the
 * SUID|SGID marker (which flags "accept UID/GID on first write"), start
 * a setattr transaction and restore mode S_ISUID|S_ISGID|0666.
 *
 * NOTE(review): elided listing — iattr declaration, the early-return
 * when the marker bits are already set, and the commit/return tail are
 * not visible here.
 */
3723 static int filter_use_existing_obj(struct obd_device *obd,
3724 struct dentry *dchild, void **handle,
3727 struct inode *inode = dchild->d_inode;
/* both marker bits present: object is already in precreated state */
3731 if ((inode->i_mode & (S_ISUID | S_ISGID)) == (S_ISUID|S_ISGID))
3734 *handle = fsfilt_start_log(obd, inode, FSFILT_OP_SETATTR, NULL, 1);
3735 if (IS_ERR(*handle))
3736 return PTR_ERR(*handle);
3738 iattr.ia_valid = ATTR_MODE;
3739 iattr.ia_mode = S_ISUID | S_ISGID |0666;
3740 rc = fsfilt_setattr(obd, dchild, *handle, &iattr, 1);
/*
 * filter_calc_free_inodes(): return the current free-inode count from a
 * fresh statfs (1-second cache window), or (__u64)-1 if statfs fails.
 * Used for diagnostics when object creation hits -ENOSPC.
 */
3747 static __u64 filter_calc_free_inodes(struct obd_device *obd)
3750 __u64 os_ffree = -1;
3752 cfs_spin_lock(&obd->obd_osfs_lock);
3753 rc = fsfilt_statfs(obd, obd->u.obt.obt_sb, cfs_time_shift_64(1));
3755 os_ffree = obd->obd_osfs.os_ffree;
3756 cfs_spin_unlock(&obd->obd_osfs_lock);
/*
 * filter_precreate(): create up to *num objects in the given sequence,
 * starting at last_id + 1 (or at oa->o_id when recreating a missing
 * object).  For each object: take the parent dir lock, look up the
 * child dentry, start a create transaction, ll_vfs_create() the object
 * with the SUID|SGID|0666 marker mode, persist the new last_id, then
 * unwind via the cleanup_phase switch.  Aborts early if orphan
 * destruction starts, if free space is too low, or if the id space for
 * the sequence (IDIF/OBIF) is exhausted.  Stops before *num if the loop
 * has already taken more than DISK_TIMEOUT/2.
 *
 * NOTE(review): heavily elided listing — next_id/last_id/os_ffree
 * declarations, several GOTO/cleanup branches and closing braces are
 * not visible here.
 */
3761 /* We rely on the fact that only one thread will be creating files in a given
3762 * group at a time, which is why we don't need an atomic filter_get_new_id.
3763 * Even if we had that atomic function, the following race would exist:
3765 * thread 1: gets id x from filter_next_id
3766 * thread 2: gets id (x + 1) from filter_next_id
3767 * thread 2: creates object (x + 1)
3768 * thread 1: tries to create object x, gets -ENOSPC
3770 * Caller must hold fo_create_locks[group]
3772 static int filter_precreate(struct obd_device *obd, struct obdo *oa,
3773 obd_seq group, int *num)
3775 struct dentry *dchild = NULL, *dparent = NULL;
3776 struct filter_obd *filter;
3777 struct obd_statfs *osfs;
3778 int err = 0, rc = 0, recreate_obj = 0, i;
3779 cfs_time_t enough_time = cfs_time_shift(DISK_TIMEOUT/2);
3782 void *handle = NULL;
3785 filter = &obd->u.filter;
/* caller must already hold the per-group create lock */
3787 LASSERT(down_trylock(&filter->fo_create_locks[group]) != 0);
3789 OBD_FAIL_TIMEOUT(OBD_FAIL_TGT_DELAY_PRECREATE, obd_timeout / 2);
3791 if ((oa->o_valid & OBD_MD_FLFLAGS) &&
3792 (oa->o_flags & OBD_FL_RECREATE_OBJS)) {
/* normal precreate: refuse if less than ~0.1% of blocks are free */
3795 OBD_ALLOC(osfs, sizeof(*osfs));
3798 rc = filter_statfs(obd, osfs,
3799 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
3801 if (rc == 0 && osfs->os_bavail < (osfs->os_blocks >> 10)) {
3802 CDEBUG(D_RPCTRACE,"%s: not enough space for create "
3803 LPU64"\n", obd->obd_name, osfs->os_bavail <<
3804 obd->u.obt.obt_vfsmnt->mnt_sb->s_blocksize_bits);
3808 OBD_FREE(osfs, sizeof(*osfs));
3813 CDEBUG(D_RPCTRACE, "%s: precreating %d objects in group "LPU64
3814 " at "LPU64"\n", obd->obd_name, *num, group, oa->o_id);
3816 for (i = 0; i < *num && err == 0; i++) {
3817 int cleanup_phase = 0;
/* orphan cleanup has started: abandon the remaining creates */
3819 if (cfs_test_bit(group, &filter->fo_destroys_in_progress)) {
3820 CWARN("%s: create aborted by destroy\n",
/* recreate mode: the requested id must not exceed last_id */
3829 last_id = filter_last_id(filter, group);
3830 if (next_id > last_id) {
3831 CERROR("Error: Trying to recreate obj greater"
3832 "than last id "LPD64" > "LPD64"\n",
3834 GOTO(cleanup, rc = -EINVAL);
3837 next_id = filter_last_id(filter, group) + 1;
3839 /* Don't create objects beyond the valid range for this SEQ */
3840 if (unlikely(fid_seq_is_mdt0(group) &&
3841 next_id >= IDIF_MAX_OID)) {
3842 CERROR("%s:"POSTID" hit the IDIF_MAX_OID (1<<48)!\n",
3843 obd->obd_name, next_id, group);
3844 GOTO(cleanup, rc = -ENOSPC);
3845 } else if (unlikely(!fid_seq_is_mdt0(group) &&
3846 next_id >= OBIF_MAX_OID)) {
3847 CERROR("%s:"POSTID" hit the OBIF_MAX_OID (1<<32)!\n",
3848 obd->obd_name, next_id, group);
3849 GOTO(cleanup, rc = -ENOSPC);
3852 dparent = filter_parent_lock(obd, group, next_id);
3853 if (IS_ERR(dparent))
3854 GOTO(cleanup, rc = PTR_ERR(dparent));
3855 cleanup_phase = 1; /* filter_parent_unlock(dparent) */
3857 dchild = filter_fid2dentry(obd, dparent, group, next_id);
3859 GOTO(cleanup, rc = PTR_ERR(dchild));
3860 cleanup_phase = 2; /* f_dput(dchild) */
3862 if (dchild->d_inode != NULL) {
3863 /* This would only happen if lastobjid was bad on disk*/
3864 /* Could also happen if recreating missing obj but it
3865 * already exists. */
3867 CERROR("%s: recreating existing object %.*s?\n",
3868 obd->obd_name, dchild->d_name.len,
3869 dchild->d_name.name);
3871 /* Use these existing objects if they are
3873 if (dchild->d_inode->i_size == 0) {
3874 rc = filter_use_existing_obj(obd,dchild,
3875 &handle, &cleanup_phase);
3882 CERROR("%s: Serious error: objid %.*s already "
3883 "exists; is this filesystem corrupt?\n",
3884 obd->obd_name, dchild->d_name.len,
3885 dchild->d_name.name);
3888 GOTO(cleanup, rc = -EEXIST);
3891 handle = fsfilt_start_log(obd, dparent->d_inode,
3892 FSFILT_OP_CREATE, NULL, 1);
3894 GOTO(cleanup, rc = PTR_ERR(handle));
3897 CDEBUG(D_INODE, "%s: filter_precreate(od->o_seq="LPU64
3898 ",od->o_id="LPU64")\n", obd->obd_name, group,
3901 /* We mark object SUID+SGID to flag it for accepting UID+GID
3902 * from client on first write. Currently the permission bits
3903 * on the OST are never used, so this is OK. */
3904 rc = ll_vfs_create(dparent->d_inode, dchild,
3905 S_IFREG | S_ISUID | S_ISGID | 0666, NULL);
3907 CERROR("create failed rc = %d\n", rc);
3908 if (rc == -ENOSPC) {
3909 os_ffree = filter_calc_free_inodes(obd);
3911 CERROR("%s: free inode "LPU64"\n",
3912 obd->obd_name, os_ffree);
3917 if (dchild->d_inode)
3918 CDEBUG(D_INFO, "objid "LPU64" got inum %lu\n", next_id,
3919 dchild->d_inode->i_ino);
/* advance last_id only for fresh precreates, not recreates */
3922 if (!recreate_obj) {
3923 filter_set_last_id(filter, next_id, group);
3924 err = filter_update_last_objid(obd, group, 0);
3926 CERROR("unable to write lastobjid "
3927 "but file created\n");
3931 switch(cleanup_phase) {
3933 err = fsfilt_commit(obd, dparent->d_inode, handle, 0);
3935 CERROR("error on commit, err = %d\n", err);
3942 filter_parent_unlock(dparent);
/* bail out early if precreation is taking too long overall */
3949 if (cfs_time_after(jiffies, enough_time)) {
3952 "%s: precreate slow - want %d got %d \n",
3953 obd->obd_name, *num, i);
3960 "%s: created %d objects for group "POSTID" rc %d\n",
3961 obd->obd_name, i, filter->fo_last_objids[group], group, rc);
/*
 * filter_create(): obd o_create entry point.  Verifies the export is
 * using its assigned object group, allocates a stripe md if the caller
 * asked for one, then either recreates missing objects (during recovery,
 * with OBD_FL_RECREATE_OBJS) or delegates to filter_handle_precreate().
 * On success with ea != NULL, fills in the lsm's object id for the
 * caller.
 *
 * NOTE(review): elided listing — rc/diff declarations, ENTRY/RETURN and
 * several braces are not visible here.
 */
3966 int filter_create(struct obd_export *exp, struct obdo *oa,
3967 struct lov_stripe_md **ea, struct obd_trans_info *oti)
3969 struct obd_device *obd = exp->exp_obd;
3970 struct filter_export_data *fed;
3971 struct filter_obd *filter;
3972 struct lvfs_run_ctxt saved;
3973 struct lov_stripe_md *lsm = NULL;
3977 CDEBUG(D_INODE, "%s: filter_create(group="LPU64",id="
3978 LPU64")\n", obd->obd_name, oa->o_seq, oa->o_id);
3980 fed = &exp->exp_filter_data;
3981 filter = &obd->u.filter;
/* each export is bound to one object group; a mismatch is an MDS bug */
3983 if (fed->fed_group != oa->o_seq) {
3984 CERROR("%s: this export (nid %s) used object group %d "
3985 "earlier; now it's trying to use group "LPU64"!"
3986 " This could be a bug in the MDS. Please report to "
3987 "http://bugzilla.lustre.org/\n", obd->obd_name,
3988 obd_export_nid2str(exp), fed->fed_group, oa->o_seq);
3995 rc = obd_alloc_memmd(exp, &lsm);
4002 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
/* recreate path: only valid during recovery and below last_id */
4004 if ((oa->o_valid & OBD_MD_FLFLAGS) &&
4005 (oa->o_flags & OBD_FL_RECREATE_OBJS)) {
4006 if (!obd->obd_recovering ||
4007 oa->o_id > filter_last_id(filter, oa->o_seq)) {
4008 CERROR("recreate objid "LPU64" > last id "LPU64"\n",
4009 oa->o_id, filter_last_id(filter, oa->o_seq));
4013 cfs_down(&filter->fo_create_locks[oa->o_seq]);
4014 rc = filter_precreate(obd, oa, oa->o_seq, &diff);
4015 cfs_up(&filter->fo_create_locks[oa->o_seq]);
4018 rc = filter_handle_precreate(exp, oa, oa->o_seq, oti);
4021 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
/* on failure free the lsm we allocated (unless caller already owns it) */
4022 if (rc && ea != NULL && *ea != lsm) {
4023 obd_free_memmd(exp, &lsm);
4024 } else if (rc == 0 && ea != NULL) {
4025 /* XXX LOV STACKING: the lsm that is passed to us from
4026 * LOV does not have valid lsm_oinfo data structs, so
4027 * don't go touching that. This needs to be fixed in a
4029 lsm->lsm_object_id = oa->o_id;
/*
 * filter_destroy(): obd o_destroy entry point — unlink one object.
 * Sequence: capability check, fid2dentry lookup (cancelling the llog
 * cookie immediately if the object is already gone), DLM prepare,
 * truncate-to-zero in its own transaction (to bound transaction size and
 * avoid the i_zombie deadlock of bug 4180), then lock the parent and
 * unlink in a second transaction, registering a journal callback to
 * cancel the unlink llog cookie on commit.  Finishes with quota
 * adjustment for the freed uid/gid.
 *
 * Lock ordering on the truncate path: i_alloc_sem (write) before
 * i_mutex, per bug 20321, to keep writers out while truncating.
 *
 * NOTE(review): heavily elided listing — obd/iattr/now declarations,
 * several GOTO branches and case labels in the cleanup switch are not
 * visible here.
 */
4036 int filter_destroy(struct obd_export *exp, struct obdo *oa,
4037 struct lov_stripe_md *md, struct obd_trans_info *oti,
4038 struct obd_export *md_exp, void *capa)
4040 unsigned int qcids[MAXQUOTAS] = {0, 0};
4041 struct obd_device *obd;
4042 struct filter_obd *filter;
4043 struct dentry *dchild = NULL, *dparent = NULL;
4044 struct lustre_handle lockh = { 0 };
4045 struct lvfs_run_ctxt saved;
4046 void *handle = NULL;
4047 struct llog_cookie *fcc = NULL;
4048 int rc, rc2, cleanup_phase = 0, sync = 0;
4053 rc = filter_auth_capa(exp, NULL, oa->o_seq,
4054 (struct lustre_capa *)capa, CAPA_OPC_OSS_DESTROY);
4059 filter = &obd->u.filter;
4061 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
4064 CDEBUG(D_INODE, "%s: filter_destroy(group="LPU64",oid="
4065 LPU64")\n", obd->obd_name, oa->o_seq, oa->o_id);
4067 dchild = filter_fid2dentry(obd, NULL, oa->o_seq, oa->o_id);
4069 GOTO(cleanup, rc = PTR_ERR(dchild));
4072 if (dchild->d_inode == NULL) {
4073 CDEBUG(D_INODE, "destroying non-existent object "POSTID"\n",
4074 oa->o_id, oa->o_seq);
4075 /* If object already gone, cancel cookie right now */
4076 if (oa->o_valid & OBD_MD_FLCOOKIE) {
4077 struct llog_ctxt *ctxt;
4078 struct obd_llog_group *olg;
4080 olg = filter_find_olg(obd, oa->o_seq);
4082 CERROR(" %s: can not find olg of group %d\n",
4083 obd->obd_name, (int)oa->o_seq);
4084 GOTO(cleanup, rc = PTR_ERR(olg));
4086 fcc = &oa->o_lcookie;
4087 ctxt = llog_group_get_ctxt(olg, fcc->lgc_subsys + 1);
4088 llog_cancel(ctxt, NULL, 1, fcc, 0);
4089 llog_ctxt_put(ctxt);
4090 fcc = NULL; /* we didn't allocate fcc, don't free it */
4092 GOTO(cleanup, rc = -ENOENT);
4095 rc = filter_prepare_destroy(obd, oa->o_id, oa->o_seq, &lockh);
4099 /* Our MDC connection is established by the MDS to us */
4100 if (oa->o_valid & OBD_MD_FLCOOKIE) {
4101 OBD_ALLOC(fcc, sizeof(*fcc));
4103 *fcc = oa->o_lcookie;
4105 ll_vfs_dq_init(dchild->d_inode);
4107 /* we're gonna truncate it first in order to avoid possible deadlock:
4109 * open trasaction open transaction
4110 * down(i_zombie) down(i_zombie)
4111 * restart transaction
4112 * (see BUG 4180) -bzzz
4114 * take i_alloc_sem too to prevent other threads from writing to the
4115 * file while we are truncating it. This can cause lock ordering issue
4116 * between page lock, i_mutex & starting new journal handle.
4117 * (see bug 20321) -johann
4120 down_write(&dchild->d_inode->i_alloc_sem);
4121 LOCK_INODE_MUTEX(dchild->d_inode);
4122 fsfilt_check_slow(exp->exp_obd, now, "i_alloc_sem and i_mutex");
4124 /* VBR: version recovery check */
4125 rc = filter_version_get_check(exp, oti, dchild->d_inode);
4127 UNLOCK_INODE_MUTEX(dchild->d_inode);
4128 up_write(&dchild->d_inode->i_alloc_sem);
4132 handle = fsfilt_start_log(obd, dchild->d_inode, FSFILT_OP_SETATTR,
4134 if (IS_ERR(handle)) {
4135 UNLOCK_INODE_MUTEX(dchild->d_inode);
4136 up_write(&dchild->d_inode->i_alloc_sem);
4137 GOTO(cleanup, rc = PTR_ERR(handle));
/* truncate to zero in its own transaction before the unlink */
4140 iattr.ia_valid = ATTR_SIZE;
4142 rc = fsfilt_setattr(obd, dchild, handle, &iattr, 1);
4143 rc2 = fsfilt_commit(obd, dchild->d_inode, handle, 0);
4144 UNLOCK_INODE_MUTEX(dchild->d_inode);
4145 up_write(&dchild->d_inode->i_alloc_sem);
4149 GOTO(cleanup, rc = rc2);
4151 /* We don't actually need to lock the parent until we are unlinking
4152 * here, and not while truncating above. That avoids holding the
4153 * parent lock for a long time during truncate, which can block other
4154 * threads from doing anything to objects in that directory. bug 7171 */
4155 dparent = filter_parent_lock(obd, oa->o_seq, oa->o_id);
4156 if (IS_ERR(dparent))
4157 GOTO(cleanup, rc = PTR_ERR(dparent));
4158 cleanup_phase = 3; /* filter_parent_unlock */
4160 LOCK_INODE_MUTEX(dchild->d_inode);
4161 handle = fsfilt_start_log(obd, dparent->d_inode,FSFILT_OP_UNLINK,oti,1);
4162 if (IS_ERR(handle)) {
4163 UNLOCK_INODE_MUTEX(dchild->d_inode);
4164 GOTO(cleanup, rc = PTR_ERR(handle));
4166 cleanup_phase = 4; /* fsfilt_commit */
4168 /* Quota release need uid/gid of inode */
4169 obdo_from_inode(oa, dchild->d_inode, NULL, OBD_MD_FLUID|OBD_MD_FLGID);
4171 filter_fmd_drop(exp, oa->o_id, oa->o_seq);
4173 /* this drops dchild->d_inode->i_mutex unconditionally */
4174 rc = filter_destroy_internal(obd, oa->o_id, oa->o_seq, dparent, dchild);
4178 switch(cleanup_phase) {
/* defer llog cookie cancel until the unlink commits to disk */
4181 sync = fsfilt_add_journal_cb(obd, 0, oti ?
4182 oti->oti_handle : handle,
4183 filter_cancel_cookies_cb,
4185 /* If add_journal_cb failed, then filter_finish_transno
4186 * will commit the handle and we will do a sync
4187 * on commit. then we call callback directly to free
4190 rc = filter_finish_transno(exp, NULL, oti, rc, sync);
4192 filter_cancel_cookies_cb(obd, 0, fcc, rc);
4195 rc2 = fsfilt_commit(obd, dparent->d_inode, handle, 0);
4197 CERROR("error on commit, err = %d\n", rc2);
4204 filter_parent_unlock(dparent);
4206 filter_fini_destroy(obd, &lockh);
4210 OBD_FREE(fcc, sizeof(*fcc));
4212 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
4215 CERROR("invalid cleanup_phase %d\n", cleanup_phase);
4219 /* trigger quota release */
4220 qcids[USRQUOTA] = oa->o_uid;
4221 qcids[GRPQUOTA] = oa->o_gid;
4222 rc2 = lquota_adjust(filter_quota_interface_ref, obd, qcids, NULL, rc,
4225 CERROR("filter adjust qunit! (rc:%d)\n", rc2);
/*
 * filter_truncate(): obd o_punch entry point.  Only full truncate
 * (extent end == OBD_OBJECT_EOF) is supported; a bounded punch is
 * rejected.  Implemented as a setattr of o_size to the extent start,
 * with OBD_FL_TRUNC set around the call so filter_setattr can tell
 * this apart from a plain attribute change.
 *
 * NOTE(review): elided listing — rc declaration, the punch error
 * return and the final RETURN are not visible here.
 */
4229 /* NB start and end are used for punch, but not truncate */
4230 static int filter_truncate(struct obd_export *exp, struct obd_info *oinfo,
4231 struct obd_trans_info *oti,
4232 struct ptlrpc_request_set *rqset)
4237 if (oinfo->oi_policy.l_extent.end != OBD_OBJECT_EOF) {
4238 CERROR("PUNCH not supported, only truncate: end = "LPX64"\n",
4239 oinfo->oi_policy.l_extent.end);
4243 CDEBUG(D_INODE, "calling truncate for object "LPU64", valid = "LPX64
4244 ", o_size = "LPD64"\n", oinfo->oi_oa->o_id,oinfo->oi_oa->o_valid,
4245 oinfo->oi_policy.l_extent.start);
4247 oinfo->oi_oa->o_size = oinfo->oi_policy.l_extent.start;
4248 oinfo->oi_oa->o_valid |= OBD_FL_TRUNC;
4249 rc = filter_setattr(exp, oinfo, oti);
4250 oinfo->oi_oa->o_valid &= ~OBD_FL_TRUNC;
/*
 * filter_sync(): obd o_sync entry point.  With no object id, syncs the
 * whole backing filesystem and flushes pending llog cancels.  Otherwise
 * looks up the object and, under its i_mutex, does
 * filemap_fdatawrite + the filesystem's fsync method + fdatawait, then
 * refreshes the reply obdo from the inode.
 *
 * NOTE(review): elided listing — rc/rc2 declarations, capability error
 * return and the final RETURN are not visible here.  The fsync method
 * is borrowed from obt_rcvd_filp because the "file" argument is unused
 * by the underlying implementation (see comment at 4290).
 */
4254 static int filter_sync(struct obd_export *exp, struct obd_info *oinfo,
4255 obd_off start, obd_off end,
4256 struct ptlrpc_request_set *set)
4258 struct lvfs_run_ctxt saved;
4259 struct obd_device_target *obt;
4260 struct dentry *dentry;
4264 rc = filter_auth_capa(exp, NULL, oinfo->oi_oa->o_seq,
4265 (struct lustre_capa *)oinfo->oi_capa,
4266 CAPA_OPC_OSS_WRITE);
4270 obt = &exp->exp_obd->u.obt;
4272 /* An objid of zero is taken to mean "sync whole filesystem" */
4273 if (!oinfo->oi_oa || !(oinfo->oi_oa->o_valid & OBD_MD_FLID)) {
4274 rc = fsfilt_sync(exp->exp_obd, obt->obt_sb);
4275 /* Flush any remaining cancel messages out to the target */
4276 filter_sync_llogs(exp->exp_obd, exp);
4280 dentry = filter_oa2dentry(exp->exp_obd, &oinfo->oi_oa->o_oi);
4282 RETURN(PTR_ERR(dentry));
4284 push_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
4286 LOCK_INODE_MUTEX(dentry->d_inode);
4288 rc = filemap_fdatawrite(dentry->d_inode->i_mapping);
4290 /* just any file to grab fsync method - "file" arg unused */
4291 struct file *file = obt->obt_rcvd_filp;
4293 if (file->f_op && file->f_op->fsync)
4294 rc = file->f_op->fsync(NULL, dentry, 1);
4296 rc2 = filemap_fdatawait(dentry->d_inode->i_mapping);
4300 UNLOCK_INODE_MUTEX(dentry->d_inode);
4302 oinfo->oi_oa->o_valid = OBD_MD_FLID;
4303 obdo_from_inode(oinfo->oi_oa, dentry->d_inode, NULL,
4304 FILTER_VALID_FLAGS);
4306 pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
/*
 * filter_get_info(): obd o_get_info entry point.  Dispatches on the key
 * string: BLOCKSIZE / BLOCKSIZE_BITS (backing fs geometry), LAST_ID
 * (per-export-group last object id), FIEMAP (extent mapping via
 * fsfilt_iocontrol; with val == NULL just reports the needed size),
 * SYNC_LOCK_CANCEL (fo_sync_lock_cancel mode).  Unknown keys log and
 * fail.
 *
 * NOTE(review): elided listing — RETURN statements, -EINVAL paths and
 * several braces are not visible here.
 */
4312 static int filter_get_info(struct obd_export *exp, __u32 keylen,
4313 void *key, __u32 *vallen, void *val,
4314 struct lov_stripe_md *lsm)
4316 struct obd_device *obd;
4319 obd = class_exp2obd(exp);
4321 CDEBUG(D_IOCTL, "invalid client export %p\n", exp);
4325 if (KEY_IS(KEY_BLOCKSIZE)) {
4326 __u32 *blocksize = val;
4328 if (*vallen < sizeof(*blocksize))
4330 *blocksize = obd->u.obt.obt_sb->s_blocksize;
4332 *vallen = sizeof(*blocksize);
4336 if (KEY_IS(KEY_BLOCKSIZE_BITS)) {
4337 __u32 *blocksize_bits = val;
4338 if (blocksize_bits) {
4339 if (*vallen < sizeof(*blocksize_bits))
4341 *blocksize_bits = obd->u.obt.obt_sb->s_blocksize_bits;
4343 *vallen = sizeof(*blocksize_bits);
4347 if (KEY_IS(KEY_LAST_ID)) {
4348 obd_id *last_id = val;
4349 /* FIXME: object groups */
4351 if (*vallen < sizeof(*last_id))
4353 *last_id = filter_last_id(&obd->u.filter,
4354 exp->exp_filter_data.fed_group);
4356 *vallen = sizeof(*last_id);
4360 if (KEY_IS(KEY_FIEMAP)) {
4361 struct ll_fiemap_info_key *fm_key = key;
4362 struct dentry *dentry;
4363 struct ll_user_fiemap *fiemap = val;
4364 struct lvfs_run_ctxt saved;
/* NULL val means "tell me how big the fiemap buffer must be" */
4367 if (fiemap == NULL) {
4368 *vallen = fiemap_count_to_size(
4369 fm_key->fiemap.fm_extent_count);
4373 dentry = __filter_oa2dentry(exp->exp_obd, &fm_key->oa.o_oi,
4376 RETURN(PTR_ERR(dentry));
4378 memcpy(fiemap, &fm_key->fiemap, sizeof(*fiemap));
4379 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
4380 rc = fsfilt_iocontrol(obd, dentry, FSFILT_IOC_FIEMAP,
4382 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
4388 if (KEY_IS(KEY_SYNC_LOCK_CANCEL)) {
4389 *((__u32 *) val) = obd->u.filter.fo_sync_lock_cancel;
4390 *vallen = sizeof(__u32);
4394 CDEBUG(D_IOCTL, "invalid key\n");
/*
 * filter_setup_llog_group(): find-or-create the llog group for the
 * given object group, bind it to this export, and point the
 * MDS_OST replication context at the reverse import so llog records
 * can flow back to the MDS.
 *
 * NOTE(review): elided listing — rc declaration, group parameter line
 * and the final RETURN are not visible here.
 */
4398 static inline int filter_setup_llog_group(struct obd_export *exp,
4399 struct obd_device *obd,
4402 struct obd_llog_group *olg;
4403 struct llog_ctxt *ctxt;
4406 olg = filter_find_create_olg(obd, group);
4408 RETURN(PTR_ERR(olg));
4410 llog_group_set_export(olg, exp);
4412 ctxt = llog_group_get_ctxt(olg, LLOG_MDS_OST_REPL_CTXT);
4413 LASSERTF(ctxt != NULL, "ctxt is null\n");
4415 rc = llog_receptor_accept(ctxt, exp->exp_imp_reverse);
4416 llog_ctxt_put(ctxt);
/*
 * filter_set_grant_shrink(): handle a client's KEY_GRANT_SHRINK request
 * by re-running grant accounting on the incoming obdo under the
 * obd_osfs_lock, letting the client return unused grant space.
 */
4420 static int filter_set_grant_shrink(struct obd_export *exp,
4421 struct ost_body *body)
4423 /* handle shrink grant */
4424 cfs_spin_lock(&exp->exp_obd->obd_osfs_lock);
4425 filter_grant_incoming(exp, &body->oa);
4426 cfs_spin_unlock(&exp->exp_obd->obd_osfs_lock);
/*
 * filter_set_mds_conn(): record the MDS connection handle on this OST,
 * set up the llog group for the MDS's object sequence (defaulting to 0
 * when val is absent; FID_SEQ_OST_MDT0 additionally gets FID_SEQ_LLOG
 * for interop), and hand the export to the quota layer.
 *
 * NOTE(review): elided listing — obd/group/rc declarations and the
 * final RETURN are not visible here.
 */
4432 static int filter_set_mds_conn(struct obd_export *exp, void *val)
4434 struct obd_device *obd;
4440 CDEBUG(D_IOCTL, "invalid export %p\n", exp);
4444 LCONSOLE_WARN("%s: received MDS connection from %s\n", obd->obd_name,
4445 obd_export_nid2str(exp));
4446 obd->u.filter.fo_mdc_conn.cookie = exp->exp_handle.h_cookie;
4448 /* setup llog imports */
4450 group = (int)(*(__u32 *)val);
4452 group = 0; /* default value */
4454 LASSERT_SEQ_IS_MDT(group);
4455 rc = filter_setup_llog_group(exp, obd, group);
4459 if (group == FID_SEQ_OST_MDT0) {
4460 /* setup llog group 1 for interop */
4461 filter_setup_llog_group(exp, obd, FID_SEQ_LLOG);
4464 lquota_setinfo(filter_quota_interface_ref, obd, exp);
/*
 * filter_set_info_async(): obd o_set_info_async entry point.  Dispatches
 * on the key: CAPA_KEY (update capability key), REVIMP_UPD (refresh the
 * reverse import and clear quota info), SPTLRPC_CONF (re-read security
 * flavor config), MDS_CONN and GRANT_SHRINK (delegated to the helpers
 * above).
 *
 * NOTE(review): elided listing — obd/rc declarations, RETURNs and the
 * unknown-key tail are not visible here.
 */
4469 static int filter_set_info_async(struct obd_export *exp, __u32 keylen,
4470 void *key, __u32 vallen, void *val,
4471 struct ptlrpc_request_set *set)
4473 struct obd_device *obd;
4478 CDEBUG(D_IOCTL, "invalid export %p\n", exp);
4482 if (KEY_IS(KEY_CAPA_KEY)) {
4484 rc = filter_update_capa_key(obd, (struct lustre_capa_key *)val);
4486 CERROR("filter update capability key failed: %d\n", rc);
4490 if (KEY_IS(KEY_REVIMP_UPD)) {
4491 filter_revimp_update(exp);
4492 lquota_clearinfo(filter_quota_interface_ref, exp, exp->exp_obd);
4496 if (KEY_IS(KEY_SPTLRPC_CONF)) {
4497 filter_adapt_sptlrpc_conf(obd, 0);
4501 if (KEY_IS(KEY_MDS_CONN))
4502 RETURN(filter_set_mds_conn(exp, val));
4504 if (KEY_IS(KEY_GRANT_SHRINK))
4505 RETURN(filter_set_grant_shrink(exp, val));
/*
 * filter_iocontrol(): obd ioctl dispatcher for the OST.  Handles
 * ABORT_RECOVERY, SYNC, SET_READONLY (commit a dummy transaction, sync,
 * then flip the block device read-only — a test/debug facility),
 * CATLOGLIST, and the llog ioctls (currently stubbed to -EOPNOTSUPP;
 * the code after the RETURN at 4559 is dead until finished — note it
 * would dereference a NULL ctxt as written).
 *
 * NOTE(review): elided listing — the switch statement open, default
 * case and closing braces are not visible here.
 */
4510 int filter_iocontrol(unsigned int cmd, struct obd_export *exp,
4511 int len, void *karg, void *uarg)
4513 struct obd_device *obd = exp->exp_obd;
4514 struct obd_ioctl_data *data = karg;
4518 case OBD_IOC_ABORT_RECOVERY: {
4519 LCONSOLE_WARN("%s: Aborting recovery.\n", obd->obd_name);
4520 target_stop_recovery_thread(obd);
4524 case OBD_IOC_SYNC: {
4525 CDEBUG(D_RPCTRACE, "syncing ost %s\n", obd->obd_name);
4526 rc = fsfilt_sync(obd, obd->u.obt.obt_sb);
4530 case OBD_IOC_SET_READONLY: {
4532 struct super_block *sb = obd->u.obt.obt_sb;
4533 struct inode *inode = sb->s_root->d_inode;
4534 BDEVNAME_DECLARE_STORAGE(tmp);
4535 CERROR("*** setting device %s read-only ***\n",
4536 ll_bdevname(sb, tmp));
/* commit an empty transaction so the journal is quiesced first */
4538 handle = fsfilt_start(obd, inode, FSFILT_OP_MKNOD, NULL);
4539 if (!IS_ERR(handle))
4540 rc = fsfilt_commit(obd, inode, handle, 1);
4542 CDEBUG(D_HA, "syncing ost %s\n", obd->obd_name);
4543 rc = fsfilt_sync(obd, obd->u.obt.obt_sb);
4545 lvfs_set_rdonly(obd, obd->u.obt.obt_sb);
4549 case OBD_IOC_CATLOGLIST: {
4550 rc = llog_catalog_list(obd, 1, data);
4554 case OBD_IOC_LLOG_CANCEL:
4555 case OBD_IOC_LLOG_REMOVE:
4556 case OBD_IOC_LLOG_INFO:
4557 case OBD_IOC_LLOG_PRINT: {
4558 /* FIXME to be finished */
4559 RETURN(-EOPNOTSUPP);
/* dead code below until the FIXME above is resolved */
4561 struct llog_ctxt *ctxt = NULL;
4563 push_ctxt(&saved, &ctxt->loc_exp->exp_obd->obd_lvfs_ctxt, NULL);
4564 rc = llog_ioctl(ctxt, cmd, data);
4565 pop_ctxt(&saved, &ctxt->loc_exp->exp_obd->obd_lvfs_ctxt, NULL);
/*
 * filter_health_check(): obd o_health_check hook.  Returns non-zero
 * (unhealthy) when the backing filesystem went read-only; with
 * USE_HEALTH_CHECK_WRITE it also performs a test write through the
 * health-check file.
 *
 * NOTE(review): elided listing — rc declaration/initialization and the
 * return statement are not visible here.
 */
4578 static int filter_health_check(struct obd_device *obd)
4580 #ifdef USE_HEALTH_CHECK_WRITE
4581 struct filter_obd *filter = &obd->u.filter;
4586 * health_check to return 0 on healthy
4587 * and 1 on unhealthy.
4589 if (obd->u.obt.obt_sb->s_flags & MS_RDONLY)
4592 #ifdef USE_HEALTH_CHECK_WRITE
4593 LASSERT(filter->fo_obt.obt_health_check_filp != NULL);
4594 rc |= !!lvfs_check_io_health(obd, filter->fo_obt.obt_health_check_filp);
/*
 * filter_lvfs_fid2dentry(): lvfs callback adapter — resolve an object
 * (group gr, id) to a dentry via filter_fid2dentry; the generation is
 * unused and `data` is the obd_device (signature fixed by
 * lvfs_callback_ops.l_fid2dentry).
 */
4599 static struct dentry *filter_lvfs_fid2dentry(__u64 id, __u32 gen, __u64 gr,
4602 return filter_fid2dentry(data, NULL, gr, id);
/*
 * filter_process_config(): obd o_process_config hook — apply on-the-fly
 * configuration records.  The visible case forwards PARAM_OST settings
 * to the /proc parameter handler.
 *
 * NOTE(review): elided listing — rc declaration, the lcfg_command case
 * label, default case and RETURN are not visible here.
 */
4605 static int filter_process_config(struct obd_device *obd, obd_count len,
4608 struct lustre_cfg *lcfg = buf;
4609 struct lprocfs_static_vars lvars;
4612 switch (lcfg->lcfg_command) {
4614 lprocfs_filter_init_vars(&lvars);
4616 rc = class_process_proc_param(PARAM_OST, lvars.obd_vars,
/* lvfs callbacks for this OBD: only fid2dentry resolution is provided
 * (uses old-style GNU field: initializer syntax). */
4626 static struct lvfs_callback_ops filter_lvfs_ops = {
4627 l_fid2dentry: filter_lvfs_fid2dentry,
/* obd_ops method table for the obdfilter: wires every OST operation
 * (lifecycle, connection, object ops, I/O, llog, ioctl, health, config)
 * to the filter_* implementations in this file. */
4630 static struct obd_ops filter_obd_ops = {
4631 .o_owner = THIS_MODULE,
4632 .o_get_info = filter_get_info,
4633 .o_set_info_async = filter_set_info_async,
4634 .o_setup = filter_setup,
4635 .o_precleanup = filter_precleanup,
4636 .o_cleanup = filter_cleanup,
4637 .o_connect = filter_connect,
4638 .o_reconnect = filter_reconnect,
4639 .o_disconnect = filter_disconnect,
4640 .o_ping = filter_ping,
4641 .o_init_export = filter_init_export,
4642 .o_destroy_export = filter_destroy_export,
4643 .o_statfs = filter_statfs,
4644 .o_getattr = filter_getattr,
4645 .o_unpackmd = filter_unpackmd,
4646 .o_create = filter_create,
4647 .o_setattr = filter_setattr,
4648 .o_destroy = filter_destroy,
4649 .o_brw = filter_brw,
4650 .o_punch = filter_truncate,
4651 .o_sync = filter_sync,
4652 .o_preprw = filter_preprw,
4653 .o_commitrw = filter_commitrw,
4654 .o_llog_init = filter_llog_init,
4655 .o_llog_connect = filter_llog_connect,
4656 .o_llog_finish = filter_llog_finish,
4657 .o_iocontrol = filter_iocontrol,
4658 .o_health_check = filter_health_check,
4659 .o_process_config = filter_process_config,
4662 quota_interface_t *filter_quota_interface_ref;
4663 extern quota_interface_t filter_quota_interface;
/*
 * obdfilter_init(): module init.  Sanity-checks seq<->mdsno conversion,
 * loads the quota module, allocates the created-objects scratchpad and
 * the filter_mod_data slab, grabs the quota interface symbol, and
 * registers the LUSTRE_OST obd type.  The tail (visible from 4697)
 * is the error-unwind path: destroy the slab, drop the quota symbol,
 * free the scratchpad.
 *
 * NOTE(review): elided listing — rc/i/err declarations, success RETURN
 * and unwind labels are not visible here.
 */
4665 static int __init obdfilter_init(void)
4667 struct lprocfs_static_vars lvars;
4670 /** sanity check for group<->mdsno conversion */
4671 for (i = 0; i < MAX_MDT_COUNT; i++)
4672 LASSERT(objseq_to_mdsno(mdt_to_obd_objseq(i)) == i);
4674 lprocfs_filter_init_vars(&lvars);
4676 cfs_request_module("%s", "lquota");
4677 OBD_ALLOC(obdfilter_created_scratchpad,
4678 OBDFILTER_CREATED_SCRATCHPAD_ENTRIES *
4679 sizeof(*obdfilter_created_scratchpad));
4680 if (obdfilter_created_scratchpad == NULL)
4683 ll_fmd_cachep = cfs_mem_cache_create("ll_fmd_cache",
4684 sizeof(struct filter_mod_data),
4687 GOTO(out, rc = -ENOMEM);
4689 filter_quota_interface_ref = PORTAL_SYMBOL_GET(filter_quota_interface);
4690 init_obd_quota_ops(filter_quota_interface_ref, &filter_obd_ops);
4692 rc = class_register_type(&filter_obd_ops, NULL, lvars.module_vars,
4693 LUSTRE_OST_NAME, NULL);
/* error unwind: release everything acquired above, in reverse order */
4697 err = cfs_mem_cache_destroy(ll_fmd_cachep);
4698 LASSERTF(err == 0, "Cannot destroy ll_fmd_cachep: rc %d\n",err);
4699 ll_fmd_cachep = NULL;
4701 if (filter_quota_interface_ref)
4702 PORTAL_SYMBOL_PUT(filter_quota_interface);
4704 OBD_FREE(obdfilter_created_scratchpad,
4705 OBDFILTER_CREATED_SCRATCHPAD_ENTRIES *
4706 sizeof(*obdfilter_created_scratchpad));
/*
 * obdfilter_exit(): module unload — mirror of obdfilter_init.  Drops
 * the quota interface symbol, destroys the filter_mod_data slab,
 * unregisters the LUSTRE_OST type and frees the scratchpad.
 */
4712 static void __exit obdfilter_exit(void)
4714 if (filter_quota_interface_ref)
4715 PORTAL_SYMBOL_PUT(filter_quota_interface);
4717 if (ll_fmd_cachep) {
4718 int rc = cfs_mem_cache_destroy(ll_fmd_cachep);
4719 LASSERTF(rc == 0, "Cannot destroy ll_fmd_cachep: rc %d\n", rc);
4720 ll_fmd_cachep = NULL;
4723 class_unregister_type(LUSTRE_OST_NAME);
4724 OBD_FREE(obdfilter_created_scratchpad,
4725 OBDFILTER_CREATED_SCRATCHPAD_ENTRIES *
4726 sizeof(*obdfilter_created_scratchpad));
4729 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
4730 MODULE_DESCRIPTION("Lustre Filtering OBD driver");
4731 MODULE_LICENSE("GPL");
4733 module_init(obdfilter_init);
4734 module_exit(obdfilter_exit);