/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
* vim:expandtab:shiftwidth=8:tabstop=8:
*
- * linux/fs/obdfilter/filter.c
+ * GPL HEADER START
*
- * Copyright (c) 2001-2003 Cluster File Systems, Inc.
- * Author: Peter Braam <braam@clusterfs.com>
- * Author: Andreas Dilger <adilger@clusterfs.com>
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * This file is part of Lustre, http://www.lustre.org.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
*
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
*
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdfilter/filter.c
+ *
+ * Author: Peter Braam <braam@clusterfs.com>
+ * Author: Andreas Dilger <adilger@clusterfs.com>
*/
/*
#include <linux/init.h>
#include <linux/version.h>
#include <linux/sched.h>
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
-# include <linux/mount.h>
-# include <linux/buffer_head.h>
-#endif
+#include <linux/mount.h>
+#include <linux/buffer_head.h>
+#include <obd_cksum.h>
#include <obd_class.h>
#include <obd_lov.h>
#include <lustre_dlm.h>
#include <lustre_fsfilt.h>
#include <lprocfs_status.h>
#include <lustre_log.h>
-#include <lustre_commit_confd.h>
#include <libcfs/list.h>
#include <lustre_disk.h>
#include <lustre_quota.h>
#include <linux/slab.h>
#include <lustre_param.h>
+#include <lustre/ll_fiemap.h>
#include "filter_internal.h"
/* Group 0 is no longer a legal group, to catch uninitialized IDs */
-#define FILTER_MIN_GROUPS 3
+#define FILTER_MIN_GROUPS FILTER_GROUP_MDS1_N_BASE
static struct lvfs_callback_ops filter_lvfs_ops;
cfs_mem_cache_t *ll_fmd_cachep;
static void filter_commit_cb(struct obd_device *obd, __u64 transno,
void *cb_data, int error)
{
- obd_transno_commit_cb(obd, transno, error);
+ /* Journal commit callback: cb_data is the export that was pinned
+ * (class_export_get + exp_cb_count increment) when the callback was
+ * armed in filter_finish_transno(); drop both here. */
+ struct obd_export *exp = cb_data;
+ LASSERT(exp->exp_obd == obd);
+ obd_transno_commit_cb(obd, transno, exp, error);
+ atomic_dec(&exp->exp_cb_count);
+ class_export_put(exp);
+}
+
+/* VBR (version based recovery) check: compare the inode's stored version
+ * against the version carried in oti->oti_pre_version. On mismatch mark
+ * the export as VBR-failed and return -EOVERFLOW; otherwise record the
+ * current version back into oti. Returns 0 when no check is possible
+ * (missing inode/oti, or the backing fs does not support versions). */
+int filter_version_get_check(struct obd_export *exp,
+ struct obd_trans_info *oti, struct inode *inode)
+{
+ __u64 curr_version;
+
+ if (inode == NULL || oti == NULL)
+ RETURN(0);
+
+ curr_version = fsfilt_get_version(exp->exp_obd, inode);
+ /* backing filesystem may not implement object versions at all */
+ if ((__s64)curr_version == -EOPNOTSUPP)
+ RETURN(0);
+ /* VBR: version is checked always because costs nothing */
+ if (oti->oti_pre_version != 0 &&
+ oti->oti_pre_version != curr_version) {
+ CDEBUG(D_INODE, "Version mismatch "LPX64" != "LPX64"\n",
+ oti->oti_pre_version, curr_version);
+ spin_lock(&exp->exp_lock);
+ exp->exp_vbr_failed = 1;
+ spin_unlock(&exp->exp_lock);
+ RETURN (-EOVERFLOW);
+ }
+ oti->oti_pre_version = curr_version;
+ RETURN(0);
}
/* Assumes caller has already pushed us into the kernel context. */
-int filter_finish_transno(struct obd_export *exp, struct obd_trans_info *oti,
- int rc, int force_sync)
+int filter_finish_transno(struct obd_export *exp, struct inode *inode,
+ struct obd_trans_info *oti, int rc, int force_sync)
{
struct filter_obd *filter = &exp->exp_obd->u.filter;
struct filter_export_data *fed = &exp->exp_filter_data;
- struct filter_client_data *fcd = fed->fed_fcd;
+ struct lsd_client_data *lcd = fed->fed_lcd;
__u64 last_rcvd;
loff_t off;
- int err, log_pri = D_HA;
+ int err, log_pri = D_RPCTRACE;
/* Propagate error code. */
if (rc)
RETURN(rc);
/* we don't allocate new transnos for replayed requests */
+ spin_lock(&filter->fo_translock);
if (oti->oti_transno == 0) {
- spin_lock(&filter->fo_translock);
last_rcvd = le64_to_cpu(filter->fo_fsd->lsd_last_transno) + 1;
filter->fo_fsd->lsd_last_transno = cpu_to_le64(last_rcvd);
- spin_unlock(&filter->fo_translock);
- oti->oti_transno = last_rcvd;
} else {
- spin_lock(&filter->fo_translock);
last_rcvd = oti->oti_transno;
if (last_rcvd > le64_to_cpu(filter->fo_fsd->lsd_last_transno))
filter->fo_fsd->lsd_last_transno =
cpu_to_le64(last_rcvd);
+ }
+ oti->oti_transno = last_rcvd;
+ if (last_rcvd <= le64_to_cpu(lcd->lcd_last_transno)) {
spin_unlock(&filter->fo_translock);
+ LBUG();
}
- fcd->fcd_last_rcvd = cpu_to_le64(last_rcvd);
+ lcd->lcd_last_transno = cpu_to_le64(last_rcvd);
+ lcd->lcd_pre_versions[0] = cpu_to_le64(oti->oti_pre_version);
+ lcd->lcd_last_xid = cpu_to_le64(oti->oti_xid);
+ spin_unlock(&filter->fo_translock);
- /* could get xid from oti, if it's ever needed */
- fcd->fcd_last_xid = 0;
+ if (inode)
+ fsfilt_set_version(exp->exp_obd, inode, last_rcvd);
off = fed->fed_lr_off;
if (off <= 0) {
fed->fed_lr_idx, fed->fed_lr_off);
err = -EINVAL;
} else {
+ class_export_get(exp); /* released when the cb is called */
+ atomic_inc(&exp->exp_cb_count);
if (!force_sync)
- force_sync = fsfilt_add_journal_cb(exp->exp_obd,
+ force_sync = fsfilt_add_journal_cb(exp->exp_obd,
last_rcvd,
oti->oti_handle,
filter_commit_cb,
- NULL);
+ exp);
err = fsfilt_write_record(exp->exp_obd, filter->fo_rcvd_filp,
- fcd, sizeof(*fcd), &off,
+ lcd, sizeof(*lcd), &off,
force_sync | exp->exp_need_sync);
if (force_sync)
- filter_commit_cb(exp->exp_obd, last_rcvd, NULL, err);
+ filter_commit_cb(exp->exp_obd, last_rcvd, exp, err);
}
if (err) {
log_pri = D_ERROR;
}
CDEBUG(log_pri, "wrote trans "LPU64" for client %s at #%d: err = %d\n",
- last_rcvd, fcd->fcd_uuid, fed->fed_lr_idx, err);
+ last_rcvd, lcd->lcd_uuid, fed->fed_lr_idx, err);
RETURN(rc);
}
spin_lock_init(&brw_stats->hist[i].oh_lock);
}
+/* Allocate and initialize a per-NID lprocfs stats block: one counter per
+ * obd dt operation plus the extra LPROC_FILTER_* counters, including
+ * avg/min/max byte counters for reads and writes.
+ * Returns 0 on success, -ENOMEM if allocation fails. */
+static int lprocfs_init_rw_stats(struct obd_device *obd,
+ struct lprocfs_stats **stats)
+{
+ int num_stats;
+
+ /* ops table size in function pointers + filter-private counters
+ * (LPROC_FILTER_LAST - 1 extra slots) */
+ num_stats = (sizeof(*obd->obd_type->typ_dt_ops) / sizeof(void *)) +
+ LPROC_FILTER_LAST - 1;
+ *stats = lprocfs_alloc_stats(num_stats, LPROCFS_STATS_FLAG_NOPERCPU);
+ if (*stats == NULL)
+ return -ENOMEM;
+
+ lprocfs_init_ops_stats(LPROC_FILTER_LAST, *stats);
+ lprocfs_counter_init(*stats, LPROC_FILTER_READ_BYTES,
+ LPROCFS_CNTR_AVGMINMAX, "read_bytes", "bytes");
+ lprocfs_counter_init(*stats, LPROC_FILTER_WRITE_BYTES,
+ LPROCFS_CNTR_AVGMINMAX, "write_bytes", "bytes");
+
+ return(0);
+}
+
/* brw_stats are 2128, ops are 3916, ldlm are 204, so 6248 bytes per client,
plus the procfs overhead :( */
static int filter_export_stats_init(struct obd_device *obd,
- struct obd_export *exp)
+ struct obd_export *exp,
+ void *client_nid)
{
- struct filter_export_data *fed = &exp->exp_filter_data;
- struct proc_dir_entry *brw_entry;
- int rc, num_stats;
+ int rc, newnid = 0;
ENTRY;
- init_brw_stats(&fed->fed_brw_stats);
-
if (obd_uuid_equals(&exp->exp_client_uuid, &obd->obd_uuid))
/* Self-export gets no proc entry */
RETURN(0);
- rc = lprocfs_exp_setup(exp);
- if (rc)
+ rc = lprocfs_exp_setup(exp, client_nid, &newnid);
+ if (rc) {
+ /* Mask error for already created
+ * /proc entries */
+ if (rc == -EALREADY)
+ rc = 0;
RETURN(rc);
+ }
- /* Create a per export proc entry for brw_stats */
- brw_entry = create_proc_entry("brw_stats", 0644, exp->exp_proc);
- if (brw_entry == NULL)
- RETURN(-ENOMEM);
- brw_entry->proc_fops = &filter_per_export_stats_fops;
- brw_entry->data = fed;
+ /* newnid was set by lprocfs_exp_setup(); presumably it means a fresh
+ * nid_stat entry was created for this NID — TODO confirm. Only then
+ * do we create the per-NID brw_stats/stats/ldlm_stats proc files. */
+ if (newnid) {
+ struct nid_stat *tmp = exp->exp_nid_stats;
+ LASSERT(tmp != NULL);
+
+ OBD_ALLOC(tmp->nid_brw_stats, sizeof(struct brw_stats));
+ if (tmp->nid_brw_stats == NULL)
+ RETURN(-ENOMEM);
+
+ init_brw_stats(tmp->nid_brw_stats);
+ rc = lprocfs_seq_create(exp->exp_nid_stats->nid_proc, "brw_stats",
+ 0644, &filter_per_nid_stats_fops,
+ exp->exp_nid_stats);
+ if (rc)
+ CWARN("Error adding the brw_stats file\n");
+
+ rc = lprocfs_init_rw_stats(obd, &exp->exp_nid_stats->nid_stats);
+ if (rc)
+ RETURN(rc);
+
+ rc = lprocfs_register_stats(tmp->nid_proc, "stats",
+ tmp->nid_stats);
+ if (rc)
+ RETURN(rc);
+ /* Always add in ldlm_stats */
+ tmp->nid_ldlm_stats =
+ lprocfs_alloc_stats(LDLM_LAST_OPC - LDLM_FIRST_OPC,
+ LPROCFS_STATS_FLAG_NOPERCPU);
+ if (tmp->nid_ldlm_stats == NULL)
+ return -ENOMEM;
+
+ lprocfs_init_ldlm_stats(tmp->nid_ldlm_stats);
+ rc = lprocfs_register_stats(tmp->nid_proc, "ldlm_stats",
+ tmp->nid_ldlm_stats);
+ if (rc)
+ RETURN(rc);
+ }
- /* Create a per export proc entry for ops stats */
- num_stats = (sizeof(*obd->obd_type->typ_dt_ops) / sizeof(void *)) +
- LPROC_FILTER_LAST - 1;
- exp->exp_ops_stats = lprocfs_alloc_stats(num_stats,
- LPROCFS_STATS_FLAG_NOPERCPU);
- if (exp->exp_ops_stats == NULL)
- RETURN(-ENOMEM);
- lprocfs_init_ops_stats(LPROC_FILTER_LAST, exp->exp_ops_stats);
- lprocfs_counter_init(exp->exp_ops_stats, LPROC_FILTER_READ_BYTES,
- LPROCFS_CNTR_AVGMINMAX, "read_bytes", "bytes");
- lprocfs_counter_init(exp->exp_ops_stats, LPROC_FILTER_WRITE_BYTES,
- LPROCFS_CNTR_AVGMINMAX, "write_bytes", "bytes");
- lprocfs_register_stats(exp->exp_proc, "stats", exp->exp_ops_stats);
RETURN(0);
}
struct filter_export_data *fed = &exp->exp_filter_data;
unsigned long *bitmap = filter->fo_last_rcvd_slots;
int new_client = (cl_idx == -1);
-
+
ENTRY;
LASSERT(bitmap != NULL);
LASSERTF(cl_idx > -2, "%d\n", cl_idx);
/* Self-export */
- if (strcmp(fed->fed_fcd->fcd_uuid, obd->obd_uuid.uuid) == 0)
+ if (strcmp(fed->fed_lcd->lcd_uuid, obd->obd_uuid.uuid) == 0)
RETURN(0);
/* the bitmap operations can handle cl_idx > sizeof(long) * 8, so
LASSERTF(fed->fed_lr_off > 0, "fed_lr_off = %llu\n", fed->fed_lr_off);
CDEBUG(D_INFO, "client at index %d (%llu) with UUID '%s' added\n",
- fed->fed_lr_idx, fed->fed_lr_off, fed->fed_fcd->fcd_uuid);
+ fed->fed_lr_idx, fed->fed_lr_off, fed->fed_lcd->lcd_uuid);
if (new_client) {
struct lvfs_run_ctxt saved;
int rc;
void *handle;
- CDEBUG(D_INFO, "writing client fcd at idx %u (%llu) (len %u)\n",
- fed->fed_lr_idx,off,(unsigned int)sizeof(*fed->fed_fcd));
+ CDEBUG(D_INFO, "writing client lcd at idx %u (%llu) (len %u)\n",
+ fed->fed_lr_idx,off,(unsigned int)sizeof(*fed->fed_lcd));
push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
/* Transaction needed to fix bug 1403 */
rc = PTR_ERR(handle);
CERROR("unable to start transaction: rc %d\n", rc);
} else {
+ fed->fed_lcd->lcd_last_epoch =
+ filter->fo_fsd->lsd_start_epoch;
+ exp->exp_last_request_time = cfs_time_current_sec();
rc = fsfilt_add_journal_cb(obd, 0, handle,
target_client_add_cb, exp);
if (rc == 0) {
spin_unlock(&exp->exp_lock);
}
rc = fsfilt_write_record(obd, filter->fo_rcvd_filp,
- fed->fed_fcd,
- sizeof(*fed->fed_fcd),
+ fed->fed_lcd,
+ sizeof(*fed->fed_lcd),
&off, rc /* sync if no cb */);
fsfilt_commit(obd,
filter->fo_rcvd_filp->f_dentry->d_inode,
RETURN(0);
}
+/* All-zero client record used to wipe a slot in last_rcvd on disconnect;
+ * file-scope so it need not be zeroed on each call (globals are
+ * implicitly zeroed). */
+struct lsd_client_data zero_lcd; /* globals are implicitly zeroed */
+
static int filter_client_free(struct obd_export *exp)
{
struct filter_export_data *fed = &exp->exp_filter_data;
struct filter_obd *filter = &exp->exp_obd->u.filter;
struct obd_device *obd = exp->exp_obd;
- struct filter_client_data zero_fcd;
struct lvfs_run_ctxt saved;
int rc;
loff_t off;
ENTRY;
- if (fed->fed_fcd == NULL)
+ if (fed->fed_lcd == NULL)
RETURN(0);
- /* XXX if fcd_uuid were a real obd_uuid, I could use obd_uuid_equals */
- if (strcmp(fed->fed_fcd->fcd_uuid, obd->obd_uuid.uuid ) == 0)
+ /* XXX if lcd_uuid were a real obd_uuid, I could use obd_uuid_equals */
+ if (strcmp(fed->fed_lcd->lcd_uuid, obd->obd_uuid.uuid ) == 0)
GOTO(free, 0);
- CDEBUG(D_INFO, "freeing client at idx %u, offset %lld with UUID '%s'\n",
- fed->fed_lr_idx, fed->fed_lr_off, fed->fed_fcd->fcd_uuid);
-
LASSERT(filter->fo_last_rcvd_slots != NULL);
off = fed->fed_lr_off;
+ CDEBUG(D_INFO, "freeing client at idx %u, offset %lld with UUID '%s'\n",
+ fed->fed_lr_idx, fed->fed_lr_off, fed->fed_lcd->lcd_uuid);
+
/* Don't clear fed_lr_idx here as it is likely also unset. At worst
* we leak a client slot that will be cleaned on the next recovery. */
if (off <= 0) {
}
if (!(exp->exp_flags & OBD_OPT_FAILOVER)) {
- memset(&zero_fcd, 0, sizeof zero_fcd);
+ /* Don't force sync on disconnect if aborting recovery,
+ * or it does num_clients * num_osts. b=17194 */
+ int need_sync = exp->exp_need_sync &&
+ !(exp->exp_flags&OBD_OPT_ABORT_RECOV);
push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- rc = fsfilt_write_record(obd, filter->fo_rcvd_filp, &zero_fcd,
- sizeof(zero_fcd), &off,
- (!exp->exp_libclient ||
- exp->exp_need_sync));
+ rc = fsfilt_write_record(obd, filter->fo_rcvd_filp, &zero_lcd,
+ sizeof(zero_lcd), &off, 0);
+
+ /* Make sure the server's last_transno is up to date. Do this
+ * after the client is freed so we know all the client's
+ * transactions have been committed. */
if (rc == 0)
- /* update server's transno */
filter_update_server_data(obd, filter->fo_rcvd_filp,
- filter->fo_fsd,
- !exp->exp_libclient);
+ filter->fo_fsd, need_sync);
pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
CDEBUG(rc == 0 ? D_INFO : D_ERROR,
- "zeroing out client %s at idx %u (%llu) in %s rc %d\n",
- fed->fed_fcd->fcd_uuid, fed->fed_lr_idx, fed->fed_lr_off,
- LAST_RCVD, rc);
+ "zero out client %s at idx %u/%llu in %s %ssync rc %d\n",
+ fed->fed_lcd->lcd_uuid, fed->fed_lr_idx, fed->fed_lr_off,
+ LAST_RCVD, need_sync ? "" : "a", rc);
}
if (!test_and_clear_bit(fed->fed_lr_idx, filter->fo_last_rcvd_slots)) {
EXIT;
free:
- OBD_FREE(fed->fed_fcd, sizeof(*fed->fed_fcd));
- fed->fed_fcd = NULL;
+ OBD_FREE_PTR(fed->fed_lcd);
+ fed->fed_lcd = NULL;
return 0;
}
struct filter_export_data *fed = &exp->exp_filter_data;
struct filter_mod_data *found = NULL, *fmd_new = NULL;
- OBD_SLAB_ALLOC(fmd_new, ll_fmd_cachep, CFS_ALLOC_IO, sizeof(*fmd_new));
+ OBD_SLAB_ALLOC_PTR_GFP(fmd_new, ll_fmd_cachep, CFS_ALLOC_IO);
spin_lock(&fed->fed_lock);
found = filter_fmd_find_nolock(&exp->exp_obd->u.filter,fed,objid,group);
static int filter_init_export(struct obd_export *exp)
{
+ /* Initialize per-export filter data (fmd list/lock), mark the export
+ * as connecting, then set up ldlm per-export state. */
spin_lock_init(&exp->exp_filter_data.fed_lock);
- INIT_LIST_HEAD(&exp->exp_filter_data.fed_mod_list);
-
+ CFS_INIT_LIST_HEAD(&exp->exp_filter_data.fed_mod_list);
+
spin_lock(&exp->exp_lock);
exp->exp_connecting = 1;
spin_unlock(&exp->exp_lock);
- return 0;
+ return ldlm_init_export(exp);
}
static int filter_free_server_data(struct filter_obd *filter)
{
- OBD_FREE(filter->fo_fsd, sizeof(*filter->fo_fsd));
+ OBD_FREE_PTR(filter->fo_fsd);
filter->fo_fsd = NULL;
OBD_FREE(filter->fo_last_rcvd_slots, LR_MAX_CLIENTS / 8);
filter->fo_last_rcvd_slots = NULL;
CDEBUG(D_INODE, "server last_mount: "LPU64"\n",
le64_to_cpu(fsd->lsd_mount_count));
- fsd->lsd_compat14 = fsd->lsd_last_transno;
rc = fsfilt_write_record(obd, filp, fsd, sizeof(*fsd), &off, force_sync);
if (rc)
CERROR("error writing lr_server_data: rc = %d\n", rc);
{
struct filter_obd *filter = &obd->u.filter;
struct lr_server_data *fsd;
- struct filter_client_data *fcd = NULL;
+ struct lsd_client_data *lcd = NULL;
struct inode *inode = filp->f_dentry->d_inode;
unsigned long last_rcvd_size = i_size_read(inode);
__u64 mount_count;
+ __u32 start_epoch;
int cl_idx;
loff_t off = 0;
int rc;
/* ensure padding in the struct is the correct size */
CLASSERT (offsetof(struct lr_server_data, lsd_padding) +
sizeof(fsd->lsd_padding) == LR_SERVER_SIZE);
- CLASSERT (offsetof(struct filter_client_data, fcd_padding) +
- sizeof(fcd->fcd_padding) == LR_CLIENT_SIZE);
+ CLASSERT (offsetof(struct lsd_client_data, lcd_padding) +
+ sizeof(lcd->lcd_padding) == LR_CLIENT_SIZE);
OBD_ALLOC(fsd, sizeof(*fsd));
if (!fsd)
GOTO(err_fsd, rc = -EINVAL);
}
- CDEBUG(D_INODE, "%s: server last_transno : "LPU64"\n",
+ start_epoch = le32_to_cpu(fsd->lsd_start_epoch);
+
+ CDEBUG(D_INODE, "%s: server start_epoch : %#x\n",
+ obd->obd_name, start_epoch);
+ CDEBUG(D_INODE, "%s: server last_transno : "LPX64"\n",
obd->obd_name, le64_to_cpu(fsd->lsd_last_transno));
CDEBUG(D_INODE, "%s: server mount_count: "LPU64"\n",
obd->obd_name, mount_count + 1);
struct obd_export *exp;
struct filter_export_data *fed;
- if (!fcd) {
- OBD_ALLOC(fcd, sizeof(*fcd));
- if (!fcd)
+ if (!lcd) {
+ OBD_ALLOC_PTR(lcd);
+ if (!lcd)
GOTO(err_client, rc = -ENOMEM);
}
/* Don't assume off is incremented properly by
- * fsfilt_read_record(), in case sizeof(*fcd)
+ * fsfilt_read_record(), in case sizeof(*lcd)
* isn't the same as fsd->lsd_client_size. */
off = le32_to_cpu(fsd->lsd_client_start) +
cl_idx * le16_to_cpu(fsd->lsd_client_size);
- rc = fsfilt_read_record(obd, filp, fcd, sizeof(*fcd), &off);
+ rc = fsfilt_read_record(obd, filp, lcd, sizeof(*lcd), &off);
if (rc) {
CERROR("error reading FILT %s idx %d off %llu: rc %d\n",
LAST_RCVD, cl_idx, off, rc);
break; /* read error shouldn't cause startup to fail */
}
- if (fcd->fcd_uuid[0] == '\0') {
+ if (lcd->lcd_uuid[0] == '\0') {
CDEBUG(D_INFO, "skipping zeroed client at offset %d\n",
cl_idx);
continue;
}
- last_rcvd = le64_to_cpu(fcd->fcd_last_rcvd);
+ last_rcvd = le64_to_cpu(lcd->lcd_last_transno);
/* These exports are cleaned up by filter_disconnect(), so they
* need to be set up like real exports as filter_connect() does.
*/
- exp = class_new_export(obd, (struct obd_uuid *)fcd->fcd_uuid);
+ exp = class_new_export(obd, (struct obd_uuid *)lcd->lcd_uuid);
CDEBUG(D_HA, "RCVRNG CLIENT uuid: %s idx: %d lr: "LPU64
- " srv lr: "LPU64" fcd_group %d\n", fcd->fcd_uuid, cl_idx,
- last_rcvd, le64_to_cpu(fsd->lsd_last_transno),
- le32_to_cpu(fcd->fcd_group));
+ " srv lr: "LPU64"\n", lcd->lcd_uuid, cl_idx,
+ last_rcvd, le64_to_cpu(fsd->lsd_last_transno));
if (IS_ERR(exp)) {
if (PTR_ERR(exp) == -EALREADY) {
/* export already exists, zero out this one */
CERROR("Zeroing out duplicate export due to "
"bug 10479.\n");
- fcd->fcd_uuid[0] = '\0';
+ lcd->lcd_uuid[0] = '\0';
} else {
GOTO(err_client, rc = PTR_ERR(exp));
}
} else {
fed = &exp->exp_filter_data;
- fed->fed_fcd = fcd;
- fed->fed_group = le32_to_cpu(fcd->fcd_group);
- filter_export_stats_init(obd, exp);
+ fed->fed_lcd = lcd;
+ fed->fed_group = 0; /* will be assigned at connect */
+ filter_export_stats_init(obd, exp, NULL);
rc = filter_client_add(obd, exp, cl_idx);
/* can't fail for existing client */
LASSERTF(rc == 0, "rc = %d\n", rc);
- fcd = NULL;
+ /* VBR: set export last committed */
+ exp->exp_last_committed = last_rcvd;
spin_lock(&exp->exp_lock);
exp->exp_connecting = 0;
exp->exp_in_recovery = 0;
spin_unlock(&exp->exp_lock);
+ spin_lock_bh(&obd->obd_processing_task_lock);
obd->obd_max_recoverable_clients++;
+ spin_unlock_bh(&obd->obd_processing_task_lock);
+ lcd = NULL;
class_export_put(exp);
}
fsd->lsd_last_transno = cpu_to_le64(last_rcvd);
}
- if (fcd)
- OBD_FREE(fcd, sizeof(*fcd));
+ if (lcd)
+ OBD_FREE_PTR(lcd);
obd->obd_last_committed = le64_to_cpu(fsd->lsd_last_transno);
- target_recovery_init(obd, ost_handle);
+ target_recovery_init(&filter->fo_lut, ost_handle);
out:
filter->fo_mount_count = mount_count + 1;
CDEBUG(D_INODE, "error reading LAST_GROUP: rc %d\n",rc);
GOTO(cleanup, rc);
}
- LASSERT(off == 0 || last_group >= FILTER_MIN_GROUPS);
+ LASSERTF(off == 0 || CHECK_MDS_GROUP(last_group),
+ "off = %llu and last_group = %d\n", off, last_group);
+
CDEBUG(D_INODE, "%s: previous %d, new %d\n",
obd->obd_name, last_group, group);
RETURN(PTR_ERR(dentry));
}
} else {
- dentry = simple_mkdir(filter->fo_dentry_O, name, 0700, 1);
+ dentry = simple_mkdir(filter->fo_dentry_O, filter->fo_vfsmnt,
+ name, 0700, 1);
if (IS_ERR(dentry)) {
CERROR("cannot lookup/create O/%s: rc = %ld\n", name,
PTR_ERR(dentry));
char dir[20];
snprintf(dir, sizeof(dir), "d%u", i);
- tmp_subdirs->dentry[i] = simple_mkdir(dentry, dir, 0700, 1);
+ tmp_subdirs->dentry[i] = simple_mkdir(dentry,
+ filter->fo_vfsmnt,
+ dir, 0700, 1);
if (IS_ERR(tmp_subdirs->dentry[i])) {
rc = PTR_ERR(tmp_subdirs->dentry[i]);
CERROR("can't lookup/create O/%d/%s: rc = %d\n",
down(&filter->fo_init_lock);
old_count = filter->fo_group_count;
for (group = old_count; group <= last_group; group++) {
- if (group == 0)
- continue; /* no group zero */
rc = filter_read_group_internal(obd, group, create);
if (rc != 0)
loff_t off = 0;
ENTRY;
- O_dentry = simple_mkdir(current->fs->pwd, "O", 0700, 1);
+ O_dentry = simple_mkdir(current->fs->pwd, filter->fo_vfsmnt,
+ "O", 0700, 1);
CDEBUG(D_INODE, "got/created O: %p\n", O_dentry);
if (IS_ERR(O_dentry)) {
rc = PTR_ERR(O_dentry);
}
LOCK_INODE_MUTEX(O_dentry->d_inode);
- rc = vfs_rename(O_dentry->d_inode, dentry,
- O_dentry->d_inode, O0_dentry);
+ rc = ll_vfs_rename(O_dentry->d_inode, dentry, filter->fo_vfsmnt,
+ O_dentry->d_inode, O0_dentry,
+ filter->fo_vfsmnt);
UNLOCK_INODE_MUTEX(O_dentry->d_inode);
if (rc) {
if (off == 0) {
last_group = FILTER_MIN_GROUPS;
} else {
- LASSERT(last_group >= FILTER_MIN_GROUPS);
+ LASSERT_MDS_GROUP(last_group);
}
CWARN("%s: initialize groups [%d,%d]\n", obd->obd_name,
GOTO(err_filp, rc = -EOPNOTSUPP);
}
+ /** lu_target has very limited use in filter now */
+ lut_init(NULL, &filter->fo_lut, obd, NULL);
+
rc = filter_init_server_data(obd, file);
if (rc) {
CERROR("cannot read %s: rc = %d\n", LAST_RCVD, rc);
if (rc)
CERROR("error writing server data: rc = %d\n", rc);
- for (i = 1; i < filter->fo_group_count; i++) {
+ for (i = 0; i < filter->fo_group_count; i++) {
rc = filter_update_last_objid(obd, i,
(i == filter->fo_group_count - 1));
if (rc)
spin_lock(&filter->fo_objidlock);
id = filter->fo_last_objids[group];
spin_unlock(&filter->fo_objidlock);
-
return id;
}
static int filter_lock_dentry(struct obd_device *obd, struct dentry *dparent)
{
- LOCK_INODE_MUTEX(dparent->d_inode);
+ /* NOTE(review): the _PARENT variant presumably takes i_mutex with the
+ * parent lockdep subclass to avoid false deadlock reports when child
+ * inodes are locked too — confirm against the macro definition. */
+ LOCK_INODE_MUTEX_PARENT(dparent->d_inode);
return 0;
}
struct filter_subdirs *subdirs;
LASSERT(group < filter->fo_group_count); /* FIXME: object groups */
- if (group > 0 || filter->fo_subdir_count == 0)
+ if ((group > FILTER_GROUP_MDS0 && group < FILTER_GROUP_MDS1_N_BASE) ||
+ filter->fo_subdir_count == 0)
return filter->fo_dentry_O_groups[group];
subdirs = &filter->fo_dentry_O_sub[group];
if (IS_ERR(dparent))
return dparent;
+ if (dparent == NULL)
+ return ERR_PTR(-ENOENT);
rc = filter_lock_dentry(obd, dparent);
- fsfilt_check_slow(obd, now, obd_timeout, "parent lock");
+ fsfilt_check_slow(obd, now, "parent lock");
return rc ? ERR_PTR(rc) : dparent;
}
}
static int filter_prepare_destroy(struct obd_device *obd, obd_id objid,
- obd_id group)
+ obd_id group, struct lustre_handle *lockh)
{
- struct lustre_handle lockh;
int flags = LDLM_AST_DISCARD_DATA, rc;
- struct ldlm_res_id res_id = { .name = { objid, 0, group, 0} };
+ struct ldlm_res_id res_id;
ldlm_policy_data_t policy = { .l_extent = { 0, OBD_OBJECT_EOF } };
-
ENTRY;
+
+ osc_build_res_name(objid, group, &res_id);
/* Tell the clients that the object is gone now and that they should
* throw away any cached pages. */
rc = ldlm_cli_enqueue_local(obd->obd_namespace, &res_id, LDLM_EXTENT,
&policy, LCK_PW, &flags, ldlm_blocking_ast,
ldlm_completion_ast, NULL, NULL, 0, NULL,
- &lockh);
-
- /* We only care about the side-effects, just drop the lock. */
- if (rc == ELDLM_OK)
- ldlm_lock_decref(&lockh, LCK_PW);
-
+ NULL, lockh);
+ /* zero the cookie on failure so filter_fini_destroy() knows there
+ * is no PW lock to release */
+ if (rc != ELDLM_OK)
+ lockh->cookie = 0;
RETURN(rc);
}
+/* Release the PW extent lock taken by filter_prepare_destroy(); a zero
+ * cookie means the enqueue failed and there is nothing to drop. */
+static void filter_fini_destroy(struct obd_device *obd,
+ struct lustre_handle *lockh)
+{
+ if (lockh->cookie)
+ ldlm_lock_decref(lockh, LCK_PW);
+}
+
/* This is vfs_unlink() without down(i_sem). If we call regular vfs_unlink()
* we have 2.6 lock ordering issues with filter_commitrw_write() as it takes
* i_sem before starting a handle, while filter_destroy() + vfs_unlink do the
* reverse. Caller must take i_sem before starting the transaction and we
* drop it here before the inode is removed from the dentry. bug 4180/6984 */
-int filter_vfs_unlink(struct inode *dir, struct dentry *dentry)
+int filter_vfs_unlink(struct inode *dir, struct dentry *dentry,
+ struct vfsmount *mnt)
{
int rc;
ENTRY;
GOTO(out, rc = -EPERM);
/* check_sticky() */
- if ((dentry->d_inode->i_uid != current->fsuid && !capable(CAP_FOWNER))||
- IS_APPEND(dentry->d_inode) || IS_IMMUTABLE(dentry->d_inode))
+ if ((dentry->d_inode->i_uid != current->fsuid &&
+ !cfs_capable(CFS_CAP_FOWNER)) || IS_APPEND(dentry->d_inode) ||
+ IS_IMMUTABLE(dentry->d_inode))
GOTO(out, rc = -EPERM);
/* NOTE: This might need to go outside i_mutex, though it isn't clear if
* here) or some other ordering issue. */
DQUOT_INIT(dir);
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
- rc = security_inode_unlink(dir, dentry);
+ rc = ll_security_inode_unlink(dir, dentry, mnt);
if (rc)
GOTO(out, rc);
-#endif
rc = dir->i_op->unlink(dir, dentry);
out:
struct dentry *dchild)
{
struct inode *inode = dchild->d_inode;
+ struct filter_obd *filter = &obd->u.filter;
int rc;
if (inode->i_nlink != 1 || atomic_read(&inode->i_count) != 1) {
atomic_read(&inode->i_count));
}
- rc = filter_vfs_unlink(dparent->d_inode, dchild);
+ rc = filter_vfs_unlink(dparent->d_inode, dchild, filter->fo_vfsmnt);
if (rc)
CERROR("error unlinking objid %.*s: rc %d\n",
dchild->d_name.len, dchild->d_name.name, rc);
return(rc);
}
+/* Arguments threaded through the interval-tree walk in filter_intent_cb():
+ * victim    - out: lock with the highest extent start seen so far
+ * size      - current file size (locks entirely below it are skipped)
+ * liblustre - out: cleared once any non-liblustre lock is found */
+struct filter_intent_args {
+ struct ldlm_lock **victim;
+ __u64 size;
+ int *liblustre;
+};
+
+/* Interval-tree iteration callback (walked in reverse by the caller):
+ * pick, among granted extent locks above the current file size, the one
+ * whose extent starts highest; the caller presumably glimpses that lock's
+ * holder for an up-to-date size — TODO confirm against the call site. */
+static enum interval_iter filter_intent_cb(struct interval_node *n,
+ void *args)
+{
+ struct ldlm_interval *node = (struct ldlm_interval *)n;
+ struct filter_intent_args *arg = (struct filter_intent_args*)args;
+ __u64 size = arg->size;
+ struct ldlm_lock **v = arg->victim;
+ struct ldlm_lock *lck;
+
+ /* If the interval is lower than the current file size,
+ * just break. */
+ if (interval_high(n) <= size)
+ return INTERVAL_ITER_STOP;
+
+ list_for_each_entry(lck, &node->li_group, l_sl_policy) {
+ /* Don't send glimpse ASTs to liblustre clients.
+ * They aren't listening for them, and they do
+ * entirely synchronous I/O anyways. */
+ if (lck->l_export == NULL ||
+ lck->l_export->exp_libclient == 1)
+ continue;
+
+ if (*arg->liblustre)
+ *arg->liblustre = 0;
+
+ if (*v == NULL) {
+ *v = LDLM_LOCK_GET(lck);
+ } else if ((*v)->l_policy_data.l_extent.start <
+ lck->l_policy_data.l_extent.start) {
+ LDLM_LOCK_RELEASE(*v);
+ *v = LDLM_LOCK_GET(lck);
+ }
+
+ /* the same policy group - every lock has the
+ * same extent, so needn't do it any more */
+ break;
+ }
+
+ return INTERVAL_ITER_CONT;
+}
+
static int filter_intent_policy(struct ldlm_namespace *ns,
struct ldlm_lock **lockp, void *req_cookie,
ldlm_mode_t mode, int flags, void *data)
{
- struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
+ CFS_LIST_HEAD(rpc_list);
struct ptlrpc_request *req = req_cookie;
struct ldlm_lock *lock = *lockp, *l = NULL;
struct ldlm_resource *res = lock->l_resource;
ldlm_processing_policy policy;
struct ost_lvb *res_lvb, *reply_lvb;
struct ldlm_reply *rep;
- struct list_head *tmp;
ldlm_error_t err;
- int rc, tmpflags = 0, only_liblustre = 0;
- int repsize[3] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
+ int idx, rc, tmpflags = 0, only_liblustre = 1;
+ struct ldlm_interval_tree *tree;
+ struct filter_intent_args arg;
+ __u32 repsize[3] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
[DLM_LOCKREPLY_OFF] = sizeof(*rep),
[DLM_REPLY_REC_OFF] = sizeof(*reply_lvb) };
ENTRY;
//fixup_handle_for_resent_req(req, lock, &lockh);
- /* If we grant any lock at all, it will be a whole-file read lock.
- * Call the extent policy function to see if our request can be
- * granted, or is blocked. */
- lock->l_policy_data.l_extent.start = 0;
- lock->l_policy_data.l_extent.end = OBD_OBJECT_EOF;
- lock->l_req_mode = LCK_PR;
+ /* Call the extent policy function to see if our request can be
+ * granted, or is blocked.
+ * If the OST lock has LDLM_FL_HAS_INTENT set, it means a glimpse
+ * lock, and should not be granted if the lock will be blocked.
+ */
LASSERT(ns == res->lr_namespace);
lock_res(res);
LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
lock->l_flags &= ~LDLM_FL_CP_REQD;
list_del_init(&wlock->l_cp_ast);
- LDLM_LOCK_PUT(wlock);
+ LDLM_LOCK_RELEASE(wlock);
}
/* The lock met with no resistance; we're finished. */
LASSERT(res_lvb != NULL);
*reply_lvb = *res_lvb;
- list_for_each(tmp, &res->lr_granted) {
- struct ldlm_lock *tmplock =
- list_entry(tmp, struct ldlm_lock, l_res_link);
-
- if (tmplock->l_granted_mode == LCK_PR)
- continue;
- /*
- * ->ns_lock guarantees that no new locks are granted, and,
- * therefore, that res->lr_lvb_data cannot increase beyond the
- * end of already granted lock. As a result, it is safe to
- * check against "stale" reply_lvb->lvb_size value without
- * res->lr_lvb_sem.
- */
- if (tmplock->l_policy_data.l_extent.end <= reply_lvb->lvb_size)
- continue;
-
- /* Don't send glimpse ASTs to liblustre clients. They aren't
- * listening for them, and they do entirely synchronous I/O
- * anyways. */
- if (tmplock->l_export == NULL ||
- tmplock->l_export->exp_libclient == 1) {
- only_liblustre = 1;
- continue;
- }
-
- if (l == NULL) {
- l = LDLM_LOCK_GET(tmplock);
- continue;
- }
-
- if (l->l_policy_data.l_extent.start >
- tmplock->l_policy_data.l_extent.start)
+ /*
+ * ->ns_lock guarantees that no new locks are granted, and,
+ * therefore, that res->lr_lvb_data cannot increase beyond the
+ * end of already granted lock. As a result, it is safe to
+ * check against "stale" reply_lvb->lvb_size value without
+ * res->lr_lvb_sem.
+ */
+ arg.size = reply_lvb->lvb_size;
+ arg.victim = &l;
+ arg.liblustre = &only_liblustre;
+ for (idx = 0; idx < LCK_MODE_NUM; idx++) {
+ tree = &res->lr_itree[idx];
+ if (tree->lit_mode == LCK_PR)
continue;
- LDLM_LOCK_PUT(l);
- l = LDLM_LOCK_GET(tmplock);
+ interval_iterate_reverse(tree->lit_root,
+ filter_intent_cb, &arg);
}
unlock_res(res);
unlock_res(res);
out:
- LDLM_LOCK_PUT(l);
+ LDLM_LOCK_RELEASE(l);
RETURN(ELDLM_LOCK_ABORTED);
}
EXIT;
}
+/* Refresh the sptlrpc (secure ptlrpc) flavor rules for this target:
+ * fetch the current rule set, push the new flavors to existing exports,
+ * then swap the new rule set in under fo_sptlrpc_lock, freeing the old
+ * one. 'initial' is passed through to the rule fetch (first-time setup
+ * vs. reconfiguration). Returns 0 or the fetch error. */
+static int filter_adapt_sptlrpc_conf(struct obd_device *obd, int initial)
+{
+ struct filter_obd *filter = &obd->u.filter;
+ struct sptlrpc_rule_set tmp_rset;
+ int rc;
+
+ sptlrpc_rule_set_init(&tmp_rset);
+ rc = sptlrpc_conf_target_get_rules(obd, &tmp_rset, initial);
+ if (rc) {
+ CERROR("obd %s: failed get sptlrpc rules: %d\n",
+ obd->obd_name, rc);
+ return rc;
+ }
+
+ sptlrpc_target_update_exp_flavor(obd, &tmp_rset);
+
+ /* publish the new rule set; readers take fo_sptlrpc_lock */
+ write_lock(&filter->fo_sptlrpc_lock);
+ sptlrpc_rule_set_free(&filter->fo_sptlrpc_rset);
+ filter->fo_sptlrpc_rset = tmp_rset;
+ write_unlock(&filter->fo_sptlrpc_lock);
+
+ return 0;
+}
+
/*
* pre-allocate pool of iobuf's to be used by filter_{prep,commit}rw_write().
*/
* If we haven't allocated a pool entry for this thread before, do so now. */
void *filter_iobuf_get(struct filter_obd *filter, struct obd_trans_info *oti)
{
- int thread_id = oti ? oti->oti_thread_id : -1;
+ int thread_id = (oti && oti->oti_thread) ?
+ oti->oti_thread->t_id : -1;
struct filter_iobuf *pool = NULL;
struct filter_iobuf **pool_place = NULL;
__u8 *uuid_ptr;
char *str, *label;
char ns_name[48];
+ request_queue_t *q;
int rc, i;
ENTRY;
if (rc != 0)
GOTO(err_ops, rc);
- LASSERT(!lvfs_check_rdonly(lvfs_sbdev(mnt->mnt_sb)));
+ if (lvfs_check_rdonly(lvfs_sbdev(mnt->mnt_sb))) {
+ CERROR("%s: Underlying device is marked as read-only. "
+ "Setup failed\n", obd->obd_name);
+ GOTO(err_ops, rc = -EROFS);
+ }
/* failover is the default */
obd->obd_replayable = 1;
obd->obd_lvfs_ctxt.fs = get_ds();
obd->obd_lvfs_ctxt.cb_ops = filter_lvfs_ops;
- sema_init(&filter->fo_init_lock, 1);
+ init_mutex(&filter->fo_init_lock);
filter->fo_committed_group = 0;
rc = filter_prep(obd);
spin_lock_init(&filter->fo_translock);
spin_lock_init(&filter->fo_objidlock);
- INIT_LIST_HEAD(&filter->fo_export_list);
+ CFS_INIT_LIST_HEAD(&filter->fo_export_list);
sema_init(&filter->fo_alloc_lock, 1);
init_brw_stats(&filter->fo_filter_stats);
+ filter->fo_read_cache = 1; /* enable read-only cache by default */
+ filter->fo_writethrough_cache = 1; /* enable writethrough cache */
filter->fo_readcache_max_filesize = FILTER_MAX_CACHE_SIZE;
filter->fo_fmd_max_num = FILTER_FMD_MAX_NUM_DEFAULT;
filter->fo_fmd_max_age = FILTER_FMD_MAX_AGE_DEFAULT;
- INIT_LIST_HEAD(&filter->fo_llog_list);
+ CFS_INIT_LIST_HEAD(&filter->fo_llog_list);
spin_lock_init(&filter->fo_llog_list_lock);
- filter->fo_fl_oss_capa = 0;
- INIT_LIST_HEAD(&filter->fo_capa_keys);
+ filter->fo_fl_oss_capa = 1;
+
+ CFS_INIT_LIST_HEAD(&filter->fo_capa_keys);
filter->fo_capa_hash = init_capa_hash();
if (filter->fo_capa_hash == NULL)
- GOTO(err_ops, rc = -ENOMEM);
+ GOTO(err_post, rc = -ENOMEM);
sprintf(ns_name, "filter-%s", obd->obd_uuid.uuid);
- obd->obd_namespace = ldlm_namespace_new(ns_name, LDLM_NAMESPACE_SERVER,
+ obd->obd_namespace = ldlm_namespace_new(obd, ns_name, LDLM_NAMESPACE_SERVER,
LDLM_NAMESPACE_GREEDY);
if (obd->obd_namespace == NULL)
GOTO(err_post, rc = -ENOMEM);
ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
"filter_ldlm_cb_client", &obd->obd_ldlm_client);
- rc = llog_cat_initialize(obd, NULL, 1, NULL);
+ rc = obd_llog_init(obd, &obd->obd_olg, obd, 1, NULL, NULL);
if (rc) {
CERROR("failed to setup llogging subsystems\n");
GOTO(err_post, rc);
}
+ rwlock_init(&filter->fo_sptlrpc_lock);
+ sptlrpc_rule_set_init(&filter->fo_sptlrpc_rset);
+        /* do this only after the llog subsystem has been initialized */
+ filter_adapt_sptlrpc_conf(obd, 1);
+
rc = lquota_setup(filter_quota_interface_ref, obd);
if (rc)
GOTO(err_post, rc);
+ q = bdev_get_queue(mnt->mnt_sb->s_bdev);
+ if (q->max_sectors < q->max_hw_sectors &&
+ q->max_sectors < PTLRPC_MAX_BRW_SIZE >> 9)
+ LCONSOLE_INFO("%s: underlying device %s should be tuned "
+ "for larger I/O requests: max_sectors = %u "
+ "could be up to max_hw_sectors=%u\n",
+ obd->obd_name, mnt->mnt_sb->s_id,
+ q->max_sectors, q->max_hw_sectors);
+
uuid_ptr = fsfilt_uuid(obd, obd->u.obt.obt_sb);
if (uuid_ptr != NULL) {
class_uuid_unparse(uuid_ptr, &uuid);
if (obd->obd_recovering) {
LCONSOLE_WARN("OST %s now serving %s (%s%s%s), but will be in "
- "recovery until %d %s reconnect, or if no clients"
- " reconnect for %d:%.02d; during that time new "
- "clients will not be allowed to connect. "
- "Recovery progress can be monitored by watching "
- "/proc/fs/lustre/obdfilter/%s/recovery_status.\n",
+ "recovery for at least %d:%.02d, or until %d "
+ "client%s reconnect%s.\n",
obd->obd_name, lustre_cfg_string(lcfg, 1),
label ?: "", label ? "/" : "", str,
+ obd->obd_recovery_timeout / 60,
+ obd->obd_recovery_timeout % 60,
obd->obd_max_recoverable_clients,
- (obd->obd_max_recoverable_clients == 1)
- ? "client" : "clients",
- (int)(OBD_RECOVERY_TIMEOUT) / 60,
- (int)(OBD_RECOVERY_TIMEOUT) % 60,
- obd->obd_name);
+ (obd->obd_max_recoverable_clients == 1) ? "":"s",
+ (obd->obd_max_recoverable_clients == 1) ? "s":"");
} else {
LCONSOLE_INFO("OST %s now serving %s (%s%s%s) with recovery "
"%s\n", obd->obd_name, lustre_cfg_string(lcfg, 1),
static int filter_setup(struct obd_device *obd, struct lustre_cfg* lcfg)
{
struct lprocfs_static_vars lvars;
- unsigned long page;
+ unsigned long addr;
+ struct page *page;
int rc;
CLASSERT(offsetof(struct obd_device, u.obt) ==
RETURN(-EINVAL);
/* 2.6.9 selinux wants a full option page for do_kern_mount (bug6471) */
- page = get_zeroed_page(GFP_KERNEL);
+ OBD_PAGE_ALLOC(page, CFS_ALLOC_STD);
if (!page)
RETURN(-ENOMEM);
+ addr = (unsigned long)cfs_page_address(page);
+ clear_page((void *)addr);
/* lprocfs must be setup before the filter so state can be safely added
* to /proc incrementally as the filter is setup */
- lprocfs_init_vars(filter, &lvars);
+ lprocfs_filter_init_vars(&lvars);
if (lprocfs_obd_setup(obd, lvars.obd_vars) == 0 &&
lprocfs_alloc_obd_stats(obd, LPROC_FILTER_LAST) == 0) {
/* Init obdfilter private stats here */
lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_WRITE_BYTES,
LPROCFS_CNTR_AVGMINMAX,
"write_bytes", "bytes");
+ lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_GET_PAGE,
+ LPROCFS_CNTR_AVGMINMAX|LPROCFS_CNTR_STDDEV,
+ "get_page", "usec");
+ lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_NO_PAGE,
+ LPROCFS_CNTR_AVGMINMAX,
+ "get_page_failures", "num");
+ lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_CACHE_ACCESS,
+ LPROCFS_CNTR_AVGMINMAX,
+ "cache_access", "pages");
+ lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_CACHE_HIT,
+ LPROCFS_CNTR_AVGMINMAX,
+ "cache_hit", "pages");
+ lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_CACHE_MISS,
+ LPROCFS_CNTR_AVGMINMAX,
+ "cache_miss", "pages");
lproc_filter_attach_seqstat(obd);
- obd->obd_proc_exports = proc_mkdir("exports",
- obd->obd_proc_entry);
+ obd->obd_proc_exports_entry = lprocfs_register("exports",
+ obd->obd_proc_entry,
+ NULL, NULL);
+ if (IS_ERR(obd->obd_proc_exports_entry)) {
+ rc = PTR_ERR(obd->obd_proc_exports_entry);
+ CERROR("error %d setting up lprocfs for %s\n",
+ rc, "exports");
+ obd->obd_proc_exports_entry = NULL;
+ }
}
+ if (obd->obd_proc_exports_entry)
+ lprocfs_add_simple(obd->obd_proc_exports_entry, "clear",
+ lprocfs_nid_stats_clear_read,
+ lprocfs_nid_stats_clear_write, obd, NULL);
- memcpy((void *)page, lustre_cfg_buf(lcfg, 4),
+ memcpy((void *)addr, lustre_cfg_buf(lcfg, 4),
LUSTRE_CFG_BUFLEN(lcfg, 4));
- rc = filter_common_setup(obd, lcfg, (void *)page);
- free_page(page);
+ rc = filter_common_setup(obd, lcfg, (void *)addr);
+ OBD_PAGE_FREE(page);
if (rc) {
- lprocfs_obd_cleanup(obd);
+ lprocfs_remove_proc_entry("clear", obd->obd_proc_exports_entry);
+ lprocfs_free_per_client_stats(obd);
lprocfs_free_obd_stats(obd);
+ lprocfs_obd_cleanup(obd);
}
return rc;
}
-static struct llog_operations filter_mds_ost_repl_logops /* initialized below*/;
+static struct llog_operations filter_mds_ost_repl_logops;
+
static struct llog_operations filter_size_orig_logops = {
- lop_setup: llog_obd_origin_setup,
- lop_cleanup: llog_obd_origin_cleanup,
- lop_add: llog_obd_origin_add
+ .lop_setup = llog_obd_origin_setup,
+ .lop_cleanup = llog_obd_origin_cleanup,
+ .lop_add = llog_obd_origin_add
};
-static int filter_llog_init(struct obd_device *obd, struct obd_llogs *llogs,
- struct obd_device *tgt, int count,
- struct llog_catid *catid,
- struct obd_uuid *uuid)
+static int filter_olg_fini(struct obd_llog_group *olg)
{
struct llog_ctxt *ctxt;
- int rc;
+ int rc = 0, rc2 = 0;
ENTRY;
- filter_mds_ost_repl_logops = llog_client_ops;
- filter_mds_ost_repl_logops.lop_cancel = llog_obd_repl_cancel;
- filter_mds_ost_repl_logops.lop_connect = llog_repl_connect;
- filter_mds_ost_repl_logops.lop_sync = llog_obd_repl_sync;
+ ctxt = llog_group_get_ctxt(olg, LLOG_MDS_OST_REPL_CTXT);
+ if (ctxt)
+ rc = llog_cleanup(ctxt);
- rc = llog_setup(obd, llogs, LLOG_MDS_OST_REPL_CTXT, tgt, 0, NULL,
- &filter_mds_ost_repl_logops);
- if (rc)
- RETURN(rc);
+ ctxt = llog_group_get_ctxt(olg, LLOG_SIZE_ORIG_CTXT);
+ if (ctxt) {
+ rc2 = llog_cleanup(ctxt);
+ if (!rc)
+ rc = rc2;
+ }
- /* FIXME - assign unlink_cb for filter's recovery */
- if (!llogs)
- ctxt = llog_get_context(obd, LLOG_MDS_OST_REPL_CTXT);
- else
- ctxt = llog_get_context_from_llogs(llogs, LLOG_MDS_OST_REPL_CTXT);
+ ctxt = llog_group_get_ctxt(olg, LLOG_CONFIG_ORIG_CTXT);
+ if (ctxt) {
+ rc2 = llog_cleanup(ctxt);
+ if (!rc)
+ rc = rc2;
+ }
- LASSERT(ctxt != NULL);
- ctxt->llog_proc_cb = filter_recov_log_mds_ost_cb;
+ RETURN(rc);
+}
+
+static int
+filter_olg_init(struct obd_device *obd, struct obd_llog_group *olg,
+ struct obd_device *tgt)
+{
+ int rc;
+ ENTRY;
+
+ rc = llog_setup(obd, olg, LLOG_MDS_OST_REPL_CTXT, tgt, 0, NULL,
+ &filter_mds_ost_repl_logops);
+ if (rc)
+ GOTO(cleanup, rc);
- rc = llog_setup(obd, llogs, LLOG_SIZE_ORIG_CTXT, tgt, 0, NULL,
+ rc = llog_setup(obd, olg, LLOG_SIZE_ORIG_CTXT, tgt, 0, NULL,
&filter_size_orig_logops);
- RETURN(rc);
+ if (rc)
+ GOTO(cleanup, rc);
+ EXIT;
+cleanup:
+ if (rc)
+ filter_olg_fini(olg);
+ return rc;
}
-static int filter_group_llog_cleanup(struct llog_ctxt *ctxt)
+/**
+ * Initialize the default olg, which is embedded in the obd_device, for filter.
+ */
+static int
+filter_default_olg_init(struct obd_device *obd, struct obd_llog_group *olg,
+ struct obd_device *tgt)
{
- int rc = 0;
+ struct filter_obd *filter = &obd->u.filter;
+ struct llog_ctxt *ctxt;
+ int rc;
ENTRY;
- if (CTXTP(ctxt, cleanup))
- rc = CTXTP(ctxt, cleanup)(ctxt);
+ filter->fo_lcm = llog_recov_thread_init(obd->obd_name);
+ if (!filter->fo_lcm)
+ RETURN(-ENOMEM);
- if (ctxt->loc_exp)
- class_export_put(ctxt->loc_exp);
- OBD_FREE(ctxt, sizeof(*ctxt));
+ filter_mds_ost_repl_logops = llog_client_ops;
+ filter_mds_ost_repl_logops.lop_cancel = llog_obd_repl_cancel;
+ filter_mds_ost_repl_logops.lop_connect = llog_obd_repl_connect;
+ filter_mds_ost_repl_logops.lop_sync = llog_obd_repl_sync;
- RETURN(rc);
+ rc = filter_olg_init(obd, olg, tgt);
+ if (rc)
+ GOTO(cleanup_lcm, rc);
+
+ rc = llog_setup(obd, olg, LLOG_CONFIG_ORIG_CTXT, tgt, 0, NULL,
+ &llog_lvfs_ops);
+ if (rc)
+ GOTO(cleanup_olg, rc);
+
+ ctxt = llog_group_get_ctxt(olg, LLOG_MDS_OST_REPL_CTXT);
+ if (!ctxt) {
+ CERROR("Can't get ctxt for %p:%x\n", olg,
+ LLOG_MDS_OST_REPL_CTXT);
+ GOTO(cleanup_olg, rc = -ENODEV);
+ }
+ ctxt->loc_lcm = lcm_get(filter->fo_lcm);
+ ctxt->llog_proc_cb = filter_recov_log_mds_ost_cb;
+ llog_ctxt_put(ctxt);
+
+ RETURN(0);
+cleanup_olg:
+ filter_olg_fini(olg);
+cleanup_lcm:
+ llog_recov_thread_fini(filter->fo_lcm, 1);
+ filter->fo_lcm = NULL;
+ return rc;
}
-static int filter_group_llog_finish(struct obd_llogs *llogs)
+static int
+filter_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
+ struct obd_device *tgt, int count, struct llog_catid *catid,
+ struct obd_uuid *uuid)
{
+ struct filter_obd *filter = &obd->u.filter;
struct llog_ctxt *ctxt;
- int rc = 0, rc2 = 0;
+ int rc;
ENTRY;
- ctxt = llog_get_context_from_llogs(llogs, LLOG_MDS_OST_REPL_CTXT);
- if (ctxt)
- rc = filter_group_llog_cleanup(ctxt);
-
- ctxt = llog_get_context_from_llogs(llogs, LLOG_SIZE_ORIG_CTXT);
- if (ctxt)
- rc2 = filter_group_llog_cleanup(ctxt);
- if (!rc)
- rc = rc2;
+ LASSERT(olg != NULL);
+ if (olg == &obd->obd_olg)
+ return filter_default_olg_init(obd, olg, tgt);
+ LASSERT(filter->fo_lcm != NULL);
+ rc = filter_olg_init(obd, olg, tgt);
+ if (rc)
+ RETURN(rc);
+ ctxt = llog_group_get_ctxt(olg, LLOG_MDS_OST_REPL_CTXT);
+ if (!ctxt) {
+ CERROR("Can't get ctxt for %p:%x\n", olg,
+ LLOG_MDS_OST_REPL_CTXT);
+ filter_olg_fini(olg);
+ RETURN(-ENODEV);
+ }
+ ctxt->llog_proc_cb = filter_recov_log_mds_ost_cb;
+ ctxt->loc_lcm = lcm_get(filter->fo_lcm);
+ llog_ctxt_put(ctxt);
RETURN(rc);
}
static int filter_llog_finish(struct obd_device *obd, int count)
{
+ struct filter_obd *filter = &obd->u.filter;
struct llog_ctxt *ctxt;
- int rc = 0, rc2 = 0;
ENTRY;
- ctxt = llog_get_context(obd, LLOG_MDS_OST_REPL_CTXT);
- if (ctxt)
- rc = llog_cleanup(ctxt);
+ ctxt = llog_group_get_ctxt(&obd->obd_olg, LLOG_MDS_OST_REPL_CTXT);
+ if (ctxt) {
+ /*
+                 * Make sure that no cached llcds are left in recov_thread.
+                 * We actually sync at disconnect time, but the disconnect
+                 * request may never arrive, as it is marked rq_no_resend = 1.
+ */
+ llog_sync(ctxt, NULL);
- ctxt = llog_get_context(obd, LLOG_SIZE_ORIG_CTXT);
- if (ctxt)
- rc2 = llog_cleanup(ctxt);
- if (!rc)
- rc = rc2;
+ /*
+                 * Balance class_import_get() in llog_receptor_accept().
+                 * This is safe to do, as the llog is already synchronized
+                 * and its import may safely go away.
+ */
+ mutex_down(&ctxt->loc_sem);
+ if (ctxt->loc_imp) {
+ class_import_put(ctxt->loc_imp);
+ ctxt->loc_imp = NULL;
+ }
+ mutex_up(&ctxt->loc_sem);
+ llog_ctxt_put(ctxt);
+ }
- RETURN(rc);
+ if (filter->fo_lcm) {
+ mutex_down(&ctxt->loc_sem);
+ llog_recov_thread_fini(filter->fo_lcm, obd->obd_force);
+ filter->fo_lcm = NULL;
+ mutex_up(&ctxt->loc_sem);
+ }
+ RETURN(filter_olg_fini(&obd->obd_olg));
+}
+
+/**
+ * Find the llog group matching the given group index in the llog group list.
+ */
+static struct obd_llog_group *
+filter_find_olg_internal(struct filter_obd *filter, int group)
+{
+ struct obd_llog_group *olg;
+
+ LASSERT_SPIN_LOCKED(&filter->fo_llog_list_lock);
+ list_for_each_entry(olg, &filter->fo_llog_list, olg_list) {
+ if (olg->olg_group == group)
+ RETURN(olg);
+ }
+ RETURN(NULL);
}
-struct obd_llogs *filter_grab_llog_for_group(struct obd_device *obd, int group,
- struct obd_export *export)
+/**
+ * Find the llog group matching the given group index on this filter.
+ */
+struct obd_llog_group *filter_find_olg(struct obd_device *obd, int group)
{
- struct filter_group_llog *fglog, *nlog;
+ struct obd_llog_group *olg = NULL;
struct filter_obd *filter;
- struct llog_ctxt *ctxt;
- struct list_head *cur;
- int rc;
filter = &obd->u.filter;
+ if (group == FILTER_GROUP_LLOG)
+ RETURN(&obd->obd_olg);
+
spin_lock(&filter->fo_llog_list_lock);
- list_for_each(cur, &filter->fo_llog_list) {
- fglog = list_entry(cur, struct filter_group_llog, list);
- if (fglog->group == group) {
- if (!(fglog->exp == NULL || fglog->exp == export || export == NULL))
- CWARN("%s: export for group %d changes: 0x%p -> 0x%p\n",
- obd->obd_name, group, fglog->exp, export);
- spin_unlock(&filter->fo_llog_list_lock);
- goto init;
- }
- }
+ olg = filter_find_olg_internal(filter, group);
spin_unlock(&filter->fo_llog_list_lock);
- if (export == NULL)
- RETURN(NULL);
+ RETURN(olg);
+}
+/**
+ * Find the llog group of the filter for the given group index. If it cannot
+ * be found, create the llog group; this only happens when the MDS is being
+ * synced with the OST.
+ */
+struct obd_llog_group *filter_find_create_olg(struct obd_device *obd, int group)
+{
+ struct obd_llog_group *olg = NULL;
+ struct filter_obd *filter;
+ int rc;
- OBD_ALLOC_PTR(fglog);
- if (fglog == NULL)
- RETURN(NULL);
- fglog->group = group;
+ filter = &obd->u.filter;
- OBD_ALLOC_PTR(fglog->llogs);
- if (fglog->llogs == NULL) {
- OBD_FREE_PTR(fglog);
- RETURN(NULL);
- }
+ if (group == FILTER_GROUP_LLOG)
+ RETURN(&obd->obd_olg);
spin_lock(&filter->fo_llog_list_lock);
- list_for_each(cur, &filter->fo_llog_list) {
- nlog = list_entry(cur, struct filter_group_llog, list);
- LASSERT(nlog->group != group);
+ olg = filter_find_olg_internal(filter, group);
+ if (olg) {
+ if (olg->olg_initializing) {
+ GOTO(out_unlock, olg = ERR_PTR(-EBUSY));
+ } else {
+ GOTO(out_unlock, olg);
+ }
}
- list_add(&fglog->list, &filter->fo_llog_list);
+ OBD_ALLOC_PTR(olg);
+ if (olg == NULL)
+ GOTO(out_unlock, olg = ERR_PTR(-ENOMEM));
+
+ llog_group_init(olg, group);
+ list_add(&olg->olg_list, &filter->fo_llog_list);
+ olg->olg_initializing = 1;
spin_unlock(&filter->fo_llog_list_lock);
- rc = llog_cat_initialize(obd, fglog->llogs, 1, NULL);
+ rc = llog_cat_initialize(obd, olg, 1, NULL);
if (rc) {
- OBD_FREE_PTR(fglog->llogs);
- OBD_FREE_PTR(fglog);
- RETURN(NULL);
- }
-
-init:
- if (export) {
- fglog->exp = export;
- ctxt = llog_get_context_from_llogs(fglog->llogs,
- LLOG_MDS_OST_REPL_CTXT);
- LASSERT(ctxt != NULL);
-
- llog_receptor_accept(ctxt, export->exp_imp_reverse);
+ spin_lock(&filter->fo_llog_list_lock);
+ list_del(&olg->olg_list);
+ spin_unlock(&filter->fo_llog_list_lock);
+ OBD_FREE_PTR(olg);
+ GOTO(out, olg = ERR_PTR(-ENOMEM));
}
- CDEBUG(D_OTHER, "%s: new llog 0x%p for group %u\n",
- obd->obd_name, fglog->llogs, group);
+ spin_lock(&filter->fo_llog_list_lock);
+ olg->olg_initializing = 0;
+ spin_unlock(&filter->fo_llog_list_lock);
+ CDEBUG(D_OTHER, "%s: new llog group %u (0x%p)\n",
+ obd->obd_name, group, olg);
+out:
+ RETURN(olg);
- RETURN(fglog->llogs);
+out_unlock:
+ spin_unlock(&filter->fo_llog_list_lock);
+ GOTO(out, olg);
}
static int filter_llog_connect(struct obd_export *exp,
{
struct obd_device *obd = exp->exp_obd;
struct llog_ctxt *ctxt;
- struct obd_llogs *llog;
+ struct obd_llog_group *olg;
int rc;
ENTRY;
- CDEBUG(D_OTHER, "handle connect for %s: %u/%u/%u\n", obd->obd_name,
- (unsigned) body->lgdc_logid.lgl_ogr,
- (unsigned) body->lgdc_logid.lgl_oid,
- (unsigned) body->lgdc_logid.lgl_ogen);
+ CDEBUG(D_OTHER, "%s: LLog connect for: "LPX64"/"LPX64":%x\n",
+ obd->obd_name, body->lgdc_logid.lgl_oid,
+ body->lgdc_logid.lgl_ogr, body->lgdc_logid.lgl_ogen);
+
+ olg = filter_find_olg(obd, body->lgdc_logid.lgl_ogr);
+ if (!olg) {
+ CERROR(" %s: can not find olg of group %d\n",
+ obd->obd_name, (int)body->lgdc_logid.lgl_ogr);
+ RETURN(-ENOENT);
+ }
+ llog_group_set_export(olg, exp);
- llog = filter_grab_llog_for_group(obd, body->lgdc_logid.lgl_ogr, exp);
- LASSERT(llog != NULL);
- ctxt = llog_get_context_from_llogs(llog, body->lgdc_ctxt_idx);
+ ctxt = llog_group_get_ctxt(olg, body->lgdc_ctxt_idx);
LASSERTF(ctxt != NULL, "ctxt is not null, ctxt idx %d \n",
body->lgdc_ctxt_idx);
- rc = llog_connect(ctxt, 1, &body->lgdc_logid,
+
+ CWARN("%s: Recovery from log "LPX64"/"LPX64":%x\n",
+ obd->obd_name, body->lgdc_logid.lgl_oid,
+ body->lgdc_logid.lgl_ogr, body->lgdc_logid.lgl_ogen);
+
+ spin_lock_bh(&obd->obd_processing_task_lock);
+ obd->u.filter.fo_mds_ost_sync = 1;
+ spin_unlock_bh(&obd->obd_processing_task_lock);
+ rc = llog_connect(ctxt, &body->lgdc_logid,
&body->lgdc_gen, NULL);
+ llog_ctxt_put(ctxt);
if (rc != 0)
CERROR("failed to connect rc %d idx %d\n", rc,
body->lgdc_ctxt_idx);
RETURN(rc);
}
-static int filter_llog_preclean (struct obd_device *obd)
+static int filter_llog_preclean(struct obd_device *obd)
{
- struct filter_group_llog *log;
+ struct obd_llog_group *olg, *tmp;
struct filter_obd *filter;
+ struct list_head remove_list;
int rc = 0;
ENTRY;
+ rc = obd_llog_finish(obd, 0);
+ if (rc)
+ CERROR("failed to cleanup llogging subsystem\n");
+
filter = &obd->u.filter;
+ CFS_INIT_LIST_HEAD(&remove_list);
+
spin_lock(&filter->fo_llog_list_lock);
while (!list_empty(&filter->fo_llog_list)) {
- log = list_entry(filter->fo_llog_list.next,
- struct filter_group_llog, list);
- list_del(&log->list);
- spin_unlock(&filter->fo_llog_list_lock);
+ olg = list_entry(filter->fo_llog_list.next,
+ struct obd_llog_group, olg_list);
+ list_del(&olg->olg_list);
+ list_add(&olg->olg_list, &remove_list);
+ }
+ spin_unlock(&filter->fo_llog_list_lock);
- rc = filter_group_llog_finish(log->llogs);
+ list_for_each_entry_safe(olg, tmp, &remove_list, olg_list) {
+ list_del_init(&olg->olg_list);
+ rc = filter_olg_fini(olg);
if (rc)
CERROR("failed to cleanup llogging subsystem for %u\n",
- log->group);
- OBD_FREE_PTR(log->llogs);
- OBD_FREE_PTR(log);
- spin_lock(&filter->fo_llog_list_lock);
+ olg->olg_group);
+ OBD_FREE_PTR(olg);
}
- spin_unlock(&filter->fo_llog_list_lock);
-
- rc = obd_llog_finish(obd, 0);
- if (rc)
- CERROR("failed to cleanup llogging subsystem\n");
RETURN(rc);
}
case OBD_CLEANUP_EARLY:
break;
case OBD_CLEANUP_EXPORTS:
- target_cleanup_recovery(obd);
- break;
- case OBD_CLEANUP_SELF_EXP:
+ /* Stop recovery before namespace cleanup. */
+ target_recovery_fini(obd);
rc = filter_llog_preclean(obd);
break;
- case OBD_CLEANUP_OBD:
- break;
}
RETURN(rc);
}
LCONSOLE_WARN("%s: shutting down for failover; client state "
"will be preserved.\n", obd->obd_name);
- if (!list_empty(&obd->obd_exports)) {
- CERROR("%s: still has clients!\n", obd->obd_name);
- class_disconnect_exports(obd);
- if (!list_empty(&obd->obd_exports)) {
- CERROR("still has exports after forced cleanup?\n");
- RETURN(-EBUSY);
- }
- }
+ obd_exports_barrier(obd);
+ obd_zombie_barrier();
- lprocfs_obd_cleanup(obd);
+ lprocfs_remove_proc_entry("clear", obd->obd_proc_exports_entry);
+ lprocfs_free_per_client_stats(obd);
lprocfs_free_obd_stats(obd);
+ lprocfs_obd_cleanup(obd);
lquota_cleanup(filter_quota_interface_ref, obd);
- /* Stop recovery before namespace cleanup. */
- target_stop_recovery_thread(obd);
- target_cleanup_recovery(obd);
+ ldlm_namespace_free(obd->obd_namespace, NULL, obd->obd_force);
+ obd->obd_namespace = NULL;
- ldlm_namespace_free(obd->obd_namespace, obd->obd_force);
+ sptlrpc_rule_set_free(&filter->fo_sptlrpc_rset);
if (obd->u.obt.obt_sb == NULL)
RETURN(0);
filter_post(obd);
- shrink_dcache_parent(obd->u.obt.obt_sb->s_root);
-
LL_DQUOT_OFF(obd->u.obt.obt_sb);
+ shrink_dcache_sb(obd->u.obt.obt_sb);
server_put_mount(obd->obd_name, filter->fo_vfsmnt);
obd->u.obt.obt_sb = NULL;
static int filter_connect_internal(struct obd_export *exp,
struct obd_connect_data *data)
{
+ struct filter_export_data *fed = &exp->exp_filter_data;
+
if (!data)
RETURN(0);
data->ocd_connect_flags, data->ocd_version,
data->ocd_grant, data->ocd_index);
+ if (fed->fed_group != 0 && fed->fed_group != data->ocd_group) {
+ CWARN("!!! This export (nid %s) used object group %d "
+ "earlier; now it's trying to use group %d! This could "
+ "be a bug in the MDS. Please report to "
+ "http://bugzilla.lustre.org/\n",
+ obd_export_nid2str(exp), fed->fed_group,data->ocd_group);
+ RETURN(-EPROTO);
+ }
+ fed->fed_group = data->ocd_group;
+
data->ocd_connect_flags &= OST_CONNECT_SUPPORTED;
exp->exp_connect_flags = data->ocd_connect_flags;
data->ocd_version = LUSTRE_VERSION_CODE;
+        /* Make sure the SKIP_ORPHAN flag really comes from an MDS. */
+ if (!ergo(data->ocd_connect_flags & OBD_CONNECT_SKIP_ORPHAN,
+ data->ocd_connect_flags & OBD_CONNECT_MDS))
+ RETURN(-EPROTO);
+
if (exp->exp_connect_flags & OBD_CONNECT_GRANT) {
- struct filter_export_data *fed = &exp->exp_filter_data;
+ struct filter_obd *filter = &exp->exp_obd->u.filter;
obd_size left, want;
spin_lock(&exp->exp_obd->obd_osfs_lock);
LPU64" left: "LPU64"\n", exp->exp_obd->obd_name,
exp->exp_client_uuid.uuid, exp,
data->ocd_grant, want, left);
+
+ filter->fo_tot_granted_clients ++;
}
if (data->ocd_connect_flags & OBD_CONNECT_INDEX) {
data->ocd_index);
RETURN(-EBADF);
}
+ /* FIXME: Do the same with the MDS UUID and fsd_peeruuid.
+ * FIXME: We don't strictly need the COMPAT flag for that,
+ * FIXME: as fsd_peeruuid[0] will tell us if that is set.
+ * FIXME: We needed it for the index, as index 0 is valid. */
}
if (OBD_FAIL_CHECK(OBD_FAIL_OST_BRW_SIZE)) {
LASSERT(data->ocd_brw_size);
}
- /* FIXME: Do the same with the MDS UUID and fsd_peeruuid.
- * FIXME: We don't strictly need the COMPAT flag for that,
- * FIXME: as fsd_peeruuid[0] will tell us if that is set.
- * FIXME: We needed it for the index, as index 0 is valid. */
+ if (data->ocd_connect_flags & OBD_CONNECT_CKSUM) {
+ __u32 cksum_types = data->ocd_cksum_types;
+
+ /* The client set in ocd_cksum_types the checksum types it
+ * supports. We have to mask off the algorithms that we don't
+ * support */
+ if (cksum_types & OBD_CKSUM_ALL)
+ data->ocd_cksum_types &= OBD_CKSUM_ALL;
+ else
+ data->ocd_cksum_types = OBD_CKSUM_CRC32;
+
+ CDEBUG(D_RPCTRACE, "%s: cli %s supports cksum type %x, return "
+ "%x\n", exp->exp_obd->obd_name,
+ obd_export_nid2str(exp), cksum_types,
+ data->ocd_cksum_types);
+ } else {
+ /* This client does not support OBD_CONNECT_CKSUM
+ * fall back to CRC32 */
+ CDEBUG(D_RPCTRACE, "%s: cli %s does not support "
+ "OBD_CONNECT_CKSUM, CRC32 will be used\n",
+ exp->exp_obd->obd_name,
+ obd_export_nid2str(exp));
+ }
RETURN(0);
}
-static int filter_reconnect(struct obd_export *exp, struct obd_device *obd,
+static int filter_reconnect(const struct lu_env *env,
+ struct obd_export *exp, struct obd_device *obd,
struct obd_uuid *cluuid,
- struct obd_connect_data *data)
+ struct obd_connect_data *data,
+ void *localdata)
{
int rc;
ENTRY;
RETURN(-EINVAL);
rc = filter_connect_internal(exp, data);
+ if (rc == 0)
+ filter_export_stats_init(obd, exp, localdata);
RETURN(rc);
}
/* nearly identical to mds_connect */
static int filter_connect(const struct lu_env *env,
- struct lustre_handle *conn, struct obd_device *obd,
+ struct obd_export **exp, struct obd_device *obd,
struct obd_uuid *cluuid,
- struct obd_connect_data *data)
+ struct obd_connect_data *data, void *localdata)
{
struct lvfs_run_ctxt saved;
- struct obd_export *exp;
+ struct lustre_handle conn = { 0 };
+ struct obd_export *lexp;
struct filter_export_data *fed;
- struct filter_client_data *fcd = NULL;
+ struct lsd_client_data *lcd = NULL;
__u32 group;
int rc;
ENTRY;
- if (conn == NULL || obd == NULL || cluuid == NULL)
+ if (exp == NULL || obd == NULL || cluuid == NULL)
RETURN(-EINVAL);
- rc = class_connect(conn, obd, cluuid);
+ rc = class_connect(&conn, obd, cluuid);
if (rc)
RETURN(rc);
- exp = class_conn2export(conn);
- LASSERT(exp != NULL);
+ lexp = class_conn2export(&conn);
+ LASSERT(lexp != NULL);
- fed = &exp->exp_filter_data;
+ fed = &lexp->exp_filter_data;
- rc = filter_connect_internal(exp, data);
+ rc = filter_connect_internal(lexp, data);
if (rc)
GOTO(cleanup, rc);
- filter_export_stats_init(obd, exp);
- group = data->ocd_group;
+ filter_export_stats_init(obd, lexp, localdata);
if (obd->obd_replayable) {
- OBD_ALLOC(fcd, sizeof(*fcd));
- if (!fcd) {
+ OBD_ALLOC(lcd, sizeof(*lcd));
+ if (!lcd) {
CERROR("filter: out of memory for client data\n");
GOTO(cleanup, rc = -ENOMEM);
}
- memcpy(fcd->fcd_uuid, cluuid, sizeof(fcd->fcd_uuid));
- fed->fed_fcd = fcd;
- fed->fed_fcd->fcd_group = group;
- rc = filter_client_add(obd, exp, -1);
+ memcpy(lcd->lcd_uuid, cluuid, sizeof(lcd->lcd_uuid));
+ fed->fed_lcd = lcd;
+ rc = filter_client_add(obd, lexp, -1);
if (rc)
GOTO(cleanup, rc);
}
- CWARN("%s: Received MDS connection ("LPX64"); group %d\n",
- obd->obd_name, exp->exp_handle.h_cookie, group);
- if (group == 0)
- GOTO(cleanup, rc);
- if (fed->fed_group != 0 && fed->fed_group != group) {
- CERROR("!!! This export (nid %s) used object group %d "
- "earlier; now it's trying to use group %d! This could "
- "be a bug in the MDS. Tell CFS.\n",
- obd_export_nid2str(exp), fed->fed_group, group);
- GOTO(cleanup, rc = -EPROTO);
- }
- fed->fed_group = group;
+ group = data->ocd_group;
+
+ CWARN("%s: Received MDS connection ("LPX64"); group %d\n",
+ obd->obd_name, lexp->exp_handle.h_cookie, group);
push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
rc = filter_read_groups(obd, group, 1);
cleanup:
if (rc) {
- if (fcd) {
- OBD_FREE(fcd, sizeof(*fcd));
- fed->fed_fcd = NULL;
+ if (lcd) {
+ OBD_FREE_PTR(lcd);
+ fed->fed_lcd = NULL;
}
- class_disconnect(exp);
+ class_disconnect(lexp);
+ *exp = NULL;
} else {
- class_export_put(exp);
+ *exp = lexp;
}
RETURN(rc);
exp->exp_obd->obd_name, exp->exp_client_uuid.uuid,
exp, exp->exp_filter_data.fed_pending);
+ lquota_clearinfo(filter_quota_interface_ref, exp, exp->exp_obd);
+
target_destroy_export(exp);
+ ldlm_destroy_export(exp);
if (obd_uuid_equals(&exp->exp_client_uuid, &exp->exp_obd->obd_uuid))
RETURN(0);
- lprocfs_exp_cleanup(exp);
if (exp->exp_obd->obd_replayable)
filter_client_free(exp);
filter_grant_discard(exp);
filter_fmd_cleanup(exp);
+ if (exp->exp_connect_flags & OBD_CONNECT_GRANT_SHRINK) {
+ struct filter_obd *filter = &exp->exp_obd->u.filter;
+ if (filter->fo_tot_granted_clients > 0)
+ filter->fo_tot_granted_clients --;
+ }
+
if (!(exp->exp_flags & OBD_OPT_FORCE))
filter_grant_sanity_check(exp->exp_obd, __FUNCTION__);
static void filter_sync_llogs(struct obd_device *obd, struct obd_export *dexp)
{
- struct filter_group_llog *fglog, *nlog;
+ struct obd_llog_group *olg_min, *olg;
struct filter_obd *filter;
- int worked = 0, group;
+ int worked = -1, group;
struct llog_ctxt *ctxt;
ENTRY;
* group order and skip already synced llogs -bzzz */
do {
/* look for group with min. number, but > worked */
- fglog = NULL;
+ olg_min = NULL;
group = 1 << 30;
spin_lock(&filter->fo_llog_list_lock);
- list_for_each_entry(nlog, &filter->fo_llog_list, list) {
- if (nlog->group <= worked) {
+ list_for_each_entry(olg, &filter->fo_llog_list, olg_list) {
+ if (olg->olg_group <= worked) {
/* this group is already synced */
continue;
}
- if (group < nlog->group) {
+ if (group < olg->olg_group) {
/* we have group with smaller number to sync */
continue;
}
/* store current minimal group */
- fglog = nlog;
- group = nlog->group;
+ olg_min = olg;
+ group = olg->olg_group;
}
spin_unlock(&filter->fo_llog_list_lock);
- if (fglog == NULL)
+ if (olg_min == NULL)
break;
- worked = fglog->group;
- if (fglog->exp && (dexp == fglog->exp || dexp == NULL)) {
- ctxt = llog_get_context_from_llogs(fglog->llogs,
- LLOG_MDS_OST_REPL_CTXT);
- LASSERT(ctxt != NULL);
- llog_sync(ctxt, fglog->exp);
+ worked = olg_min->olg_group;
+ if (olg_min->olg_exp &&
+ (dexp == olg_min->olg_exp || dexp == NULL)) {
+ int err;
+ ctxt = llog_group_get_ctxt(olg_min,
+ LLOG_MDS_OST_REPL_CTXT);
+ if (ctxt) {
+ err = llog_sync(ctxt, olg_min->olg_exp);
+ llog_ctxt_put(ctxt);
+ if (err) {
+ CERROR("error flushing logs to MDS: "
+ "rc %d\n", err);
+ }
+ }
}
- } while (fglog != NULL);
+ } while (olg_min != NULL);
}
-/* also incredibly similar to mds_disconnect */
+/* Also incredibly similar to mds_disconnect */
static int filter_disconnect(struct obd_export *exp)
{
struct obd_device *obd = exp->exp_obd;
filter_grant_sanity_check(obd, __FUNCTION__);
filter_grant_discard(exp);
+ /* Flush any remaining cancel messages out to the target */
+ filter_sync_llogs(obd, exp);
+
+ lquota_clearinfo(filter_quota_interface_ref, exp, exp->exp_obd);
+
/* Disconnect early so that clients can't keep using export */
rc = class_disconnect(exp);
if (exp->exp_obd->obd_namespace != NULL)
fsfilt_sync(obd, obd->u.obt.obt_sb);
- /* flush any remaining cancel messages out to the target */
- filter_sync_llogs(obd, exp);
+ lprocfs_exp_cleanup(exp);
+
class_export_put(exp);
RETURN(rc);
}
static int filter_ping(struct obd_export *exp)
{
filter_fmd_expire(exp);
-
return 0;
}
dchild = filter_fid2dentry(obd, NULL, group, oa->o_id);
if (IS_ERR(dchild)) {
- CERROR("%s error looking up object: "LPU64"\n",
- what, oa->o_id);
+ CERROR("%s error looking up object: "LPU64":"LPU64"\n",
+ what, group, oa->o_id);
RETURN(dchild);
}
unsigned int orig_ids[MAXQUOTAS] = {0, 0};
struct llog_cookie *fcc = NULL;
struct filter_obd *filter;
- int rc, err, locked = 0, sync = 0;
+ int rc, err, sync = 0;
+ loff_t old_size = 0;
unsigned int ia_valid;
struct inode *inode;
struct iattr iattr;
if (oa->o_valid & OBD_MD_FLCOOKIE) {
OBD_ALLOC(fcc, sizeof(*fcc));
if (fcc != NULL)
- memcpy(fcc, obdo_logcookie(oa), sizeof(*fcc));
+ *fcc = oa->o_lcookie;
}
-
- if (ia_valid & ATTR_SIZE || ia_valid & (ATTR_UID | ATTR_GID)) {
+ if (ia_valid & (ATTR_SIZE | ATTR_UID | ATTR_GID)) {
DQUOT_INIT(inode);
+ /* Filter truncates and writes are serialized by
+                 * i_alloc_sem; see the comment in
+                 * filter_preprw_write. */
+ if (ia_valid & ATTR_SIZE)
+ down_write(&inode->i_alloc_sem);
LOCK_INODE_MUTEX(inode);
- locked = 1;
+ old_size = i_size_read(inode);
}
+ /* VBR: version recovery check */
+ rc = filter_version_get_check(exp, oti, inode);
+ if (rc)
+ GOTO(out_unlock, rc);
+
/* If the inode still has SUID+SGID bits set (see filter_precreate())
* then we will accept the UID+GID sent by the client during write for
* initializing the ownership of this inode. We only allow this to
* sure we have one left for the last_rcvd update. */
err = fsfilt_extend(exp->exp_obd, inode, 1, handle);
- rc = filter_finish_transno(exp, oti, rc, sync);
+ rc = filter_finish_transno(exp, inode, oti, rc, sync);
if (sync) {
filter_cancel_cookies_cb(exp->exp_obd, 0, fcc, rc);
fcc = NULL;
fcc = NULL;
}
- if (locked) {
- /* Let's flush truncated page on disk immediately, then we can
- * avoid need to search for page aliases before directio writes
- * and this sort of stuff at expense of somewhat slower
- * truncates not on a page boundary. I believe this is the only
- * place in filter code that can lead to pages getting to
- * pagecache so far. */
- filter_clear_truncated_page(inode);
- UNLOCK_INODE_MUTEX(inode);
- locked = 0;
+        /* For a partial-page truncate, flush the page to disk immediately
+ * to avoid data corruption during direct disk write. b=17397 */
+ if (!sync && (iattr.ia_valid & ATTR_SIZE) &&
+ old_size != iattr.ia_size && (iattr.ia_size & ~CFS_PAGE_MASK)) {
+ err = filemap_fdatawrite_range(inode->i_mapping, iattr.ia_size,
+ iattr.ia_size + 1);
+ if (!rc)
+ rc = err;
}
EXIT;
out_unlock:
- if (locked)
+ if (ia_valid & (ATTR_SIZE | ATTR_UID | ATTR_GID))
UNLOCK_INODE_MUTEX(inode);
-
+ if (ia_valid & ATTR_SIZE)
+ up_write(&inode->i_alloc_sem);
if (fcc)
OBD_FREE(fcc, sizeof(*fcc));
int filter_setattr(struct obd_export *exp, struct obd_info *oinfo,
struct obd_trans_info *oti)
{
- struct ldlm_res_id res_id = { .name = { oinfo->oi_oa->o_id, 0,
- oinfo->oi_oa->o_gr, 0 } };
+ struct obdo *oa = oinfo->oi_oa;
+ struct lustre_capa *capa = oinfo_capa(oinfo);
+ struct ldlm_res_id res_id;
struct filter_mod_data *fmd;
struct lvfs_run_ctxt saved;
struct filter_obd *filter;
struct ldlm_resource *res;
struct dentry *dentry;
+ __u64 opc = CAPA_OPC_META_WRITE;
int rc;
ENTRY;
- rc = filter_auth_capa(exp, NULL, oinfo_mdsno(oinfo),
- oinfo_capa(oinfo), CAPA_OPC_META_WRITE);
+ if (oa->o_valid & OBD_FL_TRUNC)
+ opc |= CAPA_OPC_OSS_TRUNC;
+ rc = filter_auth_capa(exp, NULL, obdo_mdsno(oa), capa, opc);
if (rc)
RETURN(rc);
- dentry = __filter_oa2dentry(exp->exp_obd, oinfo->oi_oa,
- __FUNCTION__, 1);
+ if (oa->o_valid & (OBD_MD_FLUID | OBD_MD_FLGID)) {
+ rc = filter_capa_fixoa(exp, oa, obdo_mdsno(oa), capa);
+ if (rc)
+ RETURN(rc);
+ }
+
+ osc_build_res_name(oa->o_id, oa->o_gr, &res_id);
+ /* This would be very bad - accidentally truncating a file when
+ * changing the time or similar - bug 12203. */
+ if (oa->o_valid & OBD_MD_FLSIZE &&
+ oinfo->oi_policy.l_extent.end != OBD_OBJECT_EOF) {
+ static char mdsinum[48];
+
+ if (oa->o_valid & OBD_MD_FLFID)
+ snprintf(mdsinum, sizeof(mdsinum) - 1,
+ " of inode "LPU64"/%u", oa->o_fid,
+ oa->o_generation);
+ else
+ mdsinum[0] = '\0';
+
+ CERROR("%s: setattr from %s trying to truncate objid "LPU64
+ " %s\n",
+ exp->exp_obd->obd_name, obd_export_nid2str(exp),
+ oa->o_id, mdsinum);
+ RETURN(-EPERM);
+ }
+
+ dentry = __filter_oa2dentry(exp->exp_obd, oa, __FUNCTION__, 1);
if (IS_ERR(dentry))
RETURN(PTR_ERR(dentry));
push_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
lock_kernel();
- if (oinfo->oi_oa->o_valid &
+ if (oa->o_valid &
(OBD_MD_FLMTIME | OBD_MD_FLATIME | OBD_MD_FLCTIME)) {
- fmd = filter_fmd_get(exp,oinfo->oi_oa->o_id,oinfo->oi_oa->o_gr);
+ fmd = filter_fmd_get(exp, oa->o_id, oa->o_gr);
if (fmd && fmd->fmd_mactime_xid < oti->oti_xid)
fmd->fmd_mactime_xid = oti->oti_xid;
filter_fmd_put(exp, fmd);
}
/* setting objects attributes (including owner/group) */
- rc = filter_setattr_internal(exp, dentry, oinfo->oi_oa, oti);
+ rc = filter_setattr_internal(exp, dentry, oa, oti);
if (rc)
GOTO(out_unlock, rc);
&res_id, LDLM_EXTENT, 0);
if (res != NULL) {
+ LDLM_RESOURCE_ADDREF(res);
rc = ldlm_res_lvbo_update(res, NULL, 0, 0);
+ LDLM_RESOURCE_DELREF(res);
ldlm_resource_putref(res);
}
- oinfo->oi_oa->o_valid = OBD_MD_FLID;
+ oa->o_valid = OBD_MD_FLID;
/* Quota release need uid/gid info */
- obdo_from_inode(oinfo->oi_oa, dentry->d_inode,
+ obdo_from_inode(oa, dentry->d_inode,
FILTER_VALID_FLAGS | OBD_MD_FLUID | OBD_MD_FLGID);
EXIT;
/* caller must hold fo_create_locks[oa->o_gr] */
static int filter_destroy_precreated(struct obd_export *exp, struct obdo *oa,
- struct filter_obd *filter)
+ struct filter_obd *filter)
{
- struct obdo doa; /* XXX obdo on stack */
+ struct obdo doa = { 0 }; /* XXX obdo on stack */
obd_id last, id;
- int rc;
+ int rc = 0;
+ int skip_orphan;
ENTRY;
LASSERT(oa);
- LASSERT(oa->o_gr != 0);
+ LASSERT_MDS_GROUP(oa->o_gr);
LASSERT(oa->o_valid & OBD_MD_FLGROUP);
LASSERT(down_trylock(&filter->fo_create_locks[oa->o_gr]) != 0);
last = filter_last_id(filter, doa.o_gr);
- CWARN("%s: deleting orphan objects from "LPU64" to "LPU64"\n",
- exp->exp_obd->obd_name, oa->o_id + 1, last);
-
+ skip_orphan = !!(exp->exp_connect_flags & OBD_CONNECT_SKIP_ORPHAN);
+
+ CDEBUG(D_HA, "%s: deleting orphan objects from "LPU64" to "LPU64"%s\n",
+ exp->exp_obd->obd_name, oa->o_id + 1, last,
+ skip_orphan ? ", orphan objids won't be reused any more." : ".");
+
for (id = last; id > oa->o_id; id--) {
doa.o_id = id;
- rc = filter_destroy(exp, &doa, NULL, NULL, NULL);
+ rc = filter_destroy(exp, &doa, NULL, NULL, NULL, NULL);
if (rc && rc != -ENOENT) /* this is pretty fatal... */
CEMERG("error destroying precreate objid "LPU64": %d\n",
id, rc);
- filter_set_last_id(filter, id - 1, doa.o_gr);
+
/* update last_id on disk periodically so that if we restart
* we don't need to re-scan all of the just-deleted objects. */
- if ((id & 511) == 0)
+ if ((id & 511) == 0 && !skip_orphan) {
+ filter_set_last_id(filter, id - 1, doa.o_gr);
filter_update_last_objid(exp->exp_obd, doa.o_gr, 0);
+ }
}
CDEBUG(D_HA, "%s: after destroy: set last_objids["LPU64"] = "LPU64"\n",
exp->exp_obd->obd_name, doa.o_gr, oa->o_id);
- rc = filter_update_last_objid(exp->exp_obd, doa.o_gr, 1);
+ if (!skip_orphan) {
+ filter_set_last_id(filter, id, doa.o_gr);
+ rc = filter_update_last_objid(exp->exp_obd, doa.o_gr, 1);
+ } else {
+ /* don't reuse orphan object, return last used objid */
+ oa->o_id = last;
+ rc = 0;
+ }
clear_bit(doa.o_gr, &filter->fo_destroys_in_progress);
RETURN(rc);
/* delete orphans request */
if ((oa->o_valid & OBD_MD_FLFLAGS) && (oa->o_flags & OBD_FL_DELORPHAN)){
+ obd_id last = filter_last_id(filter, group);
+
if (oti->oti_conn_cnt < exp->exp_conn_cnt) {
CERROR("%s: dropping old orphan cleanup request\n",
obd->obd_name);
up(&filter->fo_create_locks[group]);
RETURN(0);
}
- diff = oa->o_id - filter_last_id(filter, group);
+ diff = oa->o_id - last;
CDEBUG(D_HA, "filter_last_id() = "LPU64" -> diff = %d\n",
- filter_last_id(filter, group), diff);
+ last, diff);
if (-diff > OST_MAX_PRECREATE) {
CERROR("%s: ignoring bogus orphan destroy request: "
"obdid "LPU64" last_id "LPU64"\n", obd->obd_name,
- oa->o_id, filter_last_id(filter, group));
+ oa->o_id, last);
/* FIXME: should reset precreate_next_id on MDS */
GOTO(out, rc = -EINVAL);
}
GOTO(out, rc = 0);
}
/* only precreate if group == 0 and o_id is specfied */
- if (group < FILTER_GROUP_MDS0 || oa->o_id == 0)
+ if (group == FILTER_GROUP_LLOG || oa->o_id == 0)
diff = 1;
else
diff = oa->o_id - filter_last_id(filter, group);
}
static int filter_statfs(struct obd_device *obd, struct obd_statfs *osfs,
- __u64 max_age)
+ __u64 max_age, __u32 flags)
{
struct filter_obd *filter = &obd->u.filter;
int blockbits = obd->u.obt.obt_sb->s_blocksize_bits;
* stop creating files on MDS if OST is not good shape to create
* objects.*/
osfs->os_state = (filter->fo_obt.obt_sb->s_flags & MS_RDONLY) ?
- EROFS : 0;
+ OS_STATE_READONLY : 0;
RETURN(rc);
}
return rc;
}
+/* Return the current number of free inodes on the backing filesystem,
+ * or (__u64)-1 on statfs failure. Used to annotate -ENOSPC errors
+ * during object precreation with how many inodes remain.
+ * NOTE(review): -1 assigned to an unsigned __u64 is a deliberate
+ * all-ones sentinel; callers compare against -1 to detect failure. */
+static __u64 filter_calc_free_inodes(struct obd_device *obd)
+{
+ int rc;
+ __u64 os_ffree = -1;
+
+ /* obd_osfs_lock serializes refresh and read of the cached
+ * obd_osfs statistics. */
+ spin_lock(&obd->obd_osfs_lock);
+ rc = fsfilt_statfs(obd, obd->u.obt.obt_sb, cfs_time_shift_64(1));
+ if (rc == 0)
+ os_ffree = obd->obd_osfs.os_ffree;
+ spin_unlock(&obd->obd_osfs_lock);
+
+ return os_ffree;
+}
/* We rely on the fact that only one thread will be creating files in a given
* group at a time, which is why we don't need an atomic filter_get_new_id.
struct filter_obd *filter;
struct obd_statfs *osfs;
int err = 0, rc = 0, recreate_obj = 0, i;
- unsigned long enough_time = jiffies + min(obd_timeout * HZ / 4, 10U*HZ);
+ cfs_time_t enough_time = cfs_time_shift(DISK_TIMEOUT/2);
+ __u64 os_ffree;
obd_id next_id;
void *handle = NULL;
ENTRY;
OBD_ALLOC(osfs, sizeof(*osfs));
if (osfs == NULL)
RETURN(-ENOMEM);
- rc = filter_statfs(obd, osfs, cfs_time_current_64() - HZ);
+ rc = filter_statfs(obd, osfs, cfs_time_current_64() - HZ, 0);
if (rc == 0 && osfs->os_bavail < (osfs->os_blocks >> 10)) {
CDEBUG(D_RPCTRACE,"%s: not enough space for create "
LPU64"\n", obd->obd_name, osfs->os_bavail <<
rc = -EAGAIN;
break;
}
-
+
if (recreate_obj) {
__u64 last_id;
next_id = oa->o_id;
} else
next_id = filter_last_id(filter, group) + 1;
- CDEBUG(D_INFO, "precreate objid "LPU64"\n", next_id);
-
dparent = filter_parent_lock(obd, group, next_id);
if (IS_ERR(dparent))
GOTO(cleanup, rc = PTR_ERR(dparent));
GOTO(cleanup, rc = PTR_ERR(handle));
cleanup_phase = 3;
+ CDEBUG(D_INODE, "%s: filter_precreate(od->o_gr="LPU64
+ ",od->o_id="LPU64")\n", obd->obd_name, group,
+ next_id);
+
/* We mark object SUID+SGID to flag it for accepting UID+GID
* from client on first write. Currently the permission bits
* on the OST are never used, so this is OK. */
S_IFREG | S_ISUID | S_ISGID | 0666, NULL);
if (rc) {
CERROR("create failed rc = %d\n", rc);
+ if (rc == -ENOSPC) {
+ os_ffree = filter_calc_free_inodes(obd);
+ if (os_ffree != -1)
+ CERROR("%s: free inode "LPU64"\n",
+ obd->obd_name, os_ffree);
+ }
GOTO(cleanup, rc);
}
+ if (dchild->d_inode)
+ CDEBUG(D_INFO, "objid "LPU64" got inum %lu\n", next_id,
+ dchild->d_inode->i_ino);
+
set_last_id:
if (!recreate_obj) {
filter_set_last_id(filter, next_id, group);
static int filter_create(struct obd_export *exp, struct obdo *oa,
struct lov_stripe_md **ea, struct obd_trans_info *oti)
{
+ struct obd_device *obd = exp->exp_obd;
struct filter_export_data *fed;
- struct obd_device *obd = NULL;
struct filter_obd *filter;
struct lvfs_run_ctxt saved;
struct lov_stripe_md *lsm = NULL;
int rc = 0, diff, group = oa->o_gr;
ENTRY;
- if (!(oa->o_valid & OBD_MD_FLGROUP) || group == 0) {
+ CDEBUG(D_INODE, "%s: filter_create(od->o_gr="LPU64",od->o_id="
+ LPU64")\n", obd->obd_name, oa->o_gr, oa->o_id);
+
+ if (!(oa->o_valid & OBD_MD_FLGROUP)) {
CERROR("!!! nid %s sent invalid object group %d\n",
obd_export_nid2str(exp), group);
RETURN(-EINVAL);
}
- obd = exp->exp_obd;
fed = &exp->exp_filter_data;
filter = &obd->u.filter;
if (fed->fed_group != group) {
CERROR("!!! this export (nid %s) used object group %d "
"earlier; now it's trying to use group %d! This could "
- "be a bug in the MDS. Tell CFS.\n",
+ "be a bug in the MDS. Please report to "
+ "http://bugzilla.lustre.org/\n",
obd_export_nid2str(exp), fed->fed_group, group);
RETURN(-ENOTUNIQ);
}
- CDEBUG(D_INFO, "filter_create(od->o_gr="LPU64",od->o_id="LPU64")\n",
- oa->o_gr, oa->o_id);
if (ea != NULL) {
lsm = *ea;
if (lsm == NULL) {
int filter_destroy(struct obd_export *exp, struct obdo *oa,
struct lov_stripe_md *md, struct obd_trans_info *oti,
- struct obd_export *md_exp)
+ struct obd_export *md_exp, void *capa)
{
unsigned int qcids[MAXQUOTAS] = {0, 0};
struct obd_device *obd;
struct filter_obd *filter;
struct dentry *dchild = NULL, *dparent = NULL;
+ struct lustre_handle lockh = { 0 };
struct lvfs_run_ctxt saved;
void *handle = NULL;
struct llog_cookie *fcc = NULL;
LASSERT(oa->o_valid & OBD_MD_FLGROUP);
+ rc = filter_auth_capa(exp, NULL, obdo_mdsno(oa),
+ (struct lustre_capa *)capa, CAPA_OPC_OSS_DESTROY);
+ if (rc)
+ RETURN(rc);
+
obd = exp->exp_obd;
filter = &obd->u.filter;
push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
cleanup_phase = 1;
+ CDEBUG(D_INODE, "%s: filter_destroy(od->o_gr="LPU64",od->o_id="
+ LPU64")\n", obd->obd_name, oa->o_gr, oa->o_id);
+
dchild = filter_fid2dentry(obd, NULL, oa->o_gr, oa->o_id);
if (IS_ERR(dchild))
GOTO(cleanup, rc = PTR_ERR(dchild));
oa->o_id);
/* If object already gone, cancel cookie right now */
if (oa->o_valid & OBD_MD_FLCOOKIE) {
- fcc = obdo_logcookie(oa);
- llog_cancel(llog_get_context(obd, fcc->lgc_subsys + 1),
- NULL, 1, fcc, 0);
+ struct llog_ctxt *ctxt;
+ struct obd_llog_group *olg;
+ fcc = &oa->o_lcookie;
+ olg = filter_find_olg(obd, oa->o_gr);
+ if (!olg) {
+ CERROR(" %s: can not find olg of group %d\n",
+ obd->obd_name, (int)oa->o_gr);
+ GOTO(cleanup, rc = PTR_ERR(olg));
+ }
+ llog_group_set_export(olg, exp);
+
+ ctxt = llog_group_get_ctxt(olg, fcc->lgc_subsys + 1);
+ llog_cancel(ctxt, NULL, 1, fcc, 0);
+ llog_ctxt_put(ctxt);
fcc = NULL; /* we didn't allocate fcc, don't free it */
}
GOTO(cleanup, rc = -ENOENT);
}
- filter_prepare_destroy(obd, oa->o_id, oa->o_gr);
+ filter_prepare_destroy(obd, oa->o_id, oa->o_gr, &lockh);
/* Our MDC connection is established by the MDS to us */
if (oa->o_valid & OBD_MD_FLCOOKIE) {
OBD_ALLOC(fcc, sizeof(*fcc));
if (fcc != NULL)
- memcpy(fcc, obdo_logcookie(oa), sizeof(*fcc));
+ *fcc = oa->o_lcookie;
}
DQUOT_INIT(dchild->d_inode);
* (see BUG 4180) -bzzz
*/
LOCK_INODE_MUTEX(dchild->d_inode);
+
+ /* VBR: version recovery check */
+ rc = filter_version_get_check(exp, oti, dchild->d_inode);
+ if (rc)
+ GOTO(cleanup, rc);
+
handle = fsfilt_start_log(obd, dchild->d_inode, FSFILT_OP_SETATTR,
NULL, 1);
if (IS_ERR(handle)) {
filter_cancel_cookies_cb,
fcc);
/* If add_journal_cb failed, then filter_finish_transno
- * will commit the handle and we will do a sync
- * on commit. then we call callback directly to free
- * the fcc.
+ * will commit the handle and we will do a sync
+ * on commit. then we call callback directly to free
+ * the fcc.
*/
- rc = filter_finish_transno(exp, oti, rc, sync);
+ rc = filter_finish_transno(exp, NULL, oti, rc, sync);
if (sync) {
- filter_cancel_cookies_cb(obd, 0, fcc, rc);
+ filter_cancel_cookies_cb(obd, 0, fcc, rc);
fcc = NULL;
}
rc2 = fsfilt_commit(obd, dparent->d_inode, handle, 0);
case 3:
filter_parent_unlock(dparent);
case 2:
+ filter_fini_destroy(obd, &lockh);
+
f_dput(dchild);
if (fcc != NULL)
OBD_FREE(fcc, sizeof(*fcc));
qcids[GRPQUOTA] = oa->o_gid;
rc2 = lquota_adjust(filter_quota_interface_ref, obd, qcids, NULL, rc,
FSFILT_OP_UNLINK);
-
if (rc2)
- CDEBUG(D_QUOTA, "filter adjust qunit! (rc:%d)\n", rc2);
+ CERROR("filter adjust qunit! (rc:%d)\n", rc2);
return rc;
}
", o_size = "LPD64"\n", oinfo->oi_oa->o_id,
oinfo->oi_oa->o_valid, oinfo->oi_policy.l_extent.start);
- rc = filter_auth_capa(exp, NULL, oinfo_mdsno(oinfo),
- oinfo_capa(oinfo), CAPA_OPC_OSS_TRUNC);
- if (rc)
- RETURN(rc);
-
oinfo->oi_oa->o_size = oinfo->oi_policy.l_extent.start;
+ oinfo->oi_oa->o_valid |= OBD_FL_TRUNC;
rc = filter_setattr(exp, oinfo, oti);
+ oinfo->oi_oa->o_valid &= ~OBD_FL_TRUNC;
RETURN(rc);
}
struct lvfs_run_ctxt saved;
struct filter_obd *filter;
struct dentry *dentry;
- struct llog_ctxt *ctxt;
int rc, rc2;
ENTRY;
filter = &exp->exp_obd->u.filter;
- /* an objid of zero is taken to mean "sync whole filesystem" */
+ /* An objid of zero is taken to mean "sync whole filesystem" */
if (!oa || !(oa->o_valid & OBD_MD_FLID)) {
rc = fsfilt_sync(exp->exp_obd, filter->fo_obt.obt_sb);
- /* flush any remaining cancel messages out to the target */
- ctxt = llog_get_context(exp->exp_obd, LLOG_MDS_OST_REPL_CTXT);
- llog_sync(ctxt, exp);
+ /* Flush any remaining cancel messages out to the target */
+ filter_sync_llogs(exp->exp_obd, exp);
RETURN(rc);
}
}
static int filter_get_info(struct obd_export *exp, __u32 keylen,
- void *key, __u32 *vallen, void *val)
+ void *key, __u32 *vallen, void *val,
+ struct lov_stripe_md *lsm)
{
struct obd_device *obd;
ENTRY;
RETURN(-EINVAL);
}
- if (keylen == strlen("blocksize") &&
- memcmp(key, "blocksize", keylen) == 0) {
+ if (KEY_IS(KEY_BLOCKSIZE)) {
__u32 *blocksize = val;
+ if (blocksize) {
+ if (*vallen < sizeof(*blocksize))
+ RETURN(-EOVERFLOW);
+ *blocksize = obd->u.obt.obt_sb->s_blocksize;
+ }
*vallen = sizeof(*blocksize);
- *blocksize = obd->u.obt.obt_sb->s_blocksize;
RETURN(0);
}
- if (keylen == strlen("blocksize_bits") &&
- memcmp(key, "blocksize_bits", keylen) == 0) {
+ if (KEY_IS(KEY_BLOCKSIZE_BITS)) {
__u32 *blocksize_bits = val;
+ if (blocksize_bits) {
+ if (*vallen < sizeof(*blocksize_bits))
+ RETURN(-EOVERFLOW);
+ *blocksize_bits = obd->u.obt.obt_sb->s_blocksize_bits;
+ }
*vallen = sizeof(*blocksize_bits);
- *blocksize_bits = obd->u.obt.obt_sb->s_blocksize_bits;
RETURN(0);
}
- if (keylen >= strlen("last_id") && memcmp(key, "last_id", 7) == 0) {
+ if (KEY_IS(KEY_LAST_ID)) {
obd_id *last_id = val;
/* FIXME: object groups */
- *last_id = filter_last_id(&obd->u.filter, 0);
+ if (last_id) {
+ if (*vallen < sizeof(*last_id))
+ RETURN(-EOVERFLOW);
+ *last_id = filter_last_id(&obd->u.filter,
+ exp->exp_filter_data.fed_group);
+ }
+ *vallen = sizeof(*last_id);
RETURN(0);
}
+
+ if (KEY_IS(KEY_FIEMAP)) {
+ struct ll_fiemap_info_key *fm_key = key;
+ struct dentry *dentry;
+ struct ll_user_fiemap *fiemap = val;
+ struct lvfs_run_ctxt saved;
+ int rc;
+
+ if (fiemap == NULL) {
+ *vallen = fiemap_count_to_size(
+ fm_key->fiemap.fm_extent_count);
+ RETURN(0);
+ }
+
+ dentry = __filter_oa2dentry(exp->exp_obd, &fm_key->oa,
+ __FUNCTION__, 1);
+ if (IS_ERR(dentry))
+ RETURN(PTR_ERR(dentry));
+
+ memcpy(fiemap, &fm_key->fiemap, sizeof(*fiemap));
+ push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
+ rc = fsfilt_iocontrol(obd, dentry->d_inode, NULL,
+ EXT3_IOC_FIEMAP, (long)fiemap);
+ pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
+
+ f_dput(dentry);
+ RETURN(rc);
+ }
+
CDEBUG(D_IOCTL, "invalid key\n");
RETURN(-EINVAL);
}
+/* Find or create the llog group @group on @obd, bind it to the MDS
+ * export @exp, and have its replication context accept llog records
+ * from the reverse import. Returns 0 on success, negative errno on
+ * failure.
+ * Fix: the original used RETURN() without a matching ENTRY; and mixed
+ * RETURN() with a bare "return rc", unbalancing the libcfs entry/exit
+ * debug trace. ENTRY is now declared and RETURN() used throughout. */
+static inline int filter_setup_llog_group(struct obd_export *exp,
+ struct obd_device *obd,
+ int group)
+{
+ struct obd_llog_group *olg;
+ struct llog_ctxt *ctxt;
+ int rc;
+ ENTRY;
+
+ olg = filter_find_create_olg(obd, group);
+ if (IS_ERR(olg))
+ RETURN(PTR_ERR(olg));
+
+ llog_group_set_export(olg, exp);
+
+ /* Context reference must be dropped with llog_ctxt_put() below. */
+ ctxt = llog_group_get_ctxt(olg, LLOG_MDS_OST_REPL_CTXT);
+ LASSERTF(ctxt != NULL, "ctxt is null\n");
+
+ rc = llog_receptor_accept(ctxt, exp->exp_imp_reverse);
+ llog_ctxt_put(ctxt);
+ RETURN(rc);
+}
+
+/* Handle a grant-shrink request from a client: re-absorb the grant the
+ * client is voluntarily returning, via filter_grant_incoming() under
+ * obd_osfs_lock. Always returns 0.
+ * Fix: the original used RETURN(0) with no matching ENTRY; (unbalanced
+ * libcfs debug trace) and carried a stray blank line before the
+ * closing brace. */
+static int filter_set_grant_shrink(struct obd_export *exp,
+ struct ost_body *body)
+{
+ ENTRY;
+
+ /* handle shrink grant */
+ spin_lock(&exp->exp_obd->obd_osfs_lock);
+ filter_grant_incoming(exp, &body->oa);
+ spin_unlock(&exp->exp_obd->obd_osfs_lock);
+
+ RETURN(0);
+}
+
+/* Record an incoming MDS connection on this OST: remember the MDC
+ * connection cookie, set up the llog group named by @val (a __u32
+ * group id, or group 0 when @val is NULL), and hand the export to the
+ * quota layer. Returns 0 on success, negative errno on failure. */
+static int filter_set_mds_conn(struct obd_export *exp, void *val)
+{
+ struct obd_device *obd;
+ int rc = 0, group;
+ ENTRY;
+
+ obd = exp->exp_obd;
+ if (obd == NULL) {
+ CDEBUG(D_IOCTL, "invalid export %p\n", exp);
+ RETURN(-EINVAL);
+ }
+
+ LCONSOLE_WARN("%s: received MDS connection from %s\n", obd->obd_name,
+ obd_export_nid2str(exp));
+ /* Cookie identifies the reverse (OST->MDS) connection handle. */
+ obd->u.filter.fo_mdc_conn.cookie = exp->exp_handle.h_cookie;
+
+ /* setup llog imports */
+ if (val != NULL)
+ group = (int)(*(__u32 *)val);
+ else
+ group = 0; /* default value */
+
+ LASSERT_MDS_GROUP(group);
+ rc = filter_setup_llog_group(exp, obd, group);
+ if (rc)
+ goto out;
+
+ if (group == FILTER_GROUP_MDS0) {
+ /* setup llog group 1 for interop */
+ /* NOTE(review): return code deliberately ignored here —
+ * presumably best-effort for older-peer interop; confirm. */
+ filter_setup_llog_group(exp, obd, FILTER_GROUP_LLOG);
+ }
+
+ lquota_setinfo(filter_quota_interface_ref, obd, exp);
+out:
+ RETURN(rc);
+}
+
static int filter_set_info_async(struct obd_export *exp, __u32 keylen,
void *key, __u32 vallen, void *val,
struct ptlrpc_request_set *set)
{
struct obd_device *obd;
- struct obd_llogs *llog;
- struct llog_ctxt *ctxt;
- int rc = 0, group;
ENTRY;
obd = exp->exp_obd;
}
if (KEY_IS(KEY_CAPA_KEY)) {
+ int rc;
rc = filter_update_capa_key(obd, (struct lustre_capa_key *)val);
if (rc)
CERROR("filter update capability key failed: %d\n", rc);
if (KEY_IS(KEY_REVIMP_UPD)) {
filter_revimp_update(exp);
+ lquota_clearinfo(filter_quota_interface_ref, exp, exp->exp_obd);
RETURN(0);
}
- if (keylen < strlen(KEY_MDS_CONN) ||
- memcmp(key, KEY_MDS_CONN, keylen) != 0)
- RETURN(-EINVAL);
-
- LCONSOLE_WARN("%s: received MDS connection from %s\n", obd->obd_name,
- obd_export_nid2str(exp));
- obd->u.filter.fo_mdc_conn.cookie = exp->exp_handle.h_cookie;
-
- /* setup llog imports */
- LASSERT(val != NULL);
- group = (int)(*(__u32 *)val);
- LASSERT(group >= FILTER_GROUP_MDS0);
-
- llog = filter_grab_llog_for_group(obd, group, exp);
- LASSERT(llog != NULL);
- ctxt = llog_get_context_from_llogs(llog, LLOG_MDS_OST_REPL_CTXT);
- LASSERTF(ctxt != NULL, "ctxt is not null\n"),
+ if (KEY_IS(KEY_SPTLRPC_CONF)) {
+ filter_adapt_sptlrpc_conf(obd, 0);
+ RETURN(0);
+ }
- rc = llog_receptor_accept(ctxt, exp->exp_imp_reverse);
+ if (KEY_IS(KEY_MDS_CONN))
+ RETURN(filter_set_mds_conn(exp, val));
- lquota_setinfo(filter_quota_interface_ref, exp, obd);
+ if (KEY_IS(KEY_GRANT_SHRINK))
+ RETURN(filter_set_grant_shrink(exp, val));
- RETURN(rc);
+ RETURN(-EINVAL);
}
int filter_iocontrol(unsigned int cmd, struct obd_export *exp,
switch (cmd) {
case OBD_IOC_ABORT_RECOVERY: {
- CERROR("aborting recovery for device %s\n", obd->obd_name);
+ LCONSOLE_WARN("%s: Aborting recovery.\n", obd->obd_name);
target_stop_recovery_thread(obd);
RETURN(0);
}
struct lprocfs_static_vars lvars;
int rc = 0;
- lprocfs_init_vars(filter, &lvars);
+ switch (lcfg->lcfg_command) {
+ default:
+ lprocfs_filter_init_vars(&lvars);
+
+ rc = class_process_proc_param(PARAM_OST, lvars.obd_vars,
+ lcfg, obd);
+ if (rc > 0)
+ rc = 0;
+ break;
+ }
- rc = class_process_proc_param(PARAM_OST, lvars.obd_vars, lcfg, obd);
return rc;
}
struct lprocfs_static_vars lvars;
int rc;
- lprocfs_init_vars(filter, &lvars);
+ lprocfs_filter_init_vars(&lvars);
request_module("lquota");
OBD_ALLOC(obdfilter_created_scratchpad,
sizeof(*obdfilter_created_scratchpad));
}
-MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Filtering OBD driver");
MODULE_LICENSE("GPL");