* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include <linux/fs.h>
/* XATTR_{REPLACE,CREATE} */
#include <linux/xattr.h>
-/* simple_mkdir() */
-#include <lvfs.h>
/*
* struct OBD_{ALLOC,FREE}*()
#include <obd_support.h>
/* struct ptlrpc_thread */
#include <lustre_net.h>
-
-/* fid_is_local() */
#include <lustre_fid.h>
#include "osd_internal.h"
-#include "osd_igif.h"
+#include "osd_dynlocks.h"
/* llo_* api support */
#include <md_object.h>
-/* dt_acct_features */
-#include <lquota.h>
+#include <lustre_quota.h>
-#ifdef HAVE_LDISKFS_PDO
int ldiskfs_pdo = 1;
CFS_MODULE_PARM(ldiskfs_pdo, "i", int, 0644,
"ldiskfs with parallel directory operations");
-#else
-int ldiskfs_pdo = 0;
-#endif
+
+int ldiskfs_track_declares_assert;
+CFS_MODULE_PARM(ldiskfs_track_declares_assert, "i", int, 0644,
+ "LBUG during tracking of declares");
+
+/* Slab to allocate dynlocks */
+struct kmem_cache *dynlock_cachep;
+
+static struct lu_kmem_descr ldiskfs_caches[] = {
+ {
+ .ckd_cache = &dynlock_cachep,
+ .ckd_name = "dynlock_cache",
+ .ckd_size = sizeof(struct dynlock_handle)
+ },
+ {
+ .ckd_cache = NULL
+ }
+};
static const char dot[] = ".";
static const char dotdot[] = "..";
static const struct dt_index_operations osd_index_iam_ops;
static const struct dt_index_operations osd_index_ea_ops;
+/* For each OSD operation type, the operation type that undoes it when a
+ * declared change has to be rolled back (e.g. CREATE <-> DESTROY,
+ * INSERT <-> DELETE). Entries mapped to OSD_OT_MAX have no inverse. */
+int osd_trans_declare_op2rb[] = {
+ [OSD_OT_ATTR_SET] = OSD_OT_ATTR_SET,
+ [OSD_OT_PUNCH] = OSD_OT_MAX,
+ [OSD_OT_XATTR_SET] = OSD_OT_XATTR_SET,
+ [OSD_OT_CREATE] = OSD_OT_DESTROY,
+ [OSD_OT_DESTROY] = OSD_OT_CREATE,
+ [OSD_OT_REF_ADD] = OSD_OT_REF_DEL,
+ [OSD_OT_REF_DEL] = OSD_OT_REF_ADD,
+ [OSD_OT_WRITE] = OSD_OT_WRITE,
+ [OSD_OT_INSERT] = OSD_OT_DELETE,
+ [OSD_OT_DELETE] = OSD_OT_INSERT,
+ [OSD_OT_UPDATE] = OSD_OT_MAX,
+ [OSD_OT_QUOTA] = OSD_OT_MAX,
+};
+
static int osd_has_index(const struct osd_object *obj)
{
return obj->oo_dt.do_index_ops != NULL;
return osd_invariant(osd_obj(l));
}
-#ifdef HAVE_QUOTA_SUPPORT
-static inline void
-osd_push_ctxt(const struct lu_env *env, struct osd_ctxt *save, bool is_md)
-{
- struct md_ucred *uc;
- struct cred *tc;
-
- if (!is_md)
- /* OFD support */
- return;
-
- uc = md_ucred(env);
-
- LASSERT(uc != NULL);
-
- save->oc_uid = current_fsuid();
- save->oc_gid = current_fsgid();
- save->oc_cap = current_cap();
- if ((tc = prepare_creds())) {
- tc->fsuid = uc->mu_fsuid;
- tc->fsgid = uc->mu_fsgid;
- commit_creds(tc);
- }
- /* XXX not suboptimal */
- cfs_curproc_cap_unpack(uc->mu_cap);
-}
-
-static inline void
-osd_pop_ctxt(struct osd_ctxt *save, bool is_md)
-{
- struct cred *tc;
-
- if (!is_md)
- /* OFD support */
- return;
-
- if ((tc = prepare_creds())) {
- tc->fsuid = save->oc_uid;
- tc->fsgid = save->oc_gid;
- tc->cap_effective = save->oc_cap;
- commit_creds(tc);
- }
-}
-#endif
-
/*
* Concurrency: doesn't matter
*/
l = &mo->oo_dt.do_lu;
dt_object_init(&mo->oo_dt, NULL, d);
- if (osd_dev(d)->od_iop_mode)
- mo->oo_dt.do_ops = &osd_obj_ea_ops;
- else
- mo->oo_dt.do_ops = &osd_obj_ops;
-
+ mo->oo_dt.do_ops = &osd_obj_ea_ops;
l->lo_ops = &osd_lu_obj_ops;
- cfs_init_rwsem(&mo->oo_sem);
- cfs_init_rwsem(&mo->oo_ext_idx_sem);
- cfs_spin_lock_init(&mo->oo_guard);
+ init_rwsem(&mo->oo_sem);
+ init_rwsem(&mo->oo_ext_idx_sem);
+ spin_lock_init(&mo->oo_guard);
return l;
} else {
return NULL;
}
}
-static int osd_get_lma(struct inode *inode, struct dentry *dentry,
- struct lustre_mdt_attrs *lma)
+int osd_get_lma(struct osd_thread_info *info, struct inode *inode,
+ struct dentry *dentry, struct lustre_mdt_attrs *lma)
{
int rc;
- dentry->d_inode = inode;
- rc = inode->i_op->getxattr(dentry, XATTR_NAME_LMA, (void *)lma,
- sizeof(*lma));
+ CLASSERT(LMA_OLD_SIZE >= sizeof(*lma));
+ rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
+ info->oti_mdt_attrs_old, LMA_OLD_SIZE);
if (rc > 0) {
+ if ((void *)lma != (void *)info->oti_mdt_attrs_old)
+ memcpy(lma, info->oti_mdt_attrs_old, sizeof(*lma));
+ rc = 0;
+ lustre_lma_swab(lma);
/* Check LMA compatibility */
- if (lma->lma_incompat & ~cpu_to_le32(LMA_INCOMPAT_SUPP)) {
- CWARN("%.16s: unsupported incompat LMA feature(s) "
- "%lx/%#x\n",
+ if (lma->lma_incompat & ~LMA_INCOMPAT_SUPP) {
+ CWARN("%.16s: unsupported incompat LMA feature(s) %#x "
+ "for fid = "DFID", ino = %lu\n",
LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
- inode->i_ino, le32_to_cpu(lma->lma_incompat) &
- ~LMA_INCOMPAT_SUPP);
- rc = -ENOSYS;
- } else {
- lustre_lma_swab(lma);
- rc = 0;
+ lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
+ PFID(&lma->lma_self_fid), inode->i_ino);
+ rc = -EOPNOTSUPP;
}
} else if (rc == 0) {
rc = -ENODATA;
return inode;
}
-struct inode *osd_iget_fid(struct osd_thread_info *info, struct osd_device *dev,
- struct osd_inode_id *id, struct lu_fid *fid)
+static struct inode *
+osd_iget_fid(struct osd_thread_info *info, struct osd_device *dev,
+ struct osd_inode_id *id, struct lu_fid *fid)
{
struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
struct inode *inode;
if (IS_ERR(inode))
return inode;
- rc = osd_get_lma(inode, &info->oti_obj_dentry, lma);
+ rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
if (rc == 0) {
*fid = lma->lma_self_fid;
} else if (rc == -ENODATA) {
- LU_IGIF_BUILD(fid, inode->i_ino, inode->i_generation);
+ if (unlikely(inode == osd_sb(dev)->s_root->d_inode))
+ lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
+ else
+ lu_igif_build(fid, inode->i_ino, inode->i_generation);
} else {
iput(inode);
inode = ERR_PTR(rc);
return inode;
}
-static struct inode *
-osd_iget_verify(struct osd_thread_info *info, struct osd_device *dev,
- struct osd_inode_id *id, const struct lu_fid *fid)
+/**
+ * \retval +v: new filter_fid, does not contain self-fid
+ * \retval 0: filter_fid_old, contains self-fid
+ * \retval -v: other failure cases
+ */
+int osd_get_idif(struct osd_thread_info *info, struct inode *inode,
+ struct dentry *dentry, struct lu_fid *fid)
{
- struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
- struct inode *inode;
+ struct filter_fid_old *ff = &info->oti_ff;
+ struct ost_id *ostid = &info->oti_ostid;
int rc;
- inode = osd_iget(info, dev, id);
- if (IS_ERR(inode))
- return inode;
+ rc = __osd_xattr_get(inode, dentry, XATTR_NAME_FID, ff, sizeof(*ff));
+ if (rc == sizeof(*ff)) {
+ rc = 0;
+ ostid_set_seq(ostid, le64_to_cpu(ff->ff_seq));
+ ostid_set_id(ostid, le64_to_cpu(ff->ff_objid));
+ /* XXX: should use real OST index in the future. LU-3569 */
+ ostid_to_fid(fid, ostid, 0);
+ } else if (rc == sizeof(struct filter_fid)) {
+ rc = 1;
+ } else if (rc >= 0) {
+ rc = -EINVAL;
+ }
- rc = osd_get_lma(inode, &info->oti_obj_dentry, lma);
- if (rc != 0) {
- if (rc == -ENODATA) {
- CDEBUG(D_LFSCK, "inconsistent obj: NULL, %lu, "DFID"\n",
- inode->i_ino, PFID(fid));
- rc = -EREMCHG;
+ return rc;
+}
+
+static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
+{
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct osd_device *osd = osd_obj2dev(obj);
+ struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
+ struct inode *inode = obj->oo_inode;
+ struct dentry *dentry = &info->oti_obj_dentry;
+ struct lu_fid *fid = NULL;
+ int rc;
+ ENTRY;
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSD_COMPAT_INVALID_ENTRY))
+ RETURN(0);
+
+ CLASSERT(LMA_OLD_SIZE >= sizeof(*lma));
+ rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
+ info->oti_mdt_attrs_old, LMA_OLD_SIZE);
+ if (rc == -ENODATA && !fid_is_igif(lu_object_fid(&obj->oo_dt.do_lu)) &&
+ osd->od_check_ff) {
+ fid = &lma->lma_self_fid;
+ rc = osd_get_idif(info, inode, dentry, fid);
+ if ((rc > 0) || (rc == -ENODATA && osd->od_lma_self_repair)) {
+ handle_t *jh;
+
+ /* For the given OST-object, if it has neither LMA nor
+ * FID in XATTR_NAME_FID, then the given FID (which is
+ * contained in the @obj, from client RPC for locating
+ * the OST-object) is trusted. We use it to generate
+ * the LMA. */
+
+ LASSERT(current->journal_info == NULL);
+
+ jh = osd_journal_start_sb(osd_sb(osd), LDISKFS_HT_MISC,
+ osd_dto_credits_noquota[DTO_XATTR_SET]);
+ if (IS_ERR(jh)) {
+ CWARN("%s: cannot start journal for "
+ "lma_self_repair: rc = %ld\n",
+ osd_name(osd), PTR_ERR(jh));
+ RETURN(0);
+ }
+
+ rc = osd_ea_fid_set(info, inode,
+ lu_object_fid(&obj->oo_dt.do_lu),
+ fid_is_on_ost(info, osd,
+ lu_object_fid(&obj->oo_dt.do_lu),
+ OI_CHECK_FLD) ?
+ LMAC_FID_ON_OST : 0, 0);
+ if (rc != 0)
+ CWARN("%s: cannot self repair the LMA: "
+ "rc = %d\n", osd_name(osd), rc);
+ ldiskfs_journal_stop(jh);
+ RETURN(0);
}
- iput(inode);
- return ERR_PTR(rc);
}
- if (!lu_fid_eq(fid, &lma->lma_self_fid)) {
- CDEBUG(D_LFSCK, "inconsistent obj: "DFID", %lu, "DFID"\n",
- PFID(&lma->lma_self_fid), inode->i_ino, PFID(fid));
- iput(inode);
- return ERR_PTR(EREMCHG);
+ if (unlikely(rc == -ENODATA))
+ RETURN(0);
+
+ if (rc < 0)
+ RETURN(rc);
+
+ if (rc > 0) {
+ rc = 0;
+ lustre_lma_swab(lma);
+ if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
+ CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
+ CWARN("%s: unsupported incompat LMA feature(s) %#x for "
+ "fid = "DFID", ino = %lu\n", osd_name(osd),
+ lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
+ PFID(lu_object_fid(&obj->oo_dt.do_lu)),
+ inode->i_ino);
+ rc = -EOPNOTSUPP;
+ } else if (!(lma->lma_compat & LMAC_NOT_IN_OI)) {
+ fid = &lma->lma_self_fid;
+ }
}
- return inode;
+
+ if (fid != NULL &&
+ unlikely(!lu_fid_eq(lu_object_fid(&obj->oo_dt.do_lu), fid))) {
+ CDEBUG(D_INODE, "%s: FID "DFID" != self_fid "DFID"\n",
+ osd_name(osd), PFID(lu_object_fid(&obj->oo_dt.do_lu)),
+ PFID(&lma->lma_self_fid));
+ rc = -EREMCHG;
+ }
+
+ RETURN(rc);
}
static int osd_fid_lookup(const struct lu_env *env, struct osd_object *obj,
struct osd_scrub *scrub;
struct scrub_file *sf;
int result;
- int verify = 0;
+ int saved = 0;
+ bool in_oi = false;
+ bool triggered = false;
ENTRY;
LINVRNT(osd_invariant(obj));
info = osd_oti_get(env);
LASSERT(info);
oic = &info->oti_cache;
- id = &oic->oic_lid;
if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOENT))
RETURN(-ENOENT);
- if (fid_is_norm(fid)) {
- /* Search order: 1. per-thread cache. */
- if (lu_fid_eq(fid, &oic->oic_fid)) {
- goto iget;
- } else if (!cfs_list_empty(&scrub->os_inconsistent_items)) {
- /* Search order: 2. OI scrub pending list. */
- result = osd_oii_lookup(dev, fid, id);
- if (result == 0)
- goto iget;
- }
-
- if (sf->sf_flags & SF_INCONSISTENT)
- verify = 1;
- }
-
- /*
- * Objects are created as locking anchors or place holders for objects
- * yet to be created. No need to osd_oi_lookup() at here because FID
+ /* For the object is created as locking anchor, or for the object to
+ * be created on disk. No need to osd_oi_lookup() at here because FID
* shouldn't never be re-used, if it's really a duplicate FID from
* unexpected reason, we should be able to detect it later by calling
- * do_create->osd_oi_insert()
- */
- if (conf != NULL && (conf->loc_flags & LOC_F_NEW) != 0)
+ * do_create->osd_oi_insert(). */
+ if (conf != NULL && conf->loc_flags & LOC_F_NEW)
GOTO(out, result = 0);
+ /* Search order: 1. per-thread cache. */
+ if (lu_fid_eq(fid, &oic->oic_fid) &&
+ likely(oic->oic_dev == dev)) {
+ id = &oic->oic_lid;
+ goto iget;
+ }
+
+ id = &info->oti_id;
+ if (!cfs_list_empty(&scrub->os_inconsistent_items)) {
+ /* Search order: 2. OI scrub pending list. */
+ result = osd_oii_lookup(dev, fid, id);
+ if (result == 0)
+ goto iget;
+ }
+
/* Search order: 3. OI files. */
- result = osd_oi_lookup(info, dev, fid, id);
+ result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
if (result == -ENOENT) {
if (!fid_is_norm(fid) ||
+ fid_is_on_ost(info, dev, fid, OI_CHECK_FLD) ||
!ldiskfs_test_bit(osd_oi_fid2idx(dev,fid),
sf->sf_oi_bitmap))
GOTO(out, result = 0);
if (result != 0)
GOTO(out, result);
+ in_oi = true;
+
iget:
- if (verify == 0)
- inode = osd_iget(info, dev, id);
- else
- inode = osd_iget_verify(info, dev, id, fid);
+ inode = osd_iget(info, dev, id);
if (IS_ERR(inode)) {
result = PTR_ERR(inode);
if (result == -ENOENT || result == -ESTALE) {
- fid_zero(&oic->oic_fid);
- result = 0;
+ if (!in_oi) {
+ fid_zero(&oic->oic_fid);
+ GOTO(out, result = -ENOENT);
+ }
+
+ /* XXX: There are three possible cases:
+ * 1. Backup/restore caused the OI invalid.
+ * 2. Someone unlinked the object but NOT removed
+ * the OI mapping, such as mount target device
+ * as ldiskfs, and modify something directly.
+ * 3. Someone just removed the object between the
+ * former oi_lookup and the iget. It is normal.
+ *
+ * It is difficult to distinguish the 2nd from the
+ * 1st case. Relatively speaking, the 1st case is
+ * more common than the 2nd case, so trigger OI scrub. */
+ result = osd_oi_lookup(info, dev, fid, id, true);
+ if (result == 0)
+ /* It is the case 1 or 2. */
+ goto trigger;
} else if (result == -EREMCHG) {
trigger:
+ if (unlikely(triggered))
+ GOTO(out, result = saved);
+
+ triggered = true;
if (thread_is_running(&scrub->os_thread)) {
result = -EINPROGRESS;
- } else if (!scrub->os_no_scrub) {
+ } else if (!dev->od_noscrub) {
result = osd_scrub_start(dev);
LCONSOLE_ERROR("%.16s: trigger OI scrub by RPC "
"for "DFID", rc = %d [1]\n",
else
result = -EREMCHG;
}
+
+ /* We still have chance to get the valid inode: for the
+ * object which is referenced by remote name entry, the
+ * object on the local MDT will be linked under the dir
+ * of "/REMOTE_PARENT_DIR" with its FID string as name.
+ *
+ * We do not know whether the object for the given FID
+ * is referenced by some remote name entry or not, and
+ * especially for DNE II, a multiple-linked object may
+ * have many name entries reside on many MDTs.
+ *
+ * To simplify the operation, OSD will not distinguish
+ * more, just lookup "/REMOTE_PARENT_DIR". Usually, it
+ * only happened for the RPC from other MDT during the
+ * OI scrub, or for the client side RPC with FID only,
+ * such as FID to path, or from old connected client. */
+ saved = result;
+ result = osd_lookup_in_remote_parent(info, dev,
+ fid, id);
+ if (result == 0) {
+ in_oi = false;
+ goto iget;
+ }
+
+ result = saved;
}
GOTO(out, result);
obj->oo_inode = inode;
LASSERT(obj->oo_inode->i_sb == osd_sb(dev));
- if (dev->od_iop_mode) {
- obj->oo_compat_dot_created = 1;
- obj->oo_compat_dotdot_created = 1;
- }
+
+ result = osd_check_lma(env, obj);
+ if (result != 0) {
+ iput(inode);
+ obj->oo_inode = NULL;
+ if (result == -EREMCHG)
+ goto trigger;
+
+ GOTO(out, result);
+ }
+
+ obj->oo_compat_dot_created = 1;
+ obj->oo_compat_dotdot_created = 1;
if (!S_ISDIR(inode->i_mode) || !ldiskfs_pdo) /* done */
GOTO(out, result = 0);
LINVRNT(osd_invariant(obj));
+ if (fid_is_otable_it(&l->lo_header->loh_fid)) {
+ obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
+ l->lo_header->loh_attr |= LOHA_EXISTS;
+ return 0;
+ }
+
result = osd_fid_lookup(env, obj, lu_object_fid(l), conf);
obj->oo_dt.do_body_ops = &osd_body_ops_new;
- if (result == 0) {
- if (obj->oo_inode != NULL) {
- osd_object_init0(obj);
- } else if (fid_is_otable_it(&l->lo_header->loh_fid)) {
- obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
- /* LFSCK iterator object is special without inode */
- l->lo_header->loh_attr |= LOHA_EXISTS;
- }
- }
+ if (result == 0 && obj->oo_inode != NULL)
+ osd_object_init0(obj);
+
LINVRNT(osd_invariant(obj));
return result;
}
/**
* Helper function to convert time interval to microseconds packed in
- * long int (default time units for the counter in "stats" initialized
- * by lu_time_init() )
+ * long int.
*/
static long interval_to_usec(cfs_time_t start, cfs_time_t end)
{
/*
* Concurrency: doesn't access mutable data.
*/
-static int osd_param_is_sane(const struct osd_device *dev,
- const struct thandle *th)
+static int osd_param_is_not_sane(const struct osd_device *dev,
+ const struct thandle *th)
{
- struct osd_thandle *oh;
- oh = container_of0(th, struct osd_thandle, ot_super);
- return oh->ot_credits <= osd_journal(dev)->j_max_transaction_buffers;
+ struct osd_thandle *oh = container_of(th, typeof(*oh), ot_super);
+
+ return oh->ot_credits > osd_journal(dev)->j_max_transaction_buffers;
}
/*
* Concurrency: shouldn't matter.
*/
-#ifdef HAVE_LDISKFS_JOURNAL_CALLBACK_ADD
static void osd_trans_commit_cb(struct super_block *sb,
- struct journal_callback *jcb, int error)
-#else
-static void osd_trans_commit_cb(struct journal_callback *jcb, int error)
-#endif
+ struct ldiskfs_journal_cb_entry *jcb, int error)
{
struct osd_thandle *oh = container_of0(jcb, struct osd_thandle, ot_jcb);
struct thandle *th = &oh->ot_super;
dcb->dcb_func(NULL, th, dcb, error);
}
- lu_ref_del_at(&lud->ld_reference, oh->ot_dev_link, "osd-tx", th);
+ lu_ref_del_at(&lud->ld_reference, &oh->ot_dev_link, "osd-tx", th);
lu_device_put(lud);
th->th_dev = NULL;
LASSERT(cfs_atomic_read(&iobuf->dr_numreqs) == 0);
th = ERR_PTR(-ENOMEM);
- OBD_ALLOC_GFP(oh, sizeof *oh, CFS_ALLOC_IO);
+ OBD_ALLOC_GFP(oh, sizeof *oh, __GFP_IO);
if (oh != NULL) {
oh->ot_quota_trans = &oti->oti_quota_trans;
memset(oh->ot_quota_trans, 0, sizeof(*oh->ot_quota_trans));
oti->oti_dev = osd_dt_dev(d);
CFS_INIT_LIST_HEAD(&oh->ot_dcb_list);
osd_th_alloced(oh);
+
+ memset(oti->oti_declare_ops, 0,
+ sizeof(oti->oti_declare_ops));
+ memset(oti->oti_declare_ops_rb, 0,
+ sizeof(oti->oti_declare_ops_rb));
+ memset(oti->oti_declare_ops_cred, 0,
+ sizeof(oti->oti_declare_ops_cred));
+ oti->oti_rollback = false;
}
RETURN(th);
}
if (rc != 0)
GOTO(out, rc);
- if (!osd_param_is_sane(dev, th)) {
+ if (unlikely(osd_param_is_not_sane(dev, th))) {
+ static unsigned long last_printed;
+ static int last_credits;
+
CWARN("%.16s: too many transaction credits (%d > %d)\n",
LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
oh->ot_credits,
osd_journal(dev)->j_max_transaction_buffers);
- /* XXX Limit the credits to 'max_transaction_buffers', and
- * let the underlying filesystem to catch the error if
- * we really need so many credits.
- *
- * This should be removed when we can calculate the
- * credits precisely. */
- oh->ot_credits = osd_journal(dev)->j_max_transaction_buffers;
-#ifdef OSD_TRACK_DECLARES
- CERROR(" attr_set: %d, punch: %d, xattr_set: %d,\n",
- oh->ot_declare_attr_set, oh->ot_declare_punch,
- oh->ot_declare_xattr_set);
- CERROR(" create: %d, ref_add: %d, ref_del: %d, write: %d\n",
- oh->ot_declare_create, oh->ot_declare_ref_add,
- oh->ot_declare_ref_del, oh->ot_declare_write);
- CERROR(" insert: %d, delete: %d, destroy: %d\n",
- oh->ot_declare_insert, oh->ot_declare_delete,
- oh->ot_declare_destroy);
-#endif
- }
+ CWARN(" create: %u/%u, delete: %u/%u, destroy: %u/%u\n",
+ oti->oti_declare_ops[OSD_OT_CREATE],
+ oti->oti_declare_ops_cred[OSD_OT_CREATE],
+ oti->oti_declare_ops[OSD_OT_DELETE],
+ oti->oti_declare_ops_cred[OSD_OT_DELETE],
+ oti->oti_declare_ops[OSD_OT_DESTROY],
+ oti->oti_declare_ops_cred[OSD_OT_DESTROY]);
+ CWARN(" attr_set: %u/%u, xattr_set: %u/%u\n",
+ oti->oti_declare_ops[OSD_OT_ATTR_SET],
+ oti->oti_declare_ops_cred[OSD_OT_ATTR_SET],
+ oti->oti_declare_ops[OSD_OT_XATTR_SET],
+ oti->oti_declare_ops_cred[OSD_OT_XATTR_SET]);
+ CWARN(" write: %u/%u, punch: %u/%u, quota %u/%u\n",
+ oti->oti_declare_ops[OSD_OT_WRITE],
+ oti->oti_declare_ops_cred[OSD_OT_WRITE],
+ oti->oti_declare_ops[OSD_OT_PUNCH],
+ oti->oti_declare_ops_cred[OSD_OT_PUNCH],
+ oti->oti_declare_ops[OSD_OT_QUOTA],
+ oti->oti_declare_ops_cred[OSD_OT_QUOTA]);
+ CWARN(" insert: %u/%u, delete: %u/%u\n",
+ oti->oti_declare_ops[OSD_OT_INSERT],
+ oti->oti_declare_ops_cred[OSD_OT_INSERT],
+ oti->oti_declare_ops[OSD_OT_DESTROY],
+ oti->oti_declare_ops_cred[OSD_OT_DESTROY]);
+ CWARN(" ref_add: %u/%u, ref_del: %u/%u\n",
+ oti->oti_declare_ops[OSD_OT_REF_ADD],
+ oti->oti_declare_ops_cred[OSD_OT_REF_ADD],
+ oti->oti_declare_ops[OSD_OT_REF_DEL],
+ oti->oti_declare_ops_cred[OSD_OT_REF_DEL]);
+
+ if (last_credits != oh->ot_credits &&
+ time_after(jiffies, last_printed + 60 * HZ)) {
+ libcfs_debug_dumpstack(NULL);
+ last_credits = oh->ot_credits;
+ last_printed = jiffies;
+ }
+ /* XXX Limit the credits to 'max_transaction_buffers', and
+ * let the underlying filesystem to catch the error if
+ * we really need so many credits.
+ *
+ * This should be removed when we can calculate the
+ * credits precisely. */
+ oh->ot_credits = osd_journal(dev)->j_max_transaction_buffers;
+ }
/*
* XXX temporary stuff. Some abstraction layer should
* be used.
*/
- jh = ldiskfs_journal_start_sb(osd_sb(dev), oh->ot_credits);
+ jh = osd_journal_start_sb(osd_sb(dev), LDISKFS_HT_MISC, oh->ot_credits);
osd_th_started(oh);
if (!IS_ERR(jh)) {
oh->ot_handle = jh;
lu_context_enter(&th->th_ctx);
lu_device_get(&d->dd_lu_dev);
- oh->ot_dev_link = lu_ref_add(&d->dd_lu_dev.ld_reference,
- "osd-tx", th);
+ lu_ref_add_at(&d->dd_lu_dev.ld_reference, &oh->ot_dev_link,
+ "osd-tx", th);
oti->oti_txns++;
rc = 0;
} else {
RETURN(rc);
}
+/**
+ * Check whether FID sequence \a seq is served by the local node.
+ *
+ * The sequence is looked up in the FLD and the node index recorded there
+ * is compared against the local seq-server site's node id.
+ *
+ * \retval 1 \a seq is local, or there is no seq_server_site configured,
+ *           in which case no check can be made and "local" is assumed
+ * \retval 0 \a seq belongs to another node, or the FLD lookup failed
+ */
+static int osd_seq_exists(const struct lu_env *env,
+ struct osd_device *osd, obd_seq seq)
+{
+ struct lu_seq_range *range = &osd_oti_get(env)->oti_seq_range;
+ struct seq_server_site *ss = osd_seq_site(osd);
+ int rc;
+ ENTRY;
+
+ if (ss == NULL)
+ RETURN(1);
+
+ rc = osd_fld_lookup(env, osd, seq, range);
+ if (rc != 0) {
+ /* -ENOENT merely means the FLD has no entry; anything else
+ * is worth reporting */
+ if (rc != -ENOENT)
+ CERROR("%s: can't lookup FLD sequence "LPX64
+ ": rc = %d\n", osd_name(osd), seq, rc);
+ RETURN(0);
+ }
+
+ RETURN(ss->ss_node_id == range->lsr_index);
+}
+
/*
* Concurrency: shouldn't matter.
*/
* notice we don't do this in osd_trans_start()
* as underlying transaction can change during truncate
*/
- osd_journal_callback_set(hdl, osd_trans_commit_cb,
+ ldiskfs_journal_callback_add(hdl, osd_trans_commit_cb,
&oh->ot_jcb);
LASSERT(oti->oti_txns == 1);
OBD_FREE_PTR(oh);
}
- /* as we want IO to journal and data IO be concurrent, we don't block
- * awaiting data IO completion in osd_do_bio(), instead we wait here
- * once transaction is submitted to the journal. all reqular requests
- * don't do direct IO (except read/write), thus this wait_event becomes
- * no-op for them.
- *
- * IMPORTANT: we have to wait till any IO submited by the thread is
- * completed otherwise iobuf may be corrupted by different request
- */
- cfs_wait_event(iobuf->dr_wait,
- cfs_atomic_read(&iobuf->dr_numreqs) == 0);
- if (!rc)
- rc = iobuf->dr_error;
+ /* as we want IO to journal and data IO be concurrent, we don't block
+ * awaiting data IO completion in osd_do_bio(), instead we wait here
+ * once transaction is submitted to the journal. all regular requests
+ * don't do direct IO (except read/write), thus this wait_event becomes
+ * no-op for them.
+ *
+ * IMPORTANT: we have to wait till any IO submitted by the thread is
+ * completed, otherwise iobuf may be corrupted by a different request
+ */
+ wait_event(iobuf->dr_wait,
+ cfs_atomic_read(&iobuf->dr_numreqs) == 0);
+ if (!rc)
+ rc = iobuf->dr_error;
- RETURN(rc);
+ RETURN(rc);
}
static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
osd_index_fini(obj);
if (inode != NULL) {
+ struct qsd_instance *qsd = osd_obj2dev(obj)->od_quota_slave;
+ qid_t uid = inode->i_uid;
+ qid_t gid = inode->i_gid;
+
iput(inode);
obj->oo_inode = NULL;
+
+ if (qsd != NULL) {
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct lquota_id_info *qi = &info->oti_qi;
+
+ /* Release granted quota to master if necessary */
+ qi->lqi_id.qid_uid = uid;
+ qsd_op_adjust(env, qsd, &qi->lqi_id, USRQUOTA);
+
+ qi->lqi_id.qid_uid = gid;
+ qsd_op_adjust(env, qsd, &qi->lqi_id, GRPQUOTA);
+ }
}
}
ksfs = &osd_oti_get(env)->oti_ksfs;
}
- cfs_spin_lock(&osd->od_osfs_lock);
+ spin_lock(&osd->od_osfs_lock);
/* cache 1 second */
if (cfs_time_before_64(osd->od_osfs_age, cfs_time_shift_64(-1))) {
result = sb->s_op->statfs(sb->s_root, ksfs);
if (likely(result == 0)) { /* N.B. statfs can't really fail */
osd->od_osfs_age = cfs_time_current_64();
statfs_pack(&osd->od_statfs, ksfs);
+ if (sb->s_flags & MS_RDONLY)
+ sfs->os_state = OS_STATE_READONLY;
}
}
- if (likely(result == 0))
- *sfs = osd->od_statfs;
- cfs_spin_unlock(&osd->od_osfs_lock);
+ if (likely(result == 0))
+ *sfs = osd->od_statfs;
+ spin_unlock(&osd->od_osfs_lock);
if (unlikely(env == NULL))
OBD_FREE_PTR(ksfs);
return result;
}
+/**
+ * Estimate space needed for file creations. We assume the largest filename
+ * is the decimal representation of 2^64 - 1, hence a filename of 20 chars.
+ * This is 28 bytes per object which is 28MB for 1M objects ... not so bad.
+ */
+#ifdef __LDISKFS_DIR_REC_LEN
+#define PER_OBJ_USAGE __LDISKFS_DIR_REC_LEN(20)
+#else
+#define PER_OBJ_USAGE LDISKFS_DIR_REC_LEN(20)
+#endif
+
/*
* Concurrency: doesn't access mutable data.
*/
/*
* XXX should be taken from not-yet-existing fs abstraction layer.
*/
- param->ddp_mnt = osd_dt_dev(dev)->od_mnt;
param->ddp_max_name_len = LDISKFS_NAME_LEN;
param->ddp_max_nlink = LDISKFS_LINK_MAX;
param->ddp_block_shift = sb->s_blocksize_bits;
param->ddp_mount_type = LDD_MT_LDISKFS;
+ param->ddp_maxbytes = sb->s_maxbytes;
+ /* Overhead estimate should be fairly accurate, so we really take a tiny
+ * error margin which also avoids fragmenting the filesystem too much */
+ param->ddp_grant_reserved = 2; /* end up to be 1.9% after conversion */
+ /* inodes are statically allocated, so per-inode space consumption
+ * is the space consumed by the directory entry */
+ param->ddp_inodespace = PER_OBJ_USAGE;
+ /* per-fragment overhead to be used by the client code */
+ param->ddp_grant_frag = 6 * LDISKFS_BLOCK_SIZE(sb);
param->ddp_mntopts = 0;
if (test_opt(sb, XATTR_USER))
param->ddp_mntopts |= MNTOPT_USERXATTR;
}
-/**
- * Helper function to get and fill the buffer with input values.
- */
-static struct lu_buf *osd_buf_get(const struct lu_env *env, void *area, ssize_t len)
-{
- struct lu_buf *buf;
-
- buf = &osd_oti_get(env)->oti_buf;
- buf->lb_buf = area;
- buf->lb_len = len;
- return buf;
-}
-
/*
* Concurrency: shouldn't matter.
*/
static int osd_ro(const struct lu_env *env, struct dt_device *d)
{
- struct super_block *sb = osd_sb(osd_dt_dev(d));
- int rc;
- ENTRY;
+ struct super_block *sb = osd_sb(osd_dt_dev(d));
+ struct block_device *dev = sb->s_bdev;
+#ifdef HAVE_DEV_SET_RDONLY
+ struct block_device *jdev = LDISKFS_SB(sb)->journal_bdev;
+ int rc = 0;
+#else
+ int rc = -EOPNOTSUPP;
+#endif
+ ENTRY;
+#ifdef HAVE_DEV_SET_RDONLY
CERROR("*** setting %s read-only ***\n", osd_dt_dev(d)->od_svname);
- rc = __lvfs_set_rdonly(sb->s_bdev, LDISKFS_SB(sb)->journal_bdev);
- RETURN(rc);
+ if (jdev && (jdev != dev)) {
+ CDEBUG(D_IOCTL | D_HA, "set journal dev %lx rdonly\n",
+ (long)jdev);
+ dev_set_rdonly(jdev);
+ }
+ CDEBUG(D_IOCTL | D_HA, "set dev %lx rdonly\n", (long)dev);
+ dev_set_rdonly(dev);
+#else
+ CERROR("%s: %lx CANNOT BE SET READONLY: rc = %d\n",
+ osd_dt_dev(d)->od_svname, (long)dev, rc);
+#endif
+ RETURN(rc);
}
/*
}
/**
- * Concurrency: serialization provided by callers.
- */
-static void osd_init_quota_ctxt(const struct lu_env *env, struct dt_device *d,
- struct dt_quota_ctxt *ctxt, void *data)
-{
- struct obd_device *obd = (void *)ctxt;
- struct vfsmount *mnt = (struct vfsmount *)data;
- ENTRY;
-
- obd->u.obt.obt_sb = mnt->mnt_root->d_inode->i_sb;
- OBD_SET_CTXT_MAGIC(&obd->obd_lvfs_ctxt);
- obd->obd_lvfs_ctxt.pwdmnt = mnt;
- obd->obd_lvfs_ctxt.pwd = mnt->mnt_root;
- obd->obd_lvfs_ctxt.fs = get_ds();
-
- EXIT;
-}
-
-/**
* Note: we do not count into QUOTA here.
* If we mount with --data_journal we may need more.
*/
.dt_ro = osd_ro,
.dt_commit_async = osd_commit_async,
.dt_init_capa_ctxt = osd_init_capa_ctxt,
- .dt_init_quota_ctxt= osd_init_quota_ctxt,
};
static void osd_object_read_lock(const struct lu_env *env,
LINVRNT(osd_invariant(obj));
LASSERT(obj->oo_owner != env);
- cfs_down_read_nested(&obj->oo_sem, role);
+ down_read_nested(&obj->oo_sem, role);
LASSERT(obj->oo_owner == NULL);
oti->oti_r_locks++;
LINVRNT(osd_invariant(obj));
LASSERT(obj->oo_owner != env);
- cfs_down_write_nested(&obj->oo_sem, role);
+ down_write_nested(&obj->oo_sem, role);
LASSERT(obj->oo_owner == NULL);
obj->oo_owner = env;
LASSERT(oti->oti_r_locks > 0);
oti->oti_r_locks--;
- cfs_up_read(&obj->oo_sem);
+ up_read(&obj->oo_sem);
}
static void osd_object_write_unlock(const struct lu_env *env,
LASSERT(oti->oti_w_locks > 0);
oti->oti_w_locks--;
obj->oo_owner = NULL;
- cfs_up_write(&obj->oo_sem);
+ up_write(&obj->oo_sem);
}
static int osd_object_write_locked(const struct lu_env *env,
RETURN(-ESTALE);
}
- cfs_spin_lock(&capa_lock);
- for (i = 0; i < 2; i++) {
- if (keys[i].lk_keyid == capa->lc_keyid) {
- oti->oti_capa_key = keys[i];
- break;
- }
- }
- cfs_spin_unlock(&capa_lock);
+ spin_lock(&capa_lock);
+ for (i = 0; i < 2; i++) {
+ if (keys[i].lk_keyid == capa->lc_keyid) {
+ oti->oti_capa_key = keys[i];
+ break;
+ }
+ }
+ spin_unlock(&capa_lock);
if (i == 2) {
DEBUG_CAPA(D_ERROR, capa, "no matched capa key");
}
int osd_object_auth(const struct lu_env *env, struct dt_object *dt,
- struct lustre_capa *capa, __u64 opc)
+ struct lustre_capa *capa, __u64 opc)
{
- const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
- struct osd_device *dev = osd_dev(dt->do_lu.lo_dev);
- struct md_capainfo *ci;
- int rc;
+ const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
+ struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
+ struct lu_capainfo *lci;
+ int rc;
- if (!dev->od_fl_capa)
- return 0;
+ if (!osd->od_fl_capa)
+ return 0;
- if (capa == BYPASS_CAPA)
- return 0;
+ if (capa == BYPASS_CAPA)
+ return 0;
- ci = md_capainfo(env);
- if (unlikely(!ci))
- return 0;
+ lci = lu_capainfo_get(env);
+ if (unlikely(lci == NULL))
+ return 0;
- if (ci->mc_auth == LC_ID_NONE)
- return 0;
+ if (lci->lci_auth == LC_ID_NONE)
+ return 0;
- if (!capa) {
- CERROR("no capability is provided for fid "DFID"\n", PFID(fid));
- return -EACCES;
- }
+ if (capa == NULL) {
+ CERROR("%s: no capability provided for FID "DFID": rc = %d\n",
+ osd_name(osd), PFID(fid), -EACCES);
+ return -EACCES;
+ }
- if (!lu_fid_eq(fid, &capa->lc_fid)) {
- DEBUG_CAPA(D_ERROR, capa, "fid "DFID" mismatch with",
- PFID(fid));
- return -EACCES;
- }
+ if (!lu_fid_eq(fid, &capa->lc_fid)) {
+ DEBUG_CAPA(D_ERROR, capa, "fid "DFID" mismatch with",
+ PFID(fid));
+ return -EACCES;
+ }
- if (!capa_opc_supported(capa, opc)) {
- DEBUG_CAPA(D_ERROR, capa, "opc "LPX64" not supported by", opc);
- return -EACCES;
- }
+ if (!capa_opc_supported(capa, opc)) {
+ DEBUG_CAPA(D_ERROR, capa, "opc "LPX64" not supported by", opc);
+ return -EACCES;
+ }
- if ((rc = capa_is_sane(env, dev, capa, dev->od_capa_keys))) {
- DEBUG_CAPA(D_ERROR, capa, "insane (rc %d)", rc);
- return -EACCES;
- }
+ rc = capa_is_sane(env, osd, capa, osd->od_capa_keys);
+ if (rc != 0) {
+ DEBUG_CAPA(D_ERROR, capa, "insane: rc = %d", rc);
+ return -EACCES;
+ }
- return 0;
+ return 0;
}
static struct timespec *osd_inode_time(const struct lu_env *env,
{
attr->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
- LA_FLAGS | LA_NLINK | LA_RDEV | LA_BLKSIZE;
+ LA_FLAGS | LA_NLINK | LA_RDEV | LA_BLKSIZE |
+ LA_TYPE;
attr->la_atime = LTIME_S(inode->i_atime);
attr->la_mtime = LTIME_S(inode->i_mtime);
{
struct osd_object *obj = osd_dt_obj(dt);
- LASSERT(dt_object_exists(dt));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LINVRNT(osd_invariant(obj));
if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
return -EACCES;
- cfs_spin_lock(&obj->oo_guard);
- osd_inode_getattr(env, obj->oo_inode, attr);
- cfs_spin_unlock(&obj->oo_guard);
- return 0;
+ spin_lock(&obj->oo_guard);
+ osd_inode_getattr(env, obj->oo_inode, attr);
+ spin_unlock(&obj->oo_guard);
+ return 0;
}
static int osd_declare_attr_set(const struct lu_env *env,
oh = container_of0(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
- OSD_DECLARE_OP(oh, attr_set);
- oh->ot_credits += osd_dto_credits_noquota[DTO_ATTR_SET_BASE];
+ osd_trans_declare_op(env, oh, OSD_OT_ATTR_SET,
+ osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
if (attr == NULL || obj->oo_inode == NULL)
RETURN(rc);
bits = attr->la_valid;
- LASSERT(!(bits & LA_TYPE)); /* Huh? You want too much. */
-
if (bits & LA_ATIME)
inode->i_atime = *osd_inode_time(env, inode, attr->la_atime);
if (bits & LA_CTIME)
if (bits & LA_GID)
inode->i_gid = attr->la_gid;
if (bits & LA_NLINK)
- inode->i_nlink = attr->la_nlink;
+ set_nlink(inode, attr->la_nlink);
if (bits & LA_RDEV)
inode->i_rdev = attr->la_rdev;
int rc;
LASSERT(handle != NULL);
- LASSERT(dt_object_exists(dt));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(osd_invariant(obj));
if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
return -EACCES;
- OSD_EXEC_OP(handle, attr_set);
+ osd_trans_exec_op(env, handle, OSD_OT_ATTR_SET);
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FID_MAPPING)) {
+ struct osd_thread_info *oti = osd_oti_get(env);
+ const struct lu_fid *fid0 = lu_object_fid(&dt->do_lu);
+ struct lu_fid *fid1 = &oti->oti_fid;
+ struct osd_inode_id *id = &oti->oti_id;
+ struct iam_path_descr *ipd;
+ struct iam_container *bag;
+ struct osd_thandle *oh;
+ int rc;
+
+ fid_cpu_to_be(fid1, fid0);
+ memset(id, 1, sizeof(*id));
+ bag = &osd_fid2oi(osd_dev(dt->do_lu.lo_dev),
+ fid0)->oi_dir.od_container;
+ ipd = osd_idx_ipd_get(env, bag);
+ if (unlikely(ipd == NULL))
+ RETURN(-ENOMEM);
+
+ oh = container_of0(handle, struct osd_thandle, ot_super);
+ rc = iam_update(oh->ot_handle, bag, (const struct iam_key *)fid1,
+ (const struct iam_rec *)id, ipd);
+ osd_ipd_put(env, bag, ipd);
+ return(rc > 0 ? 0 : rc);
+ }
inode = obj->oo_inode;
- if (!osd_dt_dev(handle->th_dev)->od_is_md) {
- /* OFD support */
- rc = osd_quota_transfer(inode, attr);
- if (rc)
- return rc;
- } else {
-#ifdef HAVE_QUOTA_SUPPORT
- if ((attr->la_valid & LA_UID && attr->la_uid != inode->i_uid) ||
- (attr->la_valid & LA_GID && attr->la_gid != inode->i_gid)) {
- struct osd_ctxt *save = &osd_oti_get(env)->oti_ctxt;
- struct iattr iattr;
- int rc;
-
- iattr.ia_valid = 0;
- if (attr->la_valid & LA_UID)
- iattr.ia_valid |= ATTR_UID;
- if (attr->la_valid & LA_GID)
- iattr.ia_valid |= ATTR_GID;
- iattr.ia_uid = attr->la_uid;
- iattr.ia_gid = attr->la_gid;
- osd_push_ctxt(env, save, 1);
- rc = ll_vfs_dq_transfer(inode, &iattr) ? -EDQUOT : 0;
- osd_pop_ctxt(save, 1);
- if (rc != 0)
- return rc;
- }
-#endif
- }
- cfs_spin_lock(&obj->oo_guard);
- rc = osd_inode_setattr(env, inode, attr);
- cfs_spin_unlock(&obj->oo_guard);
+ ll_vfs_dq_init(inode);
+
+ rc = osd_quota_transfer(inode, attr);
+ if (rc)
+ return rc;
+
+ spin_lock(&obj->oo_guard);
+ rc = osd_inode_setattr(env, inode, attr);
+ spin_unlock(&obj->oo_guard);
if (!rc)
- inode->i_sb->s_op->dirty_inode(inode);
+ ll_dirty_inode(inode, I_DIRTY_DATASYNC);
return rc;
}
}
static int osd_mkfile(struct osd_thread_info *info, struct osd_object *obj,
- cfs_umode_t mode,
- struct dt_allocation_hint *hint,
- struct thandle *th)
+ umode_t mode, struct dt_allocation_hint *hint,
+ struct thandle *th)
{
int result;
struct osd_device *osd = osd_obj2dev(obj);
struct osd_thandle *oth;
struct dt_object *parent = NULL;
struct inode *inode;
-#ifdef HAVE_QUOTA_SUPPORT
- struct osd_ctxt *save = &info->oti_ctxt;
-#endif
LINVRNT(osd_invariant(obj));
LASSERT(obj->oo_inode == NULL);
if (hint && hint->dah_parent)
parent = hint->dah_parent;
-#ifdef HAVE_QUOTA_SUPPORT
- osd_push_ctxt(info->oti_env, save, osd_dt_dev(th->th_dev)->od_is_md);
-#endif
inode = ldiskfs_create_inode(oth->ot_handle,
parent ? osd_dt_obj(parent)->oo_inode :
osd_sb(osd)->s_root->d_inode,
mode);
-#ifdef HAVE_QUOTA_SUPPORT
- osd_pop_ctxt(save, osd_dt_dev(th->th_dev)->od_is_md);
-#endif
if (!IS_ERR(inode)) {
/* Do not update file c/mtime in ldiskfs.
* NB: don't need any lock because no contention at this
* early stage */
inode->i_flags |= S_NOCMTIME;
- inode->i_state |= I_LUSTRE_NOSCRUB;
+
+ /* For a newly created object, it must be consistent,
+ * and it is unnecessary to scrub against it. */
+ ldiskfs_set_inode_state(inode, LDISKFS_STATE_LUSTRE_NOSCRUB);
obj->oo_inode = inode;
result = 0;
} else {
{
int result;
struct osd_thandle *oth;
- struct osd_device *osd = osd_obj2dev(obj);
__u32 mode = (attr->la_mode & (S_IFMT | S_IRWXUGO | S_ISVTX));
LASSERT(S_ISDIR(attr->la_mode));
oth = container_of(th, struct osd_thandle, ot_super);
LASSERT(oth->ot_handle->h_transaction != NULL);
result = osd_mkfile(info, obj, mode, hint, th);
- if (result == 0 && osd->od_iop_mode == 0) {
- LASSERT(obj->oo_inode != NULL);
- /*
- * XXX uh-oh... call low-level iam function directly.
- */
- result = iam_lvar_create(obj->oo_inode, OSD_NAME_LEN, 4,
- sizeof (struct osd_fid_pack),
- oth->ot_handle);
- }
return result;
}
}
static int osd_mknod(struct osd_thread_info *info, struct osd_object *obj,
- struct lu_attr *attr,
- struct dt_allocation_hint *hint,
- struct dt_object_format *dof,
- struct thandle *th)
+ struct lu_attr *attr,
+ struct dt_allocation_hint *hint,
+ struct dt_object_format *dof,
+ struct thandle *th)
{
- cfs_umode_t mode = attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX);
- int result;
+ umode_t mode = attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX);
+ int result;
- LINVRNT(osd_invariant(obj));
- LASSERT(obj->oo_inode == NULL);
+ LINVRNT(osd_invariant(obj));
+ LASSERT(obj->oo_inode == NULL);
LASSERT(S_ISCHR(mode) || S_ISBLK(mode) ||
S_ISFIFO(mode) || S_ISSOCK(mode));
* This inode should be marked dirty for i_rdev. Currently
* that is done in the osd_attr_init().
*/
- init_special_inode(obj->oo_inode, mode, attr->la_rdev);
+ init_special_inode(obj->oo_inode, obj->oo_inode->i_mode,
+ attr->la_rdev);
}
LINVRNT(osd_invariant(obj));
return result;
static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
struct dt_object *parent, struct dt_object *child,
- cfs_umode_t child_mode)
+ umode_t child_mode)
{
LASSERT(ah);
if ((valid & LA_MTIME) && (attr->la_mtime == LTIME_S(inode->i_mtime)))
attr->la_valid &= ~LA_MTIME;
- if (!osd_obj2dev(obj)->od_is_md) {
- /* OFD support */
- result = osd_quota_transfer(inode, attr);
- if (result)
- return;
- } else {
-#ifdef HAVE_QUOTA_SUPPORT
- attr->la_valid &= ~(LA_UID | LA_GID);
-#endif
- }
+ result = osd_quota_transfer(inode, attr);
+ if (result)
+ return;
if (attr->la_valid != 0) {
result = osd_inode_setattr(info->oti_env, inode, attr);
* enabled on ldiskfs (lquota takes care of it).
*/
LASSERTF(result == 0, "%d", result);
- inode->i_sb->s_op->dirty_inode(inode);
+ ll_dirty_inode(inode, I_DIRTY_DATASYNC);
}
attr->la_valid = valid;
/* restore previous umask value */
current->fs->umask = umask;
- return result;
+ return result;
}
/**
* \retval 0, on success
*/
static int __osd_oi_insert(const struct lu_env *env, struct osd_object *obj,
- const struct lu_fid *fid, struct thandle *th)
+ const struct lu_fid *fid, struct thandle *th)
{
- struct osd_thread_info *info = osd_oti_get(env);
- struct osd_inode_id *id = &info->oti_id;
- struct osd_device *osd = osd_obj2dev(obj);
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct osd_inode_id *id = &info->oti_id;
+ struct osd_device *osd = osd_obj2dev(obj);
+ struct osd_thandle *oh;
- LASSERT(obj->oo_inode != NULL);
+ LASSERT(obj->oo_inode != NULL);
- if (osd->od_is_md) {
- struct md_ucred *uc = md_ucred(env);
- LASSERT(uc != NULL);
- }
+ oh = container_of0(th, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle);
osd_id_gen(id, obj->oo_inode->i_ino, obj->oo_inode->i_generation);
- return osd_oi_insert(info, osd, fid, id, th);
+ return osd_oi_insert(info, osd, fid, id, oh->ot_handle, OI_CHECK_FLD);
}
-static int osd_declare_object_create(const struct lu_env *env,
- struct dt_object *dt,
- struct lu_attr *attr,
- struct dt_allocation_hint *hint,
- struct dt_object_format *dof,
- struct thandle *handle)
+int osd_fld_lookup(const struct lu_env *env, struct osd_device *osd,
+ obd_seq seq, struct lu_seq_range *range)
{
- struct osd_thandle *oh;
- int rc;
- ENTRY;
+ struct seq_server_site *ss = osd_seq_site(osd);
- LASSERT(handle != NULL);
+ if (fid_seq_is_idif(seq)) {
+ fld_range_set_ost(range);
+ range->lsr_index = idif_ost_idx(seq);
+ return 0;
+ }
- oh = container_of0(handle, struct osd_thandle, ot_super);
- LASSERT(oh->ot_handle == NULL);
+ if (!fid_seq_in_fldb(seq)) {
+ fld_range_set_mdt(range);
+ if (ss != NULL)
+ /* FIXME: If ss is NULL, it is not supposed to get lsr_index
+ * at all */
+ range->lsr_index = ss->ss_node_id;
+ return 0;
+ }
- OSD_DECLARE_OP(oh, create);
- oh->ot_credits += osd_dto_credits_noquota[DTO_OBJECT_CREATE];
- /* XXX: So far, only normal fid needs be inserted into the oi,
- * things could be changed later. Revise following code then. */
- if (fid_is_norm(lu_object_fid(&dt->do_lu))) {
- OSD_DECLARE_OP(oh, insert);
- oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_INSERT];
- /* Reuse idle OI block may cause additional one OI block
+ LASSERT(ss != NULL);
+ fld_range_set_any(range);
+ /* OSD will only do local fld lookup */
+ return fld_local_lookup(env, ss->ss_server_fld, seq, range);
+}
+
+/*
+ * Concurrency: no external locking is necessary.
+ */
+static int osd_declare_object_create(const struct lu_env *env,
+ struct dt_object *dt,
+ struct lu_attr *attr,
+ struct dt_allocation_hint *hint,
+ struct dt_object_format *dof,
+ struct thandle *handle)
+{
+ struct osd_thandle *oh;
+ int rc;
+ ENTRY;
+
+ LASSERT(handle != NULL);
+
+ oh = container_of0(handle, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle == NULL);
+
+ osd_trans_declare_op(env, oh, OSD_OT_CREATE,
+ osd_dto_credits_noquota[DTO_OBJECT_CREATE]);
+ if (!fid_is_on_ost(osd_oti_get(env), osd_dt_dev(handle->th_dev),
+ lu_object_fid(&dt->do_lu), OI_CHECK_FLD))
+ /* Reusing an idle OI block may cause one additional OI block
* to be changed. */
- oh->ot_credits += 1;
- }
- /* If this is directory, then we expect . and .. to be inserted as
- * well. The one directory block always needs to be created for the
- * directory, so we could use DTO_WRITE_BASE here (GDT, block bitmap,
- * block), there is no danger of needing a tree for the first block.
- */
- if (attr && S_ISDIR(attr->la_mode)) {
- OSD_DECLARE_OP(oh, insert);
- OSD_DECLARE_OP(oh, insert);
- oh->ot_credits += osd_dto_credits_noquota[DTO_WRITE_BASE];
- }
+ osd_trans_declare_op(env, oh, OSD_OT_INSERT,
+ osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);
+
+ /* If this is directory, then we expect . and .. to be inserted as
+ * well. The one directory block always needs to be created for the
+ * directory, so we could use DTO_WRITE_BASE here (GDT, block bitmap,
+ * block), there is no danger of needing a tree for the first block.
+ */
+ if (attr && S_ISDIR(attr->la_mode)) {
+ osd_trans_declare_op(env, oh, OSD_OT_INSERT,
+ osd_dto_credits_noquota[DTO_WRITE_BASE]);
+ osd_trans_declare_op(env, oh, OSD_OT_INSERT, 0);
+ }
if (!attr)
RETURN(0);
rc = osd_declare_inode_qid(env, attr->la_uid, attr->la_gid, 1, oh,
false, false, NULL, false);
+ if (rc != 0)
+ RETURN(rc);
+
RETURN(rc);
}
ENTRY;
LINVRNT(osd_invariant(obj));
- LASSERT(!dt_object_exists(dt));
+ LASSERT(!dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(osd_write_locked(env, obj));
LASSERT(th != NULL);
* 'tune2fs -O quota' will take care of creating them */
RETURN(-EPERM);
- OSD_EXEC_OP(th, create);
+ osd_trans_exec_op(env, th, OSD_OT_CREATE);
+ osd_trans_declare_rb(env, th, OSD_OT_REF_ADD);
result = __osd_object_create(info, obj, attr, hint, dof, th);
if (result == 0)
result = __osd_oi_insert(env, obj, fid, th);
- LASSERT(ergo(result == 0, dt_object_exists(dt)));
+ LASSERT(ergo(result == 0,
+ dt_object_exists(dt) && !dt_object_remote(dt)));
+
LASSERT(osd_invariant(obj));
RETURN(result);
}
* Concurrency: must be locked
*/
static int osd_declare_object_destroy(const struct lu_env *env,
- struct dt_object *dt,
- struct thandle *th)
+ struct dt_object *dt,
+ struct thandle *th)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct inode *inode = obj->oo_inode;
- struct osd_thandle *oh;
- int rc;
- ENTRY;
-
- oh = container_of0(th, struct osd_thandle, ot_super);
- LASSERT(oh->ot_handle == NULL);
- LASSERT(inode);
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct inode *inode = obj->oo_inode;
+ struct osd_thandle *oh;
+ int rc;
+ ENTRY;
- OSD_DECLARE_OP(oh, destroy);
- OSD_DECLARE_OP(oh, delete);
- oh->ot_credits += osd_dto_credits_noquota[DTO_OBJECT_DELETE];
- /* XXX: So far, only normal fid needs to be inserted into the OI,
- * so only normal fid needs to be removed from the OI also. */
- if (fid_is_norm(lu_object_fid(&dt->do_lu))) {
- oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_DELETE];
- /* Recycle idle OI leaf may cause additional three OI blocks
- * to be changed. */
- oh->ot_credits += 3;
- }
+ oh = container_of0(th, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle == NULL);
+ LASSERT(inode);
+ osd_trans_declare_op(env, oh, OSD_OT_DESTROY,
+ osd_dto_credits_noquota[DTO_OBJECT_DELETE]);
+ /* Recycling an idle OI leaf may cause three additional OI blocks
+ * to be changed. */
+ osd_trans_declare_op(env, oh, OSD_OT_DELETE,
+ osd_dto_credits_noquota[DTO_INDEX_DELETE] + 3);
/* one less inode */
- rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, -1, oh,
+ rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, -1, oh,
false, true, NULL, false);
if (rc)
RETURN(rc);
/* data to be truncated */
- rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh, true,
- true, NULL, false);
- RETURN(rc);
+ rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
+ true, true, NULL, false);
+ RETURN(rc);
}
static int osd_object_destroy(const struct lu_env *env,
if (unlikely(fid_is_acct(fid)))
RETURN(-EPERM);
- /* Parallel control for OI scrub. For most of cases, there is no
- * lock contention. So it will not affect unlink performance. */
- cfs_mutex_lock(&inode->i_mutex);
- if (S_ISDIR(inode->i_mode)) {
- LASSERT(osd_inode_unlinked(inode) ||
- inode->i_nlink == 1);
- cfs_spin_lock(&obj->oo_guard);
- inode->i_nlink = 0;
- cfs_spin_unlock(&obj->oo_guard);
- inode->i_sb->s_op->dirty_inode(inode);
- } else {
- LASSERT(osd_inode_unlinked(inode));
- }
+ if (S_ISDIR(inode->i_mode)) {
+ LASSERT(osd_inode_unlinked(inode) || inode->i_nlink == 1 ||
+ inode->i_nlink == 2);
+ /* it will check/delete the inode from remote parent,
+ * how to optimize it? unlink performance impact XXX */
+ result = osd_delete_from_remote_parent(env, osd, obj, oh);
+ if (result != 0 && result != -ENOENT) {
+ CERROR("%s: delete inode "DFID": rc = %d\n",
+ osd_name(osd), PFID(fid), result);
+ }
+ spin_lock(&obj->oo_guard);
+ clear_nlink(inode);
+ spin_unlock(&obj->oo_guard);
+ ll_dirty_inode(inode, I_DIRTY_DATASYNC);
+ }
- OSD_EXEC_OP(th, destroy);
+ osd_trans_exec_op(env, th, OSD_OT_DESTROY);
- result = osd_oi_delete(osd_oti_get(env), osd, fid, th);
- cfs_mutex_unlock(&inode->i_mutex);
+ result = osd_oi_delete(osd_oti_get(env), osd, fid, oh->ot_handle,
+ OI_CHECK_FLD);
/* XXX: add to ext3 orphan list */
/* rc = ext3_orphan_add(handle_t *handle, struct inode *inode) */
}
/**
- * Helper function for osd_xattr_set()
- */
-static int __osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
- const struct lu_buf *buf, const char *name, int fl)
-{
- struct osd_object *obj = osd_dt_obj(dt);
- struct inode *inode = obj->oo_inode;
- struct osd_thread_info *info = osd_oti_get(env);
- struct dentry *dentry = &info->oti_child_dentry;
- int fs_flags = 0;
- int rc;
-
- LASSERT(dt_object_exists(dt));
- LASSERT(inode->i_op != NULL && inode->i_op->setxattr != NULL);
-
- if (fl & LU_XATTR_REPLACE)
- fs_flags |= XATTR_REPLACE;
-
- if (fl & LU_XATTR_CREATE)
- fs_flags |= XATTR_CREATE;
-
- dentry->d_inode = inode;
- rc = inode->i_op->setxattr(dentry, name, buf->lb_buf,
- buf->lb_len, fs_flags);
- return rc;
-}
-
-/**
* Put the fid into lustre_mdt_attrs, and then place the structure
* inode's ea. This fid should not be altered during the life time
* of the inode.
*
* FIXME: It is good to have/use ldiskfs_xattr_set_handle() here
*/
-static int osd_ea_fid_set(const struct lu_env *env, struct dt_object *dt,
- const struct lu_fid *fid)
+int osd_ea_fid_set(struct osd_thread_info *info, struct inode *inode,
+ const struct lu_fid *fid, __u32 compat, __u32 incompat)
{
- struct osd_thread_info *info = osd_oti_get(env);
- struct lustre_mdt_attrs *mdt_attrs = &info->oti_mdt_attrs;
+ struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
+ int rc;
+ ENTRY;
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_FID_INLMA))
+ RETURN(0);
- lustre_lma_init(mdt_attrs, fid);
- lustre_lma_swab(mdt_attrs);
- return __osd_xattr_set(env, dt,
- osd_buf_get(env, mdt_attrs, sizeof *mdt_attrs),
- XATTR_NAME_LMA, LU_XATTR_CREATE);
+ lustre_lma_init(lma, fid, compat, incompat);
+ lustre_lma_swab(lma);
+
+ rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma, sizeof(*lma),
+ XATTR_CREATE);
+ /* LMA may already exist, but we need to check that all the
+ * desired compat/incompat flags have been added. */
+ if (unlikely(rc == -EEXIST)) {
+ if (compat == 0 && incompat == 0)
+ RETURN(0);
+
+ rc = __osd_xattr_get(inode, &info->oti_obj_dentry,
+ XATTR_NAME_LMA, info->oti_mdt_attrs_old,
+ LMA_OLD_SIZE);
+ if (rc <= 0)
+ RETURN(-EINVAL);
+
+ lustre_lma_swab(lma);
+ if (!(~lma->lma_compat & compat) &&
+ !(~lma->lma_incompat & incompat))
+ RETURN(0);
+
+ lma->lma_compat |= compat;
+ lma->lma_incompat |= incompat;
+ lustre_lma_swab(lma);
+ rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
+ sizeof(*lma), XATTR_REPLACE);
+ }
+ RETURN(rc);
}
/**
* its inmemory API.
*/
void osd_get_ldiskfs_dirent_param(struct ldiskfs_dentry_param *param,
- const struct dt_rec *fid)
+ const struct dt_rec *fid)
{
- param->edp_magic = LDISKFS_LUFID_MAGIC;
- param->edp_len = sizeof(struct lu_fid) + 1;
+ if (!fid_is_namespace_visible((const struct lu_fid *)fid) ||
+ OBD_FAIL_CHECK(OBD_FAIL_FID_IGIF)) {
+ param->edp_magic = 0;
+ return;
+ }
- fid_cpu_to_be((struct lu_fid *)param->edp_data,
- (struct lu_fid *)fid);
+ param->edp_magic = LDISKFS_LUFID_MAGIC;
+ param->edp_len = sizeof(struct lu_fid) + 1;
+ fid_cpu_to_be((struct lu_fid *)param->edp_data, (struct lu_fid *)fid);
}
/**
- * Try to read the fid from inode ea into dt_rec, if return value
- * i.e. rc is +ve, then we got fid, otherwise we will have to form igif
+ * Try to read the fid from inode ea into dt_rec.
*
* \param fid object fid.
*
RETURN(0);
}
+static int osd_add_dot_dotdot_internal(struct osd_thread_info *info,
+ struct inode *dir,
+ struct inode *parent_dir,
+ const struct dt_rec *dot_fid,
+ const struct dt_rec *dot_dot_fid,
+ struct osd_thandle *oth)
+{
+ struct ldiskfs_dentry_param *dot_ldp;
+ struct ldiskfs_dentry_param *dot_dot_ldp;
+
+ dot_dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp2;
+ osd_get_ldiskfs_dirent_param(dot_dot_ldp, dot_dot_fid);
+
+ dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
+ dot_ldp->edp_magic = 0;
+ return ldiskfs_add_dot_dotdot(oth->ot_handle, parent_dir,
+ dir, dot_ldp, dot_dot_ldp);
+}
+
+/**
+ * Create a local agent inode for a remote entry
+ */
+static struct inode *osd_create_local_agent_inode(const struct lu_env *env,
+ struct osd_device *osd,
+ struct osd_object *pobj,
+ const struct lu_fid *fid,
+ struct thandle *th)
+{
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct inode *local;
+ struct osd_thandle *oh;
+ int rc;
+ ENTRY;
+
+ LASSERT(th);
+ oh = container_of(th, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle->h_transaction != NULL);
+
+ /* FIXME: Insert index api needs to know the mode of
+ * the remote object. Just use S_IFDIR for now */
+ local = ldiskfs_create_inode(oh->ot_handle, pobj->oo_inode, S_IFDIR);
+ if (IS_ERR(local)) {
+ CERROR("%s: create local error %d\n", osd_name(osd),
+ (int)PTR_ERR(local));
+ RETURN(local);
+ }
+
+ /* Set special LMA flag for local agent inode */
+ rc = osd_ea_fid_set(info, local, fid, 0, LMAI_AGENT);
+ if (rc != 0) {
+ CERROR("%s: set LMA for "DFID" remote inode failed: rc = %d\n",
+ osd_name(osd), PFID(fid), rc);
+ RETURN(ERR_PTR(rc));
+ }
+
+ rc = osd_add_dot_dotdot_internal(info, local, pobj->oo_inode,
+ (const struct dt_rec *)lu_object_fid(&pobj->oo_dt.do_lu),
+ (const struct dt_rec *)fid, oh);
+ if (rc != 0) {
+ CERROR("%s: "DFID" add dot dotdot error: rc = %d\n",
+ osd_name(osd), PFID(fid), rc);
+ RETURN(ERR_PTR(rc));
+ }
+
+ RETURN(local);
+}
+
+/**
+ * Delete local agent inode for remote entry
+ */
+static int osd_delete_local_agent_inode(const struct lu_env *env,
+ struct osd_device *osd,
+ const struct lu_fid *fid,
+ __u32 ino, struct osd_thandle *oh)
+{
+ struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_inode_id *id = &oti->oti_id;
+ struct inode *inode;
+ ENTRY;
+
+ id->oii_ino = le32_to_cpu(ino);
+ id->oii_gen = OSD_OII_NOGEN;
+ inode = osd_iget(oti, osd, id);
+ if (IS_ERR(inode)) {
+ CERROR("%s: iget error "DFID" id %u:%u\n", osd_name(osd),
+ PFID(fid), id->oii_ino, id->oii_gen);
+ RETURN(PTR_ERR(inode));
+ }
+
+ clear_nlink(inode);
+ mark_inode_dirty(inode);
+ CDEBUG(D_INODE, "%s: delete remote inode "DFID" %lu\n",
+ osd_name(osd), PFID(fid), inode->i_ino);
+ iput(inode);
+ RETURN(0);
+}
+
/**
* OSD layer object create function for interoperability mode (b11826).
* This is mostly similar to osd_object_create(). Only difference being, fid is
ENTRY;
LASSERT(osd_invariant(obj));
- LASSERT(!dt_object_exists(dt));
+ LASSERT(!dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(osd_write_locked(env, obj));
LASSERT(th != NULL);
* 'tune2fs -O quota' will take care of creating them */
RETURN(-EPERM);
- OSD_EXEC_OP(th, create);
+ osd_trans_exec_op(env, th, OSD_OT_CREATE);
+ osd_trans_declare_rb(env, th, OSD_OT_REF_ADD);
result = __osd_object_create(info, obj, attr, hint, dof, th);
- /* objects under osd root shld have igif fid, so dont add fid EA */
- if (result == 0 && fid_seq(fid) >= FID_SEQ_NORMAL)
- result = osd_ea_fid_set(env, dt, fid);
+ if (result == 0)
+ result = osd_ea_fid_set(info, obj->oo_inode, fid,
+ fid_is_on_ost(info, osd_obj2dev(obj),
+ fid, OI_CHECK_FLD) ?
+ LMAC_FID_ON_OST : 0, 0);
- if (result == 0)
- result = __osd_oi_insert(env, obj, fid, th);
+ if (result == 0)
+ result = __osd_oi_insert(env, obj, fid, th);
- LASSERT(ergo(result == 0, dt_object_exists(dt)));
+ LASSERT(ergo(result == 0,
+ dt_object_exists(dt) && !dt_object_remote(dt)));
LINVRNT(osd_invariant(obj));
RETURN(result);
}
struct dt_object *dt,
struct thandle *handle)
{
- struct osd_thandle *oh;
+ struct osd_thandle *oh;
/* it's possible that object doesn't exist yet */
LASSERT(handle != NULL);
oh = container_of0(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
- OSD_DECLARE_OP(oh, ref_add);
- oh->ot_credits += osd_dto_credits_noquota[DTO_ATTR_SET_BASE];
+ osd_trans_declare_op(env, oh, OSD_OT_REF_ADD,
+ osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
- return 0;
+ return 0;
}
/*
static int osd_object_ref_add(const struct lu_env *env,
struct dt_object *dt, struct thandle *th)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct inode *inode = obj->oo_inode;
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct inode *inode = obj->oo_inode;
+ struct osd_thandle *oh;
+ int rc = 0;
LINVRNT(osd_invariant(obj));
- LASSERT(dt_object_exists(dt));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(osd_write_locked(env, obj));
LASSERT(th != NULL);
- OSD_EXEC_OP(th, ref_add);
+ oh = container_of0(th, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle != NULL);
- /*
- * DIR_NLINK feature is set for compatibility reasons if:
- * 1) nlinks > LDISKFS_LINK_MAX, or
- * 2) nlinks == 2, since this indicates i_nlink was previously 1.
- *
- * It is easier to always set this flag (rather than check and set),
- * since it has less overhead, and the superblock will be dirtied
- * at some point. Both e2fsprogs and any Lustre-supported ldiskfs
- * do not actually care whether this flag is set or not.
- */
- cfs_spin_lock(&obj->oo_guard);
- inode->i_nlink++;
- if (S_ISDIR(inode->i_mode) && inode->i_nlink > 1) {
- if (inode->i_nlink >= LDISKFS_LINK_MAX ||
- inode->i_nlink == 2)
- inode->i_nlink = 1;
- }
- LASSERT(inode->i_nlink <= LDISKFS_LINK_MAX);
- cfs_spin_unlock(&obj->oo_guard);
- inode->i_sb->s_op->dirty_inode(inode);
- LINVRNT(osd_invariant(obj));
+ osd_trans_exec_op(env, th, OSD_OT_REF_ADD);
- return 0;
+ /*
+ * The DIR_NLINK feature allows directories to exceed LDISKFS_LINK_MAX
+ * (65000) subdirectories by storing "1" in i_nlink if the link count
+ * would otherwise overflow. Directory traversal tools understand
+ * that (st_nlink == 1) indicates that the filesystem does not track
+ * hard links count on the directory, and will not abort subdirectory
+ * scanning early once (st_nlink - 2) subdirs have been found.
+ *
+ * This also has to properly handle the case of inodes with nlink == 0
+ * in case they are being linked into the PENDING directory
+ */
+ spin_lock(&obj->oo_guard);
+ ldiskfs_inc_count(oh->ot_handle, inode);
+ LASSERT(inode->i_nlink <= LDISKFS_LINK_MAX);
+ spin_unlock(&obj->oo_guard);
+
+ ll_dirty_inode(inode, I_DIRTY_DATASYNC);
+ LINVRNT(osd_invariant(obj));
+
+ return rc;
}
static int osd_declare_object_ref_del(const struct lu_env *env,
{
struct osd_thandle *oh;
- LASSERT(dt_object_exists(dt));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(handle != NULL);
oh = container_of0(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
- OSD_DECLARE_OP(oh, ref_del);
- oh->ot_credits += osd_dto_credits_noquota[DTO_ATTR_SET_BASE];
+ osd_trans_declare_op(env, oh, OSD_OT_REF_DEL,
+ osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
- return 0;
+ return 0;
}
/*
static int osd_object_ref_del(const struct lu_env *env, struct dt_object *dt,
struct thandle *th)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct inode *inode = obj->oo_inode;
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct inode *inode = obj->oo_inode;
+ struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
+ struct osd_thandle *oh;
- LINVRNT(osd_invariant(obj));
- LASSERT(dt_object_exists(dt));
- LASSERT(osd_write_locked(env, obj));
- LASSERT(th != NULL);
+ LINVRNT(osd_invariant(obj));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
+ LASSERT(osd_write_locked(env, obj));
+ LASSERT(th != NULL);
- OSD_EXEC_OP(th, ref_del);
-
- cfs_spin_lock(&obj->oo_guard);
- LASSERT(inode->i_nlink > 0);
- inode->i_nlink--;
- /* If this is/was a many-subdir directory (nlink > LDISKFS_LINK_MAX)
- * then the nlink count is 1. Don't let it be set to 0 or the directory
- * inode will be deleted incorrectly. */
- if (S_ISDIR(inode->i_mode) && inode->i_nlink == 0)
- inode->i_nlink++;
- cfs_spin_unlock(&obj->oo_guard);
- inode->i_sb->s_op->dirty_inode(inode);
- LINVRNT(osd_invariant(obj));
+ oh = container_of0(th, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle != NULL);
- return 0;
+ osd_trans_exec_op(env, th, OSD_OT_REF_DEL);
+
+ spin_lock(&obj->oo_guard);
+ /* That can be the result of an upgrade from an old Lustre version and
+ * applies only to local files. Just skip this ref_del call.
+ * ext4_unlink() only treats this as a warning, don't LASSERT here.*/
+ if (inode->i_nlink == 0) {
+ CDEBUG_LIMIT(fid_is_norm(lu_object_fid(&dt->do_lu)) ?
+ D_ERROR : D_INODE, "%s: nlink == 0 on "DFID
+ ", maybe an upgraded file? (LU-3915)\n",
+ osd_name(osd), PFID(lu_object_fid(&dt->do_lu)));
+ spin_unlock(&obj->oo_guard);
+ return 0;
+ }
+
+ ldiskfs_dec_count(oh->ot_handle, inode);
+ spin_unlock(&obj->oo_guard);
+
+ ll_dirty_inode(inode, I_DIRTY_DATASYNC);
+ LINVRNT(osd_invariant(obj));
+
+ return 0;
}
/*
return sizeof(dt_obj_version_t);
}
- LASSERT(dt_object_exists(dt));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(inode->i_op != NULL && inode->i_op->getxattr != NULL);
if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
return -EACCES;
- dentry->d_inode = inode;
- return inode->i_op->getxattr(dentry, name, buf->lb_buf, buf->lb_len);
+ return __osd_xattr_get(inode, dentry, name, buf->lb_buf, buf->lb_len);
}
oh = container_of0(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
- OSD_DECLARE_OP(oh, xattr_set);
- if (strcmp(name, XATTR_NAME_VERSION) == 0)
- oh->ot_credits += osd_dto_credits_noquota[DTO_ATTR_SET_BASE];
- else
- oh->ot_credits += osd_dto_credits_noquota[DTO_XATTR_SET];
+ osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET,
+ strcmp(name, XATTR_NAME_VERSION) == 0 ?
+ osd_dto_credits_noquota[DTO_ATTR_SET_BASE] :
+ osd_dto_credits_noquota[DTO_XATTR_SET]);
return 0;
}
LDISKFS_I(inode)->i_fs_version = *new_version;
/** Version is set after all inode operations are finished,
* so we should mark it dirty here */
- inode->i_sb->s_op->dirty_inode(inode);
+ ll_dirty_inode(inode, I_DIRTY_DATASYNC);
}
/*
const struct lu_buf *buf, const char *name, int fl,
struct thandle *handle, struct lustre_capa *capa)
{
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct inode *inode = obj->oo_inode;
+ struct osd_thread_info *info = osd_oti_get(env);
+ int fs_flags = 0;
+ ENTRY;
+
LASSERT(handle != NULL);
/* version set is not real XATTR */
if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
return -EACCES;
- OSD_EXEC_OP(handle, xattr_set);
- return __osd_xattr_set(env, dt, buf, name, fl);
+ osd_trans_exec_op(env, handle, OSD_OT_XATTR_SET);
+ if (fl & LU_XATTR_REPLACE)
+ fs_flags |= XATTR_REPLACE;
+
+ if (fl & LU_XATTR_CREATE)
+ fs_flags |= XATTR_CREATE;
+
+ return __osd_xattr_set(info, inode, name, buf->lb_buf, buf->lb_len,
+ fs_flags);
}
/*
struct osd_thread_info *info = osd_oti_get(env);
struct dentry *dentry = &info->oti_obj_dentry;
- LASSERT(dt_object_exists(dt));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(inode->i_op != NULL && inode->i_op->listxattr != NULL);
LASSERT(osd_read_locked(env, obj) || osd_write_locked(env, obj));
return -EACCES;
dentry->d_inode = inode;
+ dentry->d_sb = inode->i_sb;
return inode->i_op->listxattr(dentry, buf->lb_buf, buf->lb_len);
}
{
struct osd_thandle *oh;
- LASSERT(dt_object_exists(dt));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(handle != NULL);
oh = container_of0(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
- OSD_DECLARE_OP(oh, xattr_set);
- oh->ot_credits += osd_dto_credits_noquota[DTO_XATTR_SET];
+ osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET,
+ osd_dto_credits_noquota[DTO_XATTR_SET]);
- return 0;
+ return 0;
}
/*
struct dentry *dentry = &info->oti_obj_dentry;
int rc;
- LASSERT(dt_object_exists(dt));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(inode->i_op != NULL && inode->i_op->removexattr != NULL);
- LASSERT(osd_write_locked(env, obj));
LASSERT(handle != NULL);
if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
return -EACCES;
- OSD_EXEC_OP(handle, xattr_set);
+ osd_trans_exec_op(env, handle, OSD_OT_XATTR_SET);
+ ll_vfs_dq_init(inode);
dentry->d_inode = inode;
+ dentry->d_sb = inode->i_sb;
rc = inode->i_op->removexattr(dentry, name);
return rc;
}
static struct obd_capa *osd_capa_get(const struct lu_env *env,
- struct dt_object *dt,
- struct lustre_capa *old,
- __u64 opc)
+ struct dt_object *dt,
+ struct lustre_capa *old, __u64 opc)
{
- struct osd_thread_info *info = osd_oti_get(env);
- const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
- struct osd_object *obj = osd_dt_obj(dt);
- struct osd_device *dev = osd_obj2dev(obj);
- struct lustre_capa_key *key = &info->oti_capa_key;
- struct lustre_capa *capa = &info->oti_capa;
- struct obd_capa *oc;
- struct md_capainfo *ci;
- int rc;
- ENTRY;
+ struct osd_thread_info *info = osd_oti_get(env);
+ const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_device *osd = osd_obj2dev(obj);
+ struct lustre_capa_key *key = &info->oti_capa_key;
+ struct lustre_capa *capa = &info->oti_capa;
+ struct obd_capa *oc;
+ struct lu_capainfo *lci;
+ int rc;
+ ENTRY;
- if (!dev->od_fl_capa)
- RETURN(ERR_PTR(-ENOENT));
+ if (!osd->od_fl_capa)
+ RETURN(ERR_PTR(-ENOENT));
- LASSERT(dt_object_exists(dt));
- LINVRNT(osd_invariant(obj));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
+ LINVRNT(osd_invariant(obj));
- /* renewal sanity check */
- if (old && osd_object_auth(env, dt, old, opc))
- RETURN(ERR_PTR(-EACCES));
-
- ci = md_capainfo(env);
- if (unlikely(!ci))
- RETURN(ERR_PTR(-ENOENT));
-
- switch (ci->mc_auth) {
- case LC_ID_NONE:
- RETURN(NULL);
- case LC_ID_PLAIN:
- capa->lc_uid = obj->oo_inode->i_uid;
- capa->lc_gid = obj->oo_inode->i_gid;
- capa->lc_flags = LC_ID_PLAIN;
- break;
- case LC_ID_CONVERT: {
- __u32 d[4], s[4];
-
- s[0] = obj->oo_inode->i_uid;
- cfs_get_random_bytes(&(s[1]), sizeof(__u32));
- s[2] = obj->oo_inode->i_gid;
- cfs_get_random_bytes(&(s[3]), sizeof(__u32));
- rc = capa_encrypt_id(d, s, key->lk_key, CAPA_HMAC_KEY_MAX_LEN);
- if (unlikely(rc))
- RETURN(ERR_PTR(rc));
-
- capa->lc_uid = ((__u64)d[1] << 32) | d[0];
- capa->lc_gid = ((__u64)d[3] << 32) | d[2];
- capa->lc_flags = LC_ID_CONVERT;
- break;
- }
- default:
- RETURN(ERR_PTR(-EINVAL));
+ /* renewal sanity check */
+ if (old && osd_object_auth(env, dt, old, opc))
+ RETURN(ERR_PTR(-EACCES));
+
+ lci = lu_capainfo_get(env);
+ if (unlikely(lci == NULL))
+ RETURN(ERR_PTR(-ENOENT));
+
+ switch (lci->lci_auth) {
+ case LC_ID_NONE:
+ RETURN(NULL);
+ case LC_ID_PLAIN:
+ capa->lc_uid = obj->oo_inode->i_uid;
+ capa->lc_gid = obj->oo_inode->i_gid;
+ capa->lc_flags = LC_ID_PLAIN;
+ break;
+ case LC_ID_CONVERT: {
+ __u32 d[4], s[4];
+
+ s[0] = obj->oo_inode->i_uid;
+ cfs_get_random_bytes(&(s[1]), sizeof(__u32));
+ s[2] = obj->oo_inode->i_gid;
+ cfs_get_random_bytes(&(s[3]), sizeof(__u32));
+ rc = capa_encrypt_id(d, s, key->lk_key, CAPA_HMAC_KEY_MAX_LEN);
+ if (unlikely(rc))
+ RETURN(ERR_PTR(rc));
+
+ capa->lc_uid = ((__u64)d[1] << 32) | d[0];
+ capa->lc_gid = ((__u64)d[3] << 32) | d[2];
+ capa->lc_flags = LC_ID_CONVERT;
+ break;
}
+ default:
+ RETURN(ERR_PTR(-EINVAL));
+ }
- capa->lc_fid = *fid;
- capa->lc_opc = opc;
- capa->lc_flags |= dev->od_capa_alg << 24;
- capa->lc_timeout = dev->od_capa_timeout;
- capa->lc_expiry = 0;
+ capa->lc_fid = *fid;
+ capa->lc_opc = opc;
+ capa->lc_flags |= osd->od_capa_alg << 24;
+ capa->lc_timeout = osd->od_capa_timeout;
+ capa->lc_expiry = 0;
- oc = capa_lookup(dev->od_capa_hash, capa, 1);
- if (oc) {
- LASSERT(!capa_is_expired(oc));
- RETURN(oc);
- }
+ oc = capa_lookup(osd->od_capa_hash, capa, 1);
+ if (oc) {
+ LASSERT(!capa_is_expired(oc));
+ RETURN(oc);
+ }
- cfs_spin_lock(&capa_lock);
- *key = dev->od_capa_keys[1];
- cfs_spin_unlock(&capa_lock);
+ spin_lock(&capa_lock);
+ *key = osd->od_capa_keys[1];
+ spin_unlock(&capa_lock);
- capa->lc_keyid = key->lk_keyid;
- capa->lc_expiry = cfs_time_current_sec() + dev->od_capa_timeout;
+ capa->lc_keyid = key->lk_keyid;
+ capa->lc_expiry = cfs_time_current_sec() + osd->od_capa_timeout;
- rc = capa_hmac(capa->lc_hmac, capa, key->lk_key);
- if (rc) {
- DEBUG_CAPA(D_ERROR, capa, "HMAC failed: %d for", rc);
- RETURN(ERR_PTR(rc));
- }
+ rc = capa_hmac(capa->lc_hmac, capa, key->lk_key);
+ if (rc) {
+ DEBUG_CAPA(D_ERROR, capa, "HMAC failed: %d for", rc);
+ RETURN(ERR_PTR(rc));
+ }
- oc = capa_add(dev->od_capa_hash, capa);
- RETURN(oc);
+ oc = capa_add(osd->od_capa_hash, capa);
+ RETURN(oc);
}
static int osd_object_sync(const struct lu_env *env, struct dt_object *dt)
ENTRY;
dentry->d_inode = inode;
+ dentry->d_sb = inode->i_sb;
file->f_dentry = dentry;
file->f_mapping = inode->i_mapping;
file->f_op = inode->i_fop;
+ set_file_inode(file, inode);
+#ifndef HAVE_FILE_FSYNC_4ARGS
mutex_lock(&inode->i_mutex);
- rc = file->f_op->fsync(file, dentry, 0);
+#endif
+ rc = do_fsync(file, 0);
+#ifndef HAVE_FILE_FSYNC_4ARGS
mutex_unlock(&inode->i_mutex);
+#endif
RETURN(rc);
}
int result;
int skip_iam = 0;
struct osd_object *obj = osd_dt_obj(dt);
- struct osd_device *osd = osd_obj2dev(obj);
LINVRNT(osd_invariant(obj));
- LASSERT(dt_object_exists(dt));
if (osd_object_is_root(obj)) {
dt->do_index_ops = &osd_index_ea_ops;
result = 0;
- } else if (feat == &dt_directory_features && osd->od_iop_mode) {
+ } else if (feat == &dt_directory_features) {
dt->do_index_ops = &osd_index_ea_ops;
- if (S_ISDIR(obj->oo_inode->i_mode))
+ if (obj->oo_inode != NULL && S_ISDIR(obj->oo_inode->i_mode))
result = 0;
else
result = -ENOTDIR;
} else if (unlikely(feat == &dt_otable_features)) {
dt->do_index_ops = &osd_otable_ops;
return 0;
- } else if (feat == &dt_acct_features) {
+ } else if (unlikely(feat == &dt_acct_features)) {
dt->do_index_ops = &osd_acct_index_ops;
result = 0;
skip_iam = 1;
OBD_ALLOC_PTR(dir);
if (dir != NULL) {
- cfs_spin_lock(&obj->oo_guard);
- if (obj->oo_dir == NULL)
- obj->oo_dir = dir;
- else
- /*
- * Concurrent thread allocated container data.
- */
- OBD_FREE_PTR(dir);
- cfs_spin_unlock(&obj->oo_guard);
- /*
- * Now, that we have container data, serialize its
- * initialization.
- */
- cfs_down_write(&obj->oo_ext_idx_sem);
- /*
- * recheck under lock.
- */
- if (!osd_has_index(obj))
- result = osd_iam_container_init(env, obj, dir);
- else
- result = 0;
- cfs_up_write(&obj->oo_ext_idx_sem);
+ spin_lock(&obj->oo_guard);
+ if (obj->oo_dir == NULL)
+ obj->oo_dir = dir;
+ else
+ /*
+ * Concurrent thread allocated container data.
+ */
+ OBD_FREE_PTR(dir);
+ spin_unlock(&obj->oo_guard);
+ /*
+ * Now, that we have container data, serialize its
+ * initialization.
+ */
+ down_write(&obj->oo_ext_idx_sem);
+ /*
+ * recheck under lock.
+ */
+ if (!osd_has_index(obj))
+ result = osd_iam_container_init(env, obj, dir);
+ else
+ result = 0;
+ up_write(&obj->oo_ext_idx_sem);
} else {
result = -ENOMEM;
}
}
LINVRNT(osd_invariant(obj));
+ if (result == 0 && is_quota_glb_feat(feat) &&
+ fid_seq(lu_object_fid(&dt->do_lu)) == FID_SEQ_QUOTA_GLB)
+ result = osd_quota_migration(env, dt, feat);
+
return result;
}
oh = container_of0(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
- OSD_DECLARE_OP(oh, delete);
- oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_DELETE];
+ osd_trans_declare_op(env, oh, OSD_OT_DELETE,
+ osd_dto_credits_noquota[DTO_INDEX_DELETE]);
- return 0;
+ return 0;
}
/**
ENTRY;
LINVRNT(osd_invariant(obj));
- LASSERT(dt_object_exists(dt));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(bag->ic_object == obj->oo_inode);
LASSERT(handle != NULL);
if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_DELETE))
RETURN(-EACCES);
- OSD_EXEC_OP(handle, delete);
+ osd_trans_exec_op(env, handle, OSD_OT_DELETE);
ipd = osd_idx_ipd_get(env, bag);
if (unlikely(ipd == NULL))
}
static int osd_index_declare_ea_delete(const struct lu_env *env,
- struct dt_object *dt,
- const struct dt_key *key,
- struct thandle *handle)
+ struct dt_object *dt,
+ const struct dt_key *key,
+ struct thandle *handle)
{
- struct osd_thandle *oh;
+ struct osd_thandle *oh;
struct inode *inode;
int rc;
ENTRY;
- LASSERT(dt_object_exists(dt));
- LASSERT(handle != NULL);
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
+ LASSERT(handle != NULL);
- oh = container_of0(handle, struct osd_thandle, ot_super);
- LASSERT(oh->ot_handle == NULL);
+ oh = container_of0(handle, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle == NULL);
- OSD_DECLARE_OP(oh, delete);
- oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_DELETE];
+ osd_trans_declare_op(env, oh, OSD_OT_DELETE,
+ osd_dto_credits_noquota[DTO_INDEX_DELETE]);
inode = osd_dt_obj(dt)->oo_inode;
LASSERT(inode);
rec = (struct osd_fid_pack *) (de->name + de->name_len + 1);
rc = osd_fid_unpack((struct lu_fid *)fid, rec);
}
- RETURN(rc);
+ return rc;
+}
+
+static int osd_remote_fid(const struct lu_env *env, struct osd_device *osd,
+ struct lu_fid *fid)
+{
+ ENTRY;
+
+ /* FID seqs not in FLDB, must be local seq */
+ if (unlikely(!fid_seq_in_fldb(fid_seq(fid))))
+ RETURN(0);
+
+ if (osd_seq_exists(env, osd, fid_seq(fid)))
+ RETURN(0);
+
+ RETURN(1);
}
/**
struct inode *dir = obj->oo_inode;
struct dentry *dentry;
struct osd_thandle *oh;
- struct ldiskfs_dir_entry_2 *de;
+ struct ldiskfs_dir_entry_2 *de = NULL;
struct buffer_head *bh;
struct htree_lock *hlock = NULL;
- int rc;
-
+ struct lu_fid *fid = &osd_oti_get(env)->oti_fid;
+ struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
+ int rc;
ENTRY;
LINVRNT(osd_invariant(obj));
- LASSERT(dt_object_exists(dt));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(handle != NULL);
- OSD_EXEC_OP(handle, delete);
+ osd_trans_exec_op(env, handle, OSD_OT_DELETE);
oh = container_of(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle != NULL);
if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_DELETE))
RETURN(-EACCES);
+ ll_vfs_dq_init(dir);
dentry = osd_child_dentry_get(env, obj,
(char *)key, strlen((char *)key));
ldiskfs_htree_lock(hlock, obj->oo_hl_head,
dir, LDISKFS_HLOCK_DEL);
} else {
- cfs_down_write(&obj->oo_ext_idx_sem);
+ down_write(&obj->oo_ext_idx_sem);
}
- bh = osd_ldiskfs_find_entry(dir, dentry, &de, hlock);
+ bh = osd_ldiskfs_find_entry(dir, &dentry->d_name, &de, NULL, hlock);
if (bh) {
- rc = ldiskfs_delete_entry(oh->ot_handle,
- dir, de, bh);
+ __u32 ino = 0;
+
+ /* If this is not the ".." entry, it might be a remote DNE
+ * entry and we need to check if the FID is for a remote
+ * MDT. If the FID is not in the directory entry (e.g.
+ * upgraded 1.8 filesystem without dirdata enabled) then
+ * we need to get the FID from the LMA. For a remote directory
+ * there HAS to be an LMA, it cannot be an IGIF inode in this
+ * case.
+ *
+ * Delete the entry before the agent inode in order to
+ * simplify error handling. At worst an error after deleting
+ * the entry first might leak the agent inode afterward. The
+ * reverse would need filesystem abort in case of error deleting
+ * the entry after the agent had been removed, or leave a
+ * dangling entry pointing at a random inode. */
+ if (strcmp((char *)key, dotdot) != 0) {
+ LASSERT(de != NULL);
+ rc = osd_get_fid_from_dentry(de, (struct dt_rec *)fid);
+ /* If Fid is not in dentry, try to get it from LMA */
+ if (rc == -ENODATA) {
+ struct osd_inode_id *id;
+ struct inode *inode;
+
+ /* Before trying to get fid from the inode,
+ * check whether the inode is valid.
+ *
+ * If the inode has been deleted, do not go
+ * ahead to do osd_ea_fid_get, which will set
+ * the inode to bad inode, which might cause
+					 * the inode to be deleted incorrectly */
+ inode = ldiskfs_iget(osd_sb(osd),
+ le32_to_cpu(de->inode));
+ if (IS_ERR(inode)) {
+ CDEBUG(D_INODE, "%s: "DFID"get inode"
+ "error.\n", osd_name(osd),
+ PFID(fid));
+ rc = PTR_ERR(inode);
+ } else {
+ if (likely(inode->i_nlink != 0)) {
+ id = &osd_oti_get(env)->oti_id;
+ rc = osd_ea_fid_get(env, obj,
+ le32_to_cpu(de->inode),
+ fid, id);
+ } else {
+ CDEBUG(D_INFO, "%s: %u "DFID
+ "deleted.\n",
+ osd_name(osd),
+ le32_to_cpu(de->inode),
+ PFID(fid));
+ rc = -ESTALE;
+ }
+ iput(inode);
+ }
+ }
+ if (rc == 0 &&
+ unlikely(osd_remote_fid(env, osd, fid)))
+ /* Need to delete agent inode */
+ ino = le32_to_cpu(de->inode);
+ }
+ rc = ldiskfs_delete_entry(oh->ot_handle, dir, de, bh);
brelse(bh);
+ if (rc == 0 && unlikely(ino != 0)) {
+ rc = osd_delete_local_agent_inode(env, osd, fid, ino,
+ oh);
+ if (rc != 0)
+ CERROR("%s: del local inode "DFID": rc = %d\n",
+ osd_name(osd), PFID(fid), rc);
+ }
} else {
rc = -ENOENT;
}
if (hlock != NULL)
ldiskfs_htree_unlock(hlock);
else
- cfs_up_write(&obj->oo_ext_idx_sem);
+ up_write(&obj->oo_ext_idx_sem);
+
+ if (rc != 0)
+ GOTO(out, rc);
+
+	/* For an inode on a remote MDT, .. will point to the
+	 * /Agent directory; check whether the entry needs to be
+	 * deleted from the agent directory */
+ if (unlikely(strcmp((char *)key, dotdot) == 0)) {
+ rc = osd_delete_from_remote_parent(env, osd_obj2dev(obj), obj,
+ oh);
+ if (rc != 0 && rc != -ENOENT) {
+ CERROR("%s: delete agent inode "DFID": rc = %d\n",
+ osd_name(osd), PFID(fid), rc);
+ }
+
+ if (rc == -ENOENT)
+ rc = 0;
+
+ GOTO(out, rc);
+ }
+out:
LASSERT(osd_invariant(obj));
RETURN(rc);
ENTRY;
LASSERT(osd_invariant(obj));
- LASSERT(dt_object_exists(dt));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(bag->ic_object == obj->oo_inode);
if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_LOOKUP))
{
struct osd_thandle *oh;
- LASSERT(dt_object_exists(dt));
LASSERT(handle != NULL);
oh = container_of0(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
- OSD_DECLARE_OP(oh, insert);
- oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_INSERT];
+ osd_trans_declare_op(env, oh, OSD_OT_INSERT,
+ osd_dto_credits_noquota[DTO_INDEX_INSERT]);
- return 0;
+ return 0;
}
/**
struct iam_path_descr *ipd;
struct osd_thandle *oh;
struct iam_container *bag = &obj->oo_dir->od_container;
-#ifdef HAVE_QUOTA_SUPPORT
- cfs_cap_t save = cfs_curproc_cap_pack();
-#endif
struct osd_thread_info *oti = osd_oti_get(env);
struct iam_rec *iam_rec;
int rc;
ENTRY;
LINVRNT(osd_invariant(obj));
- LASSERT(dt_object_exists(dt));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(bag->ic_object == obj->oo_inode);
LASSERT(th != NULL);
if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_INSERT))
RETURN(-EACCES);
- OSD_EXEC_OP(th, insert);
+ osd_trans_exec_op(env, th, OSD_OT_INSERT);
ipd = osd_idx_ipd_get(env, bag);
if (unlikely(ipd == NULL))
oh = container_of0(th, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle != NULL);
LASSERT(oh->ot_handle->h_transaction != NULL);
-#ifdef HAVE_QUOTA_SUPPORT
- if (ignore_quota)
- cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
- else
- cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
-#endif
if (S_ISDIR(obj->oo_inode->i_mode)) {
iam_rec = (struct iam_rec *)oti->oti_ldp;
osd_fid_pack((struct osd_fid_pack *)iam_rec, rec, &oti->oti_fid);
rc = iam_insert(oh->ot_handle, bag, (const struct iam_key *)key,
iam_rec, ipd);
-#ifdef HAVE_QUOTA_SUPPORT
- cfs_curproc_cap_unpack(save);
-#endif
osd_ipd_put(env, bag, ipd);
LINVRNT(osd_invariant(obj));
RETURN(rc);
oth = container_of(th, struct osd_thandle, ot_super);
LASSERT(oth->ot_handle != NULL);
LASSERT(oth->ot_handle->h_transaction != NULL);
+ LASSERT(pobj->oo_inode);
- child = osd_child_dentry_get(info->oti_env, pobj, name, strlen(name));
-
- /* XXX: remove fid_is_igif() check here.
- * IGIF check is just to handle insertion of .. when it is 'ROOT',
- * it is IGIF now but needs FID in dir entry as well for readdir
- * to work.
- * LU-838 should fix that and remove fid_is_igif() check */
- if (fid_is_igif((struct lu_fid *)fid) ||
- fid_is_norm((struct lu_fid *)fid)) {
- ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
- osd_get_ldiskfs_dirent_param(ldp, fid);
- child->d_fsdata = (void *)ldp;
- } else {
- child->d_fsdata = NULL;
- }
- rc = osd_ldiskfs_add_entry(oth->ot_handle, child, cinode, hlock);
+ ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
+ if (unlikely(pobj->oo_inode ==
+ osd_sb(osd_obj2dev(pobj))->s_root->d_inode))
+ ldp->edp_magic = 0;
+ else
+ osd_get_ldiskfs_dirent_param(ldp, fid);
+ child = osd_child_dentry_get(info->oti_env, pobj, name, strlen(name));
+ child->d_fsdata = (void *)ldp;
+ ll_vfs_dq_init(pobj->oo_inode);
+ rc = osd_ldiskfs_add_entry(oth->ot_handle, child, cinode, hlock);
- RETURN(rc);
+ RETURN(rc);
}
/**
struct thandle *th)
{
struct inode *inode = dir->oo_inode;
- struct ldiskfs_dentry_param *dot_ldp;
- struct ldiskfs_dentry_param *dot_dot_ldp;
struct osd_thandle *oth;
int result = 0;
dir->oo_compat_dot_created = 1;
result = 0;
}
- } else if(strcmp(name, dotdot) == 0) {
- dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
- dot_dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp2;
-
- if (!dir->oo_compat_dot_created)
- return -EINVAL;
- if (!fid_is_igif((struct lu_fid *)dot_fid)) {
- osd_get_ldiskfs_dirent_param(dot_ldp, dot_fid);
- osd_get_ldiskfs_dirent_param(dot_dot_ldp, dot_dot_fid);
- } else {
- dot_ldp = NULL;
- dot_dot_ldp = NULL;
- }
- /* in case of rename, dotdot is already created */
- if (dir->oo_compat_dotdot_created) {
- return __osd_ea_add_rec(info, dir, parent_dir, name,
- dot_dot_fid, NULL, th);
- }
+ } else if (strcmp(name, dotdot) == 0) {
+ if (!dir->oo_compat_dot_created)
+ return -EINVAL;
+ /* in case of rename, dotdot is already created */
+ if (dir->oo_compat_dotdot_created) {
+ return __osd_ea_add_rec(info, dir, parent_dir, name,
+ dot_dot_fid, NULL, th);
+ }
- result = ldiskfs_add_dot_dotdot(oth->ot_handle, parent_dir,
- inode, dot_ldp, dot_dot_ldp);
- if (result == 0)
- dir->oo_compat_dotdot_created = 1;
- }
+ result = osd_add_dot_dotdot_internal(info, dir->oo_inode,
+ parent_dir, dot_fid,
+ dot_dot_fid, oth);
+ if (result == 0)
+ dir->oo_compat_dotdot_created = 1;
+ }
- return result;
+ return result;
}
ldiskfs_htree_lock(hlock, pobj->oo_hl_head,
pobj->oo_inode, 0);
} else {
- cfs_down_write(&pobj->oo_ext_idx_sem);
+ down_write(&pobj->oo_ext_idx_sem);
}
rc = osd_add_dot_dotdot(info, pobj, cinode, name,
(struct dt_rec *)lu_object_fid(&pobj->oo_dt.do_lu),
ldiskfs_htree_lock(hlock, pobj->oo_hl_head,
pobj->oo_inode, LDISKFS_HLOCK_ADD);
} else {
- cfs_down_write(&pobj->oo_ext_idx_sem);
+ down_write(&pobj->oo_ext_idx_sem);
}
- rc = __osd_ea_add_rec(info, pobj, cinode, name, fid,
- hlock, th);
+ if (OBD_FAIL_CHECK(OBD_FAIL_FID_INDIR)) {
+ struct lu_fid *tfid = &info->oti_fid;
+
+ *tfid = *(const struct lu_fid *)fid;
+ tfid->f_ver = ~0;
+ rc = __osd_ea_add_rec(info, pobj, cinode, name,
+ (const struct dt_rec *)tfid,
+ hlock, th);
+ } else {
+ rc = __osd_ea_add_rec(info, pobj, cinode, name, fid,
+ hlock, th);
+ }
}
if (hlock != NULL)
ldiskfs_htree_unlock(hlock);
else
- cfs_up_write(&pobj->oo_ext_idx_sem);
+ up_write(&pobj->oo_ext_idx_sem);
return rc;
}
-static int
+static void
osd_consistency_check(struct osd_thread_info *oti, struct osd_device *dev,
struct osd_idmap_cache *oic)
{
int rc;
ENTRY;
+ if (!fid_is_norm(fid) && !fid_is_igif(fid))
+ RETURN_EXIT;
+
again:
- rc = osd_oi_lookup(oti, dev, fid, id);
+ rc = osd_oi_lookup(oti, dev, fid, id, OI_CHECK_FLD);
if (rc != 0 && rc != -ENOENT)
- RETURN(rc);
+ RETURN_EXIT;
if (rc == 0 && osd_id_eq(id, &oic->oic_lid))
- RETURN(0);
+ RETURN_EXIT;
if (thread_is_running(&scrub->os_thread)) {
rc = osd_oii_insert(dev, oic, rc == -ENOENT);
if (unlikely(rc == -EAGAIN))
goto again;
- RETURN(rc);
+ RETURN_EXIT;
}
- if (!scrub->os_no_scrub && ++once == 1) {
+ if (!dev->od_noscrub && ++once == 1) {
CDEBUG(D_LFSCK, "Trigger OI scrub by RPC for "DFID"\n",
PFID(fid));
rc = osd_scrub_start(dev);
goto again;
}
- RETURN(rc = -EREMCHG);
+ EXIT;
}
-/**
- * Calls ->lookup() to find dentry. From dentry get inode and
- * read inode's ea to get fid. This is required for interoperability
- * mode (b11826)
- *
- * \retval 0, on success
- * \retval -ve, on error
+static int osd_fail_fid_lookup(struct osd_thread_info *oti,
+ struct osd_device *dev,
+ struct osd_idmap_cache *oic,
+ struct lu_fid *fid, __u32 ino)
+{
+ struct lustre_mdt_attrs *lma = &oti->oti_mdt_attrs;
+ struct inode *inode;
+ int rc;
+
+ osd_id_gen(&oic->oic_lid, ino, OSD_OII_NOGEN);
+ inode = osd_iget(oti, dev, &oic->oic_lid);
+ if (IS_ERR(inode)) {
+ fid_zero(&oic->oic_fid);
+ return PTR_ERR(inode);
+ }
+
+ rc = osd_get_lma(oti, inode, &oti->oti_obj_dentry, lma);
+ iput(inode);
+ if (rc != 0)
+ fid_zero(&oic->oic_fid);
+ else
+ *fid = oic->oic_fid = lma->lma_self_fid;
+ return rc;
+}
+
+int osd_add_oi_cache(struct osd_thread_info *info, struct osd_device *osd,
+ struct osd_inode_id *id, const struct lu_fid *fid)
+{
+ CDEBUG(D_INODE, "add "DFID" %u:%u to info %p\n", PFID(fid),
+ id->oii_ino, id->oii_gen, info);
+ info->oti_cache.oic_lid = *id;
+ info->oti_cache.oic_fid = *fid;
+ info->oti_cache.oic_dev = osd;
+
+ return 0;
+}
+
+/**
+ * Calls ->lookup() to find dentry. From dentry get inode and
+ * read inode's ea to get fid. This is required for interoperability
+ * mode (b11826)
+ *
+ * \retval 0, on success
+ * \retval -ve, on error
*/
static int osd_ea_lookup_rec(const struct lu_env *env, struct osd_object *obj,
struct dt_rec *rec, const struct dt_key *key)
struct htree_lock *hlock = NULL;
int ino;
int rc;
+ ENTRY;
LASSERT(dir->i_op != NULL && dir->i_op->lookup != NULL);
ldiskfs_htree_lock(hlock, obj->oo_hl_head,
dir, LDISKFS_HLOCK_LOOKUP);
} else {
- cfs_down_read(&obj->oo_ext_idx_sem);
+ down_read(&obj->oo_ext_idx_sem);
}
- bh = osd_ldiskfs_find_entry(dir, dentry, &de, hlock);
+ bh = osd_ldiskfs_find_entry(dir, &dentry->d_name, &de, NULL, hlock);
if (bh) {
struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_inode_id *id = &oti->oti_id;
struct osd_idmap_cache *oic = &oti->oti_cache;
struct osd_device *dev = osd_obj2dev(obj);
struct osd_scrub *scrub = &dev->od_scrub;
struct scrub_file *sf = &scrub->os_file;
ino = le32_to_cpu(de->inode);
+ if (OBD_FAIL_CHECK(OBD_FAIL_FID_LOOKUP)) {
+ brelse(bh);
+ rc = osd_fail_fid_lookup(oti, dev, oic, fid, ino);
+ GOTO(out, rc);
+ }
+
rc = osd_get_fid_from_dentry(de, rec);
/* done with de, release bh */
brelse(bh);
if (rc != 0)
- rc = osd_ea_fid_get(env, obj, ino, fid, &oic->oic_lid);
+ rc = osd_ea_fid_get(env, obj, ino, fid, id);
else
- osd_id_gen(&oic->oic_lid, ino, OSD_OII_NOGEN);
- if (rc != 0 || !fid_is_norm(fid)) {
+ osd_id_gen(id, ino, OSD_OII_NOGEN);
+ if (rc != 0) {
fid_zero(&oic->oic_fid);
GOTO(out, rc);
}
- oic->oic_fid = *fid;
+ rc = osd_add_oi_cache(osd_oti_get(env), osd_obj2dev(obj), id,
+ fid);
+ if (rc != 0)
+ GOTO(out, rc);
if ((scrub->os_pos_current <= ino) &&
- (sf->sf_flags & SF_INCONSISTENT ||
+ ((sf->sf_flags & SF_INCONSISTENT) ||
+ (sf->sf_flags & SF_UPGRADE && fid_is_igif(fid)) ||
ldiskfs_test_bit(osd_oi_fid2idx(dev, fid),
sf->sf_oi_bitmap)))
- rc = osd_consistency_check(oti, dev, oic);
+ osd_consistency_check(oti, dev, oic);
} else {
rc = -ENOENT;
}
if (hlock != NULL)
ldiskfs_htree_unlock(hlock);
else
- cfs_up_read(&obj->oo_ext_idx_sem);
+ up_read(&obj->oo_ext_idx_sem);
return rc;
}
lu_object_put(env, luch);
child = ERR_PTR(-ENOENT);
}
- } else
- child = (void *)luch;
+ } else {
+ child = ERR_CAST(luch);
+ }
- return child;
+ return child;
}
/**
}
static int osd_index_declare_ea_insert(const struct lu_env *env,
- struct dt_object *dt,
- const struct dt_rec *rec,
- const struct dt_key *key,
- struct thandle *handle)
+ struct dt_object *dt,
+ const struct dt_rec *rec,
+ const struct dt_key *key,
+ struct thandle *handle)
{
- struct osd_thandle *oh;
- struct inode *inode;
- int rc;
+ struct osd_thandle *oh;
+ struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
+ struct lu_fid *fid = (struct lu_fid *)rec;
+ int rc;
ENTRY;
- LASSERT(dt_object_exists(dt));
- LASSERT(handle != NULL);
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
+ LASSERT(handle != NULL);
- oh = container_of0(handle, struct osd_thandle, ot_super);
- LASSERT(oh->ot_handle == NULL);
+ oh = container_of0(handle, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle == NULL);
- OSD_DECLARE_OP(oh, insert);
- oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_INSERT];
+ osd_trans_declare_op(env, oh, OSD_OT_INSERT,
+ osd_dto_credits_noquota[DTO_INDEX_INSERT]);
- inode = osd_dt_obj(dt)->oo_inode;
- LASSERT(inode);
+ if (osd_dt_obj(dt)->oo_inode == NULL) {
+ const char *name = (const char *)key;
+ /* Object is not being created yet. Only happens when
+ * 1. declare directory create
+ * 2. declare insert .
+ * 3. declare insert ..
+ */
+ LASSERT(strcmp(name, dotdot) == 0 || strcmp(name, dot) == 0);
+ } else {
+ struct inode *inode = osd_dt_obj(dt)->oo_inode;
+
+ /* We ignore block quota on meta pool (MDTs), so needn't
+ * calculate how many blocks will be consumed by this index
+ * insert */
+ rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0,
+ oh, true, true, NULL, false);
+ }
+
+ if (fid == NULL)
+ RETURN(0);
+
+ rc = osd_remote_fid(env, osd, fid);
+ if (rc <= 0)
+ RETURN(rc);
+
+ rc = 0;
+
+ osd_trans_declare_op(env, oh, OSD_OT_CREATE,
+ osd_dto_credits_noquota[DTO_OBJECT_CREATE]);
+ osd_trans_declare_op(env, oh, OSD_OT_INSERT,
+ osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);
+ osd_trans_declare_op(env, oh, OSD_OT_INSERT,
+ osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);
- /* We ignore block quota on meta pool (MDTs), so needn't
- * calculate how many blocks will be consumed by this index
- * insert */
- rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
- true, true, NULL, false);
RETURN(rc);
}
const struct dt_key *key, struct thandle *th,
struct lustre_capa *capa, int ignore_quota)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct lu_fid *fid = (struct lu_fid *) rec;
- const char *name = (const char *)key;
- struct osd_object *child;
-#ifdef HAVE_QUOTA_SUPPORT
- cfs_cap_t save = cfs_curproc_cap_pack();
-#endif
- int rc;
-
- ENTRY;
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
+ struct lu_fid *fid = (struct lu_fid *) rec;
+ const char *name = (const char *)key;
+ struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_inode_id *id = &oti->oti_id;
+ struct inode *child_inode = NULL;
+ struct osd_object *child = NULL;
+ int rc;
+ ENTRY;
LASSERT(osd_invariant(obj));
- LASSERT(dt_object_exists(dt));
+ LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
LASSERT(th != NULL);
+ osd_trans_exec_op(env, th, OSD_OT_INSERT);
+
if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_INSERT))
RETURN(-EACCES);
- child = osd_object_find(env, dt, fid);
- if (!IS_ERR(child)) {
-#ifdef HAVE_QUOTA_SUPPORT
- if (ignore_quota)
- cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
- else
- cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
-#endif
- rc = osd_ea_add_rec(env, obj, child->oo_inode, name, rec, th);
-#ifdef HAVE_QUOTA_SUPPORT
- cfs_curproc_cap_unpack(save);
-#endif
- osd_object_put(env, child);
- } else {
- rc = PTR_ERR(child);
- }
+ LASSERTF(fid_is_sane(fid), "fid"DFID" is insane!", PFID(fid));
- LASSERT(osd_invariant(obj));
- RETURN(rc);
+ rc = osd_remote_fid(env, osd, fid);
+ if (rc < 0) {
+ CERROR("%s: Can not find object "DFID" rc %d\n",
+ osd_name(osd), PFID(fid), rc);
+ RETURN(rc);
+ }
+
+ if (rc == 1) {
+ /* Insert remote entry */
+ if (strcmp(name, dotdot) == 0 && strlen(name) == 2) {
+ struct osd_mdobj_map *omm = osd->od_mdt_map;
+ struct osd_thandle *oh;
+
+ /* If parent on remote MDT, we need put this object
+ * under AGENT */
+ oh = container_of(th, typeof(*oh), ot_super);
+ rc = osd_add_to_remote_parent(env, osd, obj, oh);
+ if (rc != 0) {
+ CERROR("%s: add "DFID" error: rc = %d\n",
+ osd_name(osd),
+ PFID(lu_object_fid(&dt->do_lu)), rc);
+ RETURN(rc);
+ }
+
+ child_inode = igrab(omm->omm_remote_parent->d_inode);
+ } else {
+ child_inode = osd_create_local_agent_inode(env, osd,
+ obj, fid,
+ th);
+ if (IS_ERR(child_inode))
+ RETURN(PTR_ERR(child_inode));
+ }
+ } else {
+ /* Insert local entry */
+ child = osd_object_find(env, dt, fid);
+ if (IS_ERR(child)) {
+ CERROR("%s: Can not find object "DFID"%u:%u: rc = %d\n",
+ osd_name(osd), PFID(fid),
+ id->oii_ino, id->oii_gen,
+ (int)PTR_ERR(child));
+ RETURN(PTR_ERR(child));
+ }
+ child_inode = igrab(child->oo_inode);
+ }
+
+ rc = osd_ea_add_rec(env, obj, child_inode, name, rec, th);
+
+ iput(child_inode);
+ if (child != NULL)
+ osd_object_put(env, child);
+ LASSERT(osd_invariant(obj));
+ RETURN(rc);
}
/**
*
* \param di osd iterator
*/
-
static void osd_it_iam_put(const struct lu_env *env, struct dt_it *di)
{
struct osd_it_iam *it = (struct osd_it_iam *)di;
return iam_it_key_size(&it->oi_it);
}
-static inline void osd_it_append_attrs(struct lu_dirent *ent, __u32 attr,
- int len, __u16 type)
+static inline void
+osd_it_append_attrs(struct lu_dirent *ent, int len, __u16 type)
{
- struct luda_type *lt;
- const unsigned align = sizeof(struct luda_type) - 1;
+ /* check if file type is required */
+ if (ent->lde_attrs & LUDA_TYPE) {
+ struct luda_type *lt;
+ int align = sizeof(*lt) - 1;
- /* check if file type is required */
- if (attr & LUDA_TYPE) {
- len = (len + align) & ~align;
-
- lt = (void *) ent->lde_name + len;
- lt->lt_type = cpu_to_le16(CFS_DTTOIF(type));
- ent->lde_attrs |= LUDA_TYPE;
- }
+ len = (len + align) & ~align;
+ lt = (struct luda_type *)(ent->lde_name + len);
+ lt->lt_type = cpu_to_le16(DTTOIF(type));
+ }
- ent->lde_attrs = cpu_to_le32(ent->lde_attrs);
+ ent->lde_attrs = cpu_to_le32(ent->lde_attrs);
}
/**
* build lu direct from backend fs dirent.
*/
-static inline void osd_it_pack_dirent(struct lu_dirent *ent,
- struct lu_fid *fid, __u64 offset,
- char *name, __u16 namelen,
- __u16 type, __u32 attr)
+static inline void
+osd_it_pack_dirent(struct lu_dirent *ent, struct lu_fid *fid, __u64 offset,
+ char *name, __u16 namelen, __u16 type, __u32 attr)
{
- fid_cpu_to_le(&ent->lde_fid, fid);
- ent->lde_attrs = LUDA_FID;
+ ent->lde_attrs = attr | LUDA_FID;
+ fid_cpu_to_le(&ent->lde_fid, fid);
- ent->lde_hash = cpu_to_le64(offset);
- ent->lde_reclen = cpu_to_le16(lu_dirent_calc_size(namelen, attr));
+ ent->lde_hash = cpu_to_le64(offset);
+ ent->lde_reclen = cpu_to_le16(lu_dirent_calc_size(namelen, attr));
- strncpy(ent->lde_name, name, namelen);
- ent->lde_namelen = cpu_to_le16(namelen);
+ strncpy(ent->lde_name, name, namelen);
+ ent->lde_name[namelen] = '\0';
+ ent->lde_namelen = cpu_to_le16(namelen);
- /* append lustre attributes */
- osd_it_append_attrs(ent, attr, namelen, type);
+ /* append lustre attributes */
+ osd_it_append_attrs(ent, namelen, type);
}
/**
}
};
+
/**
* Creates or initializes iterator context.
*
struct osd_object *obj = osd_dt_obj(dt);
struct osd_thread_info *info = osd_oti_get(env);
struct osd_it_ea *it = &info->oti_it_ea;
+ struct file *file = &it->oie_file;
struct lu_object *lo = &dt->do_lu;
struct dentry *obj_dentry = &info->oti_it_dentry;
ENTRY;
it->oie_dirent = NULL;
it->oie_buf = info->oti_it_ea_buf;
it->oie_obj = obj;
- it->oie_file.f_pos = 0;
- it->oie_file.f_dentry = obj_dentry;
- if (attr & LUDA_64BITHASH)
- it->oie_file.f_mode |= FMODE_64BITHASH;
- else
- it->oie_file.f_mode |= FMODE_32BITHASH;
- it->oie_file.f_mapping = obj->oo_inode->i_mapping;
- it->oie_file.f_op = obj->oo_inode->i_fop;
- it->oie_file.private_data = NULL;
- lu_object_get(lo);
- RETURN((struct dt_it *) it);
+
+ /* Reset the "file" totally to avoid to reuse any old value from
+ * former readdir handling, the "file->f_pos" should be zero. */
+ memset(file, 0, sizeof(*file));
+ /* Only FMODE_64BITHASH or FMODE_32BITHASH should be set, NOT both. */
+ if (attr & LUDA_64BITHASH)
+ file->f_mode = FMODE_64BITHASH;
+ else
+ file->f_mode = FMODE_32BITHASH;
+ file->f_dentry = obj_dentry;
+ file->f_mapping = obj->oo_inode->i_mapping;
+ file->f_op = obj->oo_inode->i_fop;
+ set_file_inode(file, obj->oo_inode);
+
+ lu_object_get(lo);
+ RETURN((struct dt_it *) it);
}
/**
unsigned d_type)
{
struct osd_it_ea *it = (struct osd_it_ea *)buf;
+ struct osd_object *obj = it->oie_obj;
struct osd_it_ea_dirent *ent = it->oie_dirent;
struct lu_fid *fid = &ent->oied_fid;
struct osd_fid_pack *rec;
OSD_IT_EA_BUFSIZE)
RETURN(1);
- if (d_type & LDISKFS_DIRENT_LUFID) {
- rec = (struct osd_fid_pack*) (name + namelen + 1);
-
- if (osd_fid_unpack(fid, rec) != 0)
- fid_zero(fid);
+ /* "." is just the object itself. */
+ if (namelen == 1 && name[0] == '.') {
+ *fid = obj->oo_dt.do_lu.lo_header->loh_fid;
+ } else if (d_type & LDISKFS_DIRENT_LUFID) {
+ rec = (struct osd_fid_pack*) (name + namelen + 1);
+ if (osd_fid_unpack(fid, rec) != 0)
+ fid_zero(fid);
+ } else {
+ fid_zero(fid);
+ }
+ d_type &= ~LDISKFS_DIRENT_LUFID;
- d_type &= ~LDISKFS_DIRENT_LUFID;
- } else {
- fid_zero(fid);
- }
+	/* Do NOT export the local root. */
+ if (unlikely(osd_sb(osd_obj2dev(obj))->s_root->d_inode->i_ino == ino)) {
+ ino = obj->oo_inode->i_ino;
+ *fid = obj->oo_dt.do_lu.lo_header->loh_fid;
+ }
ent->oied_ino = ino;
ent->oied_off = offset;
ldiskfs_htree_lock(hlock, obj->oo_hl_head,
inode, LDISKFS_HLOCK_READDIR);
} else {
- cfs_down_read(&obj->oo_ext_idx_sem);
+ down_read(&obj->oo_ext_idx_sem);
}
result = inode->i_fop->readdir(&it->oie_file, it,
if (hlock != NULL)
ldiskfs_htree_unlock(hlock);
else
- cfs_up_read(&obj->oo_ext_idx_sem);
+ up_read(&obj->oo_ext_idx_sem);
if (it->oie_rd_dirent == 0) {
result = -EIO;
return it->oie_dirent->oied_namelen;
}
+static int
+osd_dirent_update(handle_t *jh, struct super_block *sb,
+ struct osd_it_ea_dirent *ent, struct lu_fid *fid,
+ struct buffer_head *bh, struct ldiskfs_dir_entry_2 *de)
+{
+ struct osd_fid_pack *rec;
+ int rc;
+ ENTRY;
+
+ LASSERT(de->file_type & LDISKFS_DIRENT_LUFID);
+ LASSERT(de->rec_len >= de->name_len + sizeof(struct osd_fid_pack));
+
+ rc = ldiskfs_journal_get_write_access(jh, bh);
+ if (rc != 0) {
+ CERROR("%.16s: fail to write access for update dirent: "
+ "name = %.*s, rc = %d\n",
+ LDISKFS_SB(sb)->s_es->s_volume_name,
+ ent->oied_namelen, ent->oied_name, rc);
+ RETURN(rc);
+ }
+
+ rec = (struct osd_fid_pack *)(de->name + de->name_len + 1);
+ fid_cpu_to_be((struct lu_fid *)rec->fp_area, fid);
+ rc = ldiskfs_journal_dirty_metadata(jh, bh);
+ if (rc != 0)
+ CERROR("%.16s: fail to dirty metadata for update dirent: "
+ "name = %.*s, rc = %d\n",
+ LDISKFS_SB(sb)->s_es->s_volume_name,
+ ent->oied_namelen, ent->oied_name, rc);
+
+ RETURN(rc);
+}
+
+static inline int
+osd_dirent_has_space(__u16 reclen, __u16 namelen, unsigned blocksize)
+{
+ if (ldiskfs_rec_len_from_disk(reclen, blocksize) >=
+ __LDISKFS_DIR_REC_LEN(namelen + 1 + sizeof(struct osd_fid_pack)))
+ return 1;
+ else
+ return 0;
+}
+
+static inline int
+osd_dot_dotdot_has_space(struct ldiskfs_dir_entry_2 *de, int dot_dotdot)
+{
+ LASSERTF(dot_dotdot == 1 || dot_dotdot == 2,
+ "dot_dotdot = %d\n", dot_dotdot);
+
+ if (LDISKFS_DIR_REC_LEN(de) >=
+ __LDISKFS_DIR_REC_LEN(dot_dotdot + 1 + sizeof(struct osd_fid_pack)))
+ return 1;
+ else
+ return 0;
+}
+
+static int
+osd_dirent_reinsert(const struct lu_env *env, handle_t *jh,
+ struct inode *dir, struct inode *inode,
+ struct osd_it_ea_dirent *ent, struct lu_fid *fid,
+ struct buffer_head *bh, struct ldiskfs_dir_entry_2 *de,
+ struct htree_lock *hlock)
+{
+ struct dentry *dentry;
+ struct osd_fid_pack *rec;
+ struct ldiskfs_dentry_param *ldp;
+ int rc;
+ ENTRY;
+
+ if (!LDISKFS_HAS_INCOMPAT_FEATURE(inode->i_sb,
+ LDISKFS_FEATURE_INCOMPAT_DIRDATA))
+ RETURN(0);
+
+ /* There is enough space to hold the FID-in-dirent. */
+ if (osd_dirent_has_space(de->rec_len, ent->oied_namelen,
+ dir->i_sb->s_blocksize)) {
+ rc = ldiskfs_journal_get_write_access(jh, bh);
+ if (rc != 0) {
+ CERROR("%.16s: fail to write access for reinsert "
+ "dirent: name = %.*s, rc = %d\n",
+ LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
+ ent->oied_namelen, ent->oied_name, rc);
+ RETURN(rc);
+ }
+
+ de->name[de->name_len] = 0;
+ rec = (struct osd_fid_pack *)(de->name + de->name_len + 1);
+ rec->fp_len = sizeof(struct lu_fid) + 1;
+ fid_cpu_to_be((struct lu_fid *)rec->fp_area, fid);
+ de->file_type |= LDISKFS_DIRENT_LUFID;
+
+ rc = ldiskfs_journal_dirty_metadata(jh, bh);
+ if (rc != 0)
+ CERROR("%.16s: fail to dirty metadata for reinsert "
+ "dirent: name = %.*s, rc = %d\n",
+ LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
+ ent->oied_namelen, ent->oied_name, rc);
+
+ RETURN(rc);
+ }
+
+ rc = ldiskfs_delete_entry(jh, dir, de, bh);
+ if (rc != 0) {
+ CERROR("%.16s: fail to delete entry for reinsert dirent: "
+ "name = %.*s, rc = %d\n",
+ LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
+ ent->oied_namelen, ent->oied_name, rc);
+ RETURN(rc);
+ }
+
+ dentry = osd_child_dentry_by_inode(env, dir, ent->oied_name,
+ ent->oied_namelen);
+ ldp = (struct ldiskfs_dentry_param *)osd_oti_get(env)->oti_ldp;
+ osd_get_ldiskfs_dirent_param(ldp, (const struct dt_rec *)fid);
+ dentry->d_fsdata = (void *)ldp;
+ ll_vfs_dq_init(dir);
+ rc = osd_ldiskfs_add_entry(jh, dentry, inode, hlock);
+	/* Unfortunately, we cannot reinsert the name entry.
+ * That means we lose it! */
+ if (rc != 0)
+ CERROR("%.16s: fail to insert entry for reinsert dirent: "
+ "name = %.*s, rc = %d\n",
+ LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
+ ent->oied_namelen, ent->oied_name, rc);
+
+ RETURN(rc);
+}
+
+static int
+osd_dirent_check_repair(const struct lu_env *env, struct osd_object *obj,
+ struct osd_it_ea *it, struct lu_fid *fid,
+ struct osd_inode_id *id, __u32 *attr)
+{
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
+ struct osd_device *dev = osd_obj2dev(obj);
+ struct super_block *sb = osd_sb(dev);
+ const char *devname =
+ LDISKFS_SB(sb)->s_es->s_volume_name;
+ struct osd_it_ea_dirent *ent = it->oie_dirent;
+ struct inode *dir = obj->oo_inode;
+ struct htree_lock *hlock = NULL;
+ struct buffer_head *bh = NULL;
+ handle_t *jh = NULL;
+ struct ldiskfs_dir_entry_2 *de;
+ struct dentry *dentry;
+ struct inode *inode;
+ int credits;
+ int rc;
+ int dot_dotdot = 0;
+ bool dirty = false;
+ ENTRY;
+
+ if (ent->oied_name[0] == '.') {
+ if (ent->oied_namelen == 1)
+ dot_dotdot = 1;
+ else if (ent->oied_namelen == 2 && ent->oied_name[1] == '.')
+ dot_dotdot = 2;
+ }
+
+ dentry = osd_child_dentry_get(env, obj, ent->oied_name,
+ ent->oied_namelen);
+
+ /* We need to ensure that the name entry is still valid.
+	 * Because it may have been removed or renamed by others already.
+ *
+ * The unlink or rename operation will start journal before PDO lock,
+ * so to avoid deadlock, here we need to start journal handle before
+ * related PDO lock also. But because we do not know whether there
+ * will be something to be repaired before PDO lock, we just start
+ * journal without conditions.
+ *
+	 * We may need to remove the name entry first, then insert it back.
+ * One credit is for user quota file update.
+ * One credit is for group quota file update.
+ * Two credits are for dirty inode. */
+ credits = osd_dto_credits_noquota[DTO_INDEX_DELETE] +
+ osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1 + 1 + 2;
+
+again:
+ if (dev->od_dirent_journal) {
+ jh = osd_journal_start_sb(sb, LDISKFS_HT_MISC, credits);
+ if (IS_ERR(jh)) {
+ rc = PTR_ERR(jh);
+ CERROR("%.16s: fail to start trans for dirent "
+ "check_repair: credits %d, name %.*s, rc %d\n",
+ devname, credits, ent->oied_namelen,
+ ent->oied_name, rc);
+ RETURN(rc);
+ }
+
+ if (obj->oo_hl_head != NULL) {
+ hlock = osd_oti_get(env)->oti_hlock;
+ /* "0" means exclusive lock for the whole directory.
+ * We need to prevent others access such name entry
+ * during the delete + insert. Neither HLOCK_ADD nor
+			 * HLOCK_DEL can guarantee the atomicity. */
+ ldiskfs_htree_lock(hlock, obj->oo_hl_head, dir, 0);
+ } else {
+ down_write(&obj->oo_ext_idx_sem);
+ }
+ } else {
+ if (obj->oo_hl_head != NULL) {
+ hlock = osd_oti_get(env)->oti_hlock;
+ ldiskfs_htree_lock(hlock, obj->oo_hl_head, dir,
+ LDISKFS_HLOCK_LOOKUP);
+ } else {
+ down_read(&obj->oo_ext_idx_sem);
+ }
+ }
+
+ bh = osd_ldiskfs_find_entry(dir, &dentry->d_name, &de, NULL, hlock);
+ /* For dot/dotdot entry, if there is not enough space to hold the
+ * FID-in-dirent, just keep them there. It only happens when the
+ * device upgraded from 1.8 or restored from MDT file-level backup.
+	 * For the whole directory, only dot/dotdot entries have no FID-in-dirent
+	 * and need to get the FID from LMA during readdir; it will not affect the
+ * performance much. */
+ if ((bh == NULL) || (le32_to_cpu(de->inode) != ent->oied_ino) ||
+ (dot_dotdot != 0 && !osd_dot_dotdot_has_space(de, dot_dotdot))) {
+ *attr |= LUDA_IGNORE;
+ GOTO(out_journal, rc = 0);
+ }
+
+ osd_id_gen(id, ent->oied_ino, OSD_OII_NOGEN);
+ inode = osd_iget(info, dev, id);
+ if (IS_ERR(inode)) {
+ rc = PTR_ERR(inode);
+ if (rc == -ENOENT || rc == -ESTALE) {
+ *attr |= LUDA_IGNORE;
+ rc = 0;
+ }
+
+ GOTO(out_journal, rc);
+ }
+
+ /* skip the REMOTE_PARENT_DIR. */
+ if (inode == dev->od_mdt_map->omm_remote_parent->d_inode)
+ GOTO(out_inode, rc = 0);
+
+ rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
+ if (rc == 0) {
+ LASSERT(!(lma->lma_compat & LMAC_NOT_IN_OI));
+
+ if (fid_is_sane(fid)) {
+ /* FID-in-dirent is valid. */
+ if (lu_fid_eq(fid, &lma->lma_self_fid))
+ GOTO(out_inode, rc = 0);
+
+ /* Do not repair under dryrun mode. */
+ if (*attr & LUDA_VERIFY_DRYRUN) {
+ *attr |= LUDA_REPAIR;
+ GOTO(out_inode, rc = 0);
+ }
+
+ if (!dev->od_dirent_journal) {
+ iput(inode);
+ brelse(bh);
+ if (hlock != NULL)
+ ldiskfs_htree_unlock(hlock);
+ else
+ up_read(&obj->oo_ext_idx_sem);
+ dev->od_dirent_journal = 1;
+ goto again;
+ }
+
+ *fid = lma->lma_self_fid;
+ dirty = true;
+ /* Update the FID-in-dirent. */
+ rc = osd_dirent_update(jh, sb, ent, fid, bh, de);
+ if (rc == 0)
+ *attr |= LUDA_REPAIR;
+ } else {
+ /* Do not repair under dryrun mode. */
+ if (*attr & LUDA_VERIFY_DRYRUN) {
+ *fid = lma->lma_self_fid;
+ *attr |= LUDA_REPAIR;
+ GOTO(out_inode, rc = 0);
+ }
+
+ if (!dev->od_dirent_journal) {
+ iput(inode);
+ brelse(bh);
+ if (hlock != NULL)
+ ldiskfs_htree_unlock(hlock);
+ else
+ up_read(&obj->oo_ext_idx_sem);
+ dev->od_dirent_journal = 1;
+ goto again;
+ }
+
+ *fid = lma->lma_self_fid;
+ dirty = true;
+ /* Append the FID-in-dirent. */
+ rc = osd_dirent_reinsert(env, jh, dir, inode, ent,
+ fid, bh, de, hlock);
+ if (rc == 0)
+ *attr |= LUDA_REPAIR;
+ }
+ } else if (rc == -ENODATA) {
+ /* Do not repair under dryrun mode. */
+ if (*attr & LUDA_VERIFY_DRYRUN) {
+ if (fid_is_sane(fid)) {
+ *attr |= LUDA_REPAIR;
+ } else {
+ lu_igif_build(fid, inode->i_ino,
+ inode->i_generation);
+ *attr |= LUDA_UPGRADE;
+ }
+ GOTO(out_inode, rc = 0);
+ }
+
+ if (!dev->od_dirent_journal) {
+ iput(inode);
+ brelse(bh);
+ if (hlock != NULL)
+ ldiskfs_htree_unlock(hlock);
+ else
+ up_read(&obj->oo_ext_idx_sem);
+ dev->od_dirent_journal = 1;
+ goto again;
+ }
+
+ dirty = true;
+ if (unlikely(fid_is_sane(fid))) {
+ /* FID-in-dirent exists, but FID-in-LMA is lost.
+ * Trust the FID-in-dirent, and add FID-in-LMA. */
+ rc = osd_ea_fid_set(info, inode, fid, 0, 0);
+ if (rc == 0)
+ *attr |= LUDA_REPAIR;
+ } else {
+ lu_igif_build(fid, inode->i_ino, inode->i_generation);
+			/* It is probably an IGIF object. Only append the
+ * FID-in-dirent. OI scrub will process FID-in-LMA. */
+ rc = osd_dirent_reinsert(env, jh, dir, inode, ent,
+ fid, bh, de, hlock);
+ if (rc == 0)
+ *attr |= LUDA_UPGRADE;
+ }
+ }
+
+ GOTO(out_inode, rc);
+
+out_inode:
+ iput(inode);
+
+out_journal:
+ brelse(bh);
+ if (hlock != NULL) {
+ ldiskfs_htree_unlock(hlock);
+ } else {
+ if (dev->od_dirent_journal)
+ up_write(&obj->oo_ext_idx_sem);
+ else
+ up_read(&obj->oo_ext_idx_sem);
+ }
+ if (jh != NULL)
+ ldiskfs_journal_stop(jh);
+ if (rc >= 0 && !dirty)
+ dev->od_dirent_journal = 0;
+ return rc;
+}
/**
- * Returns the value (i.e. fid/igif) at current position from iterator's
- * in memory structure.
+ * Returns the value at current position from iterator's in memory structure.
*
* \param di struct osd_it_ea, iterator's in memory structure
* \param attr attr requested for dirent.
struct osd_scrub *scrub = &dev->od_scrub;
struct scrub_file *sf = &scrub->os_file;
struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_inode_id *id = &oti->oti_id;
struct osd_idmap_cache *oic = &oti->oti_cache;
struct lu_fid *fid = &it->oie_dirent->oied_fid;
struct lu_dirent *lde = (struct lu_dirent *)dtrec;
int rc = 0;
ENTRY;
- if (!fid_is_sane(fid)) {
- rc = osd_ea_fid_get(env, obj, ino, fid, &oic->oic_lid);
- if (rc != 0)
- RETURN(rc);
+ if (attr & LUDA_VERIFY) {
+ attr |= LUDA_TYPE;
+ if (unlikely(ino == osd_sb(dev)->s_root->d_inode->i_ino)) {
+ attr |= LUDA_IGNORE;
+ rc = 0;
+ } else {
+ rc = osd_dirent_check_repair(env, obj, it, fid, id,
+ &attr);
+ }
} else {
- osd_id_gen(&oic->oic_lid, ino, OSD_OII_NOGEN);
+ attr &= ~LU_DIRENT_ATTRS_MASK;
+ if (!fid_is_sane(fid)) {
+ if (OBD_FAIL_CHECK(OBD_FAIL_FID_LOOKUP))
+ RETURN(-ENOENT);
+
+ rc = osd_ea_fid_get(env, obj, ino, fid, id);
+ } else {
+ osd_id_gen(id, ino, OSD_OII_NOGEN);
+ }
}
+ /* Pack the entry anyway, at least the offset is right. */
osd_it_pack_dirent(lde, fid, it->oie_dirent->oied_off,
it->oie_dirent->oied_name,
it->oie_dirent->oied_namelen,
it->oie_dirent->oied_type, attr);
- if (!fid_is_norm(fid)) {
- fid_zero(&oic->oic_fid);
+
+ if (rc < 0)
+ RETURN(rc);
+
+ if (osd_remote_fid(env, dev, fid))
RETURN(0);
- }
- oic->oic_fid = *fid;
- if ((scrub->os_pos_current <= ino) &&
- (sf->sf_flags & SF_INCONSISTENT ||
+ if (likely(!(attr & LUDA_IGNORE)))
+ rc = osd_add_oi_cache(oti, dev, id, fid);
+
+ if (!(attr & LUDA_VERIFY) &&
+ (scrub->os_pos_current <= ino) &&
+ ((sf->sf_flags & SF_INCONSISTENT) ||
+ (sf->sf_flags & SF_UPGRADE && fid_is_igif(fid)) ||
ldiskfs_test_bit(osd_oi_fid2idx(dev, fid), sf->sf_oi_bitmap)))
- rc = osd_consistency_check(oti, dev, oic);
+ osd_consistency_check(oti, dev, oic);
RETURN(rc);
}
return -EACCES;
rc = osd_ea_lookup_rec(env, obj, rec, key);
-
if (rc == 0)
rc = +1;
RETURN(rc);
static void osd_key_fini(const struct lu_context *ctx,
struct lu_context_key *key, void* data)
{
- struct osd_thread_info *info = data;
+ struct osd_thread_info *info = data;
- if (info->oti_hlock != NULL)
- ldiskfs_htree_lock_free(info->oti_hlock);
- OBD_FREE(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
- OBD_FREE_PTR(info);
+ if (info->oti_hlock != NULL)
+ ldiskfs_htree_lock_free(info->oti_hlock);
+ OBD_FREE(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
+ lu_buf_free(&info->oti_iobuf.dr_pg_buf);
+ lu_buf_free(&info->oti_iobuf.dr_bl_buf);
+ OBD_FREE_PTR(info);
}
static void osd_key_exit(const struct lu_context *ctx,
{
struct osd_device *osd = osd_dev(d);
- strncpy(osd->od_svname, name, MAX_OBD_NAME);
+ if (strlcpy(osd->od_svname, name, sizeof(osd->od_svname))
+ >= sizeof(osd->od_svname))
+ return -E2BIG;
return osd_procfs_init(osd, name);
}
{
ENTRY;
- osd_scrub_cleanup(env, o);
-
- if (o->od_fsops) {
- fsfilt_put_ops(o->od_fsops);
- o->od_fsops = NULL;
- }
-
/* shutdown quota slave instance associated with the device */
if (o->od_quota_slave != NULL) {
qsd_fini(env, o->od_quota_slave);
RETURN(0);
}
+static void osd_umount(const struct lu_env *env, struct osd_device *o)
+{
+ ENTRY;
+
+ if (o->od_mnt != NULL) {
+ shrink_dcache_sb(osd_sb(o));
+ osd_sync(env, &o->od_dt_dev);
+
+ mntput(o->od_mnt);
+ o->od_mnt = NULL;
+ }
+
+ EXIT;
+}
+
static int osd_mount(const struct lu_env *env,
struct osd_device *o, struct lustre_cfg *cfg)
{
struct file_system_type *type;
char *options = NULL;
char *str;
- int rc = 0;
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct lu_fid *fid = &info->oti_fid;
+ struct inode *inode;
+ int rc = 0;
ENTRY;
if (o->od_mnt != NULL)
RETURN(0);
- o->od_fsops = fsfilt_get_ops(mt_str(LDD_MT_LDISKFS));
- if (o->od_fsops == NULL) {
- CERROR("Can't find fsfilt_ldiskfs\n");
- RETURN(-ENOTSUPP);
- }
+ if (strlen(dev) >= sizeof(o->od_mntdev))
+ RETURN(-E2BIG);
+ strcpy(o->od_mntdev, dev);
- OBD_PAGE_ALLOC(__page, CFS_ALLOC_STD);
+ OBD_PAGE_ALLOC(__page, GFP_IOFS);
if (__page == NULL)
GOTO(out, rc = -ENOMEM);
if (str)
lmd_flags = simple_strtoul(str + 1, NULL, 0);
opts = lustre_cfg_string(cfg, 3);
- page = (unsigned long)cfs_page_address(__page);
+ page = (unsigned long)page_address(__page);
options = (char *)page;
*options = '\0';
if (opts == NULL)
/* Glom up mount options */
if (*options != '\0')
strcat(options, ",");
- strlcat(options, "no_mbcache", CFS_PAGE_SIZE);
+ strlcat(options, "no_mbcache", PAGE_CACHE_SIZE);
type = get_fs_type("ldiskfs");
if (!type) {
}
o->od_mnt = vfs_kern_mount(type, s_flags, dev, options);
- cfs_module_put(type->owner);
+ module_put(type->owner);
if (IS_ERR(o->od_mnt)) {
rc = PTR_ERR(o->od_mnt);
- CERROR("%s: can't mount %s: %d\n", name, dev, rc);
o->od_mnt = NULL;
+ CERROR("%s: can't mount %s: %d\n", name, dev, rc);
GOTO(out, rc);
}
- if (lvfs_check_rdonly(o->od_mnt->mnt_sb->s_bdev)) {
+#ifdef HAVE_DEV_SET_RDONLY
+ if (dev_check_rdonly(o->od_mnt->mnt_sb->s_bdev)) {
CERROR("%s: underlying device %s is marked as read-only. "
"Setup failed\n", name, dev);
- mntput(o->od_mnt);
- o->od_mnt = NULL;
- GOTO(out, rc = -EROFS);
+ GOTO(out_mnt, rc = -EROFS);
}
+#endif
if (!LDISKFS_HAS_COMPAT_FEATURE(o->od_mnt->mnt_sb,
LDISKFS_FEATURE_COMPAT_HAS_JOURNAL)) {
CERROR("%s: device %s is mounted w/o journal\n", name, dev);
- mntput(o->od_mnt);
- o->od_mnt = NULL;
- GOTO(out, rc = -EINVAL);
+ GOTO(out_mnt, rc = -EINVAL);
+ }
+
+ inode = osd_sb(o)->s_root->d_inode;
+ ldiskfs_set_inode_state(inode, LDISKFS_STATE_LUSTRE_NO_OI);
+ lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
+ rc = osd_ea_fid_set(info, inode, fid, LMAC_NOT_IN_OI, 0);
+ if (rc != 0) {
+ CERROR("%s: failed to set lma on %s root inode\n", name, dev);
+ GOTO(out_mnt, rc);
}
- if (lmd_flags & LMD_FLG_IAM) {
- o->od_iop_mode = 0;
- LCONSOLE_WARN("%s: OSD: IAM mode enabled\n", name);
- } else
- o->od_iop_mode = 1;
if (lmd_flags & LMD_FLG_NOSCRUB)
- o->od_scrub.os_no_scrub = 1;
+ o->od_noscrub = 1;
+
+ GOTO(out, rc = 0);
+
+out_mnt:
+ mntput(o->od_mnt);
+ o->od_mnt = NULL;
out:
if (__page)
OBD_PAGE_FREE(__page);
- if (rc)
- fsfilt_put_ops(o->od_fsops);
- RETURN(rc);
+ return rc;
}
static struct lu_device *osd_device_fini(const struct lu_env *env,
- struct lu_device *d)
+ struct lu_device *d)
{
- int rc;
- ENTRY;
-
- rc = osd_shutdown(env, osd_dev(d));
-
- osd_compat_fini(osd_dev(d));
-
- shrink_dcache_sb(osd_sb(osd_dev(d)));
- osd_sync(env, lu2dt_dev(d));
-
- rc = osd_procfs_fini(osd_dev(d));
- if (rc) {
- CERROR("proc fini error %d \n", rc);
- RETURN (ERR_PTR(rc));
- }
+ struct osd_device *o = osd_dev(d);
+ ENTRY;
- if (osd_dev(d)->od_mnt) {
- mntput(osd_dev(d)->od_mnt);
- osd_dev(d)->od_mnt = NULL;
- }
+ osd_shutdown(env, o);
+ osd_procfs_fini(o);
+ osd_scrub_cleanup(env, o);
+ osd_obj_map_fini(o);
+ osd_umount(env, o);
- RETURN(NULL);
+ RETURN(NULL);
}
static int osd_device_init0(const struct lu_env *env,
struct lu_device *l = osd2lu_dev(o);
struct osd_thread_info *info;
int rc;
+ int cplen = 0;
/* if the module was re-loaded, env can loose its keys */
rc = lu_env_refill((struct lu_env *) env);
l->ld_ops = &osd_lu_ops;
o->od_dt_dev.dd_ops = &osd_dt_ops;
- cfs_spin_lock_init(&o->od_osfs_lock);
- cfs_mutex_init(&o->od_otable_mutex);
+ spin_lock_init(&o->od_osfs_lock);
+ mutex_init(&o->od_otable_mutex);
o->od_osfs_age = cfs_time_shift_64(-1000);
o->od_capa_hash = init_capa_hash();
o->od_read_cache = 1;
o->od_writethrough_cache = 1;
+ o->od_readcache_max_filesize = OSD_MAX_CACHE_SIZE;
rc = osd_mount(env, o, cfg);
if (rc)
GOTO(out_capa, rc);
- /* setup scrub, including OI files initialization */
- rc = osd_scrub_setup(env, o);
- if (rc < 0)
+ cplen = strlcpy(o->od_svname, lustre_cfg_string(cfg, 4),
+ sizeof(o->od_svname));
+ if (cplen >= sizeof(o->od_svname)) {
+ rc = -E2BIG;
GOTO(out_mnt, rc);
+ }
- strncpy(o->od_svname, lustre_cfg_string(cfg, 4),
- sizeof(o->od_svname) - 1);
+ if (server_name_is_ost(o->od_svname))
+ o->od_is_ost = 1;
- if (strstr(o->od_svname, "-OST")) {
- rc = osd_compat_init(o);
- if (rc != 0)
- GOTO(out_mnt, rc);
- }
+ rc = osd_obj_map_init(env, o);
+ if (rc != 0)
+ GOTO(out_mnt, rc);
rc = lu_site_init(&o->od_site, l);
- if (rc)
+ if (rc != 0)
GOTO(out_compat, rc);
+ o->od_site.ls_bottom_dev = l;
rc = lu_site_init_finish(&o->od_site);
- if (rc)
- GOTO(out_compat, rc);
+ if (rc != 0)
+ GOTO(out_site, rc);
+
+ /* self-repair LMA by default */
+ o->od_lma_self_repair = 1;
+
+ CFS_INIT_LIST_HEAD(&o->od_ios_list);
+ /* setup scrub, including OI files initialization */
+ rc = osd_scrub_setup(env, o);
+ if (rc < 0)
+ GOTO(out_site, rc);
rc = osd_procfs_init(o, o->od_svname);
if (rc != 0) {
CERROR("%s: can't initialize procfs: rc = %d\n",
o->od_svname, rc);
- GOTO(out_compat, rc);
+ GOTO(out_scrub, rc);
}
LASSERT(l->ld_site->ls_linkage.next && l->ld_site->ls_linkage.prev);
+ /* initialize quota slave instance */
+ o->od_quota_slave = qsd_init(env, o->od_svname, &o->od_dt_dev,
+ o->od_proc_entry);
+ if (IS_ERR(o->od_quota_slave)) {
+ rc = PTR_ERR(o->od_quota_slave);
+ o->od_quota_slave = NULL;
+ GOTO(out_procfs, rc);
+ }
+
RETURN(0);
+
+out_procfs:
+ osd_procfs_fini(o);
+out_scrub:
+ osd_scrub_cleanup(env, o);
+out_site:
+ lu_site_fini(&o->od_site);
out_compat:
- osd_compat_fini(o);
+ osd_obj_map_fini(o);
out_mnt:
- osd_oi_fini(info, o);
- osd_shutdown(env, o);
- mntput(o->od_mnt);
- o->od_mnt = NULL;
+ osd_umount(env, o);
out_capa:
cleanup_capa_hash(o->od_capa_hash);
out:
- RETURN(rc);
+ return rc;
}
static struct lu_device *osd_device_alloc(const struct lu_env *env,
rc = dt_device_init(&o->od_dt_dev, t);
if (rc == 0) {
+ /* Because the ctx might be revived in dt_device_init,
+ * refill the env here */
+ lu_env_refill((struct lu_env *)env);
rc = osd_device_init0(env, o, cfg);
if (rc)
dt_device_fini(&o->od_dt_dev);
static int osd_recovery_complete(const struct lu_env *env,
struct lu_device *d)
{
- RETURN(0);
+ struct osd_device *osd = osd_dev(d);
+ int rc = 0;
+ ENTRY;
+
+ if (osd->od_quota_slave == NULL)
+ RETURN(0);
+
+ /* start qsd instance on recovery completion, this notifies the quota
+ * slave code that we are about to process new requests now */
+ rc = qsd_start(env, osd->od_quota_slave);
+ RETURN(rc);
}
/*
*exp = class_conn2export(&conn);
- cfs_spin_lock(&osd->od_osfs_lock);
+ spin_lock(&osd->od_osfs_lock);
osd->od_connects++;
- cfs_spin_unlock(&osd->od_osfs_lock);
+ spin_unlock(&osd->od_osfs_lock);
RETURN(0);
}
ENTRY;
/* Only disconnect the underlying layers on the final disconnect. */
- cfs_spin_lock(&osd->od_osfs_lock);
+ spin_lock(&osd->od_osfs_lock);
osd->od_connects--;
if (osd->od_connects == 0)
release = 1;
- cfs_spin_unlock(&osd->od_osfs_lock);
+ spin_unlock(&osd->od_osfs_lock);
rc = class_disconnect(exp); /* bz 9811 */
int result = 0;
ENTRY;
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 3, 55, 0)
- /* Unfortunately, the current MDD implementation relies on some specific
- * code to be executed in the OSD layer. Since OFD now also uses the OSD
- * module, we need a way to skip the metadata-specific code when running
- * with OFD.
- * The hack here is to check the type of the parent device which is
- * either MD (i.e. MDD device) with the current MDT stack or DT (i.e.
- * OFD device) on an OST. As a reminder, obdfilter does not use the OSD
- * layer and still relies on lvfs. This hack won't work any more when
- * LOD is landed since LOD is of DT type.
- * This code should be removed once the orion MDT changes (LOD/OSP, ...)
- * have been landed */
- osd->od_is_md = lu_device_is_md(pdev);
-#else
-#warning "all is_md checks must be removed from osd-ldiskfs"
-#endif
-
- if (osd->od_is_md) {
- /* 1. setup local objects */
- result = llo_local_objects_setup(env, lu2md_dev(pdev),
- lu2dt_dev(dev));
- if (result)
- RETURN(result);
- }
-
- /* 2. setup quota slave instance */
- osd->od_quota_slave = qsd_init(env, osd->od_svname, &osd->od_dt_dev,
- osd->od_proc_entry);
- if (IS_ERR(osd->od_quota_slave)) {
- result = PTR_ERR(osd->od_quota_slave);
- osd->od_quota_slave = NULL;
- }
+ if (osd->od_quota_slave != NULL)
+ /* set up quota slave objects */
+ result = qsd_prepare(env, osd->od_quota_slave);
RETURN(result);
}
static int __init osd_mod_init(void)
{
struct lprocfs_static_vars lvars;
+ int rc;
+
+ osd_oi_mod_init();
+ lprocfs_osd_init_vars(&lvars);
- osd_oi_mod_init();
- lprocfs_osd_init_vars(&lvars);
- return class_register_type(&osd_obd_device_ops, NULL, lvars.module_vars,
- LUSTRE_OSD_LDISKFS_NAME, &osd_device_type);
+ rc = lu_kmem_init(ldiskfs_caches);
+ if (rc)
+ return rc;
+
+ rc = class_register_type(&osd_obd_device_ops, NULL, NULL,
+#ifndef HAVE_ONLY_PROCFS_SEQ
+ lvars.module_vars,
+#endif
+ LUSTRE_OSD_LDISKFS_NAME, &osd_device_type);
+ if (rc)
+ lu_kmem_fini(ldiskfs_caches);
+ return rc;
}
static void __exit osd_mod_exit(void)
{
class_unregister_type(LUSTRE_OSD_LDISKFS_NAME);
+ lu_kmem_fini(ldiskfs_caches);
}
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");