* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2015, Intel Corporation.
+ * Copyright (c) 2011, 2016, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include <lustre_net.h>
#include <lustre_fid.h>
/* process_config */
-#include <lustre_param.h>
+#include <uapi/linux/lustre_param.h>
#include "osd_internal.h"
#include "osd_dynlocks.h"
#include <lustre_linkea.h>
+#define PFID_STRIPE_IDX_BITS 16
+#define PFID_STRIPE_COUNT_MASK ((1 << PFID_STRIPE_IDX_BITS) - 1)
+
int ldiskfs_pdo = 1;
module_param(ldiskfs_pdo, int, 0644);
MODULE_PARM_DESC(ldiskfs_pdo, "ldiskfs with parallel directory operations");
static const char dot[] = ".";
static const char dotdot[] = "..";
-static const char remote_obj_dir[] = "REM_OBJ_DIR";
static const struct lu_object_operations osd_lu_obj_ops;
static const struct dt_object_operations osd_obj_ops;
-static const struct dt_object_operations osd_obj_ea_ops;
static const struct dt_object_operations osd_obj_otable_it_ops;
static const struct dt_index_operations osd_index_iam_ops;
static const struct dt_index_operations osd_index_ea_ops;
/*
* Concurrency: doesn't matter
*/
-
-/*
- * Concurrency: doesn't matter
- */
-static int osd_write_locked(const struct lu_env *env, struct osd_object *o)
+static int osd_is_write_locked(const struct lu_env *env, struct osd_object *o)
{
struct osd_thread_info *oti = osd_oti_get(env);
return oti->oti_w_locks > 0 && o->oo_owner == env;
l = &mo->oo_dt.do_lu;
dt_object_init(&mo->oo_dt, NULL, d);
- mo->oo_dt.do_ops = &osd_obj_ea_ops;
+ mo->oo_dt.do_ops = &osd_obj_ops;
l->lo_ops = &osd_lu_obj_ops;
init_rwsem(&mo->oo_sem);
init_rwsem(&mo->oo_ext_idx_sem);
}
int osd_get_lma(struct osd_thread_info *info, struct inode *inode,
- struct dentry *dentry, struct lustre_mdt_attrs *lma)
+ struct dentry *dentry, struct lustre_ost_attrs *loa)
{
int rc;
- CLASSERT(LMA_OLD_SIZE >= sizeof(*lma));
rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
- info->oti_mdt_attrs_old, LMA_OLD_SIZE);
+ (void *)loa, sizeof(*loa));
if (rc > 0) {
- if ((void *)lma != (void *)info->oti_mdt_attrs_old)
- memcpy(lma, info->oti_mdt_attrs_old, sizeof(*lma));
+ struct lustre_mdt_attrs *lma = &loa->loa_lma;
+
+ if (rc < sizeof(*lma))
+ return -EINVAL;
+
rc = 0;
- lustre_lma_swab(lma);
+ lustre_loa_swab(loa, true);
/* Check LMA compatibility */
if (lma->lma_incompat & ~LMA_INCOMPAT_SUPP) {
- CWARN("%.16s: unsupported incompat LMA feature(s) %#x "
+ CWARN("%s: unsupported incompat LMA feature(s) %#x "
"for fid = "DFID", ino = %lu\n",
- LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
+ osd_ino2name(inode),
lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
PFID(&lma->lma_self_fid), inode->i_ino);
rc = -EOPNOTSUPP;
struct inode *osd_iget(struct osd_thread_info *info, struct osd_device *dev,
struct osd_inode_id *id)
{
+ int rc;
struct inode *inode = NULL;
/* if we look for an inode withing a running
iput(inode);
inode = ERR_PTR(-ESTALE);
} else if (is_bad_inode(inode)) {
- CWARN("%.16s: bad inode: ino = %u\n",
- LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name, id->oii_ino);
+ CWARN("%s: bad inode: ino = %u\n",
+ osd_dev2name(dev), id->oii_ino);
iput(inode);
inode = ERR_PTR(-ENOENT);
+ } else if ((rc = osd_attach_jinode(inode))) {
+ iput(inode);
+ inode = ERR_PTR(rc);
} else {
ldiskfs_clear_inode_state(inode, LDISKFS_STATE_LUSTRE_DESTROY);
if (id->oii_gen == OSD_OII_NOGEN)
rc = __ldiskfs_add_entry(handle, child, inode, hlock);
if (rc == -ENOBUFS || rc == -ENOSPC) {
- struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
+ struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
struct inode *parent = child->d_parent->d_inode;
struct lu_fid *fid = NULL;
- rc2 = osd_get_lma(info, parent, child->d_parent, lma);
- if (rc2 == 0) {
- fid = &lma->lma_self_fid;
+ rc2 = osd_get_lma(info, parent, child->d_parent, loa);
+ if (!rc2) {
+ fid = &loa->loa_lma.lma_self_fid;
} else if (rc2 == -ENODATA) {
if (unlikely(parent == inode->i_sb->s_root->d_inode)) {
fid = &info->oti_fid3;
osd_iget_fid(struct osd_thread_info *info, struct osd_device *dev,
struct osd_inode_id *id, struct lu_fid *fid)
{
- struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
- struct inode *inode;
- int rc;
+ struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
+ struct inode *inode;
+ int rc;
inode = osd_iget(info, dev, id);
if (IS_ERR(inode))
return inode;
- rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
- if (rc == 0) {
- *fid = lma->lma_self_fid;
+ rc = osd_get_lma(info, inode, &info->oti_obj_dentry, loa);
+ if (!rc) {
+ *fid = loa->loa_lma.lma_self_fid;
} else if (rc == -ENODATA) {
if (unlikely(inode == osd_sb(dev)->s_root->d_inode))
lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
struct osd_device *dev,
const struct lu_fid *fid,
struct osd_inode_id *id,
- bool cached)
+ bool trusted)
{
- struct inode *inode;
- int rc = 0;
+ struct inode *inode;
+ int rc = 0;
ENTRY;
/* The cached OI mapping is trustable. If we cannot locate the inode
* via the cached OI mapping, then return the failure to the caller
* directly without further OI checking. */
+again:
inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
if (IS_ERR(inode)) {
rc = PTR_ERR(inode);
- if (cached || (rc != -ENOENT && rc != -ESTALE)) {
- CDEBUG(D_INODE, "no inode: ino = %u, rc = %d\n",
- id->oii_ino, rc);
+ if (!trusted && (rc == -ENOENT || rc == -ESTALE))
+ goto check_oi;
- GOTO(put, rc);
- }
-
- goto check_oi;
+ CDEBUG(D_INODE, "no inode for FID: "DFID", ino = %u, rc = %d\n",
+ PFID(fid), id->oii_ino, rc);
+ GOTO(put, rc);
}
if (is_bad_inode(inode)) {
rc = -ENOENT;
- if (cached) {
- CDEBUG(D_INODE, "bad inode: ino = %u\n", id->oii_ino);
+ if (!trusted)
+ goto check_oi;
- GOTO(put, rc);
- }
-
- goto check_oi;
+ CDEBUG(D_INODE, "bad inode for FID: "DFID", ino = %u\n",
+ PFID(fid), id->oii_ino);
+ GOTO(put, rc);
}
if (id->oii_gen != OSD_OII_NOGEN &&
inode->i_generation != id->oii_gen) {
rc = -ESTALE;
- if (cached) {
- CDEBUG(D_INODE, "unmatched inode: ino = %u, "
- "oii_gen = %u, i_generation = %u\n",
- id->oii_ino, id->oii_gen, inode->i_generation);
+ if (!trusted)
+ goto check_oi;
- GOTO(put, rc);
- }
-
- goto check_oi;
+ CDEBUG(D_INODE, "unmatched inode for FID: "DFID", ino = %u, "
+ "oii_gen = %u, i_generation = %u\n", PFID(fid),
+ id->oii_ino, id->oii_gen, inode->i_generation);
+ GOTO(put, rc);
}
if (inode->i_nlink == 0) {
rc = -ENOENT;
- if (cached) {
- CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
-
- GOTO(put, rc);
- }
+ if (!trusted)
+ goto check_oi;
- goto check_oi;
+ CDEBUG(D_INODE, "stale inode for FID: "DFID", ino = %u\n",
+ PFID(fid), id->oii_ino);
+ GOTO(put, rc);
}
ldiskfs_clear_inode_state(inode, LDISKFS_STATE_LUSTRE_DESTROY);
check_oi:
if (rc != 0) {
+ __u32 saved_ino = id->oii_ino;
+ __u32 saved_gen = id->oii_gen;
+
+ LASSERT(!trusted);
LASSERTF(rc == -ESTALE || rc == -ENOENT, "rc = %d\n", rc);
rc = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
* normal race case. */
GOTO(put, rc);
- if ((!IS_ERR(inode) && inode->i_generation != 0 &&
- inode->i_generation == id->oii_gen) ||
- (IS_ERR(inode) && !(dev->od_scrub.os_file.sf_flags &
- SF_INCONSISTENT)))
+	/* It was the OI scrub that updated the OI mapping by race.
+	 * The new OI mapping must be valid. */
+ if (saved_ino != id->oii_ino ||
+ (saved_gen != id->oii_gen && saved_gen != OSD_OII_NOGEN)) {
+ if (!IS_ERR(inode))
+ iput(inode);
+
+ trusted = true;
+ goto again;
+ }
+
+ if (IS_ERR(inode)) {
+ if (dev->od_scrub.os_file.sf_flags & SF_INCONSISTENT)
+				/* It can still be case 2, but we cannot
+				 * distinguish it from case 1. So return
+				 * -EREMCHG to block the current operation
+				 * until the OI scrub has rebuilt the OI
+				 * mappings. */
+ rc = -EREMCHG;
+ else
+ rc = -ENOENT;
+
+ GOTO(put, rc);
+ }
+
+ if (inode->i_generation == id->oii_gen)
rc = -ENOENT;
else
rc = -EREMCHG;
if (rc == sizeof(*ff)) {
rc = 0;
ostid_set_seq(ostid, le64_to_cpu(ff->ff_seq));
- ostid_set_id(ostid, le64_to_cpu(ff->ff_objid));
- /* XXX: use 0 as the index for compatibility, the caller will
- * handle index related issues when necessarry. */
- ostid_to_fid(fid, ostid, 0);
+ rc = ostid_set_id(ostid, le64_to_cpu(ff->ff_objid));
+ /*
+ * XXX: use 0 as the index for compatibility, the caller will
+ * handle index related issues when necessary.
+ */
+ if (!rc)
+ ostid_to_fid(fid, ostid, 0);
} else if (rc == sizeof(struct filter_fid)) {
rc = 1;
} else if (rc >= 0) {
{
struct osd_thread_info *info = osd_oti_get(env);
struct osd_device *osd = osd_obj2dev(obj);
- struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
+ struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
+ struct lustre_mdt_attrs *lma = &loa->loa_lma;
struct inode *inode = obj->oo_inode;
struct dentry *dentry = &info->oti_obj_dentry;
struct lu_fid *fid = NULL;
int rc;
ENTRY;
- CLASSERT(LMA_OLD_SIZE >= sizeof(*lma));
rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
- info->oti_mdt_attrs_old, LMA_OLD_SIZE);
+ (void *)loa, sizeof(*loa));
if (rc == -ENODATA && !fid_is_igif(rfid) && osd->od_check_ff) {
fid = &lma->lma_self_fid;
rc = osd_get_idif(info, inode, dentry, fid);
rc = 0;
lustre_lma_swab(lma);
if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
- CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
+ (CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT) &&
+ S_ISREG(inode->i_mode)))) {
CWARN("%s: unsupported incompat LMA feature(s) %#x for "
"fid = "DFID", ino = %lu\n", osd_name(osd),
lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
out:
if (rc < 0)
- CDEBUG(D_LFSCK, "%.16s: fail to check LMV EA, inode = %lu/%u,"
- DFID": rc = %d\n",
- LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
+ CDEBUG(D_LFSCK, "%s: fail to check LMV EA, inode = %lu/%u,"
+ DFID": rc = %d\n", osd_ino2name(inode),
inode->i_ino, inode->i_generation,
PFID(&oic->oic_fid), rc);
else
const struct lu_object_conf *conf)
{
struct osd_thread_info *info;
- struct lu_device *ldev = obj->oo_dt.do_lu.lo_dev;
- struct osd_device *dev;
+ struct lu_device *ldev = obj->oo_dt.do_lu.lo_dev;
+ struct osd_device *dev;
struct osd_idmap_cache *oic;
- struct osd_inode_id *id;
- struct osd_inode_id *tid;
- struct inode *inode = NULL;
- struct osd_scrub *scrub;
- struct scrub_file *sf;
- __u32 flags = SS_CLEAR_DRYRUN | SS_CLEAR_FAILOUT |
- SS_AUTO_FULL;
- __u32 saved_ino;
- __u32 saved_gen;
- int result = 0;
- int rc1 = 0;
- bool cached = true;
- bool remote = false;
+ struct osd_inode_id *id;
+ struct inode *inode = NULL;
+ struct osd_scrub *scrub;
+ struct scrub_file *sf;
+ __u32 flags = SS_CLEAR_DRYRUN | SS_CLEAR_FAILOUT | SS_AUTO_FULL;
+ __u32 saved_ino;
+ __u32 saved_gen;
+ int result = 0;
+ int rc1 = 0;
+ bool remote = false;
+ bool trusted = true;
+ bool updated = false;
ENTRY;
LINVRNT(osd_invariant(obj));
* shouldn't never be re-used, if it's really a duplicate FID from
* unexpected reason, we should be able to detect it later by calling
* do_create->osd_oi_insert(). */
- if (conf != NULL && conf->loc_flags & LOC_F_NEW)
+ if (conf && conf->loc_flags & LOC_F_NEW)
GOTO(out, result = 0);
/* Search order: 1. per-thread cache. */
- if (lu_fid_eq(fid, &oic->oic_fid) &&
- likely(oic->oic_dev == dev)) {
+ if (lu_fid_eq(fid, &oic->oic_fid) && likely(oic->oic_dev == dev)) {
id = &oic->oic_lid;
goto iget;
}
if (!list_empty(&scrub->os_inconsistent_items)) {
/* Search order: 2. OI scrub pending list. */
result = osd_oii_lookup(dev, fid, id);
- if (result == 0)
+ if (!result)
goto iget;
}
- cached = false;
+	/* The OI mapping in the OI file can be updated by the OI scrub
+	 * when we locate the inode via FID, so it may not be trustworthy. */
+ trusted = false;
+
/* Search order: 3. OI files. */
result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
if (result == -ENOENT) {
goto trigger;
}
- if (result != 0)
+ if (result)
GOTO(out, result);
iget:
- inode = osd_iget_check(info, dev, fid, id, cached);
- if (IS_ERR(inode)) {
- result = PTR_ERR(inode);
- if (result == -ENOENT || result == -ESTALE)
- GOTO(out, result = 0);
+ obj->oo_inode = NULL;
+ /* for later passes through checks, not true on first pass */
+ if (!IS_ERR_OR_NULL(inode))
+ iput(inode);
- if (result == -EREMCHG) {
+ inode = osd_iget_check(info, dev, fid, id, trusted);
+ if (!IS_ERR(inode)) {
+ obj->oo_inode = inode;
+ result = 0;
+ if (remote)
+ goto trigger;
-trigger:
- /* We still have chance to get the valid inode: for the
- * object which is referenced by remote name entry, the
- * object on the local MDT will be linked under the dir
- * of "/REMOTE_PARENT_DIR" with its FID string as name.
- *
- * We do not know whether the object for the given FID
- * is referenced by some remote name entry or not, and
- * especially for DNE II, a multiple-linked object may
- * have many name entries reside on many MDTs.
- *
- * To simplify the operation, OSD will not distinguish
- * more, just lookup "/REMOTE_PARENT_DIR". Usually, it
- * only happened for the RPC from other MDT during the
- * OI scrub, or for the client side RPC with FID only,
- * such as FID to path, or from old connected client. */
- if (!remote &&
- !fid_is_on_ost(info, dev, fid, OI_CHECK_FLD)) {
- rc1 = osd_lookup_in_remote_parent(info, dev,
- fid, id);
- if (rc1 == 0) {
- remote = true;
- cached = true;
- flags |= SS_AUTO_PARTIAL;
- flags &= ~SS_AUTO_FULL;
- goto iget;
- }
- }
+ goto check_lma;
+ }
- if (thread_is_running(&scrub->os_thread)) {
- if (scrub->os_partial_scan &&
- !scrub->os_in_join) {
- goto join;
- } else {
- if (inode != NULL && !IS_ERR(inode)) {
- LASSERT(remote);
-
- osd_add_oi_cache(info, dev, id,
- fid);
- osd_oii_insert(dev, oic, true);
- } else {
- result = -EINPROGRESS;
- }
- }
- } else if (!dev->od_noscrub) {
+ result = PTR_ERR(inode);
+ if (result == -ENOENT || result == -ESTALE)
+ GOTO(out, result = 0);
-join:
- rc1 = osd_scrub_start(dev, flags);
- LCONSOLE_WARN("%.16s: trigger OI scrub by RPC "
- "for the "DFID" with flags 0x%x,"
- " rc = %d\n", osd_name(dev),
- PFID(fid), flags, rc1);
- if (rc1 == 0 || rc1 == -EALREADY) {
- if (inode != NULL && !IS_ERR(inode)) {
- LASSERT(remote);
-
- osd_add_oi_cache(info, dev, id,
- fid);
- osd_oii_insert(dev, oic, true);
- } else {
- result = -EINPROGRESS;
- }
- } else {
- result = -EREMCHG;
- }
- } else {
- result = -EREMCHG;
- }
+ if (result != -EREMCHG)
+ GOTO(out, result);
+
+trigger:
+ /* We still have chance to get the valid inode: for the
+ * object which is referenced by remote name entry, the
+ * object on the local MDT will be linked under the dir
+ * of "/REMOTE_PARENT_DIR" with its FID string as name.
+ *
+ * We do not know whether the object for the given FID
+ * is referenced by some remote name entry or not, and
+ * especially for DNE II, a multiple-linked object may
+ * have many name entries reside on many MDTs.
+ *
+ * To simplify the operation, OSD will not distinguish
+ * more, just lookup "/REMOTE_PARENT_DIR". Usually, it
+ * only happened for the RPC from other MDT during the
+ * OI scrub, or for the client side RPC with FID only,
+ * such as FID to path, or from old connected client. */
+ if (!remote && !fid_is_on_ost(info, dev, fid, OI_CHECK_FLD)) {
+ rc1 = osd_lookup_in_remote_parent(info, dev, fid, id);
+ if (!rc1) {
+ remote = true;
+ trusted = true;
+ flags |= SS_AUTO_PARTIAL;
+ flags &= ~SS_AUTO_FULL;
+ goto iget;
}
+ }
- if (inode == NULL || IS_ERR(inode))
- GOTO(out, result);
- } else if (remote) {
- goto trigger;
+ if (thread_is_running(&scrub->os_thread)) {
+ if (scrub->os_partial_scan && !scrub->os_in_join)
+ goto join;
+
+ if (IS_ERR_OR_NULL(inode) || result)
+ GOTO(out, result = -EINPROGRESS);
+
+ LASSERT(remote);
+ LASSERT(obj->oo_inode == inode);
+
+ osd_add_oi_cache(info, dev, id, fid);
+ osd_oii_insert(dev, oic, true);
+ goto found;
}
- obj->oo_inode = inode;
- LASSERT(obj->oo_inode->i_sb == osd_sb(dev));
+ if (dev->od_noscrub) {
+ if (!remote)
+ GOTO(out, result = -EREMCHG);
+
+ LASSERT(!result);
+ LASSERT(obj->oo_inode == inode);
+
+ osd_add_oi_cache(info, dev, id, fid);
+ goto found;
+ }
+
+join:
+ rc1 = osd_scrub_start(dev, flags);
+ LCONSOLE_WARN("%s: trigger OI scrub by RPC for the " DFID" with flags "
+ "0x%x, rc = %d\n", osd_name(dev), PFID(fid), flags, rc1);
+ if (rc1 && rc1 != -EALREADY)
+ GOTO(out, result = -EREMCHG);
+
+ if (IS_ERR_OR_NULL(inode) || result)
+ GOTO(out, result = -EINPROGRESS);
+
+ LASSERT(remote);
+ LASSERT(obj->oo_inode == inode);
+ osd_add_oi_cache(info, dev, id, fid);
+ osd_oii_insert(dev, oic, true);
+ goto found;
+
+check_lma:
result = osd_check_lma(env, obj);
- if (result == 0)
+ if (!result)
goto found;
- tid = &info->oti_id3;
- LASSERT(tid != id);
+ LASSERTF(id->oii_ino == inode->i_ino &&
+ id->oii_gen == inode->i_generation,
+ "locate wrong inode for FID: "DFID", %u/%u => %ld/%u\n",
+ PFID(fid), id->oii_ino, id->oii_gen,
+ inode->i_ino, inode->i_generation);
- if (result == -ENODATA) {
- if (!cached)
- /* The current OI mapping is from the OI file,
- * since the inode has been found via
- * osd_iget_check(), no need recheck OI. */
- goto found;
+ saved_ino = inode->i_ino;
+ saved_gen = inode->i_generation;
- result = osd_oi_lookup(info, dev, fid, tid, OI_CHECK_FLD);
- if (result == 0) {
- LASSERTF(tid->oii_ino == id->oii_ino &&
- tid->oii_gen == id->oii_gen,
- "OI mapping changed(1): %u/%u => %u/%u",
- tid->oii_ino, tid->oii_gen,
- id->oii_ino, id->oii_gen);
-
- LASSERTF(tid->oii_ino == inode->i_ino &&
- tid->oii_gen == inode->i_generation,
- "locate wrong inode(1): %u/%u => %ld/%u",
- tid->oii_ino, tid->oii_gen,
- inode->i_ino, inode->i_generation);
-
- /* "result == 0" means the cached OI mapping is still in
- * the OI file, so the target the inode is valid. */
+ if (unlikely(result == -ENODATA)) {
+ /* If the OI scrub updated the OI mapping by race, it
+ * must be valid. Trust the inode that has no LMA EA. */
+ if (updated)
goto found;
+
+ result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
+ if (!result) {
+ /* The OI mapping is still there, the inode is still
+		 * valid. It is just because the inode has no LMA EA. */
+ if (saved_ino == id->oii_ino &&
+ saved_gen == id->oii_gen)
+ goto found;
+
+		/* It was the OI scrub that updated the OI mapping by race.
+		 * The new OI mapping must be valid. */
+ trusted = true;
+ updated = true;
+ goto iget;
}
- /* "result == -ENOENT" means that the OI mappinghas been removed
- * by race, the target inode belongs to other object.
+	/* "result == -ENOENT" means that the OI mapping has been
+	 * removed by race, so the inode belongs to another object.
*
* Others error can be returned directly. */
- if (result == -ENOENT)
+ if (result == -ENOENT) {
+ LASSERT(trusted);
+
+ obj->oo_inode = NULL;
result = 0;
+ }
}
- saved_ino = inode->i_ino;
- saved_gen = inode->i_generation;
- iput(inode);
- inode = NULL;
- obj->oo_inode = NULL;
-
if (result != -EREMCHG)
GOTO(out, result);
- if (!cached)
- /* The current OI mapping is from the OI file,
- * since the inode has been found via
- * osd_iget_check(), no need recheck OI. */
- goto trigger;
+ LASSERT(!updated);
- result = osd_oi_lookup(info, dev, fid, tid, OI_CHECK_FLD);
- /* "result == -ENOENT" means the cached OI mapping has been removed from
- * the OI file by race, above target inode belongs to other object.
- *
- * Others error can be returned directly. */
- if (result != 0)
- GOTO(out, result = (result == -ENOENT ? 0 : result));
+ result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
+ /* "result == -ENOENT" means the cached OI mapping has been removed
+ * from the OI file by race, above inode belongs to other object. */
+ if (result == -ENOENT) {
+ LASSERT(trusted);
+
+ obj->oo_inode = NULL;
+ GOTO(out, result = 0);
+ }
- LASSERTF(tid->oii_ino == id->oii_ino && tid->oii_gen == id->oii_gen,
- "OI mapping changed(2): %u/%u => %u/%u",
- tid->oii_ino, tid->oii_gen, id->oii_ino, id->oii_gen);
+ if (result)
+ GOTO(out, result);
- LASSERTF(tid->oii_ino == saved_ino && tid->oii_gen == saved_gen,
- "locate wrong inode(2): %u/%u => %u/%u",
- tid->oii_ino, tid->oii_gen, saved_ino, saved_gen);
+ if (saved_ino == id->oii_ino && saved_gen == id->oii_gen) {
+ result = -EREMCHG;
+ goto trigger;
+ }
- goto trigger;
+	/* It was the OI scrub that updated the OI mapping by race.
+	 * The new OI mapping must be valid. */
+ trusted = true;
+ updated = true;
+ goto iget;
found:
obj->oo_compat_dot_created = 1;
(flags & SS_AUTO_PARTIAL || sf->sf_status == SS_SCANNING))
osd_check_lmv(info, dev, inode, oic);
+ result = osd_attach_jinode(inode);
+ if (result)
+ GOTO(out, result);
+
if (!ldiskfs_pdo)
GOTO(out, result = 0);
- LASSERT(obj->oo_hl_head == NULL);
+ LASSERT(!obj->oo_hl_head);
obj->oo_hl_head = ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
- if (obj->oo_hl_head == NULL) {
- obj->oo_inode = NULL;
- iput(inode);
- GOTO(out, result = -ENOMEM);
- }
- GOTO(out, result = 0);
+
+ GOTO(out, result = (!obj->oo_hl_head ? -ENOMEM : 0));
out:
- if (result != 0 && cached)
- fid_zero(&oic->oic_fid);
+ if (result || !obj->oo_inode) {
+ if (!IS_ERR_OR_NULL(inode))
+ iput(inode);
+
+ obj->oo_inode = NULL;
+ if (trusted)
+ fid_zero(&oic->oic_fid);
+ }
LINVRNT(osd_invariant(obj));
return result;
obj->oo_dt.do_body_ops = &osd_body_ops_new;
if (result == 0 && obj->oo_inode != NULL) {
struct osd_thread_info *oti = osd_oti_get(env);
- struct lustre_mdt_attrs *lma = &oti->oti_mdt_attrs;
+ struct lustre_ost_attrs *loa = &oti->oti_ost_attrs;
osd_object_init0(obj);
result = osd_get_lma(oti, obj->oo_inode,
- &oti->oti_obj_dentry, lma);
- if (result == 0) {
+ &oti->oti_obj_dentry, loa);
+ if (!result) {
/* Convert LMAI flags to lustre LMA flags
* and cache it to oo_lma_flags */
obj->oo_lma_flags =
- lma_to_lustre_flags(lma->lma_incompat);
+ lma_to_lustre_flags(loa->loa_lma.lma_incompat);
} else if (result == -ENODATA) {
result = 0;
}
*/
static void osd_th_alloced(struct osd_thandle *oth)
{
- oth->oth_alloced = cfs_time_current();
+ oth->oth_alloced = ktime_get();
}
/**
*/
static void osd_th_started(struct osd_thandle *oth)
{
- oth->oth_started = cfs_time_current();
+ oth->oth_started = ktime_get();
}
/**
- * Helper function to convert time interval to microseconds packed in
- * long int.
+ * Check whether the we deal with this handle for too long.
*/
-static long interval_to_usec(cfs_time_t start, cfs_time_t end)
+static void __osd_th_check_slow(void *oth, struct osd_device *dev,
+ ktime_t alloced, ktime_t started,
+ ktime_t closed)
{
- struct timeval val;
+ ktime_t now = ktime_get();
- cfs_duration_usec(cfs_time_sub(end, start), &val);
- return val.tv_sec * 1000000 + val.tv_usec;
-}
+ LASSERT(dev != NULL);
-/**
- * Check whether the we deal with this handle for too long.
- */
-static void __osd_th_check_slow(void *oth, struct osd_device *dev,
- cfs_time_t alloced, cfs_time_t started,
- cfs_time_t closed)
-{
- cfs_time_t now = cfs_time_current();
-
- LASSERT(dev != NULL);
-
- lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_STARTING,
- interval_to_usec(alloced, started));
- lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_OPEN,
- interval_to_usec(started, closed));
- lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_CLOSING,
- interval_to_usec(closed, now));
-
- if (cfs_time_before(cfs_time_add(alloced, cfs_time_seconds(30)), now)) {
- CWARN("transaction handle %p was open for too long: "
- "now "CFS_TIME_T" ,"
- "alloced "CFS_TIME_T" ,"
- "started "CFS_TIME_T" ,"
- "closed "CFS_TIME_T"\n",
+ lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_STARTING,
+ ktime_us_delta(started, alloced));
+ lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_OPEN,
+ ktime_us_delta(closed, started));
+ lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_CLOSING,
+ ktime_us_delta(now, closed));
+
+ if (ktime_before(ktime_add_ns(alloced, 30 * NSEC_PER_SEC), now)) {
+ CWARN("transaction handle %p was open for too long: now %lld, alloced %lld, started %lld, closed %lld\n",
oth, now, alloced, started, closed);
libcfs_debug_dumpstack(NULL);
}
}
-#define OSD_CHECK_SLOW_TH(oth, dev, expr) \
-{ \
- cfs_time_t __closed = cfs_time_current(); \
- cfs_time_t __alloced = oth->oth_alloced; \
- cfs_time_t __started = oth->oth_started; \
- \
- expr; \
- __osd_th_check_slow(oth, dev, __alloced, __started, __closed); \
+#define OSD_CHECK_SLOW_TH(oth, dev, expr) \
+{ \
+ ktime_t __closed = ktime_get(); \
+ ktime_t __alloced = oth->oth_alloced; \
+ ktime_t __started = oth->oth_started; \
+ \
+ expr; \
+ __osd_th_check_slow(oth, dev, __alloced, __started, __closed); \
}
#else /* OSD_THANDLE_STATS */
struct thandle *th;
ENTRY;
+ if (d->dd_rdonly) {
+ CERROR("%s: someone try to start transaction under "
+ "readonly mode, should be disabled.\n",
+ osd_name(osd_dt_dev(d)));
+ dump_stack();
+ RETURN(ERR_PTR(-EROFS));
+ }
+
/* on pending IO in this thread should left from prev. request */
LASSERT(atomic_read(&iobuf->dr_numreqs) == 0);
* This should be removed when we can calculate the
* credits precisely. */
oh->ot_credits = osd_transaction_size(dev);
+ } else if (ldiskfs_track_declares_assert != 0) {
+ /* reserve few credits to prevent an assertion in JBD
+ * our debugging mechanism will be able to detected
+ * overuse. this can help to debug single-update
+ * transactions */
+ oh->ot_credits += 10;
+ if (unlikely(osd_param_is_not_sane(dev, th)))
+ oh->ot_credits = osd_transaction_size(dev);
}
/*
oh->ot_quota_trans = NULL;
if (oh->ot_handle != NULL) {
+ int rc2;
handle_t *hdl = oh->ot_handle;
/*
hdl->h_sync = th->th_sync;
oh->ot_handle = NULL;
- OSD_CHECK_SLOW_TH(oh, osd, rc = ldiskfs_journal_stop(hdl));
- if (rc != 0)
+ OSD_CHECK_SLOW_TH(oh, osd, rc2 = ldiskfs_journal_stop(hdl));
+ if (rc2 != 0)
CERROR("%s: failed to stop transaction: rc = %d\n",
- osd_name(osd), rc);
+ osd_name(osd), rc2);
+ if (!rc)
+ rc = rc2;
} else {
osd_trans_stop_cb(oh, th->th_result);
OBD_FREE_PTR(oh);
qi->lqi_id.qid_uid = gid;
qsd_op_adjust(env, qsd, &qi->lqi_id, GRPQUOTA);
+
+ qi->lqi_id.qid_uid = i_projid_read(inode);
+ qsd_op_adjust(env, qsd, &qi->lqi_id, PRJQUOTA);
}
}
}
{
int rc;
- CDEBUG(D_CACHE, "syncing OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
+ CDEBUG(D_CACHE, "%s: syncing OSD\n", osd_dt_dev(d)->od_svname);
rc = ldiskfs_force_commit(osd_sb(osd_dt_dev(d)));
- CDEBUG(D_CACHE, "synced OSD %s: rc = %d\n",
- LUSTRE_OSD_LDISKFS_NAME, rc);
+ CDEBUG(D_CACHE, "%s: synced OSD: rc = %d\n", osd_dt_dev(d)->od_svname,
+ rc);
return rc;
}
struct super_block *s = osd_sb(osd_dt_dev(d));
ENTRY;
- CDEBUG(D_HA, "async commit OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
+ CDEBUG(D_HA, "%s: async commit OSD\n", osd_dt_dev(d)->od_svname);
RETURN(s->s_op->sync_fs(s, 0));
}
+/* Our own copy of the set readonly functions if present, or NULL if not. */
+static int (*priv_dev_set_rdonly)(struct block_device *bdev);
+static int (*priv_dev_check_rdonly)(struct block_device *bdev);
+/* static int (*priv_dev_clear_rdonly)(struct block_device *bdev); */
+
/*
* Concurrency: shouldn't matter.
*/
-
static int osd_ro(const struct lu_env *env, struct dt_device *d)
{
struct super_block *sb = osd_sb(osd_dt_dev(d));
struct block_device *dev = sb->s_bdev;
-#ifdef HAVE_DEV_SET_RDONLY
- struct block_device *jdev = LDISKFS_SB(sb)->journal_bdev;
- int rc = 0;
-#else
int rc = -EOPNOTSUPP;
-#endif
ENTRY;
-#ifdef HAVE_DEV_SET_RDONLY
- CERROR("*** setting %s read-only ***\n", osd_dt_dev(d)->od_svname);
+ if (priv_dev_set_rdonly) {
+ struct block_device *jdev = LDISKFS_SB(sb)->journal_bdev;
- if (sb->s_op->freeze_fs) {
- rc = sb->s_op->freeze_fs(sb);
- if (rc)
- goto out;
- }
+ rc = 0;
+ CERROR("*** setting %s read-only ***\n",
+ osd_dt_dev(d)->od_svname);
- if (jdev && (jdev != dev)) {
- CDEBUG(D_IOCTL | D_HA, "set journal dev %lx rdonly\n",
- (long)jdev);
- dev_set_rdonly(jdev);
- }
- CDEBUG(D_IOCTL | D_HA, "set dev %lx rdonly\n", (long)dev);
- dev_set_rdonly(dev);
+ if (sb->s_op->freeze_fs) {
+ rc = sb->s_op->freeze_fs(sb);
+ if (rc)
+ goto out;
+ }
- if (sb->s_op->unfreeze_fs)
- sb->s_op->unfreeze_fs(sb);
+ if (jdev && (jdev != dev)) {
+ CDEBUG(D_IOCTL | D_HA, "set journal dev %lx rdonly\n",
+ (long)jdev);
+ priv_dev_set_rdonly(jdev);
+ }
+ CDEBUG(D_IOCTL | D_HA, "set dev %lx rdonly\n", (long)dev);
+ priv_dev_set_rdonly(dev);
+
+ if (sb->s_op->unfreeze_fs)
+ sb->s_op->unfreeze_fs(sb);
+ }
out:
-#endif
if (rc)
CERROR("%s: %lx CANNOT BE SET READONLY: rc = %d\n",
osd_dt_dev(d)->od_svname, (long)dev, rc);
.dt_commit_async = osd_commit_async,
};
-static void osd_object_read_lock(const struct lu_env *env,
- struct dt_object *dt, unsigned role)
+static void osd_read_lock(const struct lu_env *env, struct dt_object *dt,
+ unsigned role)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_thread_info *oti = osd_oti_get(env);
- LINVRNT(osd_invariant(obj));
+ LINVRNT(osd_invariant(obj));
- LASSERT(obj->oo_owner != env);
+ LASSERT(obj->oo_owner != env);
down_read_nested(&obj->oo_sem, role);
- LASSERT(obj->oo_owner == NULL);
- oti->oti_r_locks++;
+ LASSERT(obj->oo_owner == NULL);
+ oti->oti_r_locks++;
}
-static void osd_object_write_lock(const struct lu_env *env,
- struct dt_object *dt, unsigned role)
+static void osd_write_lock(const struct lu_env *env, struct dt_object *dt,
+ unsigned role)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_thread_info *oti = osd_oti_get(env);
- LINVRNT(osd_invariant(obj));
+ LINVRNT(osd_invariant(obj));
- LASSERT(obj->oo_owner != env);
+ LASSERT(obj->oo_owner != env);
down_write_nested(&obj->oo_sem, role);
- LASSERT(obj->oo_owner == NULL);
- obj->oo_owner = env;
- oti->oti_w_locks++;
+ LASSERT(obj->oo_owner == NULL);
+ obj->oo_owner = env;
+ oti->oti_w_locks++;
}
-static void osd_object_read_unlock(const struct lu_env *env,
- struct dt_object *dt)
+static void osd_read_unlock(const struct lu_env *env, struct dt_object *dt)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_thread_info *oti = osd_oti_get(env);
- LINVRNT(osd_invariant(obj));
+ LINVRNT(osd_invariant(obj));
- LASSERT(oti->oti_r_locks > 0);
- oti->oti_r_locks--;
+ LASSERT(oti->oti_r_locks > 0);
+ oti->oti_r_locks--;
up_read(&obj->oo_sem);
}
-static void osd_object_write_unlock(const struct lu_env *env,
- struct dt_object *dt)
+static void osd_write_unlock(const struct lu_env *env, struct dt_object *dt)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_thread_info *oti = osd_oti_get(env);
- LINVRNT(osd_invariant(obj));
+ LINVRNT(osd_invariant(obj));
- LASSERT(obj->oo_owner == env);
- LASSERT(oti->oti_w_locks > 0);
- oti->oti_w_locks--;
- obj->oo_owner = NULL;
+ LASSERT(obj->oo_owner == env);
+ LASSERT(oti->oti_w_locks > 0);
+ oti->oti_w_locks--;
+ obj->oo_owner = NULL;
up_write(&obj->oo_sem);
}
-static int osd_object_write_locked(const struct lu_env *env,
- struct dt_object *dt)
+static int osd_write_locked(const struct lu_env *env, struct dt_object *dt)
{
- struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_object *obj = osd_dt_obj(dt);
- LINVRNT(osd_invariant(obj));
+ LINVRNT(osd_invariant(obj));
- return obj->oo_owner == env;
+ return obj->oo_owner == env;
}
static struct timespec *osd_inode_time(const struct lu_env *env,
{
attr->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
- LA_FLAGS | LA_NLINK | LA_RDEV | LA_BLKSIZE |
- LA_TYPE;
+ LA_PROJID | LA_FLAGS | LA_NLINK | LA_RDEV |
+ LA_BLKSIZE | LA_TYPE;
attr->la_atime = LTIME_S(inode->i_atime);
attr->la_mtime = LTIME_S(inode->i_mtime);
attr->la_blocks = inode->i_blocks;
attr->la_uid = i_uid_read(inode);
attr->la_gid = i_gid_read(inode);
+ attr->la_projid = i_projid_read(inode);
attr->la_flags = ll_inode_to_ext_flags(inode->i_flags);
attr->la_nlink = inode->i_nlink;
attr->la_rdev = inode->i_rdev;
attr->la_blksize = 1 << inode->i_blkbits;
attr->la_blkbits = inode->i_blkbits;
+ /*
+ * Ext4 did not transfer inherit flags from raw inode
+ * to inode flags, and ext4 internally test raw inode
+ * @i_flags directly. Instead of patching ext4, we do it here.
+ */
+ if (LDISKFS_I(inode)->i_flags & LUSTRE_PROJINHERIT_FL)
+ attr->la_flags |= LUSTRE_PROJINHERIT_FL;
}
static int osd_attr_get(const struct lu_env *env,
return 0;
}
+static int osd_declare_attr_qid(const struct lu_env *env,
+ struct osd_object *obj,
+ struct osd_thandle *oh, long long bspace,
+ qid_t old_id, qid_t new_id, bool enforce,
+ unsigned type)
+{
+ int rc;
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct lquota_id_info *qi = &info->oti_qi;
+
+ qi->lqi_type = type;
+ /* inode accounting */
+ qi->lqi_is_blk = false;
+
+ /* one more inode for the new id ... */
+ qi->lqi_id.qid_uid = new_id;
+ qi->lqi_space = 1;
+ /* Reserve credits for the new id */
+ rc = osd_declare_qid(env, oh, qi, NULL, enforce, NULL);
+ if (rc == -EDQUOT || rc == -EINPROGRESS)
+ rc = 0;
+ if (rc)
+ RETURN(rc);
+
+ /* and one less inode for the current id */
+ qi->lqi_id.qid_uid = old_id;
+ qi->lqi_space = -1;
+ rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
+ if (rc == -EDQUOT || rc == -EINPROGRESS)
+ rc = 0;
+ if (rc)
+ RETURN(rc);
+
+ /* block accounting */
+ qi->lqi_is_blk = true;
+
+ /* more blocks for the new id ... */
+ qi->lqi_id.qid_uid = new_id;
+ qi->lqi_space = bspace;
+ /*
+	 * Credits for the new ID have been reserved; re-use "obj"
+ * to save credit reservation.
+ */
+ rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
+ if (rc == -EDQUOT || rc == -EINPROGRESS)
+ rc = 0;
+ if (rc)
+ RETURN(rc);
+
+ /* and finally less blocks for the current uid */
+ qi->lqi_id.qid_uid = old_id;
+ qi->lqi_space = -bspace;
+ rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
+ if (rc == -EDQUOT || rc == -EINPROGRESS)
+ rc = 0;
+
+ RETURN(rc);
+}
+
static int osd_declare_attr_set(const struct lu_env *env,
struct dt_object *dt,
const struct lu_attr *attr,
{
struct osd_thandle *oh;
struct osd_object *obj;
- struct osd_thread_info *info = osd_oti_get(env);
- struct lquota_id_info *qi = &info->oti_qi;
qid_t uid;
qid_t gid;
long long bspace;
if (attr->la_valid & LA_UID || attr->la_valid & LA_GID) {
/* USERQUOTA */
uid = i_uid_read(obj->oo_inode);
- qi->lqi_type = USRQUOTA;
enforce = (attr->la_valid & LA_UID) && (attr->la_uid != uid);
- /* inode accounting */
- qi->lqi_is_blk = false;
-
- /* one more inode for the new uid ... */
- qi->lqi_id.qid_uid = attr->la_uid;
- qi->lqi_space = 1;
- /* Reserve credits for the new uid */
- rc = osd_declare_qid(env, oh, qi, NULL, enforce, NULL);
- if (rc == -EDQUOT || rc == -EINPROGRESS)
- rc = 0;
- if (rc)
- RETURN(rc);
-
- /* and one less inode for the current uid */
- qi->lqi_id.qid_uid = uid;
- qi->lqi_space = -1;
- rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
- if (rc == -EDQUOT || rc == -EINPROGRESS)
- rc = 0;
+ rc = osd_declare_attr_qid(env, obj, oh, bspace, uid,
+ attr->la_uid, enforce, USRQUOTA);
if (rc)
RETURN(rc);
- /* block accounting */
- qi->lqi_is_blk = true;
-
- /* more blocks for the new uid ... */
- qi->lqi_id.qid_uid = attr->la_uid;
- qi->lqi_space = bspace;
- /*
- * Credits for the new uid has been reserved, re-use "obj"
- * to save credit reservation.
- */
- rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
- if (rc == -EDQUOT || rc == -EINPROGRESS)
- rc = 0;
- if (rc)
- RETURN(rc);
-
- /* and finally less blocks for the current uid */
- qi->lqi_id.qid_uid = uid;
- qi->lqi_space = -bspace;
- rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
- if (rc == -EDQUOT || rc == -EINPROGRESS)
- rc = 0;
- if (rc)
- RETURN(rc);
-
- /* GROUP QUOTA */
gid = i_gid_read(obj->oo_inode);
- qi->lqi_type = GRPQUOTA;
enforce = (attr->la_valid & LA_GID) && (attr->la_gid != gid);
-
- /* inode accounting */
- qi->lqi_is_blk = false;
-
- /* one more inode for the new gid ... */
- qi->lqi_id.qid_gid = attr->la_gid;
- qi->lqi_space = 1;
- rc = osd_declare_qid(env, oh, qi, NULL, enforce, NULL);
- if (rc == -EDQUOT || rc == -EINPROGRESS)
- rc = 0;
+ rc = osd_declare_attr_qid(env, obj, oh, bspace,
+ i_gid_read(obj->oo_inode),
+ attr->la_gid, enforce, GRPQUOTA);
if (rc)
RETURN(rc);
- /* and one less inode for the current gid */
- qi->lqi_id.qid_gid = gid;
- qi->lqi_space = -1;
- rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
- if (rc == -EDQUOT || rc == -EINPROGRESS)
- rc = 0;
- if (rc)
- RETURN(rc);
-
- /* block accounting */
- qi->lqi_is_blk = true;
-
- /* more blocks for the new gid ... */
- qi->lqi_id.qid_gid = attr->la_gid;
- qi->lqi_space = bspace;
- rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
- if (rc == -EDQUOT || rc == -EINPROGRESS)
- rc = 0;
- if (rc)
- RETURN(rc);
-
- /* and finally less blocks for the current gid */
- qi->lqi_id.qid_gid = gid;
- qi->lqi_space = -bspace;
- rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
- if (rc == -EDQUOT || rc == -EINPROGRESS)
- rc = 0;
+ }
+#ifdef HAVE_PROJECT_QUOTA
+ if (attr->la_valid & LA_PROJID) {
+ __u32 projid = i_projid_read(obj->oo_inode);
+ enforce = (attr->la_valid & LA_PROJID) &&
+ (attr->la_projid != projid);
+ rc = osd_declare_attr_qid(env, obj, oh, bspace,
+ (qid_t)projid, (qid_t)attr->la_projid,
+ enforce, PRJQUOTA);
if (rc)
RETURN(rc);
}
-
+#endif
RETURN(rc);
}
i_uid_write(inode, attr->la_uid);
if (bits & LA_GID)
i_gid_write(inode, attr->la_gid);
+ if (bits & LA_PROJID)
+ i_projid_write(inode, attr->la_projid);
if (bits & LA_NLINK)
set_nlink(inode, attr->la_nlink);
if (bits & LA_RDEV)
/* always keep S_NOCMTIME */
inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
S_NOCMTIME;
+ /*
+ * Ext4 did not transfer inherit flags from
+ * @inode->i_flags to raw inode i_flags when writing
+	 * flags, so we do it explicitly here.
+ */
+ if (attr->la_flags & LUSTRE_PROJINHERIT_FL)
+ LDISKFS_I(inode)->i_flags |= LUSTRE_PROJINHERIT_FL;
}
return 0;
}
static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr)
{
+ int rc;
+
if ((attr->la_valid & LA_UID && attr->la_uid != i_uid_read(inode)) ||
(attr->la_valid & LA_GID && attr->la_gid != i_gid_read(inode))) {
struct iattr iattr;
- int rc;
ll_vfs_dq_init(inode);
iattr.ia_valid = 0;
return rc;
}
}
+
+#ifdef HAVE_PROJECT_QUOTA
+ /* Handle project id transfer here properly */
+ if (attr->la_valid & LA_PROJID &&
+ attr->la_projid != i_projid_read(inode)) {
+ rc = __ldiskfs_ioctl_setproject(inode, attr->la_projid);
+ if (rc) {
+ CERROR("%s: quota transfer failed: rc = %d. Is quota "
+ "enforcement enabled on the ldiskfs "
+ "filesystem?\n", inode->i_sb->s_id, rc);
+ return rc;
+ }
+ }
+#endif
return 0;
}
/* Let's check if there are extra flags need to be set into LMA */
if (attr->la_flags & LUSTRE_LMA_FL_MASKS) {
struct osd_thread_info *info = osd_oti_get(env);
- struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
+ struct lustre_mdt_attrs *lma = &info->oti_ost_attrs.loa_lma;
- rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
- if (rc != 0)
+ rc = osd_get_lma(info, inode, &info->oti_obj_dentry,
+ &info->oti_ost_attrs);
+ if (rc)
GOTO(out, rc);
lma->lma_incompat |=
}
/**
- * Helper function for osd_object_create()
+ * Helper function for osd_create()
*
* \retval 0, on success
*/
-static int __osd_object_create(struct osd_thread_info *info,
- struct osd_object *obj, struct lu_attr *attr,
- struct dt_allocation_hint *hint,
- struct dt_object_format *dof,
- struct thandle *th)
+static int __osd_create(struct osd_thread_info *info, struct osd_object *obj,
+ struct lu_attr *attr, struct dt_allocation_hint *hint,
+ struct dt_object_format *dof, struct thandle *th)
{
int result;
__u32 umask;
}
/**
- * Helper function for osd_object_create()
+ * Helper function for osd_create()
*
* \retval 0, on success
*/
return fld_local_lookup(env, ss->ss_server_fld, seq, range);
}
-/*
- * Concurrency: no external locking is necessary.
- */
-static int osd_declare_object_create(const struct lu_env *env,
- struct dt_object *dt,
- struct lu_attr *attr,
- struct dt_allocation_hint *hint,
- struct dt_object_format *dof,
- struct thandle *handle)
+static int osd_declare_create(const struct lu_env *env, struct dt_object *dt,
+ struct lu_attr *attr,
+ struct dt_allocation_hint *hint,
+ struct dt_object_format *dof,
+ struct thandle *handle)
{
- struct osd_thandle *oh;
- int rc;
+ struct osd_thandle *oh;
+ int rc;
ENTRY;
LASSERT(handle != NULL);
if (!attr)
RETURN(0);
- rc = osd_declare_inode_qid(env, attr->la_uid, attr->la_gid, 1, oh,
- osd_dt_obj(dt), false, NULL, false);
+ rc = osd_declare_inode_qid(env, attr->la_uid, attr->la_gid,
+ attr->la_projid, 1, oh, osd_dt_obj(dt),
+ NULL, OSD_QID_INODE);
if (rc != 0)
RETURN(rc);
RETURN(rc);
}
-static int osd_object_create(const struct lu_env *env, struct dt_object *dt,
- struct lu_attr *attr,
- struct dt_allocation_hint *hint,
- struct dt_object_format *dof, struct thandle *th)
-{
- const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
- struct osd_object *obj = osd_dt_obj(dt);
- struct osd_thread_info *info = osd_oti_get(env);
- int result;
- ENTRY;
-
- if (dt_object_exists(dt))
- return -EEXIST;
-
- LINVRNT(osd_invariant(obj));
- LASSERT(!dt_object_remote(dt));
- LASSERT(osd_write_locked(env, obj));
- LASSERT(th != NULL);
-
- if (unlikely(fid_is_acct(fid)))
- /* Quota files can't be created from the kernel any more,
- * 'tune2fs -O quota' will take care of creating them */
- RETURN(-EPERM);
-
- result = __osd_object_create(info, obj, attr, hint, dof, th);
- if (result == 0) {
- result = __osd_oi_insert(env, obj, fid, th);
- if (obj->oo_dt.do_body_ops == &osd_body_ops_new)
- obj->oo_dt.do_body_ops = &osd_body_ops;
- }
- LASSERT(ergo(result == 0,
- dt_object_exists(dt) && !dt_object_remote(dt)));
-
- LASSERT(osd_invariant(obj));
- RETURN(result);
-}
-
/**
* Called to destroy on-disk representation of the object
*
* Concurrency: must be locked
*/
-static int osd_declare_object_destroy(const struct lu_env *env,
- struct dt_object *dt,
- struct thandle *th)
+static int osd_declare_destroy(const struct lu_env *env, struct dt_object *dt,
+ struct thandle *th)
{
struct osd_object *obj = osd_dt_obj(dt);
struct inode *inode = obj->oo_inode;
osd_dto_credits_noquota[DTO_INDEX_DELETE] + 3);
/* one less inode */
rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
- -1, oh, obj, false, NULL, false);
+ i_projid_read(inode), -1, oh, obj, NULL,
+ OSD_QID_INODE);
if (rc)
RETURN(rc);
/* data to be truncated */
rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
- 0, oh, obj, true, NULL, false);
+ i_projid_read(inode), 0, oh, obj, NULL,
+ OSD_QID_BLK);
if (rc)
RETURN(rc);
RETURN(rc);
}
-static int osd_object_destroy(const struct lu_env *env,
- struct dt_object *dt,
- struct thandle *th)
+static int osd_destroy(const struct lu_env *env, struct dt_object *dt,
+ struct thandle *th)
{
- const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
- struct osd_object *obj = osd_dt_obj(dt);
- struct inode *inode = obj->oo_inode;
- struct osd_device *osd = osd_obj2dev(obj);
- struct osd_thandle *oh;
- int result;
- ENTRY;
+ const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct inode *inode = obj->oo_inode;
+ struct osd_device *osd = osd_obj2dev(obj);
+ struct osd_thandle *oh;
+ int result;
+ ENTRY;
- oh = container_of0(th, struct osd_thandle, ot_super);
- LASSERT(oh->ot_handle);
- LASSERT(inode);
- LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
+ oh = container_of0(th, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle);
+ LASSERT(inode);
+ LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
if (unlikely(fid_is_acct(fid)))
RETURN(-EPERM);
int osd_ea_fid_set(struct osd_thread_info *info, struct inode *inode,
const struct lu_fid *fid, __u32 compat, __u32 incompat)
{
- struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
- int rc;
+ struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
+ struct lustre_mdt_attrs *lma = &loa->loa_lma;
+ int rc;
ENTRY;
if (OBD_FAIL_CHECK(OBD_FAIL_FID_INLMA))
if (OBD_FAIL_CHECK(OBD_FAIL_OSD_OST_EA_FID_SET))
rc = -ENOMEM;
- lustre_lma_init(lma, fid, compat, incompat);
- lustre_lma_swab(lma);
-
- rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma, sizeof(*lma),
- XATTR_CREATE);
+ lustre_loa_init(loa, fid, compat, incompat);
+ lustre_loa_swab(loa, false);
+
+ /* For the OST device with 256 bytes inode size by default,
+ * the PFID EA will be stored together with LMA EA to avoid
+ * performance trouble. Otherwise the PFID EA can be stored
+ * independently. LU-8998 */
+ if ((compat & LMAC_FID_ON_OST) &&
+ LDISKFS_INODE_SIZE(inode->i_sb) <= 256)
+ rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, loa,
+ sizeof(*loa), XATTR_CREATE);
+ else
+ rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
+ sizeof(*lma), XATTR_CREATE);
/* LMA may already exist, but we need to check that all the
* desired compat/incompat flags have been added. */
if (unlikely(rc == -EEXIST)) {
- if (compat == 0 && incompat == 0)
- RETURN(0);
-
rc = __osd_xattr_get(inode, &info->oti_obj_dentry,
- XATTR_NAME_LMA, info->oti_mdt_attrs_old,
- LMA_OLD_SIZE);
- if (rc <= 0)
+ XATTR_NAME_LMA, (void *)loa, sizeof(*loa));
+ if (rc < 0)
+ RETURN(rc);
+
+ if (rc < sizeof(*lma))
RETURN(-EINVAL);
- lustre_lma_swab(lma);
- if (!(~lma->lma_compat & compat) &&
- !(~lma->lma_incompat & incompat))
+ lustre_loa_swab(loa, true);
+ if (lu_fid_eq(fid, &lma->lma_self_fid) &&
+ ((compat == 0 && incompat == 0) ||
+ (!(~lma->lma_compat & compat) &&
+ !(~lma->lma_incompat & incompat))))
RETURN(0);
+ lma->lma_self_fid = *fid;
lma->lma_compat |= compat;
lma->lma_incompat |= incompat;
- lustre_lma_swab(lma);
- rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
- sizeof(*lma), XATTR_REPLACE);
+ if (rc == sizeof(*lma)) {
+ lustre_lma_swab(lma);
+ rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
+ sizeof(*lma), XATTR_REPLACE);
+ } else {
+ lustre_loa_swab(loa, false);
+ rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, loa,
+ sizeof(*loa), XATTR_REPLACE);
+ }
}
RETURN(rc);
ldiskfs_set_inode_state(local, LDISKFS_STATE_LUSTRE_NOSCRUB);
unlock_new_inode(local);
+ /* Agent inode should not have project ID */
+#ifdef HAVE_PROJECT_QUOTA
+ if (LDISKFS_I(pobj->oo_inode)->i_flags & LUSTRE_PROJINHERIT_FL) {
+ rc = __ldiskfs_ioctl_setproject(local, 0);
+ if (rc) {
+ CERROR("%s: quota transfer failed: rc = %d. Is project "
+ "quota enforcement enabled on the ldiskfs "
+ "filesystem?\n", local->i_sb->s_id, rc);
+ RETURN(ERR_PTR(rc));
+ }
+ }
+#endif
/* Set special LMA flag for local agent inode */
rc = osd_ea_fid_set(info, local, fid, 0, LMAI_AGENT);
if (rc != 0) {
}
/**
- * OSD layer object create function for interoperability mode (b11826).
- * This is mostly similar to osd_object_create(). Only difference being, fid is
- * inserted into inode ea here.
+ * OSD layer object create function for OST objects (b=11826).
+ *
+ * The FID is inserted into inode xattr here.
*
* \retval 0, on success
* \retval -ve, on error
*/
-static int osd_object_ea_create(const struct lu_env *env, struct dt_object *dt,
- struct lu_attr *attr,
- struct dt_allocation_hint *hint,
- struct dt_object_format *dof,
- struct thandle *th)
+static int osd_create(const struct lu_env *env, struct dt_object *dt,
+ struct lu_attr *attr, struct dt_allocation_hint *hint,
+ struct dt_object_format *dof, struct thandle *th)
{
const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
struct osd_object *obj = osd_dt_obj(dt);
if (dt_object_exists(dt))
RETURN(-EEXIST);
- LASSERT(osd_invariant(obj));
+ LINVRNT(osd_invariant(obj));
LASSERT(!dt_object_remote(dt));
- LASSERT(osd_write_locked(env, obj));
+ LASSERT(osd_is_write_locked(env, obj));
LASSERT(th != NULL);
if (unlikely(fid_is_acct(fid)))
* 'tune2fs -O quota' will take care of creating them */
RETURN(-EPERM);
- result = __osd_object_create(info, obj, attr, hint, dof, th);
+ result = __osd_create(info, obj, attr, hint, dof, th);
if (result == 0) {
if (fid_is_idif(fid) &&
!osd_dev(dt->do_lu.lo_dev)->od_index_in_idif) {
LASSERT(ergo(result == 0,
dt_object_exists(dt) && !dt_object_remote(dt)));
- LINVRNT(osd_invariant(obj));
- RETURN(result);
+ LINVRNT(osd_invariant(obj));
+ RETURN(result);
}
-static int osd_declare_object_ref_add(const struct lu_env *env,
- struct dt_object *dt,
- struct thandle *handle)
+static int osd_declare_ref_add(const struct lu_env *env, struct dt_object *dt,
+ struct thandle *handle)
{
- struct osd_thandle *oh;
+ struct osd_thandle *oh;
- /* it's possible that object doesn't exist yet */
- LASSERT(handle != NULL);
+ /* it's possible that object doesn't exist yet */
+ LASSERT(handle != NULL);
- oh = container_of0(handle, struct osd_thandle, ot_super);
- LASSERT(oh->ot_handle == NULL);
+ oh = container_of0(handle, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle == NULL);
osd_trans_declare_op(env, oh, OSD_OT_REF_ADD,
osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
/*
* Concurrency: @dt is write locked.
*/
-static int osd_object_ref_add(const struct lu_env *env,
- struct dt_object *dt, struct thandle *th)
+static int osd_ref_add(const struct lu_env *env, struct dt_object *dt,
+ struct thandle *th)
{
struct osd_object *obj = osd_dt_obj(dt);
struct inode *inode = obj->oo_inode;
LINVRNT(osd_invariant(obj));
LASSERT(!dt_object_remote(dt));
- LASSERT(osd_write_locked(env, obj));
+ LASSERT(osd_is_write_locked(env, obj));
LASSERT(th != NULL);
oh = container_of0(th, struct osd_thandle, ot_super);
return rc;
}
-static int osd_declare_object_ref_del(const struct lu_env *env,
- struct dt_object *dt,
- struct thandle *handle)
+static int osd_declare_ref_del(const struct lu_env *env, struct dt_object *dt,
+ struct thandle *handle)
{
struct osd_thandle *oh;
/*
* Concurrency: @dt is write locked.
*/
-static int osd_object_ref_del(const struct lu_env *env, struct dt_object *dt,
- struct thandle *th)
+static int osd_ref_del(const struct lu_env *env, struct dt_object *dt,
+ struct thandle *th)
{
struct osd_object *obj = osd_dt_obj(dt);
struct inode *inode = obj->oo_inode;
LINVRNT(osd_invariant(obj));
LASSERT(!dt_object_remote(dt));
- LASSERT(osd_write_locked(env, obj));
+ LASSERT(osd_is_write_locked(env, obj));
LASSERT(th != NULL);
oh = container_of0(th, struct osd_thandle, ot_super);
}
/*
- * Get the 64-bit version for an inode.
- */
-static int osd_object_version_get(const struct lu_env *env,
- struct dt_object *dt, dt_obj_version_t *ver)
-{
- struct inode *inode = osd_dt_obj(dt)->oo_inode;
-
- CDEBUG(D_INODE, "Get version %#llx for inode %lu\n",
- LDISKFS_I(inode)->i_fs_version, inode->i_ino);
- *ver = LDISKFS_I(inode)->i_fs_version;
- return 0;
-}
-
-/*
* Concurrency: @dt is read locked.
*/
static int osd_xattr_get(const struct lu_env *env, struct dt_object *dt,
bool cache_xattr = false;
int rc;
+ LASSERT(buf);
+
/* version get is not real XATTR but uses xattr API */
if (strcmp(name, XATTR_NAME_VERSION) == 0) {
+ dt_obj_version_t *ver = buf->lb_buf;
+
/* for version we are just using xattr API but change inode
* field instead */
if (buf->lb_len == 0)
if (buf->lb_len < sizeof(dt_obj_version_t))
return -ERANGE;
- osd_object_version_get(env, dt, buf->lb_buf);
+ CDEBUG(D_INODE, "Get version %#llx for inode %lu\n",
+ LDISKFS_I(inode)->i_fs_version, inode->i_ino);
+
+ *ver = LDISKFS_I(inode)->i_fs_version;
return sizeof(dt_obj_version_t);
}
}
rc = __osd_xattr_get(inode, dentry, name, buf->lb_buf, buf->lb_len);
+ if (rc == -ENODATA && strcmp(name, XATTR_NAME_FID) == 0) {
+ struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
+ struct lustre_mdt_attrs *lma = &loa->loa_lma;
+ struct filter_fid *ff;
+ struct ost_layout *ol;
+
+ LASSERT(osd_dev(dt->do_lu.lo_dev)->od_is_ost);
+
+ rc = osd_get_lma(info, inode, &info->oti_obj_dentry, loa);
+ if (rc)
+ return rc;
+
+ if (!(lma->lma_compat & LMAC_STRIPE_INFO)) {
+ rc = -ENODATA;
+ goto cache;
+ }
+
+ rc = sizeof(*ff);
+ if (buf->lb_len == 0 || !buf->lb_buf)
+ return rc;
+
+ if (buf->lb_len < rc)
+ return -ERANGE;
+
+ ff = buf->lb_buf;
+ ol = &ff->ff_layout;
+ ol->ol_stripe_count = cpu_to_le32(loa->loa_parent_fid.f_ver >>
+ PFID_STRIPE_IDX_BITS);
+ ol->ol_stripe_size = cpu_to_le32(loa->loa_stripe_size);
+ loa->loa_parent_fid.f_ver &= PFID_STRIPE_COUNT_MASK;
+ fid_cpu_to_le(&ff->ff_parent, &loa->loa_parent_fid);
+ if (lma->lma_compat & LMAC_COMP_INFO) {
+ ol->ol_comp_start = cpu_to_le64(loa->loa_comp_start);
+ ol->ol_comp_end = cpu_to_le64(loa->loa_comp_end);
+ ol->ol_comp_id = cpu_to_le32(loa->loa_comp_id);
+ } else {
+ ol->ol_comp_start = 0;
+ ol->ol_comp_end = 0;
+ ol->ol_comp_id = 0;
+ }
+ }
+
+cache:
if (cache_xattr) {
if (rc == -ENOENT || rc == -ENODATA)
osd_oxc_add(obj, name, NULL, 0);
int fl, struct thandle *handle)
{
struct osd_thandle *oh;
- int credits;
+ int credits = 0;
struct super_block *sb = osd_sb(osd_dev(dt->do_lu.lo_dev));
LASSERT(handle != NULL);
/* For non-upgrading case, the LMA is set first and
* usually fit inode. But for upgrade case, the LMA
* may be in another separated EA block. */
- if (!dt_object_exists(dt))
- credits = 0;
- else if (fl == LU_XATTR_REPLACE)
- credits = 1;
- else
- goto upgrade;
+ if (dt_object_exists(dt)) {
+ if (fl == LU_XATTR_REPLACE)
+ credits = 1;
+ else
+ goto upgrade;
+ }
} else if (strcmp(name, XATTR_NAME_VERSION) == 0) {
credits = 1;
+ } else if (strcmp(name, XATTR_NAME_FID) == 0) {
+ /* We may need to delete the old PFID EA. */
+ credits = LDISKFS_MAXQUOTAS_DEL_BLOCKS(sb);
+ if (fl == LU_XATTR_REPLACE)
+ credits += 1;
+ else
+ goto upgrade;
} else {
+
upgrade:
- credits = osd_dto_credits_noquota[DTO_XATTR_SET];
+ credits += osd_dto_credits_noquota[DTO_XATTR_SET];
if (buf != NULL) {
ssize_t buflen;
}
/*
- * Set the 64-bit version for object
- */
-static void osd_object_version_set(const struct lu_env *env,
- struct dt_object *dt,
- dt_obj_version_t *new_version)
-{
- struct inode *inode = osd_dt_obj(dt)->oo_inode;
-
- CDEBUG(D_INODE, "Set version %#llx (old %#llx) for inode %lu\n",
- *new_version, LDISKFS_I(inode)->i_fs_version, inode->i_ino);
-
- LDISKFS_I(inode)->i_fs_version = *new_version;
- /** Version is set after all inode operations are finished,
- * so we should mark it dirty here */
- ll_dirty_inode(inode, I_DIRTY_DATASYNC);
-}
-
-/*
* Concurrency: @dt is write locked.
*/
static int osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
const struct lu_buf *buf, const char *name, int fl,
struct thandle *handle)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct inode *inode = obj->oo_inode;
- struct osd_thread_info *info = osd_oti_get(env);
- int fs_flags = 0;
- int rc;
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct inode *inode = obj->oo_inode;
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
+ struct lustre_mdt_attrs *lma = &loa->loa_lma;
+ int fs_flags = 0;
+ int len;
+ int rc;
ENTRY;
- LASSERT(handle != NULL);
+ LASSERT(handle);
+ LASSERT(buf);
/* version set is not real XATTR */
if (strcmp(name, XATTR_NAME_VERSION) == 0) {
+ dt_obj_version_t *version = buf->lb_buf;
+
/* for version we are just using xattr API but change inode
* field instead */
LASSERT(buf->lb_len == sizeof(dt_obj_version_t));
- osd_object_version_set(env, dt, buf->lb_buf);
- return sizeof(dt_obj_version_t);
+
+ CDEBUG(D_INODE, "Set version %#llx (old %#llx) for inode %lu\n",
+ *version, LDISKFS_I(inode)->i_fs_version, inode->i_ino);
+
+ LDISKFS_I(inode)->i_fs_version = *version;
+ /* Version is set after all inode operations are finished,
+ * so we should mark it dirty here */
+ ll_dirty_inode(inode, I_DIRTY_DATASYNC);
+
+ RETURN(0);
}
CDEBUG(D_INODE, DFID" set xattr '%s' with size %zu\n",
PFID(lu_object_fid(&dt->do_lu)), name, buf->lb_len);
+ len = buf->lb_len;
osd_trans_exec_op(env, handle, OSD_OT_XATTR_SET);
if (fl & LU_XATTR_REPLACE)
fs_flags |= XATTR_REPLACE;
if (fl & LU_XATTR_CREATE)
fs_flags |= XATTR_CREATE;
- if (strcmp(name, XATTR_NAME_LMV) == 0) {
- struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
- int rc;
+ /* For the OST device with 256 bytes inode size by default,
+ * the PFID EA will be stored together with LMA EA to avoid
+ * performance trouble. Otherwise the PFID EA can be stored
+ * independently. LU-8998 */
+ if (strcmp(name, XATTR_NAME_FID) == 0 &&
+ LDISKFS_INODE_SIZE(inode->i_sb) <= 256) {
+ struct dentry *dentry = &info->oti_obj_dentry;
+ struct filter_fid *ff;
+ struct ost_layout *ol;
+ int fl;
+
+ LASSERT(osd_dev(dt->do_lu.lo_dev)->od_is_ost);
+
+ ff = buf->lb_buf;
+ ol = &ff->ff_layout;
+ /* Old client does not send stripe information, store
+ * the PFID EA on disk directly. */
+ if (buf->lb_len == sizeof(struct lu_fid) ||
+ ol->ol_stripe_size == 0) {
+ len = sizeof(struct lu_fid);
+ goto set;
+ }
- rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
- if (rc != 0)
+ if (buf->lb_len != sizeof(*ff))
+ RETURN(-EINVAL);
+
+ rc = osd_get_lma(info, inode, dentry, loa);
+ if (unlikely(rc == -ENODATA)) {
+			/* Usually for upgrading from old device */
+ lustre_loa_init(loa, lu_object_fid(&dt->do_lu),
+ LMAC_FID_ON_OST, 0);
+ fl = XATTR_CREATE;
+ } else if (rc) {
+ RETURN(rc);
+ } else {
+ fl = XATTR_REPLACE;
+ }
+
+ fid_le_to_cpu(&loa->loa_parent_fid, &ff->ff_parent);
+ loa->loa_parent_fid.f_ver |= le32_to_cpu(ol->ol_stripe_count) <<
+ PFID_STRIPE_IDX_BITS;
+ loa->loa_stripe_size = le32_to_cpu(ol->ol_stripe_size);
+ lma->lma_compat |= LMAC_STRIPE_INFO;
+ if (ol->ol_comp_id != 0) {
+ loa->loa_comp_id = le32_to_cpu(ol->ol_comp_id);
+ loa->loa_comp_start = le64_to_cpu(ol->ol_comp_start);
+ loa->loa_comp_end = le64_to_cpu(ol->ol_comp_end);
+ lma->lma_compat |= LMAC_COMP_INFO;
+ }
+
+ lustre_loa_swab(loa, false);
+
+ /* Remove old PFID EA entry firstly. */
+ ll_vfs_dq_init(inode);
+ rc = inode->i_op->removexattr(dentry, name);
+ if (rc && rc != -ENODATA)
+ RETURN(rc);
+
+ /* Store the PFID EA inside the LMA EA. */
+ rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, loa,
+ sizeof(*loa), fl);
+
+ RETURN(rc);
+ } else if (strcmp(name, XATTR_NAME_LMV) == 0) {
+ rc = osd_get_lma(info, inode, &info->oti_obj_dentry, loa);
+ if (rc)
RETURN(rc);
lma->lma_incompat |= LMAI_STRIPED;
RETURN(rc);
}
- if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_LINKEA_OVERFLOW) &&
- strcmp(name, XATTR_NAME_LINK) == 0)
- return -ENOSPC;
-
- rc = __osd_xattr_set(info, inode, name, buf->lb_buf, buf->lb_len,
- fs_flags);
+set:
+ rc = __osd_xattr_set(info, inode, name, buf->lb_buf, len, fs_flags);
osd_trans_exec_check(env, handle, OSD_OT_XATTR_SET);
if (rc == 0 &&
dentry->d_inode = inode;
dentry->d_sb = inode->i_sb;
rc = inode->i_op->removexattr(dentry, name);
+ if (rc == -ENODATA && strcmp(name, XATTR_NAME_FID) == 0) {
+ struct lustre_mdt_attrs *lma = &info->oti_ost_attrs.loa_lma;
+
+ LASSERT(osd_dev(dt->do_lu.lo_dev)->od_is_ost);
+
+ rc = osd_get_lma(info, inode, &info->oti_obj_dentry,
+ &info->oti_ost_attrs);
+ if (!rc) {
+ if (!(lma->lma_compat & LMAC_STRIPE_INFO)) {
+ rc = -ENODATA;
+ goto out;
+ }
+
+ lma->lma_compat &= ~(LMAC_STRIPE_INFO | LMAC_COMP_INFO);
+ lustre_lma_swab(lma);
+ rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
+ sizeof(*lma), XATTR_REPLACE);
+ }
+ }
+
+out:
osd_trans_exec_check(env, handle, OSD_OT_XATTR_SET);
if (rc == 0 &&
}
LINVRNT(osd_invariant(obj));
- if (result == 0 && feat == &dt_quota_glb_features &&
- fid_seq(lu_object_fid(&dt->do_lu)) == FID_SEQ_QUOTA_GLB)
- result = osd_quota_migration(env, dt);
-
return result;
}
}
static const struct dt_object_operations osd_obj_ops = {
- .do_read_lock = osd_object_read_lock,
- .do_write_lock = osd_object_write_lock,
- .do_read_unlock = osd_object_read_unlock,
- .do_write_unlock = osd_object_write_unlock,
- .do_write_locked = osd_object_write_locked,
- .do_attr_get = osd_attr_get,
- .do_declare_attr_set = osd_declare_attr_set,
- .do_attr_set = osd_attr_set,
- .do_ah_init = osd_ah_init,
- .do_declare_create = osd_declare_object_create,
- .do_create = osd_object_create,
- .do_declare_destroy = osd_declare_object_destroy,
- .do_destroy = osd_object_destroy,
- .do_index_try = osd_index_try,
- .do_declare_ref_add = osd_declare_object_ref_add,
- .do_ref_add = osd_object_ref_add,
- .do_declare_ref_del = osd_declare_object_ref_del,
- .do_ref_del = osd_object_ref_del,
- .do_xattr_get = osd_xattr_get,
- .do_declare_xattr_set = osd_declare_xattr_set,
- .do_xattr_set = osd_xattr_set,
- .do_declare_xattr_del = osd_declare_xattr_del,
- .do_xattr_del = osd_xattr_del,
- .do_xattr_list = osd_xattr_list,
- .do_object_sync = osd_object_sync,
- .do_invalidate = osd_invalidate,
-};
-
-/**
- * dt_object_operations for interoperability mode
- * (i.e. to run 2.0 mds on 1.8 disk) (b11826)
- */
-static const struct dt_object_operations osd_obj_ea_ops = {
- .do_read_lock = osd_object_read_lock,
- .do_write_lock = osd_object_write_lock,
- .do_read_unlock = osd_object_read_unlock,
- .do_write_unlock = osd_object_write_unlock,
- .do_write_locked = osd_object_write_locked,
- .do_attr_get = osd_attr_get,
- .do_declare_attr_set = osd_declare_attr_set,
- .do_attr_set = osd_attr_set,
- .do_ah_init = osd_ah_init,
- .do_declare_create = osd_declare_object_create,
- .do_create = osd_object_ea_create,
- .do_declare_destroy = osd_declare_object_destroy,
- .do_destroy = osd_object_destroy,
- .do_index_try = osd_index_try,
- .do_declare_ref_add = osd_declare_object_ref_add,
- .do_ref_add = osd_object_ref_add,
- .do_declare_ref_del = osd_declare_object_ref_del,
- .do_ref_del = osd_object_ref_del,
- .do_xattr_get = osd_xattr_get,
- .do_declare_xattr_set = osd_declare_xattr_set,
- .do_xattr_set = osd_xattr_set,
- .do_declare_xattr_del = osd_declare_xattr_del,
- .do_xattr_del = osd_xattr_del,
- .do_xattr_list = osd_xattr_list,
- .do_object_sync = osd_object_sync,
- .do_invalidate = osd_invalidate,
+ .do_read_lock = osd_read_lock,
+ .do_write_lock = osd_write_lock,
+ .do_read_unlock = osd_read_unlock,
+ .do_write_unlock = osd_write_unlock,
+ .do_write_locked = osd_write_locked,
+ .do_attr_get = osd_attr_get,
+ .do_declare_attr_set = osd_declare_attr_set,
+ .do_attr_set = osd_attr_set,
+ .do_ah_init = osd_ah_init,
+ .do_declare_create = osd_declare_create,
+ .do_create = osd_create,
+ .do_declare_destroy = osd_declare_destroy,
+ .do_destroy = osd_destroy,
+ .do_index_try = osd_index_try,
+ .do_declare_ref_add = osd_declare_ref_add,
+ .do_ref_add = osd_ref_add,
+ .do_declare_ref_del = osd_declare_ref_del,
+ .do_ref_del = osd_ref_del,
+ .do_xattr_get = osd_xattr_get,
+ .do_declare_xattr_set = osd_declare_xattr_set,
+ .do_xattr_set = osd_xattr_set,
+ .do_declare_xattr_del = osd_declare_xattr_del,
+ .do_xattr_del = osd_xattr_del,
+ .do_xattr_list = osd_xattr_list,
+ .do_object_sync = osd_object_sync,
+ .do_invalidate = osd_invalidate,
};
static const struct dt_object_operations osd_obj_otable_it_ops = {
{
struct osd_thandle *oh;
struct inode *inode;
- int rc;
+ int rc, credits;
ENTRY;
LASSERT(!dt_object_remote(dt));
oh = container_of0(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
- /* due to DNE we may need to remove an agent inode */
- osd_trans_declare_op(env, oh, OSD_OT_DELETE,
- osd_dto_credits_noquota[DTO_INDEX_DELETE] +
- osd_dto_credits_noquota[DTO_OBJECT_DELETE]);
+ credits = osd_dto_credits_noquota[DTO_INDEX_DELETE];
+ if (key != NULL && unlikely(strcmp((char *)key, dotdot) == 0)) {
+ /* '..' to a remote object has a local representative */
+ credits += osd_dto_credits_noquota[DTO_INDEX_DELETE];
+ /* to reset LMAI_REMOTE_PARENT */
+ credits += 1;
+ }
+ osd_trans_declare_op(env, oh, OSD_OT_DELETE, credits);
inode = osd_dt_obj(dt)->oo_inode;
if (inode == NULL)
RETURN(-ENOENT);
rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
- 0, oh, osd_dt_obj(dt), true, NULL, false);
+ i_projid_read(inode), 0, oh, osd_dt_obj(dt),
+ NULL, OSD_QID_BLK);
RETURN(rc);
}
if (gen != OSD_OII_NOGEN)
goto trigger;
- iput(inode);
/* The inode may has been reused by others, we do not know,
* leave it to be handled by subsequent osd_fid_lookup(). */
- RETURN(0);
- } else if (rc != 0 || osd_id_eq(id, &oti->oti_id)) {
- RETURN(rc);
- } else {
- insert = false;
+ GOTO(out, rc = 0);
+ } else if (rc || osd_id_eq(id, &oti->oti_id)) {
+ GOTO(out, rc);
}
+ insert = false;
+
trigger:
if (thread_is_running(&scrub->os_thread)) {
if (inode == NULL) {
else
rc = osd_check_lmv(oti, dev, inode, oic);
- iput(inode);
- RETURN(rc);
+ GOTO(out, rc);
}
if (!dev->od_noscrub && ++once == 1) {
rc = osd_scrub_start(dev, SS_AUTO_PARTIAL | SS_CLEAR_DRYRUN |
SS_CLEAR_FAILOUT);
CDEBUG(D_LFSCK | D_CONSOLE | D_WARNING,
- "%.16s: trigger partial OI scrub for RPC inconsistency "
+ "%s: trigger partial OI scrub for RPC inconsistency "
"checking FID "DFID": rc = %d\n",
- LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
- PFID(fid), rc);
+ osd_dev2name(dev), PFID(fid), rc);
if (rc == 0 || rc == -EALREADY)
goto again;
}
- if (inode != NULL)
+ GOTO(out, rc);
+
+out:
+ if (inode)
iput(inode);
RETURN(rc);
struct osd_idmap_cache *oic,
struct lu_fid *fid, __u32 ino)
{
- struct lustre_mdt_attrs *lma = &oti->oti_mdt_attrs;
- struct inode *inode;
- int rc;
+ struct lustre_ost_attrs *loa = &oti->oti_ost_attrs;
+ struct inode *inode;
+ int rc;
osd_id_gen(&oic->oic_lid, ino, OSD_OII_NOGEN);
inode = osd_iget(oti, dev, &oic->oic_lid);
return PTR_ERR(inode);
}
- rc = osd_get_lma(oti, inode, &oti->oti_obj_dentry, lma);
+ rc = osd_get_lma(oti, inode, &oti->oti_obj_dentry, loa);
iput(inode);
if (rc != 0)
fid_zero(&oic->oic_fid);
else
- *fid = oic->oic_fid = lma->lma_self_fid;
+ *fid = oic->oic_fid = loa->loa_lma.lma_self_fid;
return rc;
}
}
ldata.ld_buf = buf;
- rc = linkea_init(&ldata);
- if (rc == 0) {
+ rc = linkea_init_with_rec(&ldata);
+ if (!rc) {
linkea_first_entry(&ldata);
linkea_entry_unpack(ldata.ld_lee, &ldata.ld_reclen, NULL, fid);
}
}
ldata.ld_buf = buf;
- rc = linkea_init(&ldata);
- if (rc == 0)
+ rc = linkea_init_with_rec(&ldata);
+ if (!rc)
rc = linkea_links_find(&ldata, &cname, pfid);
RETURN(rc);
return rc;
}
-/**
- * Put the osd object once done with it.
- *
- * \param obj osd object that needs to be put
- */
-static inline void osd_object_put(const struct lu_env *env,
- struct osd_object *obj)
-{
- lu_object_put(env, &obj->oo_dt.do_lu);
-}
-
static int osd_index_declare_ea_insert(const struct lu_env *env,
struct dt_object *dt,
const struct dt_rec *rec,
* calculate how many blocks will be consumed by this index
* insert */
rc = osd_declare_inode_qid(env, i_uid_read(inode),
- i_gid_read(inode), 0, oh,
- osd_dt_obj(dt), true, NULL, false);
+ i_gid_read(inode),
+ i_projid_read(inode), 0,
+ oh, osd_dt_obj(dt), NULL,
+ OSD_QID_BLK);
}
RETURN(rc);
/**
* free given Iterator.
*/
-
static void osd_it_iam_fini(const struct lu_env *env, struct dt_it *di)
{
struct osd_it_iam *it = (struct osd_it_iam *)di;
iam_it_fini(&it->oi_it);
osd_ipd_put(env, &obj->oo_dir->od_container, it->oi_ipd);
- lu_object_put(env, &obj->oo_dt.do_lu);
+ osd_object_put(env, obj);
OBD_FREE_PTR(it);
}
ENTRY;
oie->oie_file.f_op->release(inode, &oie->oie_file);
- lu_object_put(env, &obj->oo_dt.do_lu);
+ osd_object_put(env, obj);
if (unlikely(oie->oie_buf != info->oti_it_ea_buf))
OBD_FREE(oie->oie_buf, OSD_IT_EA_BUFSIZE);
else
/* Unfortunately, we cannot reinsert the name entry.
 * That means we lose it! */
if (rc != 0)
- CDEBUG(D_LFSCK, "%.16s: fail to reinsert the dirent, "
+ CDEBUG(D_LFSCK, "%s: fail to reinsert the dirent, "
"dir = %lu/%u, name = %.*s, "DFID": rc = %d\n",
- LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
+ osd_ino2name(inode),
dir->i_ino, dir->i_generation, namelen,
dentry->d_name.name, PFID(fid), rc);
struct osd_inode_id *id, __u32 *attr)
{
struct osd_thread_info *info = osd_oti_get(env);
- struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
+ struct lustre_mdt_attrs *lma = &info->oti_ost_attrs.loa_lma;
struct osd_device *dev = osd_obj2dev(obj);
struct super_block *sb = osd_sb(dev);
const char *devname = osd_name(dev);
dentry = osd_child_dentry_by_inode(env, dir, ent->oied_name,
ent->oied_namelen);
- rc = osd_get_lma(info, inode, dentry, lma);
+ rc = osd_get_lma(info, inode, dentry, &info->oti_ost_attrs);
if (rc == -ENODATA || !fid_is_sane(&lma->lma_self_fid))
lma = NULL;
else if (rc != 0)
struct osd_thread_info *info = osd_oti_get(env);
struct lu_fid *fid = &info->oti_fid;
struct inode *inode;
- int rc = 0, force_over_256tb = 0;
+ int rc = 0, force_over_512tb = 0;
ENTRY;
if (o->od_mnt != NULL)
RETURN(-EINVAL);
}
#endif
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 0, 53, 0)
if (opts != NULL && strstr(opts, "force_over_128tb") != NULL) {
- CWARN("force_over_128tb option is depricated."
- "Filesystems less then 256TB can be created without any"
- "force options. Use force_over_256tb option for"
- "filesystems greather then 256TB.\n");
+ CWARN("force_over_128tb option is deprecated. "
+ "Filesystems less than 512TB can be created without any "
+ "force options. Use force_over_512tb option for "
+ "filesystems greater than 512TB.\n");
}
+#endif
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 1, 53, 0)
+ if (opts != NULL && strstr(opts, "force_over_256tb") != NULL) {
+ CWARN("force_over_256tb option is deprecated. "
+ "Filesystems less than 512TB can be created without any "
+ "force options. Use force_over_512tb option for "
+ "filesystems greater than 512TB.\n");
+ }
+#endif
- if (opts != NULL && strstr(opts, "force_over_256tb") != NULL)
- force_over_256tb = 1;
+ if (opts != NULL && strstr(opts, "force_over_512tb") != NULL)
+ force_over_512tb = 1;
__page = alloc_page(GFP_KERNEL);
if (__page == NULL)
"noextents",
/* strip out option we processed in osd */
"bigendian_extents",
-#if LUSTRE_VERSION_CODE >= OBD_OCD_VERSION(3,0,53,0)
-#warning "remove force_over_128 option"
-#else
- "force_over_128tb (deprecated)",
-#endif
+ "force_over_128tb",
"force_over_256tb",
+ "force_over_512tb",
NULL
};
strcat(options, opts);
GOTO(out, rc);
}
- if (ldiskfs_blocks_count(LDISKFS_SB(osd_sb(o))->s_es) > (64ULL << 30) &&
- force_over_256tb == 0) {
+ if (ldiskfs_blocks_count(LDISKFS_SB(osd_sb(o))->s_es) <<
+ osd_sb(o)->s_blocksize_bits > 512ULL << 40 &&
+ force_over_512tb == 0) {
CERROR("%s: device %s LDISKFS does not support filesystems "
- "greater than 256TB and can cause data corruption. "
- "Use \"force_over_256tb\" mount option to override.\n",
+ "greater than 512TB and can cause data corruption. "
+ "Use \"force_over_512tb\" mount option to override.\n",
name, dev);
- GOTO(out, rc = -EINVAL);
+ GOTO(out_mnt, rc = -EINVAL);
}
-#ifdef HAVE_DEV_SET_RDONLY
- if (dev_check_rdonly(o->od_mnt->mnt_sb->s_bdev)) {
- CERROR("%s: underlying device %s is marked as read-only. "
- "Setup failed\n", name, dev);
+ if (lmd_flags & LMD_FLG_DEV_RDONLY) {
+ if (priv_dev_set_rdonly) {
+ priv_dev_set_rdonly(osd_sb(o)->s_bdev);
+ o->od_dt_dev.dd_rdonly = 1;
+ LCONSOLE_WARN("%s: set dev_rdonly on this device\n",
+ name);
+ } else {
+ LCONSOLE_WARN("%s: not support dev_rdonly on this device",
+ name);
+
+ GOTO(out_mnt, rc = -EOPNOTSUPP);
+ }
+ } else if (priv_dev_check_rdonly &&
+ priv_dev_check_rdonly(osd_sb(o)->s_bdev)) {
+ CERROR("%s: underlying device %s is marked as "
+ "read-only. Setup failed\n", name, dev);
+
GOTO(out_mnt, rc = -EROFS);
}
-#endif
if (!LDISKFS_HAS_COMPAT_FEATURE(o->od_mnt->mnt_sb,
LDISKFS_FEATURE_COMPAT_HAS_JOURNAL)) {
LASSERT(&o->od_dt_dev);
rc = class_process_proc_param(PARAM_OSD, lprocfs_osd_obd_vars,
cfg, &o->od_dt_dev);
- if (rc > 0 || rc == -ENOSYS)
+ if (rc > 0 || rc == -ENOSYS) {
rc = class_process_proc_param(PARAM_OST,
lprocfs_osd_obd_vars,
cfg, &o->od_dt_dev);
+ if (rc > 0)
+ rc = 0;
+ }
break;
default:
rc = -ENOSYS;
if (rc)
return rc;
+#ifdef CONFIG_KALLSYMS
+ priv_dev_set_rdonly = (void *)kallsyms_lookup_name("dev_set_rdonly");
+ priv_dev_check_rdonly = (void *)kallsyms_lookup_name("dev_check_rdonly");
+ /* Clear readonly is unused at this time */
+ /*priv_dev_clear_rdonly = (void *)kallsyms_lookup_name("dev_clear_rdonly");*/
+#endif
+
rc = class_register_type(&osd_obd_device_ops, NULL, true,
lprocfs_osd_module_vars,
LUSTRE_OSD_LDISKFS_NAME, &osd_device_type);