#include <lustre_net.h>
#include <lustre_fid.h>
/* process_config */
-#include <lustre_param.h>
+#include <uapi/linux/lustre_param.h>
#include "osd_internal.h"
#include "osd_dynlocks.h"
static const char dot[] = ".";
static const char dotdot[] = "..";
-static const char remote_obj_dir[] = "REM_OBJ_DIR";
static const struct lu_object_operations osd_lu_obj_ops;
static const struct dt_object_operations osd_obj_ops;
-static const struct dt_object_operations osd_obj_ea_ops;
static const struct dt_object_operations osd_obj_otable_it_ops;
static const struct dt_index_operations osd_index_iam_ops;
static const struct dt_index_operations osd_index_ea_ops;
/*
* Concurrency: doesn't matter
*/
-
-/*
- * Concurrency: doesn't matter
- */
-static int osd_write_locked(const struct lu_env *env, struct osd_object *o)
+static int osd_is_write_locked(const struct lu_env *env, struct osd_object *o)
{
struct osd_thread_info *oti = osd_oti_get(env);
return oti->oti_w_locks > 0 && o->oo_owner == env;
l = &mo->oo_dt.do_lu;
dt_object_init(&mo->oo_dt, NULL, d);
- mo->oo_dt.do_ops = &osd_obj_ea_ops;
+ mo->oo_dt.do_ops = &osd_obj_ops;
l->lo_ops = &osd_lu_obj_ops;
init_rwsem(&mo->oo_sem);
init_rwsem(&mo->oo_ext_idx_sem);
lustre_loa_swab(loa, true);
/* Check LMA compatibility */
if (lma->lma_incompat & ~LMA_INCOMPAT_SUPP) {
- CWARN("%.16s: unsupported incompat LMA feature(s) %#x "
+ CWARN("%s: unsupported incompat LMA feature(s) %#x "
"for fid = "DFID", ino = %lu\n",
- LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
+ osd_ino2name(inode),
lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
PFID(&lma->lma_self_fid), inode->i_ino);
rc = -EOPNOTSUPP;
iput(inode);
inode = ERR_PTR(-ESTALE);
} else if (is_bad_inode(inode)) {
- CWARN("%.16s: bad inode: ino = %u\n",
- LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name, id->oii_ino);
+ CWARN("%s: bad inode: ino = %u\n",
+ osd_dev2name(dev), id->oii_ino);
iput(inode);
inode = ERR_PTR(-ENOENT);
} else if ((rc = osd_attach_jinode(inode))) {
/* It is the OI scrub updated the OI mapping by race.
* The new OI mapping must be valid. */
- if (saved_ino != id->oii_ino || saved_gen != id->oii_gen) {
+ if (saved_ino != id->oii_ino ||
+ (saved_gen != id->oii_gen && saved_gen != OSD_OII_NOGEN)) {
+ if (!IS_ERR(inode))
+ iput(inode);
+
trusted = true;
goto again;
}
if (rc == sizeof(*ff)) {
rc = 0;
ostid_set_seq(ostid, le64_to_cpu(ff->ff_seq));
- ostid_set_id(ostid, le64_to_cpu(ff->ff_objid));
- /* XXX: use 0 as the index for compatibility, the caller will
- * handle index related issues when necessarry. */
- ostid_to_fid(fid, ostid, 0);
+ rc = ostid_set_id(ostid, le64_to_cpu(ff->ff_objid));
+ /*
+ * XXX: use 0 as the index for compatibility, the caller will
+ * handle index related issues when necessary.
+ */
+ if (!rc)
+ ostid_to_fid(fid, ostid, 0);
} else if (rc == sizeof(struct filter_fid)) {
rc = 1;
} else if (rc >= 0) {
rc = 0;
lustre_lma_swab(lma);
if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
- CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
+ (CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT) &&
+ S_ISREG(inode->i_mode)))) {
CWARN("%s: unsupported incompat LMA feature(s) %#x for "
"fid = "DFID", ino = %lu\n", osd_name(osd),
lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
out:
if (rc < 0)
- CDEBUG(D_LFSCK, "%.16s: fail to check LMV EA, inode = %lu/%u,"
- DFID": rc = %d\n",
- LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
+ CDEBUG(D_LFSCK, "%s: fail to check LMV EA, inode = %lu/%u,"
+ DFID": rc = %d\n", osd_ino2name(inode),
inode->i_ino, inode->i_generation,
PFID(&oic->oic_fid), rc);
else
* shouldn't never be re-used, if it's really a duplicate FID from
* unexpected reason, we should be able to detect it later by calling
* do_create->osd_oi_insert(). */
- if (conf != NULL && conf->loc_flags & LOC_F_NEW)
+ if (conf && conf->loc_flags & LOC_F_NEW)
GOTO(out, result = 0);
/* Search order: 1. per-thread cache. */
- if (lu_fid_eq(fid, &oic->oic_fid) &&
- likely(oic->oic_dev == dev)) {
+ if (lu_fid_eq(fid, &oic->oic_fid) && likely(oic->oic_dev == dev)) {
id = &oic->oic_lid;
goto iget;
}
if (!list_empty(&scrub->os_inconsistent_items)) {
/* Search order: 2. OI scrub pending list. */
result = osd_oii_lookup(dev, fid, id);
- if (result == 0)
+ if (!result)
goto iget;
}
goto trigger;
}
- if (result != 0)
+ if (result)
GOTO(out, result);
iget:
+ obj->oo_inode = NULL;
+	/* only relevant on later passes through the checks; inode is NULL
+	 * on the first pass */
+ if (!IS_ERR_OR_NULL(inode))
+ iput(inode);
+
inode = osd_iget_check(info, dev, fid, id, trusted);
- if (IS_ERR(inode)) {
- result = PTR_ERR(inode);
- if (result == -ENOENT || result == -ESTALE)
- GOTO(out, result = 0);
+ if (!IS_ERR(inode)) {
+ obj->oo_inode = inode;
+ result = 0;
+ if (remote)
+ goto trigger;
- if (result == -EREMCHG) {
+ goto check_lma;
+ }
-trigger:
- /* We still have chance to get the valid inode: for the
- * object which is referenced by remote name entry, the
- * object on the local MDT will be linked under the dir
- * of "/REMOTE_PARENT_DIR" with its FID string as name.
- *
- * We do not know whether the object for the given FID
- * is referenced by some remote name entry or not, and
- * especially for DNE II, a multiple-linked object may
- * have many name entries reside on many MDTs.
- *
- * To simplify the operation, OSD will not distinguish
- * more, just lookup "/REMOTE_PARENT_DIR". Usually, it
- * only happened for the RPC from other MDT during the
- * OI scrub, or for the client side RPC with FID only,
- * such as FID to path, or from old connected client. */
- if (!remote &&
- !fid_is_on_ost(info, dev, fid, OI_CHECK_FLD)) {
- rc1 = osd_lookup_in_remote_parent(info, dev,
- fid, id);
- if (rc1 == 0) {
- remote = true;
- trusted = true;
- flags |= SS_AUTO_PARTIAL;
- flags &= ~SS_AUTO_FULL;
- goto iget;
- }
- }
+ result = PTR_ERR(inode);
+ if (result == -ENOENT || result == -ESTALE)
+ GOTO(out, result = 0);
- if (thread_is_running(&scrub->os_thread)) {
- if (scrub->os_partial_scan &&
- !scrub->os_in_join) {
- goto join;
- } else {
- if (inode != NULL && !IS_ERR(inode)) {
- LASSERT(remote);
-
- osd_add_oi_cache(info, dev, id,
- fid);
- osd_oii_insert(dev, oic, true);
- } else {
- result = -EINPROGRESS;
- }
- }
- } else if (!dev->od_noscrub) {
+ if (result != -EREMCHG)
+ GOTO(out, result);
-join:
- rc1 = osd_scrub_start(dev, flags);
- LCONSOLE_WARN("%.16s: trigger OI scrub by RPC "
- "for the "DFID" with flags 0x%x,"
- " rc = %d\n", osd_name(dev),
- PFID(fid), flags, rc1);
- if (rc1 == 0 || rc1 == -EALREADY) {
- if (inode != NULL && !IS_ERR(inode)) {
- LASSERT(remote);
-
- osd_add_oi_cache(info, dev, id,
- fid);
- osd_oii_insert(dev, oic, true);
- } else {
- result = -EINPROGRESS;
- }
- } else {
- result = -EREMCHG;
- }
- } else {
- result = -EREMCHG;
- }
+trigger:
+ /* We still have chance to get the valid inode: for the
+ * object which is referenced by remote name entry, the
+ * object on the local MDT will be linked under the dir
+ * of "/REMOTE_PARENT_DIR" with its FID string as name.
+ *
+ * We do not know whether the object for the given FID
+ * is referenced by some remote name entry or not, and
+ * especially for DNE II, a multiple-linked object may
+ * have many name entries reside on many MDTs.
+ *
+ * To simplify the operation, OSD will not distinguish
+ * more, just lookup "/REMOTE_PARENT_DIR". Usually, it
+ * only happened for the RPC from other MDT during the
+ * OI scrub, or for the client side RPC with FID only,
+ * such as FID to path, or from old connected client. */
+ if (!remote && !fid_is_on_ost(info, dev, fid, OI_CHECK_FLD)) {
+ rc1 = osd_lookup_in_remote_parent(info, dev, fid, id);
+ if (!rc1) {
+ remote = true;
+ trusted = true;
+ flags |= SS_AUTO_PARTIAL;
+ flags &= ~SS_AUTO_FULL;
+ goto iget;
}
+ }
- if (inode == NULL || IS_ERR(inode))
- GOTO(out, result);
- } else if (remote) {
- goto trigger;
+ if (thread_is_running(&scrub->os_thread)) {
+ if (scrub->os_partial_scan && !scrub->os_in_join)
+ goto join;
+
+ if (IS_ERR_OR_NULL(inode) || result)
+ GOTO(out, result = -EINPROGRESS);
+
+ LASSERT(remote);
+ LASSERT(obj->oo_inode == inode);
+
+ osd_add_oi_cache(info, dev, id, fid);
+ osd_oii_insert(dev, oic, true);
+ goto found;
+ }
+
+ if (dev->od_noscrub) {
+ if (!remote)
+ GOTO(out, result = -EREMCHG);
+
+ LASSERT(!result);
+ LASSERT(obj->oo_inode == inode);
+
+ osd_add_oi_cache(info, dev, id, fid);
+ goto found;
}
- obj->oo_inode = inode;
- LASSERT(obj->oo_inode->i_sb == osd_sb(dev));
+join:
+ rc1 = osd_scrub_start(dev, flags);
+ LCONSOLE_WARN("%s: trigger OI scrub by RPC for the " DFID" with flags "
+ "0x%x, rc = %d\n", osd_name(dev), PFID(fid), flags, rc1);
+ if (rc1 && rc1 != -EALREADY)
+ GOTO(out, result = -EREMCHG);
+
+ if (IS_ERR_OR_NULL(inode) || result)
+ GOTO(out, result = -EINPROGRESS);
+
+ LASSERT(remote);
+ LASSERT(obj->oo_inode == inode);
+
+ osd_add_oi_cache(info, dev, id, fid);
+ osd_oii_insert(dev, oic, true);
+ goto found;
+check_lma:
result = osd_check_lma(env, obj);
- if (result == 0)
+ if (!result)
goto found;
LASSERTF(id->oii_ino == inode->i_ino &&
goto found;
result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
- if (result == 0) {
+ if (!result) {
/* The OI mapping is still there, the inode is still
* valid. It is just becaues the inode has no LMA EA. */
if (saved_ino == id->oii_ino &&
/* It is the OI scrub updated the OI mapping by race.
* The new OI mapping must be valid. */
- iput(inode);
- inode = NULL;
- obj->oo_inode = NULL;
trusted = true;
updated = true;
goto iget;
if (result == -ENOENT) {
LASSERT(trusted);
+ obj->oo_inode = NULL;
result = 0;
}
}
- iput(inode);
- inode = NULL;
- obj->oo_inode = NULL;
-
if (result != -EREMCHG)
GOTO(out, result);
if (result == -ENOENT) {
LASSERT(trusted);
+ obj->oo_inode = NULL;
GOTO(out, result = 0);
}
- if (result != 0)
+ if (result)
GOTO(out, result);
- if (saved_ino == id->oii_ino && saved_gen == id->oii_gen)
+ if (saved_ino == id->oii_ino && saved_gen == id->oii_gen) {
+ result = -EREMCHG;
goto trigger;
+ }
/* It is the OI scrub updated the OI mapping by race.
* The new OI mapping must be valid. */
osd_check_lmv(info, dev, inode, oic);
result = osd_attach_jinode(inode);
- if (result) {
- obj->oo_inode = NULL;
- iput(inode);
+ if (result)
GOTO(out, result);
- }
if (!ldiskfs_pdo)
GOTO(out, result = 0);
- LASSERT(obj->oo_hl_head == NULL);
+ LASSERT(!obj->oo_hl_head);
obj->oo_hl_head = ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
- if (obj->oo_hl_head == NULL) {
- obj->oo_inode = NULL;
- iput(inode);
- GOTO(out, result = -ENOMEM);
- }
- GOTO(out, result = 0);
+
+ GOTO(out, result = (!obj->oo_hl_head ? -ENOMEM : 0));
out:
- if (result != 0 && trusted)
- fid_zero(&oic->oic_fid);
+ if (result || !obj->oo_inode) {
+ if (!IS_ERR_OR_NULL(inode))
+ iput(inode);
+
+ obj->oo_inode = NULL;
+ if (trusted)
+ fid_zero(&oic->oic_fid);
+ }
LINVRNT(osd_invariant(obj));
return result;
qi->lqi_id.qid_uid = gid;
qsd_op_adjust(env, qsd, &qi->lqi_id, GRPQUOTA);
+
+ qi->lqi_id.qid_uid = i_projid_read(inode);
+ qsd_op_adjust(env, qsd, &qi->lqi_id, PRJQUOTA);
}
}
}
{
int rc;
- CDEBUG(D_CACHE, "syncing OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
+ CDEBUG(D_CACHE, "%s: syncing OSD\n", osd_dt_dev(d)->od_svname);
rc = ldiskfs_force_commit(osd_sb(osd_dt_dev(d)));
- CDEBUG(D_CACHE, "synced OSD %s: rc = %d\n",
- LUSTRE_OSD_LDISKFS_NAME, rc);
+ CDEBUG(D_CACHE, "%s: synced OSD: rc = %d\n", osd_dt_dev(d)->od_svname,
+ rc);
return rc;
}
struct super_block *s = osd_sb(osd_dt_dev(d));
ENTRY;
- CDEBUG(D_HA, "async commit OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
+ CDEBUG(D_HA, "%s: async commit OSD\n", osd_dt_dev(d)->od_svname);
RETURN(s->s_op->sync_fs(s, 0));
}
+/* Our own copy of the set readonly functions if present, or NULL if not. */
+static int (*priv_dev_set_rdonly)(struct block_device *bdev);
+static int (*priv_dev_check_rdonly)(struct block_device *bdev);
+/* static int (*priv_dev_clear_rdonly)(struct block_device *bdev); */
+
/*
* Concurrency: shouldn't matter.
*/
-
static int osd_ro(const struct lu_env *env, struct dt_device *d)
{
struct super_block *sb = osd_sb(osd_dt_dev(d));
struct block_device *dev = sb->s_bdev;
-#ifdef HAVE_DEV_SET_RDONLY
- struct block_device *jdev = LDISKFS_SB(sb)->journal_bdev;
- int rc = 0;
-#else
int rc = -EOPNOTSUPP;
-#endif
ENTRY;
-#ifdef HAVE_DEV_SET_RDONLY
- CERROR("*** setting %s read-only ***\n", osd_dt_dev(d)->od_svname);
+ if (priv_dev_set_rdonly) {
+ struct block_device *jdev = LDISKFS_SB(sb)->journal_bdev;
- if (sb->s_op->freeze_fs) {
- rc = sb->s_op->freeze_fs(sb);
- if (rc)
- goto out;
- }
+ rc = 0;
+ CERROR("*** setting %s read-only ***\n",
+ osd_dt_dev(d)->od_svname);
- if (jdev && (jdev != dev)) {
- CDEBUG(D_IOCTL | D_HA, "set journal dev %lx rdonly\n",
- (long)jdev);
- dev_set_rdonly(jdev);
- }
- CDEBUG(D_IOCTL | D_HA, "set dev %lx rdonly\n", (long)dev);
- dev_set_rdonly(dev);
+ if (sb->s_op->freeze_fs) {
+ rc = sb->s_op->freeze_fs(sb);
+ if (rc)
+ goto out;
+ }
+
+ if (jdev && (jdev != dev)) {
+ CDEBUG(D_IOCTL | D_HA, "set journal dev %lx rdonly\n",
+ (long)jdev);
+ priv_dev_set_rdonly(jdev);
+ }
+ CDEBUG(D_IOCTL | D_HA, "set dev %lx rdonly\n", (long)dev);
+ priv_dev_set_rdonly(dev);
- if (sb->s_op->unfreeze_fs)
- sb->s_op->unfreeze_fs(sb);
+ if (sb->s_op->unfreeze_fs)
+ sb->s_op->unfreeze_fs(sb);
+ }
out:
-#endif
if (rc)
CERROR("%s: %lx CANNOT BE SET READONLY: rc = %d\n",
osd_dt_dev(d)->od_svname, (long)dev, rc);
.dt_commit_async = osd_commit_async,
};
-static void osd_object_read_lock(const struct lu_env *env,
- struct dt_object *dt, unsigned role)
+static void osd_read_lock(const struct lu_env *env, struct dt_object *dt,
+ unsigned role)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_thread_info *oti = osd_oti_get(env);
- LINVRNT(osd_invariant(obj));
+ LINVRNT(osd_invariant(obj));
- LASSERT(obj->oo_owner != env);
+ LASSERT(obj->oo_owner != env);
down_read_nested(&obj->oo_sem, role);
- LASSERT(obj->oo_owner == NULL);
- oti->oti_r_locks++;
+ LASSERT(obj->oo_owner == NULL);
+ oti->oti_r_locks++;
}
-static void osd_object_write_lock(const struct lu_env *env,
- struct dt_object *dt, unsigned role)
+static void osd_write_lock(const struct lu_env *env, struct dt_object *dt,
+ unsigned role)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_thread_info *oti = osd_oti_get(env);
- LINVRNT(osd_invariant(obj));
+ LINVRNT(osd_invariant(obj));
- LASSERT(obj->oo_owner != env);
+ LASSERT(obj->oo_owner != env);
down_write_nested(&obj->oo_sem, role);
- LASSERT(obj->oo_owner == NULL);
- obj->oo_owner = env;
- oti->oti_w_locks++;
+ LASSERT(obj->oo_owner == NULL);
+ obj->oo_owner = env;
+ oti->oti_w_locks++;
}
-static void osd_object_read_unlock(const struct lu_env *env,
- struct dt_object *dt)
+static void osd_read_unlock(const struct lu_env *env, struct dt_object *dt)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_thread_info *oti = osd_oti_get(env);
- LINVRNT(osd_invariant(obj));
+ LINVRNT(osd_invariant(obj));
- LASSERT(oti->oti_r_locks > 0);
- oti->oti_r_locks--;
+ LASSERT(oti->oti_r_locks > 0);
+ oti->oti_r_locks--;
up_read(&obj->oo_sem);
}
-static void osd_object_write_unlock(const struct lu_env *env,
- struct dt_object *dt)
+static void osd_write_unlock(const struct lu_env *env, struct dt_object *dt)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_thread_info *oti = osd_oti_get(env);
- LINVRNT(osd_invariant(obj));
+ LINVRNT(osd_invariant(obj));
- LASSERT(obj->oo_owner == env);
- LASSERT(oti->oti_w_locks > 0);
- oti->oti_w_locks--;
- obj->oo_owner = NULL;
+ LASSERT(obj->oo_owner == env);
+ LASSERT(oti->oti_w_locks > 0);
+ oti->oti_w_locks--;
+ obj->oo_owner = NULL;
up_write(&obj->oo_sem);
}
-static int osd_object_write_locked(const struct lu_env *env,
- struct dt_object *dt)
+static int osd_write_locked(const struct lu_env *env, struct dt_object *dt)
{
- struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_object *obj = osd_dt_obj(dt);
- LINVRNT(osd_invariant(obj));
+ LINVRNT(osd_invariant(obj));
- return obj->oo_owner == env;
+ return obj->oo_owner == env;
}
static struct timespec *osd_inode_time(const struct lu_env *env,
{
attr->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
- LA_FLAGS | LA_NLINK | LA_RDEV | LA_BLKSIZE |
- LA_TYPE;
+ LA_PROJID | LA_FLAGS | LA_NLINK | LA_RDEV |
+ LA_BLKSIZE | LA_TYPE;
attr->la_atime = LTIME_S(inode->i_atime);
attr->la_mtime = LTIME_S(inode->i_mtime);
attr->la_blocks = inode->i_blocks;
attr->la_uid = i_uid_read(inode);
attr->la_gid = i_gid_read(inode);
+ attr->la_projid = i_projid_read(inode);
attr->la_flags = ll_inode_to_ext_flags(inode->i_flags);
attr->la_nlink = inode->i_nlink;
attr->la_rdev = inode->i_rdev;
attr->la_blksize = 1 << inode->i_blkbits;
attr->la_blkbits = inode->i_blkbits;
+ /*
+ * Ext4 did not transfer inherit flags from raw inode
+ * to inode flags, and ext4 internally test raw inode
+ * @i_flags directly. Instead of patching ext4, we do it here.
+ */
+ if (LDISKFS_I(inode)->i_flags & LUSTRE_PROJINHERIT_FL)
+ attr->la_flags |= LUSTRE_PROJINHERIT_FL;
}
static int osd_attr_get(const struct lu_env *env,
return 0;
}
+/**
+ * Helper for osd_declare_attr_set() to reserve quota credits for an
+ * ownership change of one quota \a type (USRQUOTA/GRPQUOTA/PRJQUOTA):
+ * it declares moving one inode and \a bspace blocks of usage from
+ * \a old_id to \a new_id.
+ *
+ * \param[in] bspace	block usage of the object being re-owned
+ * \param[in] old_id	current id owning the inode/blocks
+ * \param[in] new_id	id the inode/blocks are being transferred to
+ * \param[in] enforce	whether quota enforcement applies to this change
+ *
+ * \retval 0 on success; -EDQUOT/-EINPROGRESS from osd_declare_qid() are
+ *	     deliberately ignored at declare time
+ * \retval negative errno on other failure
+ */
+static int osd_declare_attr_qid(const struct lu_env *env,
+				struct osd_object *obj,
+				struct osd_thandle *oh, long long bspace,
+				qid_t old_id, qid_t new_id, bool enforce,
+				unsigned type)
+{
+	int rc;
+	struct osd_thread_info *info = osd_oti_get(env);
+	struct lquota_id_info *qi = &info->oti_qi;
+
+	qi->lqi_type = type;
+	/* inode accounting */
+	qi->lqi_is_blk = false;
+
+	/* one more inode for the new id ... */
+	qi->lqi_id.qid_uid = new_id;
+	qi->lqi_space = 1;
+	/* Reserve credits for the new id */
+	rc = osd_declare_qid(env, oh, qi, NULL, enforce, NULL);
+	if (rc == -EDQUOT || rc == -EINPROGRESS)
+		rc = 0;
+	if (rc)
+		RETURN(rc);
+
+	/* and one less inode for the current id */
+	qi->lqi_id.qid_uid = old_id;
+	qi->lqi_space = -1;
+	rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
+	if (rc == -EDQUOT || rc == -EINPROGRESS)
+		rc = 0;
+	if (rc)
+		RETURN(rc);
+
+	/* block accounting */
+	qi->lqi_is_blk = true;
+
+	/* more blocks for the new id ... */
+	qi->lqi_id.qid_uid = new_id;
+	qi->lqi_space = bspace;
+	/*
+	 * Credits for the new uid has been reserved, re-use "obj"
+	 * to save credit reservation.
+	 */
+	rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
+	if (rc == -EDQUOT || rc == -EINPROGRESS)
+		rc = 0;
+	if (rc)
+		RETURN(rc);
+
+	/* and finally less blocks for the current uid */
+	qi->lqi_id.qid_uid = old_id;
+	qi->lqi_space = -bspace;
+	rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
+	if (rc == -EDQUOT || rc == -EINPROGRESS)
+		rc = 0;
+
+	RETURN(rc);
+}
+
static int osd_declare_attr_set(const struct lu_env *env,
struct dt_object *dt,
const struct lu_attr *attr,
{
struct osd_thandle *oh;
struct osd_object *obj;
- struct osd_thread_info *info = osd_oti_get(env);
- struct lquota_id_info *qi = &info->oti_qi;
qid_t uid;
qid_t gid;
long long bspace;
if (attr->la_valid & LA_UID || attr->la_valid & LA_GID) {
/* USERQUOTA */
uid = i_uid_read(obj->oo_inode);
- qi->lqi_type = USRQUOTA;
enforce = (attr->la_valid & LA_UID) && (attr->la_uid != uid);
- /* inode accounting */
- qi->lqi_is_blk = false;
-
- /* one more inode for the new uid ... */
- qi->lqi_id.qid_uid = attr->la_uid;
- qi->lqi_space = 1;
- /* Reserve credits for the new uid */
- rc = osd_declare_qid(env, oh, qi, NULL, enforce, NULL);
- if (rc == -EDQUOT || rc == -EINPROGRESS)
- rc = 0;
- if (rc)
- RETURN(rc);
-
- /* and one less inode for the current uid */
- qi->lqi_id.qid_uid = uid;
- qi->lqi_space = -1;
- rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
- if (rc == -EDQUOT || rc == -EINPROGRESS)
- rc = 0;
- if (rc)
- RETURN(rc);
-
- /* block accounting */
- qi->lqi_is_blk = true;
-
- /* more blocks for the new uid ... */
- qi->lqi_id.qid_uid = attr->la_uid;
- qi->lqi_space = bspace;
- /*
- * Credits for the new uid has been reserved, re-use "obj"
- * to save credit reservation.
- */
- rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
- if (rc == -EDQUOT || rc == -EINPROGRESS)
- rc = 0;
+ rc = osd_declare_attr_qid(env, obj, oh, bspace, uid,
+ attr->la_uid, enforce, USRQUOTA);
if (rc)
RETURN(rc);
- /* and finally less blocks for the current uid */
- qi->lqi_id.qid_uid = uid;
- qi->lqi_space = -bspace;
- rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
- if (rc == -EDQUOT || rc == -EINPROGRESS)
- rc = 0;
- if (rc)
- RETURN(rc);
-
- /* GROUP QUOTA */
gid = i_gid_read(obj->oo_inode);
- qi->lqi_type = GRPQUOTA;
enforce = (attr->la_valid & LA_GID) && (attr->la_gid != gid);
-
- /* inode accounting */
- qi->lqi_is_blk = false;
-
- /* one more inode for the new gid ... */
- qi->lqi_id.qid_gid = attr->la_gid;
- qi->lqi_space = 1;
- rc = osd_declare_qid(env, oh, qi, NULL, enforce, NULL);
- if (rc == -EDQUOT || rc == -EINPROGRESS)
- rc = 0;
- if (rc)
- RETURN(rc);
-
- /* and one less inode for the current gid */
- qi->lqi_id.qid_gid = gid;
- qi->lqi_space = -1;
- rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
- if (rc == -EDQUOT || rc == -EINPROGRESS)
- rc = 0;
+ rc = osd_declare_attr_qid(env, obj, oh, bspace,
+ i_gid_read(obj->oo_inode),
+ attr->la_gid, enforce, GRPQUOTA);
if (rc)
RETURN(rc);
- /* block accounting */
- qi->lqi_is_blk = true;
-
- /* more blocks for the new gid ... */
- qi->lqi_id.qid_gid = attr->la_gid;
- qi->lqi_space = bspace;
- rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
- if (rc == -EDQUOT || rc == -EINPROGRESS)
- rc = 0;
- if (rc)
- RETURN(rc);
-
- /* and finally less blocks for the current gid */
- qi->lqi_id.qid_gid = gid;
- qi->lqi_space = -bspace;
- rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
- if (rc == -EDQUOT || rc == -EINPROGRESS)
- rc = 0;
+ }
+#ifdef HAVE_PROJECT_QUOTA
+ if (attr->la_valid & LA_PROJID) {
+ __u32 projid = i_projid_read(obj->oo_inode);
+ enforce = (attr->la_valid & LA_PROJID) &&
+ (attr->la_projid != projid);
+ rc = osd_declare_attr_qid(env, obj, oh, bspace,
+ (qid_t)projid, (qid_t)attr->la_projid,
+ enforce, PRJQUOTA);
if (rc)
RETURN(rc);
}
-
+#endif
RETURN(rc);
}
i_uid_write(inode, attr->la_uid);
if (bits & LA_GID)
i_gid_write(inode, attr->la_gid);
+ if (bits & LA_PROJID)
+ i_projid_write(inode, attr->la_projid);
if (bits & LA_NLINK)
set_nlink(inode, attr->la_nlink);
if (bits & LA_RDEV)
/* always keep S_NOCMTIME */
inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
S_NOCMTIME;
+ /*
+ * Ext4 did not transfer inherit flags from
+ * @inode->i_flags to raw inode i_flags when writing
+	 * flags, we do it explicitly here.
+ */
+ if (attr->la_flags & LUSTRE_PROJINHERIT_FL)
+ LDISKFS_I(inode)->i_flags |= LUSTRE_PROJINHERIT_FL;
}
return 0;
}
static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr)
{
+ int rc;
+
if ((attr->la_valid & LA_UID && attr->la_uid != i_uid_read(inode)) ||
(attr->la_valid & LA_GID && attr->la_gid != i_gid_read(inode))) {
struct iattr iattr;
- int rc;
ll_vfs_dq_init(inode);
iattr.ia_valid = 0;
return rc;
}
}
+
+#ifdef HAVE_PROJECT_QUOTA
+ /* Handle project id transfer here properly */
+ if (attr->la_valid & LA_PROJID &&
+ attr->la_projid != i_projid_read(inode)) {
+ rc = __ldiskfs_ioctl_setproject(inode, attr->la_projid);
+ if (rc) {
+ CERROR("%s: quota transfer failed: rc = %d. Is quota "
+ "enforcement enabled on the ldiskfs "
+ "filesystem?\n", inode->i_sb->s_id, rc);
+ return rc;
+ }
+ }
+#endif
return 0;
}
}
/**
- * Helper function for osd_object_create()
+ * Helper function for osd_create()
*
* \retval 0, on success
*/
-static int __osd_object_create(struct osd_thread_info *info,
- struct osd_object *obj, struct lu_attr *attr,
- struct dt_allocation_hint *hint,
- struct dt_object_format *dof,
- struct thandle *th)
+static int __osd_create(struct osd_thread_info *info, struct osd_object *obj,
+ struct lu_attr *attr, struct dt_allocation_hint *hint,
+ struct dt_object_format *dof, struct thandle *th)
{
int result;
__u32 umask;
}
/**
- * Helper function for osd_object_create()
+ * Helper function for osd_create()
*
* \retval 0, on success
*/
return fld_local_lookup(env, ss->ss_server_fld, seq, range);
}
-/*
- * Concurrency: no external locking is necessary.
- */
-static int osd_declare_object_create(const struct lu_env *env,
- struct dt_object *dt,
- struct lu_attr *attr,
- struct dt_allocation_hint *hint,
- struct dt_object_format *dof,
- struct thandle *handle)
+static int osd_declare_create(const struct lu_env *env, struct dt_object *dt,
+ struct lu_attr *attr,
+ struct dt_allocation_hint *hint,
+ struct dt_object_format *dof,
+ struct thandle *handle)
{
- struct osd_thandle *oh;
- int rc;
+ struct osd_thandle *oh;
+ int rc;
ENTRY;
LASSERT(handle != NULL);
if (!attr)
RETURN(0);
- rc = osd_declare_inode_qid(env, attr->la_uid, attr->la_gid, 1, oh,
- osd_dt_obj(dt), false, NULL, false);
+ rc = osd_declare_inode_qid(env, attr->la_uid, attr->la_gid,
+ attr->la_projid, 1, oh, osd_dt_obj(dt),
+ NULL, OSD_QID_INODE);
if (rc != 0)
RETURN(rc);
RETURN(rc);
}
-static int osd_object_create(const struct lu_env *env, struct dt_object *dt,
- struct lu_attr *attr,
- struct dt_allocation_hint *hint,
- struct dt_object_format *dof, struct thandle *th)
-{
- const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
- struct osd_object *obj = osd_dt_obj(dt);
- struct osd_thread_info *info = osd_oti_get(env);
- int result;
- ENTRY;
-
- if (dt_object_exists(dt))
- return -EEXIST;
-
- LINVRNT(osd_invariant(obj));
- LASSERT(!dt_object_remote(dt));
- LASSERT(osd_write_locked(env, obj));
- LASSERT(th != NULL);
-
- if (unlikely(fid_is_acct(fid)))
- /* Quota files can't be created from the kernel any more,
- * 'tune2fs -O quota' will take care of creating them */
- RETURN(-EPERM);
-
- result = __osd_object_create(info, obj, attr, hint, dof, th);
- if (result == 0) {
- result = __osd_oi_insert(env, obj, fid, th);
- if (obj->oo_dt.do_body_ops == &osd_body_ops_new)
- obj->oo_dt.do_body_ops = &osd_body_ops;
- }
- LASSERT(ergo(result == 0,
- dt_object_exists(dt) && !dt_object_remote(dt)));
-
- LASSERT(osd_invariant(obj));
- RETURN(result);
-}
-
/**
* Called to destroy on-disk representation of the object
*
* Concurrency: must be locked
*/
-static int osd_declare_object_destroy(const struct lu_env *env,
- struct dt_object *dt,
- struct thandle *th)
+static int osd_declare_destroy(const struct lu_env *env, struct dt_object *dt,
+ struct thandle *th)
{
struct osd_object *obj = osd_dt_obj(dt);
struct inode *inode = obj->oo_inode;
osd_dto_credits_noquota[DTO_INDEX_DELETE] + 3);
/* one less inode */
rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
- -1, oh, obj, false, NULL, false);
+ i_projid_read(inode), -1, oh, obj, NULL,
+ OSD_QID_INODE);
if (rc)
RETURN(rc);
/* data to be truncated */
rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
- 0, oh, obj, true, NULL, false);
+ i_projid_read(inode), 0, oh, obj, NULL,
+ OSD_QID_BLK);
if (rc)
RETURN(rc);
RETURN(rc);
}
-static int osd_object_destroy(const struct lu_env *env,
- struct dt_object *dt,
- struct thandle *th)
+static int osd_destroy(const struct lu_env *env, struct dt_object *dt,
+ struct thandle *th)
{
- const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
- struct osd_object *obj = osd_dt_obj(dt);
- struct inode *inode = obj->oo_inode;
- struct osd_device *osd = osd_obj2dev(obj);
- struct osd_thandle *oh;
- int result;
- ENTRY;
+ const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct inode *inode = obj->oo_inode;
+ struct osd_device *osd = osd_obj2dev(obj);
+ struct osd_thandle *oh;
+ int result;
+ ENTRY;
- oh = container_of0(th, struct osd_thandle, ot_super);
- LASSERT(oh->ot_handle);
- LASSERT(inode);
- LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
+ oh = container_of0(th, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle);
+ LASSERT(inode);
+ LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
if (unlikely(fid_is_acct(fid)))
RETURN(-EPERM);
ldiskfs_set_inode_state(local, LDISKFS_STATE_LUSTRE_NOSCRUB);
unlock_new_inode(local);
+ /* Agent inode should not have project ID */
+#ifdef HAVE_PROJECT_QUOTA
+ if (LDISKFS_I(pobj->oo_inode)->i_flags & LUSTRE_PROJINHERIT_FL) {
+ rc = __ldiskfs_ioctl_setproject(local, 0);
+ if (rc) {
+ CERROR("%s: quota transfer failed: rc = %d. Is project "
+ "quota enforcement enabled on the ldiskfs "
+ "filesystem?\n", local->i_sb->s_id, rc);
+ RETURN(ERR_PTR(rc));
+ }
+ }
+#endif
/* Set special LMA flag for local agent inode */
rc = osd_ea_fid_set(info, local, fid, 0, LMAI_AGENT);
if (rc != 0) {
}
/**
- * OSD layer object create function for interoperability mode (b11826).
- * This is mostly similar to osd_object_create(). Only difference being, fid is
- * inserted into inode ea here.
+ * OSD layer object create function for OST objects (b=11826).
+ *
+ * The FID is inserted into inode xattr here.
*
* \retval 0, on success
* \retval -ve, on error
*/
-static int osd_object_ea_create(const struct lu_env *env, struct dt_object *dt,
- struct lu_attr *attr,
- struct dt_allocation_hint *hint,
- struct dt_object_format *dof,
- struct thandle *th)
+static int osd_create(const struct lu_env *env, struct dt_object *dt,
+ struct lu_attr *attr, struct dt_allocation_hint *hint,
+ struct dt_object_format *dof, struct thandle *th)
{
const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
struct osd_object *obj = osd_dt_obj(dt);
if (dt_object_exists(dt))
RETURN(-EEXIST);
- LASSERT(osd_invariant(obj));
+ LINVRNT(osd_invariant(obj));
LASSERT(!dt_object_remote(dt));
- LASSERT(osd_write_locked(env, obj));
+ LASSERT(osd_is_write_locked(env, obj));
LASSERT(th != NULL);
if (unlikely(fid_is_acct(fid)))
* 'tune2fs -O quota' will take care of creating them */
RETURN(-EPERM);
- result = __osd_object_create(info, obj, attr, hint, dof, th);
+ result = __osd_create(info, obj, attr, hint, dof, th);
if (result == 0) {
if (fid_is_idif(fid) &&
!osd_dev(dt->do_lu.lo_dev)->od_index_in_idif) {
LASSERT(ergo(result == 0,
dt_object_exists(dt) && !dt_object_remote(dt)));
- LINVRNT(osd_invariant(obj));
- RETURN(result);
+ LINVRNT(osd_invariant(obj));
+ RETURN(result);
}
-static int osd_declare_object_ref_add(const struct lu_env *env,
- struct dt_object *dt,
- struct thandle *handle)
+static int osd_declare_ref_add(const struct lu_env *env, struct dt_object *dt,
+ struct thandle *handle)
{
- struct osd_thandle *oh;
+ struct osd_thandle *oh;
- /* it's possible that object doesn't exist yet */
- LASSERT(handle != NULL);
+ /* it's possible that object doesn't exist yet */
+ LASSERT(handle != NULL);
- oh = container_of0(handle, struct osd_thandle, ot_super);
- LASSERT(oh->ot_handle == NULL);
+ oh = container_of0(handle, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle == NULL);
osd_trans_declare_op(env, oh, OSD_OT_REF_ADD,
osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
/*
* Concurrency: @dt is write locked.
*/
-static int osd_object_ref_add(const struct lu_env *env,
- struct dt_object *dt, struct thandle *th)
+static int osd_ref_add(const struct lu_env *env, struct dt_object *dt,
+ struct thandle *th)
{
struct osd_object *obj = osd_dt_obj(dt);
struct inode *inode = obj->oo_inode;
LINVRNT(osd_invariant(obj));
LASSERT(!dt_object_remote(dt));
- LASSERT(osd_write_locked(env, obj));
+ LASSERT(osd_is_write_locked(env, obj));
LASSERT(th != NULL);
oh = container_of0(th, struct osd_thandle, ot_super);
return rc;
}
-static int osd_declare_object_ref_del(const struct lu_env *env,
- struct dt_object *dt,
- struct thandle *handle)
+static int osd_declare_ref_del(const struct lu_env *env, struct dt_object *dt,
+ struct thandle *handle)
{
struct osd_thandle *oh;
/*
* Concurrency: @dt is write locked.
*/
-static int osd_object_ref_del(const struct lu_env *env, struct dt_object *dt,
- struct thandle *th)
+static int osd_ref_del(const struct lu_env *env, struct dt_object *dt,
+ struct thandle *th)
{
struct osd_object *obj = osd_dt_obj(dt);
struct inode *inode = obj->oo_inode;
LINVRNT(osd_invariant(obj));
LASSERT(!dt_object_remote(dt));
- LASSERT(osd_write_locked(env, obj));
+ LASSERT(osd_is_write_locked(env, obj));
LASSERT(th != NULL);
oh = container_of0(th, struct osd_thandle, ot_super);
}
LINVRNT(osd_invariant(obj));
- if (result == 0 && feat == &dt_quota_glb_features &&
- fid_seq(lu_object_fid(&dt->do_lu)) == FID_SEQ_QUOTA_GLB)
- result = osd_quota_migration(env, dt);
-
return result;
}
}
static const struct dt_object_operations osd_obj_ops = {
- .do_read_lock = osd_object_read_lock,
- .do_write_lock = osd_object_write_lock,
- .do_read_unlock = osd_object_read_unlock,
- .do_write_unlock = osd_object_write_unlock,
- .do_write_locked = osd_object_write_locked,
- .do_attr_get = osd_attr_get,
- .do_declare_attr_set = osd_declare_attr_set,
- .do_attr_set = osd_attr_set,
- .do_ah_init = osd_ah_init,
- .do_declare_create = osd_declare_object_create,
- .do_create = osd_object_create,
- .do_declare_destroy = osd_declare_object_destroy,
- .do_destroy = osd_object_destroy,
- .do_index_try = osd_index_try,
- .do_declare_ref_add = osd_declare_object_ref_add,
- .do_ref_add = osd_object_ref_add,
- .do_declare_ref_del = osd_declare_object_ref_del,
- .do_ref_del = osd_object_ref_del,
- .do_xattr_get = osd_xattr_get,
- .do_declare_xattr_set = osd_declare_xattr_set,
- .do_xattr_set = osd_xattr_set,
- .do_declare_xattr_del = osd_declare_xattr_del,
- .do_xattr_del = osd_xattr_del,
- .do_xattr_list = osd_xattr_list,
- .do_object_sync = osd_object_sync,
- .do_invalidate = osd_invalidate,
-};
-
-/**
- * dt_object_operations for interoperability mode
- * (i.e. to run 2.0 mds on 1.8 disk) (b11826)
- */
-static const struct dt_object_operations osd_obj_ea_ops = {
- .do_read_lock = osd_object_read_lock,
- .do_write_lock = osd_object_write_lock,
- .do_read_unlock = osd_object_read_unlock,
- .do_write_unlock = osd_object_write_unlock,
- .do_write_locked = osd_object_write_locked,
- .do_attr_get = osd_attr_get,
- .do_declare_attr_set = osd_declare_attr_set,
- .do_attr_set = osd_attr_set,
- .do_ah_init = osd_ah_init,
- .do_declare_create = osd_declare_object_create,
- .do_create = osd_object_ea_create,
- .do_declare_destroy = osd_declare_object_destroy,
- .do_destroy = osd_object_destroy,
- .do_index_try = osd_index_try,
- .do_declare_ref_add = osd_declare_object_ref_add,
- .do_ref_add = osd_object_ref_add,
- .do_declare_ref_del = osd_declare_object_ref_del,
- .do_ref_del = osd_object_ref_del,
- .do_xattr_get = osd_xattr_get,
- .do_declare_xattr_set = osd_declare_xattr_set,
- .do_xattr_set = osd_xattr_set,
- .do_declare_xattr_del = osd_declare_xattr_del,
- .do_xattr_del = osd_xattr_del,
- .do_xattr_list = osd_xattr_list,
- .do_object_sync = osd_object_sync,
- .do_invalidate = osd_invalidate,
+ .do_read_lock = osd_read_lock,
+ .do_write_lock = osd_write_lock,
+ .do_read_unlock = osd_read_unlock,
+ .do_write_unlock = osd_write_unlock,
+ .do_write_locked = osd_write_locked,
+ .do_attr_get = osd_attr_get,
+ .do_declare_attr_set = osd_declare_attr_set,
+ .do_attr_set = osd_attr_set,
+ .do_ah_init = osd_ah_init,
+ .do_declare_create = osd_declare_create,
+ .do_create = osd_create,
+ .do_declare_destroy = osd_declare_destroy,
+ .do_destroy = osd_destroy,
+ .do_index_try = osd_index_try,
+ .do_declare_ref_add = osd_declare_ref_add,
+ .do_ref_add = osd_ref_add,
+ .do_declare_ref_del = osd_declare_ref_del,
+ .do_ref_del = osd_ref_del,
+ .do_xattr_get = osd_xattr_get,
+ .do_declare_xattr_set = osd_declare_xattr_set,
+ .do_xattr_set = osd_xattr_set,
+ .do_declare_xattr_del = osd_declare_xattr_del,
+ .do_xattr_del = osd_xattr_del,
+ .do_xattr_list = osd_xattr_list,
+ .do_object_sync = osd_object_sync,
+ .do_invalidate = osd_invalidate,
};
static const struct dt_object_operations osd_obj_otable_it_ops = {
RETURN(-ENOENT);
rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
- 0, oh, osd_dt_obj(dt), true, NULL, false);
+ i_projid_read(inode), 0, oh, osd_dt_obj(dt),
+ NULL, OSD_QID_BLK);
RETURN(rc);
}
if (gen != OSD_OII_NOGEN)
goto trigger;
- iput(inode);
/* The inode may has been reused by others, we do not know,
* leave it to be handled by subsequent osd_fid_lookup(). */
- RETURN(0);
- } else if (rc != 0 || osd_id_eq(id, &oti->oti_id)) {
- RETURN(rc);
- } else {
- insert = false;
+ GOTO(out, rc = 0);
+ } else if (rc || osd_id_eq(id, &oti->oti_id)) {
+ GOTO(out, rc);
}
+ insert = false;
+
trigger:
if (thread_is_running(&scrub->os_thread)) {
if (inode == NULL) {
else
rc = osd_check_lmv(oti, dev, inode, oic);
- iput(inode);
- RETURN(rc);
+ GOTO(out, rc);
}
if (!dev->od_noscrub && ++once == 1) {
rc = osd_scrub_start(dev, SS_AUTO_PARTIAL | SS_CLEAR_DRYRUN |
SS_CLEAR_FAILOUT);
CDEBUG(D_LFSCK | D_CONSOLE | D_WARNING,
- "%.16s: trigger partial OI scrub for RPC inconsistency "
+ "%s: trigger partial OI scrub for RPC inconsistency "
"checking FID "DFID": rc = %d\n",
- LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
- PFID(fid), rc);
+ osd_dev2name(dev), PFID(fid), rc);
if (rc == 0 || rc == -EALREADY)
goto again;
}
- if (inode != NULL)
+ GOTO(out, rc);
+
+out:
+ if (inode)
iput(inode);
RETURN(rc);
* calculate how many blocks will be consumed by this index
* insert */
rc = osd_declare_inode_qid(env, i_uid_read(inode),
- i_gid_read(inode), 0, oh,
- osd_dt_obj(dt), true, NULL, false);
+ i_gid_read(inode),
+ i_projid_read(inode), 0,
+ oh, osd_dt_obj(dt), NULL,
+ OSD_QID_BLK);
}
RETURN(rc);
/* It is too bad, we cannot reinsert the name entry back.
* That means we lose it! */
if (rc != 0)
- CDEBUG(D_LFSCK, "%.16s: fail to reinsert the dirent, "
+ CDEBUG(D_LFSCK, "%s: fail to reinsert the dirent, "
"dir = %lu/%u, name = %.*s, "DFID": rc = %d\n",
- LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
+ osd_ino2name(inode),
dir->i_ino, dir->i_generation, namelen,
dentry->d_name.name, PFID(fid), rc);
}
if (lmd_flags & LMD_FLG_DEV_RDONLY) {
-#ifdef HAVE_DEV_SET_RDONLY
- dev_set_rdonly(osd_sb(o)->s_bdev);
- o->od_dt_dev.dd_rdonly = 1;
- LCONSOLE_WARN("%s: set dev_rdonly on this device\n", name);
-#else
- LCONSOLE_WARN("%s: not support dev_rdonly on this device",
- name);
-
- GOTO(out_mnt, rc = -EOPNOTSUPP);
-#endif
- } else {
-#ifdef HAVE_DEV_SET_RDONLY
- if (dev_check_rdonly(osd_sb(o)->s_bdev)) {
- CERROR("%s: underlying device %s is marked as "
- "read-only. Setup failed\n", name, dev);
+ if (priv_dev_set_rdonly) {
+ priv_dev_set_rdonly(osd_sb(o)->s_bdev);
+ o->od_dt_dev.dd_rdonly = 1;
+ LCONSOLE_WARN("%s: set dev_rdonly on this device\n",
+ name);
+ } else {
+ LCONSOLE_WARN("%s: dev_rdonly is not supported on this device\n",
+ name);
- GOTO(out_mnt, rc = -EROFS);
+ GOTO(out_mnt, rc = -EOPNOTSUPP);
}
-#endif
+ } else if (priv_dev_check_rdonly &&
+ priv_dev_check_rdonly(osd_sb(o)->s_bdev)) {
+ CERROR("%s: underlying device %s is marked as "
+ "read-only. Setup failed\n", name, dev);
+
+ GOTO(out_mnt, rc = -EROFS);
}
if (!LDISKFS_HAS_COMPAT_FEATURE(o->od_mnt->mnt_sb,
LASSERT(&o->od_dt_dev);
rc = class_process_proc_param(PARAM_OSD, lprocfs_osd_obd_vars,
cfg, &o->od_dt_dev);
- if (rc > 0 || rc == -ENOSYS)
+ if (rc > 0 || rc == -ENOSYS) {
rc = class_process_proc_param(PARAM_OST,
lprocfs_osd_obd_vars,
cfg, &o->od_dt_dev);
+ if (rc > 0)
+ rc = 0;
+ }
break;
default:
rc = -ENOSYS;
if (rc)
return rc;
+#ifdef CONFIG_KALLSYMS
+ priv_dev_set_rdonly = (void *)kallsyms_lookup_name("dev_set_rdonly");
+ priv_dev_check_rdonly = (void *)kallsyms_lookup_name("dev_check_rdonly");
+ /* Clearing read-only (dev_clear_rdonly) is unused at this time */
+ /*priv_dev_clear_rdonly = (void *)kallsyms_lookup_name("dev_clear_rdonly");*/
+#endif
+
rc = class_register_type(&osd_obd_device_ops, NULL, true,
lprocfs_osd_module_vars,
LUSTRE_OSD_LDISKFS_NAME, &osd_device_type);