#include <ldiskfs/ldiskfs.h>
#include <ldiskfs/xattr.h>
+#include <ldiskfs/ldiskfs_extents.h>
#undef ENTRY
/*
* struct OBD_{ALLOC,FREE}*()
#include <lustre_linkea.h>
int ldiskfs_pdo = 1;
-CFS_MODULE_PARM(ldiskfs_pdo, "i", int, 0644,
- "ldiskfs with parallel directory operations");
+module_param(ldiskfs_pdo, int, 0644);
+MODULE_PARM_DESC(ldiskfs_pdo, "ldiskfs with parallel directory operations");
int ldiskfs_track_declares_assert;
-CFS_MODULE_PARM(ldiskfs_track_declares_assert, "i", int, 0644,
- "LBUG during tracking of declares");
+module_param(ldiskfs_track_declares_assert, int, 0644);
+MODULE_PARM_DESC(ldiskfs_track_declares_assert, "LBUG during tracking of declares");
/* Slab to allocate dynlocks */
struct kmem_cache *dynlock_cachep;
}
/*
+ * the following set of functions are used to maintain per-thread
+ * cache of FID->ino mapping. this mechanism is needed to resolve
+ * FID to inode at dt_insert() which in turn stores ino in the
+ * directory entries to keep ldiskfs compatible with ext[34].
+ * due to locking-originated restrictions we can't lookup ino
+ * using LU cache (deadlock is possible). lookup using OI is quite
+ * expensive. so instead we maintain this cache and methods like
+ * dt_create() fill it. so in the majority of cases dt_insert() is
+ * able to find needed mapping in lockless manner.
+ */
+/*
+ * Scan the per-thread FID->ino mapping cache for an entry matching
+ * both @fid and the device @osd.
+ *
+ * Lockless by design: the cache lives in osd_thread_info and is
+ * private to the current thread/env.
+ *
+ * \retval pointer to the cached entry on success
+ * \retval NULL if no mapping has been recorded for this FID/device
+ */
+static struct osd_idmap_cache *
+osd_idc_find(const struct lu_env *env, struct osd_device *osd,
+ const struct lu_fid *fid)
+{
+ struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_idmap_cache *idc = oti->oti_ins_cache;
+ int i;
+ /* linear scan: the cache is small (OSD_INS_CACHE_SIZE-based) and
+ * reset at transaction stop, so O(n) search is acceptable here */
+ for (i = 0; i < oti->oti_ins_cache_used; i++) {
+ if (!lu_fid_eq(&idc[i].oic_fid, fid))
+ continue;
+ /* same FID may be cached for a different OSD device */
+ if (idc[i].oic_dev != osd)
+ continue;
+
+ return idc + i;
+ }
+
+ return NULL;
+}
+
+/*
+ * Append a fresh (uninitialized) FID->ino mapping entry to the
+ * per-thread cache, growing the backing array when it is full.
+ * The new entry records @fid/@osd with a zeroed ino/gen and is
+ * expected to be filled in by the caller.
+ *
+ * \retval pointer to the new entry on success
+ * \retval ERR_PTR(-ENOMEM) if the cache could not be grown
+ */
+static struct osd_idmap_cache *
+osd_idc_add(const struct lu_env *env, struct osd_device *osd,
+ const struct lu_fid *fid)
+{
+ struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_idmap_cache *idc;
+ int i;
+
+ if (unlikely(oti->oti_ins_cache_used >= oti->oti_ins_cache_size)) {
+ /* double the capacity; start at OSD_INS_CACHE_SIZE on
+ * first use */
+ i = oti->oti_ins_cache_size * 2;
+ if (i == 0)
+ i = OSD_INS_CACHE_SIZE;
+ OBD_ALLOC(idc, sizeof(*idc) * i);
+ if (idc == NULL)
+ return ERR_PTR(-ENOMEM);
+ if (oti->oti_ins_cache != NULL) {
+ memcpy(idc, oti->oti_ins_cache,
+ oti->oti_ins_cache_used * sizeof(*idc));
+ /* NOTE(review): frees with the "used" count, which
+ * equals the old size here since growth only happens
+ * when used >= size — confirm this invariant holds */
+ OBD_FREE(oti->oti_ins_cache,
+ oti->oti_ins_cache_used * sizeof(*idc));
+ }
+ oti->oti_ins_cache = idc;
+ oti->oti_ins_cache_size = i;
+ }
+
+ /* claim the next free slot and pre-initialize it: ino/gen are
+ * zero until the caller resolves the mapping */
+ idc = oti->oti_ins_cache + oti->oti_ins_cache_used++;
+ idc->oic_fid = *fid;
+ idc->oic_dev = osd;
+ idc->oic_lid.oii_ino = 0;
+ idc->oic_lid.oii_gen = 0;
+ idc->oic_remote = 0;
+
+ return idc;
+}
+
+/*
+ * lookup mapping for the given fid in the cache, initialize a
+ * new one if not found. the initialization checks whether the
+ * object is local or remote. for local objects, OI is used to
+ * learn ino/generation. the function is used when the caller
+ * has no information about the object, e.g. at dt_insert().
+ */
+static struct osd_idmap_cache *
+osd_idc_find_or_init(const struct lu_env *env, struct osd_device *osd,
+ const struct lu_fid *fid)
+{
+ struct osd_idmap_cache *idc;
+ int rc;
+
+ /* fast path: mapping already cached by a prior dt_create()/lookup */
+ idc = osd_idc_find(env, osd, fid);
+ LASSERT(!IS_ERR(idc));
+ if (idc != NULL)
+ return idc;
+
+ /* new mapping is needed */
+ idc = osd_idc_add(env, osd, fid);
+ if (IS_ERR(idc))
+ return idc;
+
+ /* initialize it: first decide whether the FID is served by this
+ * OSD (local) or by another target (remote) */
+ rc = osd_remote_fid(env, osd, fid);
+ if (unlikely(rc < 0))
+ return ERR_PTR(rc);
+
+ if (rc == 0) {
+ /* the object is local, lookup in OI */
+ /* XXX: probably cheaper to lookup in LU first? */
+ rc = osd_oi_lookup(osd_oti_get(env), osd, fid,
+ &idc->oic_lid, 0);
+ if (unlikely(rc < 0)) {
+ /* NOTE(review): the partially-initialized entry
+ * stays in the cache on this error path — later
+ * lookups will see ino == 0; confirm acceptable */
+ CERROR("can't lookup: rc = %d\n", rc);
+ return ERR_PTR(rc);
+ }
+ } else {
+ /* the object is remote */
+ idc->oic_remote = 1;
+ }
+
+ return idc;
+}
+
+/*
+ * lookup mapping for the given FID and fill it from the given object.
+ * the object is local by definition.
+ */
+static int osd_idc_find_and_init(const struct lu_env *env,
+ struct osd_device *osd,
+ struct osd_object *obj)
+{
+ const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
+ struct osd_idmap_cache *idc;
+
+ idc = osd_idc_find(env, osd, fid);
+ LASSERT(!IS_ERR(idc));
+ if (idc != NULL) {
+ /* entry exists; refresh ino/gen from the live inode
+ * if we have one and the cached slot is still empty */
+ if (obj->oo_inode == NULL)
+ return 0;
+ if (idc->oic_lid.oii_ino != obj->oo_inode->i_ino) {
+ /* a differing non-zero ino would mean a stale or
+ * conflicting mapping — only the unset case is legal */
+ LASSERT(idc->oic_lid.oii_ino == 0);
+ idc->oic_lid.oii_ino = obj->oo_inode->i_ino;
+ idc->oic_lid.oii_gen = obj->oo_inode->i_generation;
+ }
+ return 0;
+ }
+
+ /* new mapping is needed */
+ idc = osd_idc_add(env, osd, fid);
+ if (IS_ERR(idc))
+ return PTR_ERR(idc);
+
+ /* no inode yet (object not created) leaves ino/gen zeroed;
+ * they will be filled in once the inode exists */
+ if (obj->oo_inode != NULL) {
+ idc->oic_lid.oii_ino = obj->oo_inode->i_ino;
+ idc->oic_lid.oii_gen = obj->oo_inode->i_generation;
+ }
+ return 0;
+}
+
+/*
* OSD object methods.
*/
return inode;
}
+/*
+ * Add a directory entry via __ldiskfs_add_entry() and warn when the
+ * parent directory is approaching (-ENOBUFS) or has reached (-ENOSPC)
+ * its maximum entry limit. The warning identifies the parent by FID
+ * when one can be resolved from its LMA xattr, falling back to the
+ * root FID, an IGIF, or "unknown".
+ *
+ * \retval 0 on success (including -ENOBUFS, which is masked to 0
+ * after warning — the entry was still added)
+ * \retval negative errno otherwise
+ */
+int osd_ldiskfs_add_entry(struct osd_thread_info *info,
+ handle_t *handle, struct dentry *child,
+ struct inode *inode, struct htree_lock *hlock)
+{
+ int rc, rc2;
+
+ rc = __ldiskfs_add_entry(handle, child, inode, hlock);
+ if (rc == -ENOBUFS || rc == -ENOSPC) {
+ char fidbuf[FID_LEN + 1];
+ struct lustre_mdt_attrs lma;
+ struct lu_fid fid = { };
+ char *errstr;
+ struct dentry *p_dentry = child->d_parent;
+
+ /* try to learn the parent's FID from its LMA xattr */
+ rc2 = osd_get_lma(info, p_dentry->d_inode, p_dentry,
+ &lma);
+ if (rc2 == 0) {
+ fid = lma.lma_self_fid;
+ snprintf(fidbuf, sizeof(fidbuf), DFID, PFID(&fid));
+ } else if (rc2 == -ENODATA) {
+ /* no LMA: root gets the well-known root FID,
+ * otherwise build an IGIF from ino/generation */
+ if (unlikely(p_dentry->d_inode ==
+ inode->i_sb->s_root->d_inode))
+ lu_local_obj_fid(&fid, OSD_FS_ROOT_OID);
+ /* NOTE(review): fid is still zero-filled here, so
+ * fid_seq_is_mdt0() is evaluated on seq 0 —
+ * confirm that is the intended condition */
+ else if (info->oti_dev && !info->oti_dev->od_is_ost &&
+ fid_seq_is_mdt0(fid_seq(&fid)))
+ lu_igif_build(&fid, p_dentry->d_inode->i_ino,
+ p_dentry->d_inode->i_generation);
+ snprintf(fidbuf, sizeof(fidbuf), DFID, PFID(&fid));
+ } else {
+ snprintf(fidbuf, FID_LEN, "%s", "unknown");
+ }
+
+ if (rc == -ENOSPC)
+ errstr = "has reached";
+ else
+ errstr = "is approaching";
+ CWARN("%.16s: directory (inode: %lu FID: %s) %s maximum entry limit\n",
+ LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
+ p_dentry->d_inode->i_ino, fidbuf, errstr);
+ /* ignore such error now */
+ if (rc == -ENOBUFS)
+ rc = 0;
+ }
+ return rc;
+}
+
+
static struct inode *
osd_iget_fid(struct osd_thread_info *info, struct osd_device *dev,
struct osd_inode_id *id, struct lu_fid *fid)
struct osd_scrub *scrub;
struct scrub_file *sf;
int result;
- int saved = 0;
- bool cached = true;
- bool triggered = false;
+ int rc1 = 0;
+ bool cached = true;
+ bool remote = false;
ENTRY;
LINVRNT(osd_invariant(obj));
LASSERT(info);
oic = &info->oti_cache;
- if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOENT))
+ if (OBD_FAIL_CHECK(OBD_FAIL_SRV_ENOENT))
RETURN(-ENOENT);
/* For the object is created as locking anchor, or for the object to
if (result == -EREMCHG) {
trigger:
- if (unlikely(triggered))
- GOTO(out, result = saved);
-
- triggered = true;
- if (thread_is_running(&scrub->os_thread)) {
- result = -EINPROGRESS;
- } else if (!dev->od_noscrub) {
- result = osd_scrub_start(dev, SS_AUTO_FULL |
- SS_CLEAR_DRYRUN | SS_CLEAR_FAILOUT);
- LCONSOLE_WARN("%.16s: trigger OI scrub by RPC "
- "for "DFID", rc = %d [1]\n",
- osd_name(dev), PFID(fid), result);
- if (result == 0 || result == -EALREADY)
- result = -EINPROGRESS;
- else
- result = -EREMCHG;
- }
-
- if (fid_is_on_ost(info, dev, fid, OI_CHECK_FLD))
- GOTO(out, result);
-
/* We still have chance to get the valid inode: for the
* object which is referenced by remote name entry, the
* object on the local MDT will be linked under the dir
* only happened for the RPC from other MDT during the
* OI scrub, or for the client side RPC with FID only,
* such as FID to path, or from old connected client. */
- saved = result;
- result = osd_lookup_in_remote_parent(info, dev,
- fid, id);
- if (result == 0) {
- cached = true;
- goto iget;
+ if (!remote &&
+ !fid_is_on_ost(info, dev, fid, OI_CHECK_FLD)) {
+ rc1 = osd_lookup_in_remote_parent(info, dev,
+ fid, id);
+ if (rc1 == 0) {
+ remote = true;
+ cached = true;
+ goto iget;
+ }
}
- result = saved;
+ if (thread_is_running(&scrub->os_thread)) {
+ if (remote) {
+ osd_add_oi_cache(info, dev, id, fid);
+ osd_oii_insert(dev, oic, true);
+ } else {
+ result = -EINPROGRESS;
+ }
+ } else if (!dev->od_noscrub) {
+ __u32 flags = SS_CLEAR_DRYRUN |
+ SS_CLEAR_FAILOUT;
+
+ flags |= (remote ? SS_AUTO_PARTIAL :
+ SS_AUTO_FULL);
+ rc1 = osd_scrub_start(dev, flags);
+ LCONSOLE_WARN("%.16s: trigger OI scrub by RPC "
+ "for the "DFID" with flags 0x%x,"
+ " rc = %d\n", osd_name(dev),
+ PFID(fid), flags, rc1);
+ if (rc1 == 0 || rc1 == -EALREADY) {
+ result = -EINPROGRESS;
+ if (remote) {
+ osd_add_oi_cache(info, dev, id,
+ fid);
+ osd_oii_insert(dev, oic, true);
+ }
+ } else {
+ result = -EREMCHG;
+ }
+ } else {
+ result = -EREMCHG;
+ }
}
GOTO(out, result);
+ } else if (remote) {
+ result = 0;
+ goto trigger;
}
obj->oo_inode = inode;
result = osd_fid_lookup(env, obj, lu_object_fid(l), conf);
obj->oo_dt.do_body_ops = &osd_body_ops_new;
- if (result == 0 && obj->oo_inode != NULL)
+ if (result == 0 && obj->oo_inode != NULL) {
+ struct osd_thread_info *oti = osd_oti_get(env);
+ struct lustre_mdt_attrs *lma = &oti->oti_mdt_attrs;
+
osd_object_init0(obj);
+ result = osd_get_lma(oti, obj->oo_inode,
+ &oti->oti_obj_dentry, lma);
+ if (result == 0) {
+ /* Convert LMAI flags to lustre LMA flags
+ * and cache it to oo_lma_flags */
+ obj->oo_lma_flags =
+ lma_to_lustre_flags(lma->lma_incompat);
+ } else if (result == -ENODATA) {
+ result = 0;
+ }
+ }
LINVRNT(osd_invariant(obj));
return result;
OBD_FREE_PTR(oh);
}
+#ifndef HAVE_SB_START_WRITE
+# define sb_start_write(sb) do {} while (0)
+# define sb_end_write(sb) do {} while (0)
+#endif
+
static struct thandle *osd_trans_create(const struct lu_env *env,
struct dt_device *d)
{
/* on pending IO in this thread should left from prev. request */
LASSERT(atomic_read(&iobuf->dr_numreqs) == 0);
- th = ERR_PTR(-ENOMEM);
+ sb_start_write(osd_sb(osd_dt_dev(d)));
+
OBD_ALLOC_GFP(oh, sizeof *oh, GFP_NOFS);
if (oh != NULL) {
oh->ot_quota_trans = &oti->oti_quota_trans;
sizeof(oti->oti_declare_ops_cred));
memset(oti->oti_declare_ops_used, 0,
sizeof(oti->oti_declare_ops_used));
+ } else {
+ sb_end_write(osd_sb(osd_dt_dev(d)));
+ th = ERR_PTR(-ENOMEM);
}
RETURN(th);
}
static unsigned long last_printed;
static int last_credits;
- CWARN("%.16s: too many transaction credits (%d > %d)\n",
- LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
- oh->ot_credits,
- osd_journal(dev)->j_max_transaction_buffers);
-
- osd_trans_dump_creds(env, th);
-
+ /* don't make noise on tiny testing systems;
+ * actual credits misuse will be caught anyway */
if (last_credits != oh->ot_credits &&
time_after(jiffies, last_printed +
- msecs_to_jiffies(60 * MSEC_PER_SEC))) {
+ msecs_to_jiffies(60 * MSEC_PER_SEC)) &&
+ osd_transaction_size(dev) > 512) {
+ osd_trans_dump_creds(env, th);
libcfs_debug_dumpstack(NULL);
last_credits = oh->ot_credits;
last_printed = jiffies;
oh = container_of0(th, struct osd_thandle, ot_super);
+ /* reset OI cache for safety */
+ oti->oti_ins_cache_used = 0;
+
remove_agents = oh->ot_remove_agents;
qtrans = oh->ot_quota_trans;
if (unlikely(remove_agents != 0))
osd_process_scheduled_agent_removals(env, osd);
+ sb_end_write(osd_sb(osd));
+
RETURN(rc);
}
static void osd_object_release(const struct lu_env *env,
 struct lu_object *l)
{
+ struct osd_object *o = osd_obj(l);
+ /* Nobody should be releasing a non-destroyed object with nlink=0:
+ * the API allows it, but ldiskfs dislikes it and would then report
+ * the inode as deleted. Crash loudly so the offender is found. */
+ if (unlikely(!o->oo_destroyed && o->oo_inode && o->oo_inode->i_nlink == 0))
+ LBUG();
}
/*
d ? d->id_ops->id_name : "plain");
}
-#define GRANT_FOR_LOCAL_OIDS 32 /* 128kB for last_rcvd, quota files, ... */
-
/*
* Concurrency: shouldn't matter.
*/
int osd_statfs(const struct lu_env *env, struct dt_device *d,
struct obd_statfs *sfs)
{
- struct osd_device *osd = osd_dt_dev(d);
- struct super_block *sb = osd_sb(osd);
- struct kstatfs *ksfs;
- int result = 0;
+ struct osd_device *osd = osd_dt_dev(d);
+ struct super_block *sb = osd_sb(osd);
+ struct kstatfs *ksfs;
+ __u64 reserved;
+ int result = 0;
if (unlikely(osd->od_mnt == NULL))
return -EINPROGRESS;
ksfs = &osd_oti_get(env)->oti_ksfs;
}
- spin_lock(&osd->od_osfs_lock);
result = sb->s_op->statfs(sb->s_root, ksfs);
- if (likely(result == 0)) { /* N.B. statfs can't really fail */
- statfs_pack(sfs, ksfs);
- if (unlikely(sb->s_flags & MS_RDONLY))
- sfs->os_state = OS_STATE_READONLY;
- if (LDISKFS_HAS_INCOMPAT_FEATURE(sb,
- LDISKFS_FEATURE_INCOMPAT_EXTENTS))
- sfs->os_maxbytes = sb->s_maxbytes;
- else
- sfs->os_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
- }
- spin_unlock(&osd->od_osfs_lock);
+ if (result)
+ goto out;
+
+ statfs_pack(sfs, ksfs);
+ if (unlikely(sb->s_flags & MS_RDONLY))
+ sfs->os_state = OS_STATE_READONLY;
+ if (LDISKFS_HAS_INCOMPAT_FEATURE(sb,
+ LDISKFS_FEATURE_INCOMPAT_EXTENTS))
+ sfs->os_maxbytes = sb->s_maxbytes;
+ else
+ sfs->os_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
- if (unlikely(env == NULL))
- OBD_FREE_PTR(ksfs);
+ /*
+ * Reserve some space so to avoid fragmenting the filesystem too much.
+ * Fragmentation not only impacts performance, but can also increase
+ * metadata overhead significantly, causing grant calculation to be
+ * wrong.
+ *
+ * Reserve 0.78% of total space, at least 8MB for small filesystems.
+ */
+ CLASSERT(OSD_STATFS_RESERVED > LDISKFS_MAX_BLOCK_SIZE);
+ reserved = OSD_STATFS_RESERVED >> sb->s_blocksize_bits;
+ if (likely(sfs->os_blocks >= reserved << OSD_STATFS_RESERVED_SHIFT))
+ reserved = sfs->os_blocks >> OSD_STATFS_RESERVED_SHIFT;
- /* Reserve a small amount of space for local objects like last_rcvd,
- * llog, quota files, ... */
- if (sfs->os_bavail <= GRANT_FOR_LOCAL_OIDS) {
- sfs->os_bavail = 0;
- } else {
- sfs->os_bavail -= GRANT_FOR_LOCAL_OIDS;
- /** Take out metadata overhead for indirect blocks */
- sfs->os_bavail -= sfs->os_bavail >> (sb->s_blocksize_bits - 3);
- }
+ sfs->os_blocks -= reserved;
+ sfs->os_bfree -= min(reserved, sfs->os_bfree);
+ sfs->os_bavail -= min(reserved, sfs->os_bavail);
- return result;
+out:
+ if (unlikely(env == NULL))
+ OBD_FREE_PTR(ksfs);
+ return result;
}
/**
*/
param->ddp_max_name_len = LDISKFS_NAME_LEN;
param->ddp_max_nlink = LDISKFS_LINK_MAX;
- param->ddp_block_shift = sb->s_blocksize_bits;
+ param->ddp_symlink_max = sb->s_blocksize;
param->ddp_mount_type = LDD_MT_LDISKFS;
if (LDISKFS_HAS_INCOMPAT_FEATURE(sb, LDISKFS_FEATURE_INCOMPAT_EXTENTS))
param->ddp_maxbytes = sb->s_maxbytes;
else
param->ddp_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
- /* Overhead estimate should be fairly accurate, so we really take a tiny
- * error margin which also avoids fragmenting the filesystem too much */
- param->ddp_grant_reserved = 2; /* end up to be 1.9% after conversion */
/* inode are statically allocated, so per-inode space consumption
* is the space consumed by the directory entry */
param->ddp_inodespace = PER_OBJ_USAGE;
- /* per-fragment overhead to be used by the client code */
- param->ddp_grant_frag = 6 * LDISKFS_BLOCK_SIZE(sb);
- param->ddp_mntopts = 0;
+ /* EXT_INIT_MAX_LEN is the theoretical maximum extent size (32k blocks
+ * = 128MB) which is unlikely to be hit in real life. Report a smaller
+ * maximum length to not under count the actual number of extents
+ * needed for writing a file. */
+ param->ddp_max_extent_blks = EXT_INIT_MAX_LEN >> 2;
+ /* worst-case extent insertion metadata overhead */
+ param->ddp_extent_tax = 6 * LDISKFS_BLOCK_SIZE(sb);
+ param->ddp_mntopts = 0;
if (test_opt(sb, XATTR_USER))
param->ddp_mntopts |= MNTOPT_USERXATTR;
if (test_opt(sb, POSIX_ACL))
#ifdef HAVE_DEV_SET_RDONLY
CERROR("*** setting %s read-only ***\n", osd_dt_dev(d)->od_svname);
+ if (sb->s_op->freeze_fs) {
+ rc = sb->s_op->freeze_fs(sb);
+ if (rc)
+ goto out;
+ }
+
if (jdev && (jdev != dev)) {
CDEBUG(D_IOCTL | D_HA, "set journal dev %lx rdonly\n",
(long)jdev);
}
CDEBUG(D_IOCTL | D_HA, "set dev %lx rdonly\n", (long)dev);
dev_set_rdonly(dev);
-#else
- CERROR("%s: %lx CANNOT BE SET READONLY: rc = %d\n",
- osd_dt_dev(d)->od_svname, (long)dev, rc);
+
+ if (sb->s_op->unfreeze_fs)
+ sb->s_op->unfreeze_fs(sb);
+
+out:
#endif
+ if (rc)
+ CERROR("%s: %lx CANNOT BE SET READONLY: rc = %d\n",
+ osd_dt_dev(d)->od_svname, (long)dev, rc);
+
RETURN(rc);
}
return t;
}
-
static void osd_inode_getattr(const struct lu_env *env,
struct inode *inode, struct lu_attr *attr)
{
attr->la_blocks = inode->i_blocks;
attr->la_uid = i_uid_read(inode);
attr->la_gid = i_gid_read(inode);
- attr->la_flags = LDISKFS_I(inode)->i_flags;
+ attr->la_flags = ll_inode_to_ext_flags(inode->i_flags);
attr->la_nlink = inode->i_nlink;
attr->la_rdev = inode->i_rdev;
attr->la_blksize = 1 << inode->i_blkbits;
{
struct osd_object *obj = osd_dt_obj(dt);
- if (!dt_object_exists(dt))
+ if (unlikely(!dt_object_exists(dt)))
+ return -ENOENT;
+ if (unlikely(obj->oo_destroyed))
return -ENOENT;
LASSERT(!dt_object_remote(dt));
spin_lock(&obj->oo_guard);
osd_inode_getattr(env, obj->oo_inode, attr);
+ if (obj->oo_lma_flags & LUSTRE_ORPHAN_FL)
+ attr->la_flags |= LUSTRE_ORPHAN_FL;
spin_unlock(&obj->oo_guard);
+
return 0;
}
osd_trans_declare_op(env, oh, OSD_OT_ATTR_SET,
osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
+ osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET,
+ osd_dto_credits_noquota[DTO_XATTR_SET]);
+
if (attr == NULL || obj->oo_inode == NULL)
RETURN(rc);
spin_lock(&obj->oo_guard);
rc = osd_inode_setattr(env, inode, attr);
spin_unlock(&obj->oo_guard);
+ if (rc != 0)
+ GOTO(out, rc);
- if (!rc)
- ll_dirty_inode(inode, I_DIRTY_DATASYNC);
+ ll_dirty_inode(inode, I_DIRTY_DATASYNC);
+
+ if (!(attr->la_valid & LA_FLAGS))
+ GOTO(out, rc);
+
+ /* Let's check if there are extra flags need to be set into LMA */
+ if (attr->la_flags & LUSTRE_LMA_FL_MASKS) {
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
+
+ rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
+ if (rc != 0)
+ GOTO(out, rc);
+ lma->lma_incompat |=
+ lustre_to_lma_flags(attr->la_flags);
+ lustre_lma_swab(lma);
+ rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA,
+ lma, sizeof(*lma), XATTR_REPLACE);
+ if (rc != 0) {
+ struct osd_device *osd = osd_obj2dev(obj);
+
+ CWARN("%s: set "DFID" lma flags %u failed: rc = %d\n",
+ osd_name(osd), PFID(lu_object_fid(&dt->do_lu)),
+ lma->lma_incompat, rc);
+ } else {
+ obj->oo_lma_flags =
+ attr->la_flags & LUSTRE_LMA_FL_MASKS;
+ }
+ osd_trans_exec_check(env, handle, OSD_OT_XATTR_SET);
+ }
+out:
osd_trans_exec_check(env, handle, OSD_OT_ATTR_SET);
return rc;
ah->dah_parent = parent;
ah->dah_mode = child_mode;
+
+ if (parent != NULL && !dt_object_remote(parent)) {
+ /* will help to find FID->ino at dt_insert("..") */
+ struct osd_object *pobj = osd_dt_obj(parent);
+ osd_idc_find_and_init(env, osd_obj2dev(pobj), pobj);
+ }
}
static void osd_attr_init(struct osd_thread_info *info, struct osd_object *obj,
if (result)
return;
- if (attr->la_valid != 0) {
- result = osd_inode_setattr(info->oti_env, inode, attr);
- /*
- * The osd_inode_setattr() should always succeed here. The
- * only error that could be returned is EDQUOT when we are
- * trying to change the UID or GID of the inode. However, this
- * should not happen since quota enforcement is no longer
- * enabled on ldiskfs (lquota takes care of it).
- */
+ if (attr->la_valid != 0) {
+ result = osd_inode_setattr(info->oti_env, inode, attr);
+ /*
+ * The osd_inode_setattr() should always succeed here. The
+ * only error that could be returned is EDQUOT when we are
+ * trying to change the UID or GID of the inode. However, this
+ * should not happen since quota enforcement is no longer
+ * enabled on ldiskfs (lquota takes care of it).
+ */
LASSERTF(result == 0, "%d\n", result);
ll_dirty_inode(inode, I_DIRTY_DATASYNC);
- }
+ }
- attr->la_valid = valid;
+ attr->la_valid = valid;
}
/**
osd_trans_exec_op(env, th, OSD_OT_INSERT);
osd_id_gen(id, obj->oo_inode->i_ino, obj->oo_inode->i_generation);
- rc = osd_oi_insert(info, osd, fid, id, oh->ot_handle, OI_CHECK_FLD);
+ rc = osd_oi_insert(info, osd, fid, id, oh->ot_handle,
+ OI_CHECK_FLD, NULL);
osd_trans_exec_check(env, th, OSD_OT_INSERT);
return rc;
if (rc != 0)
RETURN(rc);
+ /* will help to find FID->ino mapping at dt_insert() */
+ rc = osd_idc_find_and_init(env, osd_obj2dev(osd_dt_obj(dt)),
+ osd_dt_obj(dt));
+
RETURN(rc);
}
/* data to be truncated */
rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
0, oh, obj, true, NULL, false);
+ if (rc)
+ RETURN(rc);
+
+ /* will help to find FID->ino when this object is being
+ * added to PENDING/ */
+ rc = osd_idc_find_and_init(env, osd_obj2dev(obj), obj);
+
RETURN(rc);
}
const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
struct osd_object *obj = osd_dt_obj(dt);
struct osd_thread_info *info = osd_oti_get(env);
- int result;
+ int result, on_ost = 0;
ENTRY;
fid_to_ostid(fid, oi);
ostid_to_fid(tfid, oi, 0);
+ on_ost = 1;
result = osd_ea_fid_set(info, obj->oo_inode, tfid,
LMAC_FID_ON_OST, 0);
} else {
+ on_ost = fid_is_on_ost(info, osd_obj2dev(obj),
+ fid, OI_CHECK_FLD);
result = osd_ea_fid_set(info, obj->oo_inode, fid,
- fid_is_on_ost(info, osd_obj2dev(obj),
- fid, OI_CHECK_FLD) ?
- LMAC_FID_ON_OST : 0, 0);
+ on_ost ? LMAC_FID_ON_OST : 0,
+ 0);
}
if (obj->oo_dt.do_body_ops == &osd_body_ops_new)
obj->oo_dt.do_body_ops = &osd_body_ops;
if (result == 0)
result = __osd_oi_insert(env, obj, fid, th);
+ /* a small optimization - dt_insert() isn't usually applied
+ * to OST objects, so we don't need to cache OI mapping for
+ * OST objects */
+ if (result == 0 && on_ost == 0) {
+ struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
+ result = osd_idc_find_and_init(env, osd, obj);
+ LASSERT(result == 0);
+ }
+
LASSERT(ergo(result == 0,
dt_object_exists(dt) && !dt_object_remote(dt)));
LINVRNT(osd_invariant(obj));
struct osd_thandle *oh;
int rc = 0;
- if (!dt_object_exists(dt))
+ if (!dt_object_exists(dt) || obj->oo_destroyed)
return -ENOENT;
LINVRNT(osd_invariant(obj));
result = 0;
} else if (feat == &dt_directory_features) {
dt->do_index_ops = &osd_index_ea_ops;
- if (obj->oo_inode != NULL && S_ISDIR(obj->oo_inode->i_mode))
+ if (obj->oo_inode == NULL || S_ISDIR(obj->oo_inode->i_mode))
result = 0;
else
result = -ENOTDIR;
child = osd_child_dentry_get(info->oti_env, pobj, name, strlen(name));
child->d_fsdata = (void *)ldp;
ll_vfs_dq_init(pobj->oo_inode);
- rc = osd_ldiskfs_add_entry(oth->ot_handle, child, cinode, hlock);
+ rc = osd_ldiskfs_add_entry(info, oth->ot_handle, child,
+ cinode, hlock);
if (rc == 0 && OBD_FAIL_CHECK(OBD_FAIL_LFSCK_BAD_TYPE)) {
struct ldiskfs_dir_entry_2 *de;
struct buffer_head *bh;
if (dir->oo_compat_dot_created) {
result = -EEXIST;
} else {
- LASSERT(inode == parent_dir);
+ LASSERT(inode->i_ino == parent_dir->i_ino);
dir->oo_compat_dot_created = 1;
result = 0;
}
}
/**
- * Find the osd object for given fid.
- *
- * \param fid need to find the osd object having this fid
- *
- * \retval osd_object on success
- * \retval -ve on error
- */
-static struct osd_object *osd_object_find(const struct lu_env *env,
- struct dt_object *dt,
- const struct lu_fid *fid)
-{
- struct lu_device *ludev = dt->do_lu.lo_dev;
- struct osd_object *child = NULL;
- struct lu_object *luch;
- struct lu_object *lo;
-
- /*
- * at this point topdev might not exist yet
- * (i.e. MGS is preparing profiles). so we can
- * not rely on topdev and instead lookup with
- * our device passed as topdev. this can't work
- * if the object isn't cached yet (as osd doesn't
- * allocate lu_header). IOW, the object must be
- * in the cache, otherwise lu_object_alloc() crashes
- * -bzzz
- */
- luch = lu_object_find_at(env, ludev->ld_site->ls_top_dev == NULL ?
- ludev : ludev->ld_site->ls_top_dev,
- fid, NULL);
- if (!IS_ERR(luch)) {
- if (lu_object_exists(luch)) {
- lo = lu_object_locate(luch->lo_header, ludev->ld_type);
- if (lo != NULL)
- child = osd_obj(lo);
- else
- LU_OBJECT_DEBUG(D_ERROR, env, luch,
- "lu_object can't be located"
- DFID"\n", PFID(fid));
-
- if (child == NULL) {
- lu_object_put(env, luch);
- CERROR("Unable to get osd_object\n");
- child = ERR_PTR(-ENOENT);
- }
- } else {
- LU_OBJECT_DEBUG(D_ERROR, env, luch,
- "lu_object does not exists "DFID"\n",
- PFID(fid));
- lu_object_put(env, luch);
- child = ERR_PTR(-ENOENT);
- }
- } else {
- child = ERR_CAST(luch);
- }
-
- return child;
-}
-
-/**
* Put the osd object once done with it.
*
* \param obj osd object that needs to be put
{
struct osd_thandle *oh;
struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
- struct lu_fid *fid = (struct lu_fid *)rec;
+ struct dt_insert_rec *rec1 = (struct dt_insert_rec *)rec;
+ const struct lu_fid *fid = rec1->rec_fid;
int credits, rc = 0;
+ struct osd_idmap_cache *idc;
ENTRY;
LASSERT(!dt_object_remote(dt));
LASSERT(handle != NULL);
+ LASSERT(fid != NULL);
+ LASSERT(rec1->rec_type != 0);
oh = container_of0(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
credits = osd_dto_credits_noquota[DTO_INDEX_INSERT];
- if (fid != NULL) {
- rc = osd_remote_fid(env, osd, fid);
- if (unlikely(rc < 0))
- RETURN(rc);
- if (rc > 0) {
- /* a reference to remote inode is represented by an
- * agent inode which we have to create */
- credits += osd_dto_credits_noquota[DTO_OBJECT_CREATE];
- credits += osd_dto_credits_noquota[DTO_INDEX_INSERT];
- }
- rc = 0;
+
+ /* we can't call iget() while a transactions is running
+ * (this can lead to a deadlock), but we need to know
+ * inum and object type. so we find this information at
+ * declaration and cache in per-thread info */
+ idc = osd_idc_find_or_init(env, osd, fid);
+ if (IS_ERR(idc))
+ RETURN(PTR_ERR(idc));
+ if (idc->oic_remote) {
+ /* a reference to remote inode is represented by an
+ * agent inode which we have to create */
+ credits += osd_dto_credits_noquota[DTO_OBJECT_CREATE];
+ credits += osd_dto_credits_noquota[DTO_INDEX_INSERT];
}
osd_trans_declare_op(env, oh, OSD_OT_INSERT, credits);
const struct lu_fid *fid = rec1->rec_fid;
const char *name = (const char *)key;
struct osd_thread_info *oti = osd_oti_get(env);
- struct osd_inode_id *id = &oti->oti_id;
struct inode *child_inode = NULL;
- struct osd_object *child = NULL;
+ struct osd_idmap_cache *idc;
int rc;
ENTRY;
LASSERTF(fid_is_sane(fid), "fid"DFID" is insane!\n", PFID(fid));
- rc = osd_remote_fid(env, osd, fid);
- if (rc < 0) {
- CERROR("%s: Can not find object "DFID" rc %d\n",
- osd_name(osd), PFID(fid), rc);
- RETURN(rc);
+ idc = osd_idc_find(env, osd, fid);
+ if (unlikely(idc == NULL)) {
+ /* this dt_insert() wasn't declared properly, so
+ * FID is missing in OI cache. we better do not
+ * lookup FID in FLDB/OI and don't risk to deadlock,
+ * but in some special cases (lfsck testing, etc)
+ * it's much simpler than fixing a caller */
+ CERROR("%s: "DFID" wasn't declared for insert\n",
+ osd_name(osd), PFID(fid));
+ dump_stack();
+ idc = osd_idc_find_or_init(env, osd, fid);
+ if (IS_ERR(idc))
+ RETURN(PTR_ERR(idc));
}
- if (rc == 1) {
+ if (idc->oic_remote) {
/* Insert remote entry */
if (strcmp(name, dotdot) == 0 && strlen(name) == 2) {
struct osd_mdobj_map *omm = osd->od_mdt_map;
}
} else {
/* Insert local entry */
- child = osd_object_find(env, dt, fid);
- if (IS_ERR(child)) {
- CERROR("%s: Can not find object "DFID"%u:%u: rc = %d\n",
- osd_name(osd), PFID(fid),
- id->oii_ino, id->oii_gen,
- (int)PTR_ERR(child));
- RETURN(PTR_ERR(child));
+ if (unlikely(idc->oic_lid.oii_ino == 0)) {
+ /* for a reason OI cache wasn't filled properly */
+ CERROR("%s: OIC for "DFID" isn't filled\n",
+ osd_name(osd), PFID(fid));
+ RETURN(-EINVAL);
+ }
+ child_inode = oti->oti_inode;
+ if (unlikely(child_inode == NULL)) {
+ struct ldiskfs_inode_info *lii;
+ OBD_ALLOC_PTR(lii);
+ if (lii == NULL)
+ RETURN(-ENOMEM);
+ child_inode = oti->oti_inode = &lii->vfs_inode;
}
- child_inode = igrab(child->oo_inode);
+ child_inode->i_sb = osd_sb(osd);
+ child_inode->i_ino = idc->oic_lid.oii_ino;
+ child_inode->i_mode = rec1->rec_type & S_IFMT;
}
rc = osd_ea_add_rec(env, obj, child_inode, name, fid, th);
CDEBUG(D_INODE, "parent %lu insert %s:%lu rc = %d\n",
obj->oo_inode->i_ino, name, child_inode->i_ino, rc);
- iput(child_inode);
- if (child != NULL)
- osd_object_put(env, child);
+ if (child_inode && child_inode != oti->oti_inode)
+ iput(child_inode);
LASSERT(osd_invariant(obj));
osd_trans_exec_check(env, th, OSD_OT_INSERT);
RETURN(rc);
* \retval 0 on success
* \retval 1 on buffer full
*/
+#ifdef HAVE_FILLDIR_USE_CTX
+static int osd_ldiskfs_filldir(struct dir_context *buf,
+ const char *name, int namelen,
+#else
static int osd_ldiskfs_filldir(void *buf, const char *name, int namelen,
+#endif
loff_t offset, __u64 ino,
unsigned d_type)
{
- struct osd_it_ea *it = ((struct osd_filldir_cbs *)buf)->it;
+ struct osd_it_ea *it =
+ ((struct osd_filldir_cbs *)buf)->it;
struct osd_object *obj = it->oie_obj;
struct osd_it_ea_dirent *ent = it->oie_dirent;
struct lu_fid *fid = &ent->oied_fid;
struct ldiskfs_dentry_param *ldp;
int namelen = dentry->d_name.len;
int rc;
+ struct osd_thread_info *info = osd_oti_get(env);
ENTRY;
if (!LDISKFS_HAS_INCOMPAT_FEATURE(inode->i_sb,
osd_get_ldiskfs_dirent_param(ldp, fid);
dentry->d_fsdata = (void *)ldp;
ll_vfs_dq_init(dir);
- rc = osd_ldiskfs_add_entry(jh, dentry, inode, hlock);
+ rc = osd_ldiskfs_add_entry(info, jh, dentry, inode, hlock);
/* It is too bad, we cannot reinsert the name entry back.
* That means we lose it! */
if (rc != 0)
{
struct osd_thread_info *info = data;
struct ldiskfs_inode_info *lli = LDISKFS_I(info->oti_inode);
+ struct osd_idmap_cache *idc = info->oti_ins_cache;
if (info->oti_inode != NULL)
OBD_FREE_PTR(lli);
lu_buf_free(&info->oti_iobuf.dr_pg_buf);
lu_buf_free(&info->oti_iobuf.dr_bl_buf);
lu_buf_free(&info->oti_big_buf);
+ if (idc != NULL) {
+ LASSERT(info->oti_ins_cache_size > 0);
+ OBD_FREE(idc, sizeof(*idc) * info->oti_ins_cache_size);
+ info->oti_ins_cache = NULL;
+ info->oti_ins_cache_size = 0;
+ }
OBD_FREE_PTR(info);
}
struct osd_thread_info *info = osd_oti_get(env);
struct lu_fid *fid = &info->oti_fid;
struct inode *inode;
- int rc = 0, force_over_128tb = 0;
+ int rc = 0, force_over_256tb = 0;
ENTRY;
if (o->od_mnt != NULL)
RETURN(-EINVAL);
}
#endif
- if (opts != NULL && strstr(opts, "force_over_128tb") != NULL)
- force_over_128tb = 1;
+ if (opts != NULL && strstr(opts, "force_over_128tb") != NULL) {
+ CWARN("force_over_128tb option is depricated."
+ "Filesystems less then 256TB can be created without any"
+ "force options. Use force_over_256tb option for"
+ "filesystems greather then 256TB.\n");
+ }
- __page = alloc_page(GFP_IOFS);
+ if (opts != NULL && strstr(opts, "force_over_256tb") != NULL)
+ force_over_256tb = 1;
+
+ __page = alloc_page(GFP_KERNEL);
if (__page == NULL)
GOTO(out, rc = -ENOMEM);
page = (unsigned long)page_address(__page);
"noextents",
/* strip out option we processed in osd */
"bigendian_extents",
- "force_over_128tb",
+#if LUSTRE_VERSION_CODE >= OBD_OCD_VERSION(3,0,53,0)
+#warning "remove force_over_128 option"
+#else
+ "force_over_128tb (deprecated)",
+#endif
+ "force_over_256tb",
NULL
};
strcat(options, opts);
GOTO(out, rc);
}
- if (ldiskfs_blocks_count(LDISKFS_SB(osd_sb(o))->s_es) > (8ULL << 32) &&
- force_over_128tb == 0) {
+ if (ldiskfs_blocks_count(LDISKFS_SB(osd_sb(o))->s_es) > (64ULL << 30) &&
+ force_over_256tb == 0) {
CERROR("%s: device %s LDISKFS does not support filesystems "
- "greater than 128TB and can cause data corruption. "
- "Use \"force_over_128tb\" mount option to override.\n",
+ "greater than 256TB and can cause data corruption. "
+ "Use \"force_over_256tb\" mount option to override.\n",
name, dev);
GOTO(out, rc = -EINVAL);
}