* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2014, Intel Corporation.
+ * Copyright (c) 2011, 2015, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include <ldiskfs/ldiskfs.h>
#include <ldiskfs/xattr.h>
+#include <ldiskfs/ldiskfs_extents.h>
#undef ENTRY
/*
* struct OBD_{ALLOC,FREE}*()
#include <lustre_linkea.h>
int ldiskfs_pdo = 1;
-CFS_MODULE_PARM(ldiskfs_pdo, "i", int, 0644,
- "ldiskfs with parallel directory operations");
+module_param(ldiskfs_pdo, int, 0644);
+MODULE_PARM_DESC(ldiskfs_pdo, "ldiskfs with parallel directory operations");
int ldiskfs_track_declares_assert;
-CFS_MODULE_PARM(ldiskfs_track_declares_assert, "i", int, 0644,
- "LBUG during tracking of declares");
+module_param(ldiskfs_track_declares_assert, int, 0644);
+MODULE_PARM_DESC(ldiskfs_track_declares_assert, "LBUG during tracking of declares");
/* Slab to allocate dynlocks */
struct kmem_cache *dynlock_cachep;
}
/*
+ * The following set of functions is used to maintain a per-thread
+ * cache of FID->ino mappings. This mechanism is needed to resolve
+ * FID to inode at dt_insert() which in turn stores ino in the
+ * directory entries to keep ldiskfs compatible with ext[34].
+ * Due to locking-originated restrictions we can't lookup ino
+ * using the LU cache (deadlock is possible). Lookup using OI is
+ * quite expensive. So instead we maintain this cache and methods
+ * like dt_create() fill it. So in the majority of cases dt_insert()
+ * is able to find the needed mapping in a lockless manner.
+ */
+
+/*
+ * Look up the cached FID->ino mapping for \a fid on device \a osd
+ * in the calling thread's cache.
+ *
+ * Lockless by design: the cache is private to the current thread.
+ *
+ * \retval pointer to the cached entry, or NULL if not cached
+ */
+static struct osd_idmap_cache *
+osd_idc_find(const struct lu_env *env, struct osd_device *osd,
+	     const struct lu_fid *fid)
+{
+	struct osd_thread_info *oti = osd_oti_get(env);
+	struct osd_idmap_cache *idc = oti->oti_ins_cache;
+	int i;
+	for (i = 0; i < oti->oti_ins_cache_used; i++) {
+		/* match on both FID and device: one thread may serve
+		 * several OSD devices */
+		if (!lu_fid_eq(&idc[i].oic_fid, fid))
+			continue;
+		if (idc[i].oic_dev != osd)
+			continue;
+
+		return idc + i;
+	}
+
+	return NULL;
+}
+
+/*
+ * Append a fresh (uninitialized) FID->ino cache entry for \a fid to
+ * the calling thread's cache, growing the backing array on demand.
+ *
+ * The returned entry has ino/gen zeroed and oic_remote cleared; the
+ * caller is expected to fill it in.
+ *
+ * \retval pointer to the new entry, or ERR_PTR(-ENOMEM)
+ */
+static struct osd_idmap_cache *
+osd_idc_add(const struct lu_env *env, struct osd_device *osd,
+	    const struct lu_fid *fid)
+{
+	struct osd_thread_info *oti = osd_oti_get(env);
+	struct osd_idmap_cache *idc;
+	int i;
+
+	if (unlikely(oti->oti_ins_cache_used >= oti->oti_ins_cache_size)) {
+		/* grow geometrically, starting from OSD_INS_CACHE_SIZE */
+		i = oti->oti_ins_cache_size * 2;
+		if (i == 0)
+			i = OSD_INS_CACHE_SIZE;
+		OBD_ALLOC(idc, sizeof(*idc) * i);
+		if (idc == NULL)
+			return ERR_PTR(-ENOMEM);
+		if (oti->oti_ins_cache != NULL) {
+			memcpy(idc, oti->oti_ins_cache,
+			       oti->oti_ins_cache_used * sizeof(*idc));
+			/* free with the size the array was allocated with
+			 * (oti_ins_cache_size entries), not the used count:
+			 * they only happen to coincide at grow time */
+			OBD_FREE(oti->oti_ins_cache,
+				 oti->oti_ins_cache_size * sizeof(*idc));
+		}
+		oti->oti_ins_cache = idc;
+		oti->oti_ins_cache_size = i;
+	}
+
+	idc = oti->oti_ins_cache + oti->oti_ins_cache_used++;
+	idc->oic_fid = *fid;
+	idc->oic_dev = osd;
+	idc->oic_lid.oii_ino = 0;
+	idc->oic_lid.oii_gen = 0;
+	idc->oic_remote = 0;
+
+	return idc;
+}
+
+/*
+ * Lookup the mapping for the given fid in the cache; initialize a
+ * new one if not found. The initialization checks whether the
+ * object is local or remote. For local objects, OI is used to
+ * learn ino/generation. The function is used when the caller
+ * has no information about the object, e.g. at dt_insert().
+ *
+ * \retval cached entry, or ERR_PTR on allocation/lookup failure
+ */
+static struct osd_idmap_cache *
+osd_idc_find_or_init(const struct lu_env *env, struct osd_device *osd,
+		     const struct lu_fid *fid)
+{
+	struct osd_idmap_cache *idc;
+	int rc;
+
+	idc = osd_idc_find(env, osd, fid);
+	/* osd_idc_find() returns a pointer or NULL, never ERR_PTR */
+	LASSERT(!IS_ERR(idc));
+	if (idc != NULL)
+		return idc;
+
+	/* new mapping is needed */
+	idc = osd_idc_add(env, osd, fid);
+	if (IS_ERR(idc))
+		return idc;
+
+	/* initialize it: decide whether the object is local or remote.
+	 * NOTE(review): on the error paths below the just-added entry
+	 * remains in the cache with a zero ino — presumably harmless
+	 * since the cache is reset at transaction stop; confirm */
+	rc = osd_remote_fid(env, osd, fid);
+	if (unlikely(rc < 0))
+		return ERR_PTR(rc);
+
+	if (rc == 0) {
+		/* the object is local, lookup in OI */
+		/* XXX: probably cheaper to lookup in LU first? */
+		rc = osd_oi_lookup(osd_oti_get(env), osd, fid,
+				   &idc->oic_lid, 0);
+		if (unlikely(rc < 0)) {
+			CERROR("can't lookup: rc = %d\n", rc);
+			return ERR_PTR(rc);
+		}
+	} else {
+		/* the object is remote */
+		idc->oic_remote = 1;
+	}
+
+	return idc;
+}
+
+/*
+ * Lookup the mapping for the given FID and fill it from the given
+ * object. The object is local by definition.
+ *
+ * \retval 0 on success, negative errno on allocation failure
+ */
+static int osd_idc_find_and_init(const struct lu_env *env,
+				 struct osd_device *osd,
+				 struct osd_object *obj)
+{
+	const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
+	struct osd_idmap_cache *idc;
+
+	idc = osd_idc_find(env, osd, fid);
+	/* osd_idc_find() returns a pointer or NULL, never ERR_PTR */
+	LASSERT(!IS_ERR(idc));
+	if (idc != NULL) {
+		if (obj->oo_inode == NULL)
+			return 0;
+		if (idc->oic_lid.oii_ino != obj->oo_inode->i_ino) {
+			/* an existing entry may only be unfilled (ino 0);
+			 * a filled entry must already match the inode */
+			LASSERT(idc->oic_lid.oii_ino == 0);
+			idc->oic_lid.oii_ino = obj->oo_inode->i_ino;
+			idc->oic_lid.oii_gen = obj->oo_inode->i_generation;
+		}
+		return 0;
+	}
+
+	/* new mapping is needed */
+	idc = osd_idc_add(env, osd, fid);
+	if (IS_ERR(idc))
+		return PTR_ERR(idc);
+
+	if (obj->oo_inode != NULL) {
+		idc->oic_lid.oii_ino = obj->oo_inode->i_ino;
+		idc->oic_lid.oii_gen = obj->oo_inode->i_generation;
+	}
+	return 0;
+}
+
+/*
* OSD object methods.
*/
init_rwsem(&mo->oo_sem);
init_rwsem(&mo->oo_ext_idx_sem);
spin_lock_init(&mo->oo_guard);
+ INIT_LIST_HEAD(&mo->oo_xattr_list);
return l;
} else {
return NULL;
return inode;
}
+/*
+ * Add a directory entry for \a child pointing to \a inode, warning
+ * when the parent directory is approaching (-ENOBUFS) or has reached
+ * (-ENOSPC) the maximum entry limit. -ENOBUFS is a soft limit and is
+ * suppressed (returned as 0); -ENOSPC is propagated to the caller.
+ * The parent FID is resolved (LMA, well-known root, or IGIF) only to
+ * make the console warning useful.
+ */
+int osd_ldiskfs_add_entry(struct osd_thread_info *info,
+			  handle_t *handle, struct dentry *child,
+			  struct inode *inode, struct htree_lock *hlock)
+{
+	int rc, rc2;
+
+	rc = __ldiskfs_add_entry(handle, child, inode, hlock);
+	if (rc == -ENOBUFS || rc == -ENOSPC) {
+		char fidbuf[FID_LEN + 1];
+		struct lustre_mdt_attrs lma;
+		struct lu_fid fid = { };
+		char *errstr;
+		struct dentry *p_dentry = child->d_parent;
+
+		rc2 = osd_get_lma(info, p_dentry->d_inode, p_dentry,
+				  &lma);
+		if (rc2 == 0) {
+			fid = lma.lma_self_fid;
+			snprintf(fidbuf, sizeof(fidbuf), DFID, PFID(&fid));
+		} else if (rc2 == -ENODATA) {
+			/* no LMA: fall back to the well-known root FID or
+			 * an IGIF built from ino/generation */
+			if (unlikely(p_dentry->d_inode ==
+				     inode->i_sb->s_root->d_inode))
+				lu_local_obj_fid(&fid, OSD_FS_ROOT_OID);
+			else if (info->oti_dev && !info->oti_dev->od_is_ost &&
+				 fid_seq_is_mdt0(fid_seq(&fid)))
+				lu_igif_build(&fid, p_dentry->d_inode->i_ino,
+					      p_dentry->d_inode->i_generation);
+			snprintf(fidbuf, sizeof(fidbuf), DFID, PFID(&fid));
+		} else {
+			/* use sizeof(fidbuf) consistently with the other
+			 * snprintf() calls above */
+			snprintf(fidbuf, sizeof(fidbuf), "%s", "unknown");
+		}
+
+		if (rc == -ENOSPC)
+			errstr = "has reached";
+		else
+			errstr = "is approaching";
+		CWARN("%.16s: directory (inode: %lu FID: %s) %s maximum entry limit\n",
+		      LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
+		      p_dentry->d_inode->i_ino, fidbuf, errstr);
+		/* ignore such error now */
+		if (rc == -ENOBUFS)
+			rc = 0;
+	}
+	return rc;
+}
+
+
+
+
static struct inode *
osd_iget_fid(struct osd_thread_info *info, struct osd_device *dev,
struct osd_inode_id *id, struct lu_fid *fid)
struct osd_device *dev;
struct osd_idmap_cache *oic;
struct osd_inode_id *id;
- struct inode *inode;
+ struct inode *inode = NULL;
struct osd_scrub *scrub;
struct scrub_file *sf;
- int result;
- int saved = 0;
- bool cached = true;
- bool triggered = false;
+ __u32 flags = SS_CLEAR_DRYRUN | SS_CLEAR_FAILOUT |
+ SS_AUTO_FULL;
+ int result = 0;
+ int rc1 = 0;
+ bool cached = true;
+ bool remote = false;
ENTRY;
LINVRNT(osd_invariant(obj));
LASSERT(info);
oic = &info->oti_cache;
- if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOENT))
+ if (OBD_FAIL_CHECK(OBD_FAIL_SRV_ENOENT))
RETURN(-ENOENT);
/* For the object is created as locking anchor, or for the object to
if (result == -EREMCHG) {
trigger:
- if (unlikely(triggered))
- GOTO(out, result = saved);
-
- triggered = true;
- if (thread_is_running(&scrub->os_thread)) {
- result = -EINPROGRESS;
- } else if (!dev->od_noscrub) {
- result = osd_scrub_start(dev, SS_AUTO_FULL |
- SS_CLEAR_DRYRUN | SS_CLEAR_FAILOUT);
- LCONSOLE_WARN("%.16s: trigger OI scrub by RPC "
- "for "DFID", rc = %d [1]\n",
- osd_name(dev), PFID(fid), result);
- if (result == 0 || result == -EALREADY)
- result = -EINPROGRESS;
- else
- result = -EREMCHG;
- }
-
- if (fid_is_on_ost(info, dev, fid, OI_CHECK_FLD))
- GOTO(out, result);
-
/* We still have chance to get the valid inode: for the
* object which is referenced by remote name entry, the
* object on the local MDT will be linked under the dir
* only happened for the RPC from other MDT during the
* OI scrub, or for the client side RPC with FID only,
* such as FID to path, or from old connected client. */
- saved = result;
- result = osd_lookup_in_remote_parent(info, dev,
- fid, id);
- if (result == 0) {
- cached = true;
- goto iget;
+ if (!remote &&
+ !fid_is_on_ost(info, dev, fid, OI_CHECK_FLD)) {
+ rc1 = osd_lookup_in_remote_parent(info, dev,
+ fid, id);
+ if (rc1 == 0) {
+ remote = true;
+ cached = true;
+ flags |= SS_AUTO_PARTIAL;
+ flags &= ~SS_AUTO_FULL;
+ goto iget;
+ }
}
- result = saved;
+ if (thread_is_running(&scrub->os_thread)) {
+ if (scrub->os_partial_scan &&
+ !scrub->os_in_join) {
+ goto join;
+ } else {
+ if (inode != NULL && !IS_ERR(inode)) {
+ LASSERT(remote);
+
+ osd_add_oi_cache(info, dev, id,
+ fid);
+ osd_oii_insert(dev, oic, true);
+ } else {
+ result = -EINPROGRESS;
+ }
+ }
+ } else if (!dev->od_noscrub) {
+
+join:
+ rc1 = osd_scrub_start(dev, flags);
+ LCONSOLE_WARN("%.16s: trigger OI scrub by RPC "
+ "for the "DFID" with flags 0x%x,"
+ " rc = %d\n", osd_name(dev),
+ PFID(fid), flags, rc1);
+ if (rc1 == 0 || rc1 == -EALREADY) {
+ if (inode != NULL && !IS_ERR(inode)) {
+ LASSERT(remote);
+
+ osd_add_oi_cache(info, dev, id,
+ fid);
+ osd_oii_insert(dev, oic, true);
+ } else {
+ result = -EINPROGRESS;
+ }
+ } else {
+ result = -EREMCHG;
+ }
+ } else {
+ result = -EREMCHG;
+ }
}
- GOTO(out, result);
+ if (inode == NULL || IS_ERR(inode))
+ GOTO(out, result);
+ } else if (remote) {
+ goto trigger;
}
obj->oo_inode = inode;
}
iput(inode);
+ inode = NULL;
obj->oo_inode = NULL;
if (result != -EREMCHG)
GOTO(out, result);
result = osd_fid_lookup(env, obj, lu_object_fid(l), conf);
obj->oo_dt.do_body_ops = &osd_body_ops_new;
- if (result == 0 && obj->oo_inode != NULL)
+ if (result == 0 && obj->oo_inode != NULL) {
+ struct osd_thread_info *oti = osd_oti_get(env);
+ struct lustre_mdt_attrs *lma = &oti->oti_mdt_attrs;
+
osd_object_init0(obj);
+ result = osd_get_lma(oti, obj->oo_inode,
+ &oti->oti_obj_dentry, lma);
+ if (result == 0) {
+ /* Convert LMAI flags to lustre LMA flags
+ * and cache it to oo_lma_flags */
+ obj->oo_lma_flags =
+ lma_to_lustre_flags(lma->lma_incompat);
+ } else if (result == -ENODATA) {
+ result = 0;
+ }
+ }
LINVRNT(osd_invariant(obj));
return result;
}
+/* Per-object cached xattr. The first part of oxe_buf is the xattr
+ * name and is '\0' terminated. The remaining part holds the value in
+ * binary form. Entries live on osd_object::oo_xattr_list, protected
+ * by oo_guard for writers and RCU for readers. */
+struct osd_xattr_entry {
+	struct list_head oxe_list;	/* linkage into oo_xattr_list */
+	size_t oxe_len;			/* total allocation size */
+	size_t oxe_namelen;		/* strlen of the name part */
+	bool oxe_exist;			/* false = cached negative entry */
+	struct rcu_head oxe_rcu;	/* for deferred free */
+	char oxe_buf[0];		/* name '\0' value... */
+};
+
+/* Find the cached xattr entry matching \a name on \a obj, or NULL.
+ * The caller must hold rcu_read_lock() or obj->oo_guard. */
+static struct osd_xattr_entry *osd_oxc_lookup(struct osd_object *obj,
+					      const char *name,
+					      size_t namelen)
+{
+	struct osd_xattr_entry *oxe;
+
+	list_for_each_entry(oxe, &obj->oo_xattr_list, oxe_list) {
+		if (namelen == oxe->oxe_namelen &&
+		    strncmp(name, oxe->oxe_buf, namelen) == 0)
+			return oxe;
+	}
+
+	return NULL;
+}
+
+/* Read the xattr \a name from the object's xattr cache into \a buf.
+ *
+ * \retval value length	 on success (or when buf->lb_buf is NULL,
+ *			 letting the caller size the buffer)
+ * \retval -ENOENT	 not cached at all (caller must hit disk)
+ * \retval -ENODATA	 cached negative entry: xattr does not exist
+ * \retval -ERANGE	 buf->lb_len too small for the value
+ */
+static int osd_oxc_get(struct osd_object *obj, const char *name,
+		       struct lu_buf *buf)
+{
+	struct osd_xattr_entry *oxe;
+	size_t vallen;
+	ENTRY;
+
+	rcu_read_lock();
+	oxe = osd_oxc_lookup(obj, name, strlen(name));
+	if (oxe == NULL) {
+		rcu_read_unlock();
+		RETURN(-ENOENT);
+	}
+
+	if (!oxe->oxe_exist) {
+		rcu_read_unlock();
+		RETURN(-ENODATA);
+	}
+
+	/* positive entries are only added with buflen > 0, so the
+	 * value length is always non-zero here */
+	vallen = oxe->oxe_len - sizeof(*oxe) - oxe->oxe_namelen - 1;
+	LASSERT(vallen > 0);
+
+	if (buf->lb_buf == NULL) {
+		rcu_read_unlock();
+		RETURN(vallen);
+	}
+
+	if (buf->lb_len < vallen) {
+		rcu_read_unlock();
+		RETURN(-ERANGE);
+	}
+
+	memcpy(buf->lb_buf, oxe->oxe_buf + oxe->oxe_namelen + 1, vallen);
+	rcu_read_unlock();
+
+	RETURN(vallen);
+}
+
+/* RCU callback: free a cached xattr entry after the grace period,
+ * once no reader can still be traversing it */
+static void osd_oxc_free(struct rcu_head *head)
+{
+	struct osd_xattr_entry *oxe;
+
+	oxe = container_of(head, struct osd_xattr_entry, oxe_rcu);
+	OBD_FREE(oxe, oxe->oxe_len);
+}
+
+/* Remove the cached entry for \a name, if any; the actual free is
+ * deferred via RCU. Caller must hold obj->oo_guard.
+ * NOTE(review): plain list_del() is used while readers traverse the
+ * list under rcu_read_lock(); presumably list_del_rcu() is intended —
+ * confirm against the RCU list API requirements. */
+static inline void __osd_oxc_del(struct osd_object *obj, const char *name)
+{
+	struct osd_xattr_entry *oxe;
+
+	oxe = osd_oxc_lookup(obj, name, strlen(name));
+	if (oxe != NULL) {
+		list_del(&oxe->oxe_list);
+		call_rcu(&oxe->oxe_rcu, osd_oxc_free);
+	}
+}
+
+/* Cache the xattr \a name with value \a buf on \a obj. buflen == 0
+ * caches a negative entry (xattr known not to exist). Allocation
+ * failure is silently ignored: the cache is an optimization only. */
+static void osd_oxc_add(struct osd_object *obj, const char *name,
+			const char *buf, int buflen)
+{
+	struct osd_xattr_entry *oxe;
+	size_t namelen = strlen(name);
+	/* header + name + '\0' + value */
+	size_t len = sizeof(*oxe) + namelen + 1 + buflen;
+
+	OBD_ALLOC(oxe, len);
+	if (oxe == NULL)
+		return;
+
+	INIT_LIST_HEAD(&oxe->oxe_list);
+	oxe->oxe_len = len;
+	oxe->oxe_namelen = namelen;
+	memcpy(oxe->oxe_buf, name, namelen);
+	if (buflen > 0) {
+		LASSERT(buf != NULL);
+		memcpy(oxe->oxe_buf + namelen + 1, buf, buflen);
+		oxe->oxe_exist = true;
+	} else {
+		oxe->oxe_exist = false;
+	}
+
+	/* this should be rarely called, just remove old and add new.
+	 * NOTE(review): list_add_tail() with concurrent RCU readers —
+	 * presumably list_add_tail_rcu() is intended; confirm */
+	spin_lock(&obj->oo_guard);
+	__osd_oxc_del(obj, name);
+	list_add_tail(&oxe->oxe_list, &obj->oo_xattr_list);
+	spin_unlock(&obj->oo_guard);
+}
+
+/* Invalidate the cached entry for \a name (e.g. after removexattr) */
+static void osd_oxc_del(struct osd_object *obj, const char *name)
+{
+	spin_lock(&obj->oo_guard);
+	__osd_oxc_del(obj, name);
+	spin_unlock(&obj->oo_guard);
+}
+
+/* Release all cached xattrs at object teardown. Frees immediately
+ * (no RCU grace period) — no concurrent readers can exist this late
+ * in the object life-cycle. */
+static void osd_oxc_fini(struct osd_object *obj)
+{
+	struct osd_xattr_entry *oxe, *next;
+
+	list_for_each_entry_safe(oxe, next, &obj->oo_xattr_list, oxe_list) {
+		list_del(&oxe->oxe_list);
+		OBD_FREE(oxe, oxe->oxe_len);
+	}
+}
+
/*
* Concurrency: no concurrent access is possible that late in object
* life-cycle.
LINVRNT(osd_invariant(obj));
+ osd_oxc_fini(obj);
dt_object_fini(&obj->oo_dt);
if (obj->oo_hl_head != NULL)
ldiskfs_htree_lock_head_free(obj->oo_hl_head);
OBD_FREE_PTR(oh);
}
+#ifndef HAVE_SB_START_WRITE
+/* kernels without superblock freeze protection: make these no-ops */
+# define sb_start_write(sb) do {} while (0)
+# define sb_end_write(sb) do {} while (0)
+#endif
+
static struct thandle *osd_trans_create(const struct lu_env *env,
struct dt_device *d)
{
/* on pending IO in this thread should left from prev. request */
LASSERT(atomic_read(&iobuf->dr_numreqs) == 0);
- th = ERR_PTR(-ENOMEM);
+ sb_start_write(osd_sb(osd_dt_dev(d)));
+
OBD_ALLOC_GFP(oh, sizeof *oh, GFP_NOFS);
if (oh != NULL) {
oh->ot_quota_trans = &oti->oti_quota_trans;
sizeof(oti->oti_declare_ops_cred));
memset(oti->oti_declare_ops_used, 0,
sizeof(oti->oti_declare_ops_used));
+ } else {
+ sb_end_write(osd_sb(osd_dt_dev(d)));
+ th = ERR_PTR(-ENOMEM);
}
RETURN(th);
}
static unsigned long last_printed;
static int last_credits;
- CWARN("%.16s: too many transaction credits (%d > %d)\n",
- LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
- oh->ot_credits,
- osd_journal(dev)->j_max_transaction_buffers);
-
- osd_trans_dump_creds(env, th);
-
+	/* don't make noise on tiny testing systems;
+	 * actual credits misuse will be caught anyway */
if (last_credits != oh->ot_credits &&
time_after(jiffies, last_printed +
- msecs_to_jiffies(60 * MSEC_PER_SEC))) {
+ msecs_to_jiffies(60 * MSEC_PER_SEC)) &&
+ osd_transaction_size(dev) > 512) {
+ osd_trans_dump_creds(env, th);
libcfs_debug_dumpstack(NULL);
last_credits = oh->ot_credits;
last_printed = jiffies;
oh = container_of0(th, struct osd_thandle, ot_super);
+ /* reset OI cache for safety */
+ oti->oti_ins_cache_used = 0;
+
remove_agents = oh->ot_remove_agents;
qtrans = oh->ot_quota_trans;
if (unlikely(remove_agents != 0))
osd_process_scheduled_agent_removals(env, osd);
+ sb_end_write(osd_sb(osd));
+
RETURN(rc);
}
static void osd_object_release(const struct lu_env *env,
			       struct lu_object *l)
{
+	struct osd_object *o = osd_obj(l);
+	/* nobody should be releasing a non-destroyed object with nlink=0:
+	 * the API allows this, but ldiskfs doesn't like it and would then
+	 * report this inode as deleted */
+	if (unlikely(!o->oo_destroyed && o->oo_inode && o->oo_inode->i_nlink == 0))
+		LBUG();
}
/*
d ? d->id_ops->id_name : "plain");
}
-#define GRANT_FOR_LOCAL_OIDS 32 /* 128kB for last_rcvd, quota files, ... */
-
/*
* Concurrency: shouldn't matter.
*/
int osd_statfs(const struct lu_env *env, struct dt_device *d,
struct obd_statfs *sfs)
{
- struct osd_device *osd = osd_dt_dev(d);
- struct super_block *sb = osd_sb(osd);
- struct kstatfs *ksfs;
- int result = 0;
+ struct osd_device *osd = osd_dt_dev(d);
+ struct super_block *sb = osd_sb(osd);
+ struct kstatfs *ksfs;
+ __u64 reserved;
+ int result = 0;
if (unlikely(osd->od_mnt == NULL))
return -EINPROGRESS;
ksfs = &osd_oti_get(env)->oti_ksfs;
}
- spin_lock(&osd->od_osfs_lock);
result = sb->s_op->statfs(sb->s_root, ksfs);
- if (likely(result == 0)) { /* N.B. statfs can't really fail */
- statfs_pack(sfs, ksfs);
- if (unlikely(sb->s_flags & MS_RDONLY))
- sfs->os_state = OS_STATE_READONLY;
- if (LDISKFS_HAS_INCOMPAT_FEATURE(sb,
- LDISKFS_FEATURE_INCOMPAT_EXTENTS))
- sfs->os_maxbytes = sb->s_maxbytes;
- else
- sfs->os_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
- }
- spin_unlock(&osd->od_osfs_lock);
+ if (result)
+ goto out;
+
+ statfs_pack(sfs, ksfs);
+ if (unlikely(sb->s_flags & MS_RDONLY))
+ sfs->os_state = OS_STATE_READONLY;
+ if (LDISKFS_HAS_INCOMPAT_FEATURE(sb,
+ LDISKFS_FEATURE_INCOMPAT_EXTENTS))
+ sfs->os_maxbytes = sb->s_maxbytes;
+ else
+ sfs->os_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
- if (unlikely(env == NULL))
- OBD_FREE_PTR(ksfs);
+ /*
+ * Reserve some space so to avoid fragmenting the filesystem too much.
+ * Fragmentation not only impacts performance, but can also increase
+ * metadata overhead significantly, causing grant calculation to be
+ * wrong.
+ *
+ * Reserve 0.78% of total space, at least 8MB for small filesystems.
+ */
+ CLASSERT(OSD_STATFS_RESERVED > LDISKFS_MAX_BLOCK_SIZE);
+ reserved = OSD_STATFS_RESERVED >> sb->s_blocksize_bits;
+ if (likely(sfs->os_blocks >= reserved << OSD_STATFS_RESERVED_SHIFT))
+ reserved = sfs->os_blocks >> OSD_STATFS_RESERVED_SHIFT;
- /* Reserve a small amount of space for local objects like last_rcvd,
- * llog, quota files, ... */
- if (sfs->os_bavail <= GRANT_FOR_LOCAL_OIDS) {
- sfs->os_bavail = 0;
- } else {
- sfs->os_bavail -= GRANT_FOR_LOCAL_OIDS;
- /** Take out metadata overhead for indirect blocks */
- sfs->os_bavail -= sfs->os_bavail >> (sb->s_blocksize_bits - 3);
- }
+ sfs->os_blocks -= reserved;
+ sfs->os_bfree -= min(reserved, sfs->os_bfree);
+ sfs->os_bavail -= min(reserved, sfs->os_bavail);
- return result;
+out:
+ if (unlikely(env == NULL))
+ OBD_FREE_PTR(ksfs);
+ return result;
}
/**
*/
param->ddp_max_name_len = LDISKFS_NAME_LEN;
param->ddp_max_nlink = LDISKFS_LINK_MAX;
- param->ddp_block_shift = sb->s_blocksize_bits;
+ param->ddp_symlink_max = sb->s_blocksize;
param->ddp_mount_type = LDD_MT_LDISKFS;
if (LDISKFS_HAS_INCOMPAT_FEATURE(sb, LDISKFS_FEATURE_INCOMPAT_EXTENTS))
param->ddp_maxbytes = sb->s_maxbytes;
else
param->ddp_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
- /* Overhead estimate should be fairly accurate, so we really take a tiny
- * error margin which also avoids fragmenting the filesystem too much */
- param->ddp_grant_reserved = 2; /* end up to be 1.9% after conversion */
/* inode are statically allocated, so per-inode space consumption
* is the space consumed by the directory entry */
param->ddp_inodespace = PER_OBJ_USAGE;
- /* per-fragment overhead to be used by the client code */
- param->ddp_grant_frag = 6 * LDISKFS_BLOCK_SIZE(sb);
- param->ddp_mntopts = 0;
+ /* EXT_INIT_MAX_LEN is the theoretical maximum extent size (32k blocks
+ * = 128MB) which is unlikely to be hit in real life. Report a smaller
+ * maximum length to not under count the actual number of extents
+ * needed for writing a file. */
+ param->ddp_max_extent_blks = EXT_INIT_MAX_LEN >> 2;
+ /* worst-case extent insertion metadata overhead */
+ param->ddp_extent_tax = 6 * LDISKFS_BLOCK_SIZE(sb);
+ param->ddp_mntopts = 0;
if (test_opt(sb, XATTR_USER))
param->ddp_mntopts |= MNTOPT_USERXATTR;
if (test_opt(sb, POSIX_ACL))
#ifdef HAVE_DEV_SET_RDONLY
CERROR("*** setting %s read-only ***\n", osd_dt_dev(d)->od_svname);
+ if (sb->s_op->freeze_fs) {
+ rc = sb->s_op->freeze_fs(sb);
+ if (rc)
+ goto out;
+ }
+
if (jdev && (jdev != dev)) {
CDEBUG(D_IOCTL | D_HA, "set journal dev %lx rdonly\n",
(long)jdev);
}
CDEBUG(D_IOCTL | D_HA, "set dev %lx rdonly\n", (long)dev);
dev_set_rdonly(dev);
-#else
- CERROR("%s: %lx CANNOT BE SET READONLY: rc = %d\n",
- osd_dt_dev(d)->od_svname, (long)dev, rc);
+
+ if (sb->s_op->unfreeze_fs)
+ sb->s_op->unfreeze_fs(sb);
+
+out:
#endif
+ if (rc)
+ CERROR("%s: %lx CANNOT BE SET READONLY: rc = %d\n",
+ osd_dt_dev(d)->od_svname, (long)dev, rc);
+
RETURN(rc);
}
return t;
}
-
static void osd_inode_getattr(const struct lu_env *env,
struct inode *inode, struct lu_attr *attr)
{
attr->la_blocks = inode->i_blocks;
attr->la_uid = i_uid_read(inode);
attr->la_gid = i_gid_read(inode);
- attr->la_flags = LDISKFS_I(inode)->i_flags;
+ attr->la_flags = ll_inode_to_ext_flags(inode->i_flags);
attr->la_nlink = inode->i_nlink;
attr->la_rdev = inode->i_rdev;
attr->la_blksize = 1 << inode->i_blkbits;
{
struct osd_object *obj = osd_dt_obj(dt);
- if (!dt_object_exists(dt))
+ if (unlikely(!dt_object_exists(dt)))
+ return -ENOENT;
+ if (unlikely(obj->oo_destroyed))
return -ENOENT;
LASSERT(!dt_object_remote(dt));
spin_lock(&obj->oo_guard);
osd_inode_getattr(env, obj->oo_inode, attr);
+ if (obj->oo_lma_flags & LUSTRE_ORPHAN_FL)
+ attr->la_flags |= LUSTRE_ORPHAN_FL;
spin_unlock(&obj->oo_guard);
+
return 0;
}
osd_trans_declare_op(env, oh, OSD_OT_ATTR_SET,
osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
+ osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET,
+ osd_dto_credits_noquota[DTO_XATTR_SET]);
+
if (attr == NULL || obj->oo_inode == NULL)
RETURN(rc);
if (bits == 0)
return 0;
- if (bits & LA_ATIME)
- inode->i_atime = *osd_inode_time(env, inode, attr->la_atime);
- if (bits & LA_CTIME)
- inode->i_ctime = *osd_inode_time(env, inode, attr->la_ctime);
- if (bits & LA_MTIME)
- inode->i_mtime = *osd_inode_time(env, inode, attr->la_mtime);
- if (bits & LA_SIZE) {
- LDISKFS_I(inode)->i_disksize = attr->la_size;
- i_size_write(inode, attr->la_size);
- }
+ if (bits & LA_ATIME)
+ inode->i_atime = *osd_inode_time(env, inode, attr->la_atime);
+ if (bits & LA_CTIME)
+ inode->i_ctime = *osd_inode_time(env, inode, attr->la_ctime);
+ if (bits & LA_MTIME)
+ inode->i_mtime = *osd_inode_time(env, inode, attr->la_mtime);
+ if (bits & LA_SIZE) {
+ LDISKFS_I(inode)->i_disksize = attr->la_size;
+ i_size_write(inode, attr->la_size);
+ }
-#if 0
- /* OSD should not change "i_blocks" which is used by quota.
- * "i_blocks" should be changed by ldiskfs only. */
- if (bits & LA_BLOCKS)
- inode->i_blocks = attr->la_blocks;
-#endif
+ /* OSD should not change "i_blocks" which is used by quota.
+ * "i_blocks" should be changed by ldiskfs only. */
if (bits & LA_MODE)
inode->i_mode = (inode->i_mode & S_IFMT) |
(attr->la_mode & ~S_IFMT);
if (bits & LA_RDEV)
inode->i_rdev = attr->la_rdev;
- if (bits & LA_FLAGS) {
- /* always keep S_NOCMTIME */
- inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
- S_NOCMTIME;
- }
- return 0;
+ if (bits & LA_FLAGS) {
+ /* always keep S_NOCMTIME */
+ inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
+ S_NOCMTIME;
+ }
+ return 0;
}
static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr)
spin_lock(&obj->oo_guard);
rc = osd_inode_setattr(env, inode, attr);
spin_unlock(&obj->oo_guard);
+ if (rc != 0)
+ GOTO(out, rc);
- if (!rc)
- ll_dirty_inode(inode, I_DIRTY_DATASYNC);
+ ll_dirty_inode(inode, I_DIRTY_DATASYNC);
+
+ if (!(attr->la_valid & LA_FLAGS))
+ GOTO(out, rc);
+
+ /* Let's check if there are extra flags need to be set into LMA */
+ if (attr->la_flags & LUSTRE_LMA_FL_MASKS) {
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
+
+ rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
+ if (rc != 0)
+ GOTO(out, rc);
+
+ lma->lma_incompat |=
+ lustre_to_lma_flags(attr->la_flags);
+ lustre_lma_swab(lma);
+ rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA,
+ lma, sizeof(*lma), XATTR_REPLACE);
+ if (rc != 0) {
+ struct osd_device *osd = osd_obj2dev(obj);
+ CWARN("%s: set "DFID" lma flags %u failed: rc = %d\n",
+ osd_name(osd), PFID(lu_object_fid(&dt->do_lu)),
+ lma->lma_incompat, rc);
+ } else {
+ obj->oo_lma_flags =
+ attr->la_flags & LUSTRE_LMA_FL_MASKS;
+ }
+ osd_trans_exec_check(env, handle, OSD_OT_XATTR_SET);
+ }
+out:
osd_trans_exec_check(env, handle, OSD_OT_ATTR_SET);
return rc;
ah->dah_parent = parent;
ah->dah_mode = child_mode;
+
+ if (parent != NULL && !dt_object_remote(parent)) {
+ /* will help to find FID->ino at dt_insert("..") */
+ struct osd_object *pobj = osd_dt_obj(parent);
+ osd_idc_find_and_init(env, osd_obj2dev(pobj), pobj);
+ }
}
static void osd_attr_init(struct osd_thread_info *info, struct osd_object *obj,
if (result)
return;
- if (attr->la_valid != 0) {
- result = osd_inode_setattr(info->oti_env, inode, attr);
- /*
- * The osd_inode_setattr() should always succeed here. The
- * only error that could be returned is EDQUOT when we are
- * trying to change the UID or GID of the inode. However, this
- * should not happen since quota enforcement is no longer
- * enabled on ldiskfs (lquota takes care of it).
- */
+ if (attr->la_valid != 0) {
+ result = osd_inode_setattr(info->oti_env, inode, attr);
+ /*
+ * The osd_inode_setattr() should always succeed here. The
+ * only error that could be returned is EDQUOT when we are
+ * trying to change the UID or GID of the inode. However, this
+ * should not happen since quota enforcement is no longer
+ * enabled on ldiskfs (lquota takes care of it).
+ */
LASSERTF(result == 0, "%d\n", result);
ll_dirty_inode(inode, I_DIRTY_DATASYNC);
- }
+ }
- attr->la_valid = valid;
+ attr->la_valid = valid;
}
/**
osd_trans_exec_op(env, th, OSD_OT_INSERT);
osd_id_gen(id, obj->oo_inode->i_ino, obj->oo_inode->i_generation);
- rc = osd_oi_insert(info, osd, fid, id, oh->ot_handle, OI_CHECK_FLD);
+ rc = osd_oi_insert(info, osd, fid, id, oh->ot_handle,
+ OI_CHECK_FLD, NULL);
osd_trans_exec_check(env, th, OSD_OT_INSERT);
return rc;
if (rc != 0)
RETURN(rc);
+ /* will help to find FID->ino mapping at dt_insert() */
+ rc = osd_idc_find_and_init(env, osd_obj2dev(osd_dt_obj(dt)),
+ osd_dt_obj(dt));
+
RETURN(rc);
}
int rc;
ENTRY;
+ if (inode == NULL)
+ RETURN(-ENOENT);
+
oh = container_of0(th, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
- LASSERT(inode);
osd_trans_declare_op(env, oh, OSD_OT_DESTROY,
osd_dto_credits_noquota[DTO_OBJECT_DELETE]);
/* data to be truncated */
rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
0, oh, obj, true, NULL, false);
+ if (rc)
+ RETURN(rc);
+
+ /* will help to find FID->ino when this object is being
+ * added to PENDING/ */
+ rc = osd_idc_find_and_init(env, osd_obj2dev(obj), obj);
+
RETURN(rc);
}
{
struct ldiskfs_dentry_param *dot_ldp;
struct ldiskfs_dentry_param *dot_dot_ldp;
+ __u32 saved_nlink = dir->i_nlink;
+ int rc;
dot_dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp2;
osd_get_ldiskfs_dirent_param(dot_dot_ldp, dot_dot_fid);
dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
dot_ldp->edp_magic = 0;
- return ldiskfs_add_dot_dotdot(oth->ot_handle, parent_dir,
- dir, dot_ldp, dot_dot_ldp);
+ rc = ldiskfs_add_dot_dotdot(oth->ot_handle, parent_dir,
+ dir, dot_ldp, dot_dot_ldp);
+	/* The ldiskfs_add_dot_dotdot() may set dir->i_nlink to 2, then
+	 * the subsequent ref_add() will increase the dir->i_nlink
+	 * to 3. That is incorrect for a newly created directory.
+	 *
+	 * It looks like a hack, because we want to make the OSD API
+	 * order-independent for newly created directory objects
+	 * between the dt_insert(..) and ref_add() operations.
+	 *
+	 * Here, we only restore the in-RAM dir-inode's nlink attr,
+	 * because if the nlink attr is not 2, then there will be a
+	 * ref_add() called following the dt_insert(..), and such call
+	 * will make both the in-RAM and on-disk dir-inode's nlink
+	 * attr be set to 2. LU-7447 */
+ set_nlink(dir, saved_nlink);
+ return rc;
}
/**
const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
struct osd_object *obj = osd_dt_obj(dt);
struct osd_thread_info *info = osd_oti_get(env);
- int result;
+ int result, on_ost = 0;
ENTRY;
fid_to_ostid(fid, oi);
ostid_to_fid(tfid, oi, 0);
+ on_ost = 1;
result = osd_ea_fid_set(info, obj->oo_inode, tfid,
LMAC_FID_ON_OST, 0);
} else {
+ on_ost = fid_is_on_ost(info, osd_obj2dev(obj),
+ fid, OI_CHECK_FLD);
result = osd_ea_fid_set(info, obj->oo_inode, fid,
- fid_is_on_ost(info, osd_obj2dev(obj),
- fid, OI_CHECK_FLD) ?
- LMAC_FID_ON_OST : 0, 0);
+ on_ost ? LMAC_FID_ON_OST : 0,
+ 0);
}
if (obj->oo_dt.do_body_ops == &osd_body_ops_new)
obj->oo_dt.do_body_ops = &osd_body_ops;
if (result == 0)
result = __osd_oi_insert(env, obj, fid, th);
+ /* a small optimization - dt_insert() isn't usually applied
+ * to OST objects, so we don't need to cache OI mapping for
+ * OST objects */
+ if (result == 0 && on_ost == 0) {
+ struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
+ result = osd_idc_find_and_init(env, osd, obj);
+ LASSERT(result == 0);
+ }
+
LASSERT(ergo(result == 0,
dt_object_exists(dt) && !dt_object_remote(dt)));
LINVRNT(osd_invariant(obj));
struct osd_thandle *oh;
int rc = 0;
- if (!dt_object_exists(dt))
+ if (!dt_object_exists(dt) || obj->oo_destroyed)
return -ENOENT;
LINVRNT(osd_invariant(obj));
{
struct osd_thandle *oh;
+ if (!dt_object_exists(dt))
+ return -ENOENT;
+
LASSERT(!dt_object_remote(dt));
LASSERT(handle != NULL);
static int osd_xattr_get(const struct lu_env *env, struct dt_object *dt,
struct lu_buf *buf, const char *name)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct inode *inode = obj->oo_inode;
- struct osd_thread_info *info = osd_oti_get(env);
- struct dentry *dentry = &info->oti_obj_dentry;
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct inode *inode = obj->oo_inode;
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct dentry *dentry = &info->oti_obj_dentry;
+ bool cache_xattr = false;
+ int rc;
- /* version get is not real XATTR but uses xattr API */
- if (strcmp(name, XATTR_NAME_VERSION) == 0) {
- /* for version we are just using xattr API but change inode
- * field instead */
+ /* version get is not real XATTR but uses xattr API */
+ if (strcmp(name, XATTR_NAME_VERSION) == 0) {
+ /* for version we are just using xattr API but change inode
+ * field instead */
if (buf->lb_len == 0)
return sizeof(dt_obj_version_t);
osd_object_version_get(env, dt, buf->lb_buf);
return sizeof(dt_obj_version_t);
- }
+ }
if (!dt_object_exists(dt))
return -ENOENT;
LASSERT(inode->i_op != NULL);
LASSERT(inode->i_op->getxattr != NULL);
- return __osd_xattr_get(inode, dentry, name, buf->lb_buf, buf->lb_len);
-}
+ if (strcmp(name, XATTR_NAME_LOV) == 0 ||
+ strcmp(name, XATTR_NAME_DEFAULT_LMV) == 0)
+ cache_xattr = true;
+ if (cache_xattr) {
+ rc = osd_oxc_get(obj, name, buf);
+ if (rc != -ENOENT)
+ return rc;
+ }
+
+ rc = __osd_xattr_get(inode, dentry, name, buf->lb_buf, buf->lb_len);
+ if (cache_xattr) {
+ if (rc == -ENOENT || rc == -ENODATA)
+ osd_oxc_add(obj, name, NULL, 0);
+ else if (rc > 0 && buf->lb_buf != NULL)
+ osd_oxc_add(obj, name, buf->lb_buf, rc);
+ }
+
+ return rc;
+}
static int osd_declare_xattr_set(const struct lu_env *env,
struct dt_object *dt,
int rc;
ENTRY;
- LASSERT(handle != NULL);
+ LASSERT(handle != NULL);
- /* version set is not real XATTR */
- if (strcmp(name, XATTR_NAME_VERSION) == 0) {
- /* for version we are just using xattr API but change inode
- * field instead */
- LASSERT(buf->lb_len == sizeof(dt_obj_version_t));
- osd_object_version_set(env, dt, buf->lb_buf);
- return sizeof(dt_obj_version_t);
- }
+ /* version set is not real XATTR */
+ if (strcmp(name, XATTR_NAME_VERSION) == 0) {
+ /* for version we are just using xattr API but change inode
+ * field instead */
+ LASSERT(buf->lb_len == sizeof(dt_obj_version_t));
+ osd_object_version_set(env, dt, buf->lb_buf);
+ return sizeof(dt_obj_version_t);
+ }
CDEBUG(D_INODE, DFID" set xattr '%s' with size %zu\n",
PFID(lu_object_fid(&dt->do_lu)), name, buf->lb_len);
fs_flags);
osd_trans_exec_check(env, handle, OSD_OT_XATTR_SET);
+ if (rc == 0 &&
+ (strcmp(name, XATTR_NAME_LOV) == 0 ||
+ strcmp(name, XATTR_NAME_DEFAULT_LMV) == 0))
+ osd_oxc_add(obj, name, buf->lb_buf, buf->lb_len);
+
return rc;
}
dentry->d_sb = inode->i_sb;
rc = inode->i_op->removexattr(dentry, name);
osd_trans_exec_check(env, handle, OSD_OT_XATTR_SET);
+
+ if (rc == 0 &&
+ (strcmp(name, XATTR_NAME_LOV) == 0 ||
+ strcmp(name, XATTR_NAME_DEFAULT_LMV) == 0))
+ osd_oxc_del(obj, name);
+
return rc;
}
RETURN(rc);
}
+static int osd_invalidate(const struct lu_env *env, struct dt_object *dt)
+{
+ return 0;
+}
+
/*
* Index operations.
*/
result = 0;
} else if (feat == &dt_directory_features) {
dt->do_index_ops = &osd_index_ea_ops;
- if (obj->oo_inode != NULL && S_ISDIR(obj->oo_inode->i_mode))
+ if (obj->oo_inode == NULL || S_ISDIR(obj->oo_inode->i_mode))
result = 0;
else
result = -ENOTDIR;
.do_xattr_del = osd_xattr_del,
.do_xattr_list = osd_xattr_list,
.do_object_sync = osd_object_sync,
+ .do_invalidate = osd_invalidate,
};
/**
.do_xattr_del = osd_xattr_del,
.do_xattr_list = osd_xattr_list,
.do_object_sync = osd_object_sync,
+ .do_invalidate = osd_invalidate,
};
static const struct dt_object_operations osd_obj_otable_it_ops = {
osd_dto_credits_noquota[DTO_OBJECT_DELETE]);
inode = osd_dt_obj(dt)->oo_inode;
- LASSERT(inode);
+ if (inode == NULL)
+ RETURN(-ENOENT);
rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
0, oh, osd_dt_obj(dt), true, NULL, false);
}
bh = osd_ldiskfs_find_entry(dir, &dentry->d_name, &de, NULL, hlock);
- if (bh) {
+ if (!IS_ERR(bh)) {
/* If this is not the ".." entry, it might be a remote DNE
* entry and we need to check if the FID is for a remote
* MDT. If the FID is not in the directory entry (e.g.
le32_to_cpu(de->inode));
}
}
- rc = ldiskfs_delete_entry(oh->ot_handle, dir, de, bh);
- brelse(bh);
- } else {
- rc = -ENOENT;
- }
+ rc = ldiskfs_delete_entry(oh->ot_handle, dir, de, bh);
+ brelse(bh);
+ } else {
+ rc = PTR_ERR(bh);
+ }
if (hlock != NULL)
ldiskfs_htree_unlock(hlock);
else
child = osd_child_dentry_get(info->oti_env, pobj, name, strlen(name));
child->d_fsdata = (void *)ldp;
ll_vfs_dq_init(pobj->oo_inode);
- rc = osd_ldiskfs_add_entry(oth->ot_handle, child, cinode, hlock);
+ rc = osd_ldiskfs_add_entry(info, oth->ot_handle, child,
+ cinode, hlock);
if (rc == 0 && OBD_FAIL_CHECK(OBD_FAIL_LFSCK_BAD_TYPE)) {
struct ldiskfs_dir_entry_2 *de;
struct buffer_head *bh;
bh = osd_ldiskfs_find_entry(pobj->oo_inode, &child->d_name, &de,
NULL, hlock);
- if (bh != NULL) {
+ if (!IS_ERR(bh)) {
rc1 = ldiskfs_journal_get_write_access(oth->ot_handle,
bh);
if (rc1 == 0) {
LDISKFS_FT_DIR;
ldiskfs_handle_dirty_metadata(oth->ot_handle,
NULL, bh);
- brelse(bh);
}
+ brelse(bh);
}
}
if (dir->oo_compat_dot_created) {
result = -EEXIST;
} else {
- LASSERT(inode == parent_dir);
+ LASSERT(inode->i_ino == parent_dir->i_ino);
dir->oo_compat_dot_created = 1;
result = 0;
}
}
bh = osd_ldiskfs_find_entry(dir, &dentry->d_name, &de, NULL, hlock);
- if (bh) {
+ if (!IS_ERR(bh)) {
struct osd_thread_info *oti = osd_oti_get(env);
struct osd_inode_id *id = &oti->oti_id;
struct osd_idmap_cache *oic = &oti->oti_cache;
if (rc != 0)
fid_zero(&oic->oic_fid);
} else {
- rc = -ENOENT;
+ rc = PTR_ERR(bh);
}
GOTO(out, rc);
}
/**
- * Find the osd object for given fid.
- *
- * \param fid need to find the osd object having this fid
- *
- * \retval osd_object on success
- * \retval -ve on error
- */
-static struct osd_object *osd_object_find(const struct lu_env *env,
- struct dt_object *dt,
- const struct lu_fid *fid)
-{
- struct lu_device *ludev = dt->do_lu.lo_dev;
- struct osd_object *child = NULL;
- struct lu_object *luch;
- struct lu_object *lo;
-
- /*
- * at this point topdev might not exist yet
- * (i.e. MGS is preparing profiles). so we can
- * not rely on topdev and instead lookup with
- * our device passed as topdev. this can't work
- * if the object isn't cached yet (as osd doesn't
- * allocate lu_header). IOW, the object must be
- * in the cache, otherwise lu_object_alloc() crashes
- * -bzzz
- */
- luch = lu_object_find_at(env, ludev->ld_site->ls_top_dev == NULL ?
- ludev : ludev->ld_site->ls_top_dev,
- fid, NULL);
- if (!IS_ERR(luch)) {
- if (lu_object_exists(luch)) {
- lo = lu_object_locate(luch->lo_header, ludev->ld_type);
- if (lo != NULL)
- child = osd_obj(lo);
- else
- LU_OBJECT_DEBUG(D_ERROR, env, luch,
- "lu_object can't be located"
- DFID"\n", PFID(fid));
-
- if (child == NULL) {
- lu_object_put(env, luch);
- CERROR("Unable to get osd_object\n");
- child = ERR_PTR(-ENOENT);
- }
- } else {
- LU_OBJECT_DEBUG(D_ERROR, env, luch,
- "lu_object does not exists "DFID"\n",
- PFID(fid));
- lu_object_put(env, luch);
- child = ERR_PTR(-ENOENT);
- }
- } else {
- child = ERR_CAST(luch);
- }
-
- return child;
-}
-
-/**
* Put the osd object once done with it.
*
* \param obj osd object that needs to be put
{
struct osd_thandle *oh;
struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
- struct lu_fid *fid = (struct lu_fid *)rec;
+ struct dt_insert_rec *rec1 = (struct dt_insert_rec *)rec;
+ const struct lu_fid *fid = rec1->rec_fid;
int credits, rc = 0;
+ struct osd_idmap_cache *idc;
ENTRY;
LASSERT(!dt_object_remote(dt));
LASSERT(handle != NULL);
+ LASSERT(fid != NULL);
+ LASSERT(rec1->rec_type != 0);
oh = container_of0(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
credits = osd_dto_credits_noquota[DTO_INDEX_INSERT];
- if (fid != NULL) {
- rc = osd_remote_fid(env, osd, fid);
- if (unlikely(rc < 0))
- RETURN(rc);
- if (rc > 0) {
- /* a reference to remote inode is represented by an
- * agent inode which we have to create */
- credits += osd_dto_credits_noquota[DTO_OBJECT_CREATE];
- credits += osd_dto_credits_noquota[DTO_INDEX_INSERT];
- }
- rc = 0;
+
+ /* we can't call iget() while a transactions is running
+ * (this can lead to a deadlock), but we need to know
+ * inum and object type. so we find this information at
+ * declaration and cache in per-thread info */
+ idc = osd_idc_find_or_init(env, osd, fid);
+ if (IS_ERR(idc))
+ RETURN(PTR_ERR(idc));
+ if (idc->oic_remote) {
+ /* a reference to remote inode is represented by an
+ * agent inode which we have to create */
+ credits += osd_dto_credits_noquota[DTO_OBJECT_CREATE];
+ credits += osd_dto_credits_noquota[DTO_INDEX_INSERT];
}
osd_trans_declare_op(env, oh, OSD_OT_INSERT, credits);
const struct lu_fid *fid = rec1->rec_fid;
const char *name = (const char *)key;
struct osd_thread_info *oti = osd_oti_get(env);
- struct osd_inode_id *id = &oti->oti_id;
struct inode *child_inode = NULL;
- struct osd_object *child = NULL;
+ struct osd_idmap_cache *idc;
int rc;
ENTRY;
LASSERTF(fid_is_sane(fid), "fid"DFID" is insane!\n", PFID(fid));
- rc = osd_remote_fid(env, osd, fid);
- if (rc < 0) {
- CERROR("%s: Can not find object "DFID" rc %d\n",
- osd_name(osd), PFID(fid), rc);
- RETURN(rc);
+ idc = osd_idc_find(env, osd, fid);
+ if (unlikely(idc == NULL)) {
+ /* this dt_insert() wasn't declared properly, so
+ * FID is missing in OI cache. we better do not
+ * lookup FID in FLDB/OI and don't risk to deadlock,
+ * but in some special cases (lfsck testing, etc)
+ * it's much simpler than fixing a caller */
+ CERROR("%s: "DFID" wasn't declared for insert\n",
+ osd_name(osd), PFID(fid));
+ dump_stack();
+ idc = osd_idc_find_or_init(env, osd, fid);
+ if (IS_ERR(idc))
+ RETURN(PTR_ERR(idc));
}
- if (rc == 1) {
+ if (idc->oic_remote) {
/* Insert remote entry */
if (strcmp(name, dotdot) == 0 && strlen(name) == 2) {
struct osd_mdobj_map *omm = osd->od_mdt_map;
}
} else {
/* Insert local entry */
- child = osd_object_find(env, dt, fid);
- if (IS_ERR(child)) {
- CERROR("%s: Can not find object "DFID"%u:%u: rc = %d\n",
- osd_name(osd), PFID(fid),
- id->oii_ino, id->oii_gen,
- (int)PTR_ERR(child));
- RETURN(PTR_ERR(child));
+ if (unlikely(idc->oic_lid.oii_ino == 0)) {
+ /* for a reason OI cache wasn't filled properly */
+ CERROR("%s: OIC for "DFID" isn't filled\n",
+ osd_name(osd), PFID(fid));
+ RETURN(-EINVAL);
+ }
+ child_inode = oti->oti_inode;
+ if (unlikely(child_inode == NULL)) {
+ struct ldiskfs_inode_info *lii;
+ OBD_ALLOC_PTR(lii);
+ if (lii == NULL)
+ RETURN(-ENOMEM);
+ child_inode = oti->oti_inode = &lii->vfs_inode;
}
- child_inode = igrab(child->oo_inode);
+ child_inode->i_sb = osd_sb(osd);
+ child_inode->i_ino = idc->oic_lid.oii_ino;
+ child_inode->i_mode = rec1->rec_type & S_IFMT;
}
rc = osd_ea_add_rec(env, obj, child_inode, name, fid, th);
CDEBUG(D_INODE, "parent %lu insert %s:%lu rc = %d\n",
obj->oo_inode->i_ino, name, child_inode->i_ino, rc);
- iput(child_inode);
- if (child != NULL)
- osd_object_put(env, child);
+ if (child_inode && child_inode != oti->oti_inode)
+ iput(child_inode);
LASSERT(osd_invariant(obj));
osd_trans_exec_check(env, th, OSD_OT_INSERT);
RETURN(rc);
* \retval 0 on success
* \retval 1 on buffer full
*/
+#ifdef HAVE_FILLDIR_USE_CTX
+static int osd_ldiskfs_filldir(struct dir_context *buf,
+ const char *name, int namelen,
+#else
static int osd_ldiskfs_filldir(void *buf, const char *name, int namelen,
+#endif
loff_t offset, __u64 ino,
unsigned d_type)
{
- struct osd_it_ea *it = ((struct osd_filldir_cbs *)buf)->it;
+ struct osd_it_ea *it =
+ ((struct osd_filldir_cbs *)buf)->it;
struct osd_object *obj = it->oie_obj;
struct osd_it_ea_dirent *ent = it->oie_dirent;
struct lu_fid *fid = &ent->oied_fid;
struct ldiskfs_dentry_param *ldp;
int namelen = dentry->d_name.len;
int rc;
+ struct osd_thread_info *info = osd_oti_get(env);
ENTRY;
if (!LDISKFS_HAS_INCOMPAT_FEATURE(inode->i_sb,
osd_get_ldiskfs_dirent_param(ldp, fid);
dentry->d_fsdata = (void *)ldp;
ll_vfs_dq_init(dir);
- rc = osd_ldiskfs_add_entry(jh, dentry, inode, hlock);
+ rc = osd_ldiskfs_add_entry(info, jh, dentry, inode, hlock);
/* It is too bad, we cannot reinsert the name entry back.
* That means we lose it! */
if (rc != 0)
* For the whole directory, only dot/dotdot entry have no FID-in-dirent
* and needs to get FID from LMA when readdir, it will not affect the
* performance much. */
- if ((bh == NULL) || (le32_to_cpu(de->inode) != inode->i_ino) ||
+ if (IS_ERR(bh) || (le32_to_cpu(de->inode) != inode->i_ino) ||
(dot_dotdot != 0 && !osd_dot_dotdot_has_space(de, dot_dotdot))) {
*attr |= LUDA_IGNORE;
if (jh == NULL) {
brelse(bh);
- if (hlock != NULL)
+ dev->od_dirent_journal = 1;
+ if (hlock != NULL) {
ldiskfs_htree_unlock(hlock);
- else
+ hlock = NULL;
+ } else {
up_read(&obj->oo_ext_idx_sem);
- dev->od_dirent_journal = 1;
+ }
goto again;
}
if (jh == NULL) {
brelse(bh);
- if (hlock != NULL)
+ dev->od_dirent_journal = 1;
+ if (hlock != NULL) {
ldiskfs_htree_unlock(hlock);
- else
+ hlock = NULL;
+ } else {
up_read(&obj->oo_ext_idx_sem);
- dev->od_dirent_journal = 1;
+ }
goto again;
}
if (jh == NULL) {
brelse(bh);
- if (hlock != NULL)
+ dev->od_dirent_journal = 1;
+ if (hlock != NULL) {
ldiskfs_htree_unlock(hlock);
- else
+ hlock = NULL;
+ } else {
up_read(&obj->oo_ext_idx_sem);
- dev->od_dirent_journal = 1;
+ }
goto again;
}
GOTO(out, rc);
out:
- brelse(bh);
+ if (!IS_ERR(bh))
+ brelse(bh);
if (hlock != NULL) {
ldiskfs_htree_unlock(hlock);
} else {
struct lu_context_key *key, void* data)
{
struct osd_thread_info *info = data;
+ struct ldiskfs_inode_info *lli = LDISKFS_I(info->oti_inode);
+ struct osd_idmap_cache *idc = info->oti_ins_cache;
if (info->oti_inode != NULL)
- OBD_FREE_PTR(info->oti_inode);
+ OBD_FREE_PTR(lli);
if (info->oti_hlock != NULL)
ldiskfs_htree_lock_free(info->oti_hlock);
OBD_FREE(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
lu_buf_free(&info->oti_iobuf.dr_pg_buf);
lu_buf_free(&info->oti_iobuf.dr_bl_buf);
lu_buf_free(&info->oti_big_buf);
+ if (idc != NULL) {
+ LASSERT(info->oti_ins_cache_size > 0);
+ OBD_FREE(idc, sizeof(*idc) * info->oti_ins_cache_size);
+ info->oti_ins_cache = NULL;
+ info->oti_ins_cache_size = 0;
+ }
OBD_FREE_PTR(info);
}
struct osd_thread_info *info = osd_oti_get(env);
struct lu_fid *fid = &info->oti_fid;
struct inode *inode;
- int rc = 0, force_over_128tb = 0;
+ int rc = 0, force_over_256tb = 0;
ENTRY;
if (o->od_mnt != NULL)
RETURN(-EINVAL);
}
#endif
- if (opts != NULL && strstr(opts, "force_over_128tb") != NULL)
- force_over_128tb = 1;
+ if (opts != NULL && strstr(opts, "force_over_128tb") != NULL) {
+ CWARN("force_over_128tb option is deprecated. "
+ "Filesystems smaller than 256TB can be created without any "
+ "force options. Use the force_over_256tb option for "
+ "filesystems greater than 256TB.\n");
+ }
+
+ if (opts != NULL && strstr(opts, "force_over_256tb") != NULL)
+ force_over_256tb = 1;
- __page = alloc_page(GFP_IOFS);
+ __page = alloc_page(GFP_KERNEL);
if (__page == NULL)
GOTO(out, rc = -ENOMEM);
page = (unsigned long)page_address(__page);
"noextents",
/* strip out option we processed in osd */
"bigendian_extents",
- "force_over_128tb",
+#if LUSTRE_VERSION_CODE >= OBD_OCD_VERSION(3,0,53,0)
+#warning "remove force_over_128tb option"
+#else
+ "force_over_128tb",
+#endif
+ "force_over_256tb",
NULL
};
strcat(options, opts);
/* Glom up mount options */
if (*options != '\0')
strcat(options, ",");
- strlcat(options, "no_mbcache", PAGE_CACHE_SIZE);
+ strlcat(options, "no_mbcache", PAGE_SIZE);
type = get_fs_type("ldiskfs");
if (!type) {
GOTO(out, rc);
}
- if (ldiskfs_blocks_count(LDISKFS_SB(osd_sb(o))->s_es) > (8ULL << 32) &&
- force_over_128tb == 0) {
+ if (ldiskfs_blocks_count(LDISKFS_SB(osd_sb(o))->s_es) > (64ULL << 30) &&
+ force_over_256tb == 0) {
CERROR("%s: device %s LDISKFS does not support filesystems "
- "greater than 128TB and can cause data corruption. "
- "Use \"force_over_128tb\" mount option to override.\n",
+ "greater than 256TB and can cause data corruption. "
+ "Use \"force_over_256tb\" mount option to override.\n",
name, dev);
GOTO(out, rc = -EINVAL);
}
.o_health_check = osd_health_check,
};
-static int __init osd_mod_init(void)
+static int __init osd_init(void)
{
int rc;
return rc;
}
-static void __exit osd_mod_exit(void)
+static void __exit osd_exit(void)
{
class_unregister_type(LUSTRE_OSD_LDISKFS_NAME);
lu_kmem_fini(ldiskfs_caches);
MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
-module_init(osd_mod_init);
-module_exit(osd_mod_exit);
+module_init(osd_init);
+module_exit(osd_exit);