#include <ldiskfs/ldiskfs.h>
#include <ldiskfs/xattr.h>
+#include <ldiskfs/ldiskfs_extents.h>
#undef ENTRY
/*
* struct OBD_{ALLOC,FREE}*()
#include <lustre_linkea.h>
int ldiskfs_pdo = 1;
-CFS_MODULE_PARM(ldiskfs_pdo, "i", int, 0644,
- "ldiskfs with parallel directory operations");
+module_param(ldiskfs_pdo, int, 0644);
+MODULE_PARM_DESC(ldiskfs_pdo, "ldiskfs with parallel directory operations");
int ldiskfs_track_declares_assert;
-CFS_MODULE_PARM(ldiskfs_track_declares_assert, "i", int, 0644,
- "LBUG during tracking of declares");
+module_param(ldiskfs_track_declares_assert, int, 0644);
+MODULE_PARM_DESC(ldiskfs_track_declares_assert, "LBUG during tracking of declares");
/* Slab to allocate dynlocks */
struct kmem_cache *dynlock_cachep;
init_rwsem(&mo->oo_sem);
init_rwsem(&mo->oo_ext_idx_sem);
spin_lock_init(&mo->oo_guard);
+ INIT_LIST_HEAD(&mo->oo_xattr_list);
return l;
} else {
return NULL;
return inode;
}
+/*
+ * Add a directory entry via __ldiskfs_add_entry().
+ *
+ * When the underlying filesystem reports that the directory is approaching
+ * (-ENOBUFS) or has reached (-ENOSPC) its maximum entry limit, emit a
+ * console warning identifying the parent directory by inode number and FID.
+ * -ENOBUFS is then suppressed, so callers only see hard failures.
+ */
+int osd_ldiskfs_add_entry(struct osd_thread_info *info,
+			  handle_t *handle, struct dentry *child,
+			  struct inode *inode, struct htree_lock *hlock)
+{
+	int rc, rc2;
+
+	rc = __ldiskfs_add_entry(handle, child, inode, hlock);
+	if (rc == -ENOBUFS || rc == -ENOSPC) {
+		char fidbuf[FID_LEN + 1];
+		struct lustre_mdt_attrs lma;
+		struct lu_fid fid = { };
+		char *errstr;
+		struct dentry *p_dentry = child->d_parent;
+
+		/* Prefer the parent's self FID from its LMA xattr. */
+		rc2 = osd_get_lma(info, p_dentry->d_inode, p_dentry,
+				  &lma);
+		if (rc2 == 0) {
+			fid = lma.lma_self_fid;
+			snprintf(fidbuf, sizeof(fidbuf), DFID, PFID(&fid));
+		} else if (rc2 == -ENODATA) {
+			/* No LMA: use the well-known root FID for the
+			 * filesystem root, or build an IGIF from the inode
+			 * number/generation on non-OST devices. */
+			if (unlikely(p_dentry->d_inode ==
+				     inode->i_sb->s_root->d_inode))
+				lu_local_obj_fid(&fid, OSD_FS_ROOT_OID);
+			else if (info->oti_dev && !info->oti_dev->od_is_ost &&
+				 fid_seq_is_mdt0(fid_seq(&fid)))
+				lu_igif_build(&fid, p_dentry->d_inode->i_ino,
+					      p_dentry->d_inode->i_generation);
+			snprintf(fidbuf, sizeof(fidbuf), DFID, PFID(&fid));
+		} else {
+			snprintf(fidbuf, FID_LEN, "%s", "unknown");
+		}
+
+		if (rc == -ENOSPC)
+			errstr = "has reached";
+		else
+			errstr = "is approaching";
+		CWARN("%.16s: directory (inode: %lu FID: %s) %s maximum entry limit\n",
+		      LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
+		      p_dentry->d_inode->i_ino, fidbuf, errstr);
+		/* ignore such error now */
+		if (rc == -ENOBUFS)
+			rc = 0;
+	}
+	return rc;
+}
+
+
static struct inode *
osd_iget_fid(struct osd_thread_info *info, struct osd_device *dev,
struct osd_inode_id *id, struct lu_fid *fid)
RETURN(rc);
}
+/* Callback state passed through ->iterate()/->readdir() while scanning a
+ * striped directory for its local slave object. */
+struct osd_check_lmv_buf {
+#ifdef HAVE_DIR_CONTEXT
+	/* please keep it as first member */
+	struct dir_context ctx;
+#endif
+	struct osd_thread_info *oclb_info;	/* caller's thread context */
+	struct osd_device *oclb_dev;		/* local OSD device */
+	struct osd_idmap_cache *oclb_oic;	/* OI mapping cache slot to fill */
+};
+
+/**
+ * Directory iteration callback, invoked by ->readdir()/->iterate(), that
+ * filters out the local slave object's FID of the striped directory.
+ * Entry names encoding a FID are parsed; a sane, locally-stored FID has
+ * its OI mapping cached and queued for OI scrub.
+ *
+ * \retval	1	found the local slave's FID
+ * \retval	0	continue to check next item
+ * \retval	-ve	for failure
+ */
+#ifdef HAVE_FILLDIR_USE_CTX
+static int osd_stripe_dir_filldir(struct dir_context *buf,
+#else
+static int osd_stripe_dir_filldir(void *buf,
+#endif
+				  const char *name, int namelen,
+				  loff_t offset, __u64 ino, unsigned d_type)
+{
+	struct osd_check_lmv_buf *oclb = (struct osd_check_lmv_buf *)buf;
+	struct osd_thread_info *oti = oclb->oclb_info;
+	struct lu_fid *fid = &oti->oti_fid3;
+	struct osd_inode_id *id = &oti->oti_id3;
+	struct osd_device *dev = oclb->oclb_dev;
+	struct osd_idmap_cache *oic = oclb->oclb_oic;
+	struct inode *inode;
+	int rc;
+
+	/* skip "." and ".." (and any other name starting with a dot) */
+	if (name[0] == '.')
+		return 0;
+
+	/* non-FID names fail to parse and are filtered by the sanity check */
+	fid_zero(fid);
+	sscanf(name + 1, SFID, RFID(fid));
+	if (!fid_is_sane(fid))
+		return 0;
+
+	/* only the slave stored on this MDT is interesting */
+	if (osd_remote_fid(oti->oti_env, dev, fid))
+		return 0;
+
+	/* verify the inode still exists before recording the mapping */
+	osd_id_gen(id, ino, OSD_OII_NOGEN);
+	inode = osd_iget(oti, dev, id);
+	if (IS_ERR(inode))
+		return PTR_ERR(inode);
+
+	iput(inode);
+	osd_add_oi_cache(oti, dev, id, fid);
+	oic->oic_fid = *fid;
+	oic->oic_lid = *id;
+	oic->oic_dev = dev;
+	rc = osd_oii_insert(dev, oic, true);
+
+	/* returning 1 stops the iteration: the local slave was found */
+	return rc == 0 ? 1 : rc;
+}
+
+/* When looking up an item under a striped directory, we need to locate the
+ * master MDT-object of the striped directory firstly, then the client will
+ * send lookup (getattr_by_name) RPC to the MDT with some slave MDT-object's
+ * FID and the item's name. If the system is restored from MDT file level
+ * backup, then before the OI scrub completely built the OI files, the OI
+ * mappings of the master MDT-object and slave MDT-object may be invalid.
+ * Usually, it is not a problem for the master MDT-object. Because when
+ * locating the master MDT-object, we will do name based lookup (for the
+ * striped directory itself) firstly, during such process we can setup the
+ * correct OI mapping for the master MDT-object. But it will be trouble for
+ * the slave MDT-object. Because the client will not trigger name based
+ * lookup on the MDT to locate the slave MDT-object before locating an item
+ * under the striped directory, then when osd_fid_lookup(), it will find
+ * that the OI mapping for the slave MDT-object is invalid and does not know
+ * what the right OI mapping is, then the MDT has to return -EINPROGRESS to
+ * the client to notify that the OI scrub is rebuilding the OI file, related
+ * OI mapping is unknown yet, please try again later. And then the client
+ * will re-try the RPC again and again until the related OI mapping has been
+ * updated. That is quite inefficient.
+ *
+ * To resolve the above trouble, we will handle it as the following two cases:
+ *
+ * 1) The slave MDT-object and the master MDT-object are on different MDTs.
+ *    It is relatively easy. Being one of the remote MDT-objects, the slave
+ *    MDT-object is linked under /REMOTE_PARENT_DIR with the name of its FID
+ *    string. We can locate the slave MDT-object via lookup in the
+ *    /REMOTE_PARENT_DIR directly. Please check osd_fid_lookup().
+ *
+ * 2) The slave MDT-object and the master MDT-object reside on the same MDT.
+ *    Under such case, during lookup of the master MDT-object, we will look
+ *    up the slave MDT-object via readdir against the master MDT-object,
+ *    because the slave MDT-objects' information is stored as
+ *    sub-directories with the name "${FID}:${index}". Then when the local
+ *    slave MDT-object is found, its OI mapping will be recorded. Then
+ *    subsequent osd_fid_lookup() will know the correct OI mapping for the
+ *    slave MDT-object. */
+static int osd_check_lmv(struct osd_thread_info *oti, struct osd_device *dev,
+			 struct inode *inode, struct osd_idmap_cache *oic)
+{
+	struct lu_buf *buf = &oti->oti_big_buf;
+	struct dentry *dentry = &oti->oti_obj_dentry;
+	struct file *filp = &oti->oti_file;
+	const struct file_operations *fops;
+	struct lmv_mds_md_v1 *lmv1;
+	struct osd_check_lmv_buf oclb = {
+#ifdef HAVE_DIR_CONTEXT
+		.ctx.actor = osd_stripe_dir_filldir,
+#endif
+		.oclb_info = oti,
+		.oclb_dev = dev,
+		.oclb_oic = oic
+	};
+	int rc = 0;
+	ENTRY;
+
+	/* Read the LMV xattr, growing the buffer and retrying on -ERANGE. */
+again:
+	rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMV, buf->lb_buf,
+			     buf->lb_len);
+	if (rc == -ERANGE) {
+		/* probe for the required size, then reallocate */
+		rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMV, NULL, 0);
+		if (rc > 0) {
+			lu_buf_realloc(buf, rc);
+			if (buf->lb_buf == NULL)
+				GOTO(out, rc = -ENOMEM);
+
+			goto again;
+		}
+	}
+
+	/* no LMV xattr: not a striped directory, nothing to do */
+	if (unlikely(rc == 0 || rc == -ENODATA))
+		GOTO(out, rc = 0);
+
+	if (rc < 0)
+		GOTO(out, rc);
+
+	if (unlikely(buf->lb_buf == NULL)) {
+		lu_buf_realloc(buf, rc);
+		if (buf->lb_buf == NULL)
+			GOTO(out, rc = -ENOMEM);
+
+		goto again;
+	}
+
+	lmv1 = buf->lb_buf;
+	if (le32_to_cpu(lmv1->lmv_magic) != LMV_MAGIC_V1)
+		GOTO(out, rc = 0);
+
+	/* Build a private file handle so the directory can be iterated with
+	 * the inode's own file_operations. */
+	fops = inode->i_fop;
+	dentry->d_inode = inode;
+	dentry->d_sb = inode->i_sb;
+	filp->f_pos = 0;
+	filp->f_path.dentry = dentry;
+	filp->f_mode = FMODE_64BITHASH;
+	filp->f_mapping = inode->i_mapping;
+	filp->f_op = fops;
+	filp->private_data = NULL;
+	set_file_inode(filp, inode);
+
+#ifdef HAVE_DIR_CONTEXT
+	oclb.ctx.pos = filp->f_pos;
+	rc = fops->iterate(filp, &oclb.ctx);
+	filp->f_pos = oclb.ctx.pos;
+#else
+	rc = fops->readdir(filp, &oclb, osd_stripe_dir_filldir);
+#endif
+	fops->release(inode, filp);
+
+out:
+	if (rc < 0)
+		CDEBUG(D_LFSCK, "%.16s: fail to check LMV EA, inode = %lu/%u,"
+		       DFID": rc = %d\n",
+		       LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
+		       inode->i_ino, inode->i_generation,
+		       PFID(&oic->oic_fid), rc);
+	else
+		rc = 0;
+
+	RETURN(rc);
+}
+
static int osd_fid_lookup(const struct lu_env *env, struct osd_object *obj,
const struct lu_fid *fid,
const struct lu_object_conf *conf)
struct osd_device *dev;
struct osd_idmap_cache *oic;
struct osd_inode_id *id;
- struct inode *inode;
+ struct inode *inode = NULL;
struct osd_scrub *scrub;
struct scrub_file *sf;
- int result;
- int saved = 0;
- bool cached = true;
- bool triggered = false;
+ __u32 flags = SS_CLEAR_DRYRUN | SS_CLEAR_FAILOUT |
+ SS_AUTO_FULL;
+ int result = 0;
+ int rc1 = 0;
+ bool cached = true;
+ bool remote = false;
ENTRY;
LINVRNT(osd_invariant(obj));
if (result == -EREMCHG) {
trigger:
- if (unlikely(triggered))
- GOTO(out, result = saved);
-
- triggered = true;
- if (thread_is_running(&scrub->os_thread)) {
- result = -EINPROGRESS;
- } else if (!dev->od_noscrub) {
- result = osd_scrub_start(dev, SS_AUTO_FULL |
- SS_CLEAR_DRYRUN | SS_CLEAR_FAILOUT);
- LCONSOLE_WARN("%.16s: trigger OI scrub by RPC "
- "for "DFID", rc = %d [1]\n",
- osd_name(dev), PFID(fid), result);
- if (result == 0 || result == -EALREADY)
- result = -EINPROGRESS;
- else
- result = -EREMCHG;
- } else {
- result = -EREMCHG;
- }
-
- if (fid_is_on_ost(info, dev, fid, OI_CHECK_FLD))
- GOTO(out, result);
-
/* We still have chance to get the valid inode: for the
* object which is referenced by remote name entry, the
* object on the local MDT will be linked under the dir
* only happened for the RPC from other MDT during the
* OI scrub, or for the client side RPC with FID only,
* such as FID to path, or from old connected client. */
- saved = result;
- result = osd_lookup_in_remote_parent(info, dev,
- fid, id);
- if (result == 0) {
- cached = true;
- goto iget;
+ if (!remote &&
+ !fid_is_on_ost(info, dev, fid, OI_CHECK_FLD)) {
+ rc1 = osd_lookup_in_remote_parent(info, dev,
+ fid, id);
+ if (rc1 == 0) {
+ remote = true;
+ cached = true;
+ flags |= SS_AUTO_PARTIAL;
+ flags &= ~SS_AUTO_FULL;
+ goto iget;
+ }
}
- result = saved;
+ if (thread_is_running(&scrub->os_thread)) {
+ if (scrub->os_partial_scan &&
+ !scrub->os_in_join) {
+ goto join;
+ } else {
+ if (inode != NULL && !IS_ERR(inode)) {
+ LASSERT(remote);
+
+ osd_add_oi_cache(info, dev, id,
+ fid);
+ osd_oii_insert(dev, oic, true);
+ } else {
+ result = -EINPROGRESS;
+ }
+ }
+ } else if (!dev->od_noscrub) {
+
+join:
+ rc1 = osd_scrub_start(dev, flags);
+ LCONSOLE_WARN("%.16s: trigger OI scrub by RPC "
+ "for the "DFID" with flags 0x%x,"
+ " rc = %d\n", osd_name(dev),
+ PFID(fid), flags, rc1);
+ if (rc1 == 0 || rc1 == -EALREADY) {
+ if (inode != NULL && !IS_ERR(inode)) {
+ LASSERT(remote);
+
+ osd_add_oi_cache(info, dev, id,
+ fid);
+ osd_oii_insert(dev, oic, true);
+ } else {
+ result = -EINPROGRESS;
+ }
+ } else {
+ result = -EREMCHG;
+ }
+ } else {
+ result = -EREMCHG;
+ }
}
- GOTO(out, result);
+ if (inode == NULL || IS_ERR(inode))
+ GOTO(out, result);
+ } else if (remote) {
+ goto trigger;
}
obj->oo_inode = inode;
}
iput(inode);
+ inode = NULL;
obj->oo_inode = NULL;
if (result != -EREMCHG)
GOTO(out, result);
obj->oo_compat_dot_created = 1;
obj->oo_compat_dotdot_created = 1;
- if (!S_ISDIR(inode->i_mode) || !ldiskfs_pdo) /* done */
+ if (S_ISDIR(inode->i_mode) &&
+ (flags & SS_AUTO_PARTIAL || sf->sf_status == SS_SCANNING))
+ osd_check_lmv(info, dev, inode, oic);
+
+ if (!ldiskfs_pdo)
GOTO(out, result = 0);
LASSERT(obj->oo_hl_head == NULL);
result = osd_fid_lookup(env, obj, lu_object_fid(l), conf);
obj->oo_dt.do_body_ops = &osd_body_ops_new;
- if (result == 0 && obj->oo_inode != NULL)
+ if (result == 0 && obj->oo_inode != NULL) {
+ struct osd_thread_info *oti = osd_oti_get(env);
+ struct lustre_mdt_attrs *lma = &oti->oti_mdt_attrs;
+
osd_object_init0(obj);
+ result = osd_get_lma(oti, obj->oo_inode,
+ &oti->oti_obj_dentry, lma);
+ if (result == 0) {
+ /* Convert LMAI flags to lustre LMA flags
+ * and cache it to oo_lma_flags */
+ obj->oo_lma_flags =
+ lma_to_lustre_flags(lma->lma_incompat);
+ } else if (result == -ENODATA) {
+ result = 0;
+ }
+ }
LINVRNT(osd_invariant(obj));
return result;
}
+/* Cached xattr entry. The first part of oxe_buf is the xattr name and is
+ * '\0' terminated. The remaining part is for the value, in binary mode. */
+struct osd_xattr_entry {
+	struct list_head oxe_list;	/* linkage in osd_object xattr list */
+	size_t oxe_len;			/* total allocated size of this entry */
+	size_t oxe_namelen;		/* strlen() of the cached xattr name */
+	bool oxe_exist;			/* false: negative (ENODATA) entry */
+	struct rcu_head oxe_rcu;	/* for RCU-deferred free */
+	char oxe_buf[0];		/* name '\0' value */
+};
+
+/* Find a cached xattr entry by name, or NULL if not cached. Callers hold
+ * either rcu_read_lock() (readers, see osd_oxc_get()) or oo_guard
+ * (writers, see __osd_oxc_del()/osd_oxc_add()). */
+static struct osd_xattr_entry *osd_oxc_lookup(struct osd_object *obj,
+					      const char *name,
+					      size_t namelen)
+{
+	struct osd_xattr_entry *oxe;
+
+	list_for_each_entry(oxe, &obj->oo_xattr_list, oxe_list) {
+		/* length check first avoids prefix false-positives */
+		if (namelen == oxe->oxe_namelen &&
+		    strncmp(name, oxe->oxe_buf, namelen) == 0)
+			return oxe;
+	}
+
+	return NULL;
+}
+
+/* Look up an xattr value in the per-object cache.
+ *
+ * Returns the value length on a hit (copying into buf when provided),
+ * -ENOENT when the name is not cached at all, -ENODATA for a cached
+ * negative entry, and -ERANGE when the caller's buffer is too small.
+ * A NULL buf->lb_buf acts as a size probe. */
+static int osd_oxc_get(struct osd_object *obj, const char *name,
+		       struct lu_buf *buf)
+{
+	struct osd_xattr_entry *oxe;
+	size_t vallen;
+	ENTRY;
+
+	rcu_read_lock();
+	oxe = osd_oxc_lookup(obj, name, strlen(name));
+	if (oxe == NULL) {
+		rcu_read_unlock();
+		RETURN(-ENOENT);
+	}
+
+	/* negative entry: xattr known to be absent */
+	if (!oxe->oxe_exist) {
+		rcu_read_unlock();
+		RETURN(-ENODATA);
+	}
+
+	/* value occupies what follows the header, name and its '\0' */
+	vallen = oxe->oxe_len - sizeof(*oxe) - oxe->oxe_namelen - 1;
+	LASSERT(vallen > 0);
+
+	if (buf->lb_buf == NULL) {
+		rcu_read_unlock();
+		RETURN(vallen);
+	}
+
+	if (buf->lb_len < vallen) {
+		rcu_read_unlock();
+		RETURN(-ERANGE);
+	}
+
+	memcpy(buf->lb_buf, oxe->oxe_buf + oxe->oxe_namelen + 1, vallen);
+	rcu_read_unlock();
+
+	RETURN(vallen);
+}
+
+/* RCU callback: free a cached xattr entry after the grace period, once no
+ * reader can still reference it. */
+static void osd_oxc_free(struct rcu_head *head)
+{
+	struct osd_xattr_entry *oxe;
+
+	oxe = container_of(head, struct osd_xattr_entry, oxe_rcu);
+	OBD_FREE(oxe, oxe->oxe_len);
+}
+
+/* Remove a cached xattr entry by name, if present; the entry is freed via
+ * RCU. Caller must hold obj->oo_guard. */
+static inline void __osd_oxc_del(struct osd_object *obj, const char *name)
+{
+	struct osd_xattr_entry *oxe;
+
+	oxe = osd_oxc_lookup(obj, name, strlen(name));
+	if (oxe != NULL) {
+		list_del(&oxe->oxe_list);
+		call_rcu(&oxe->oxe_rcu, osd_oxc_free);
+	}
+}
+
+/* Insert (or replace) a cached xattr entry for @name. A zero @buflen
+ * records a negative entry (xattr known to be absent). Allocation failure
+ * is silently ignored: the cache is best-effort. */
+static void osd_oxc_add(struct osd_object *obj, const char *name,
+			const char *buf, int buflen)
+{
+	struct osd_xattr_entry *oxe;
+	size_t namelen = strlen(name);
+	/* header + name + '\0' + value */
+	size_t len = sizeof(*oxe) + namelen + 1 + buflen;
+
+	OBD_ALLOC(oxe, len);
+	if (oxe == NULL)
+		return;
+
+	INIT_LIST_HEAD(&oxe->oxe_list);
+	oxe->oxe_len = len;
+	oxe->oxe_namelen = namelen;
+	/* NOTE(review): relies on OBD_ALLOC returning zeroed memory for the
+	 * name's '\0' terminator at oxe_buf[namelen] — confirm */
+	memcpy(oxe->oxe_buf, name, namelen);
+	if (buflen > 0) {
+		LASSERT(buf != NULL);
+		memcpy(oxe->oxe_buf + namelen + 1, buf, buflen);
+		oxe->oxe_exist = true;
+	} else {
+		oxe->oxe_exist = false;
+	}
+
+	/* this should be rarely called, just remove old and add new */
+	spin_lock(&obj->oo_guard);
+	__osd_oxc_del(obj, name);
+	list_add_tail(&oxe->oxe_list, &obj->oo_xattr_list);
+	spin_unlock(&obj->oo_guard);
+}
+
+/* Locked wrapper around __osd_oxc_del(): drop the cached entry for @name. */
+static void osd_oxc_del(struct osd_object *obj, const char *name)
+{
+	spin_lock(&obj->oo_guard);
+	__osd_oxc_del(obj, name);
+	spin_unlock(&obj->oo_guard);
+}
+
+/* Release all cached xattr entries at object teardown. No concurrent
+ * access is possible that late in the object life-cycle, so entries are
+ * freed directly instead of via RCU. */
+static void osd_oxc_fini(struct osd_object *obj)
+{
+	struct osd_xattr_entry *oxe, *next;
+
+	list_for_each_entry_safe(oxe, next, &obj->oo_xattr_list, oxe_list) {
+		list_del(&oxe->oxe_list);
+		OBD_FREE(oxe, oxe->oxe_len);
+	}
+}
+
/*
* Concurrency: no concurrent access is possible that late in object
* life-cycle.
LINVRNT(osd_invariant(obj));
+ osd_oxc_fini(obj);
dt_object_fini(&obj->oo_dt);
if (obj->oo_hl_head != NULL)
ldiskfs_htree_lock_head_free(obj->oo_hl_head);
OBD_FREE_PTR(oh);
}
+#ifndef HAVE_SB_START_WRITE
+/* Compatibility no-op stubs for kernels without sb_start_write()/
+ * sb_end_write() superblock freeze protection. */
+# define sb_start_write(sb) do {} while (0)
+# define sb_end_write(sb) do {} while (0)
+#endif
+
static struct thandle *osd_trans_create(const struct lu_env *env,
struct dt_device *d)
{
/* on pending IO in this thread should left from prev. request */
LASSERT(atomic_read(&iobuf->dr_numreqs) == 0);
- th = ERR_PTR(-ENOMEM);
+ sb_start_write(osd_sb(osd_dt_dev(d)));
+
OBD_ALLOC_GFP(oh, sizeof *oh, GFP_NOFS);
if (oh != NULL) {
oh->ot_quota_trans = &oti->oti_quota_trans;
sizeof(oti->oti_declare_ops_cred));
memset(oti->oti_declare_ops_used, 0,
sizeof(oti->oti_declare_ops_used));
+ } else {
+ sb_end_write(osd_sb(osd_dt_dev(d)));
+ th = ERR_PTR(-ENOMEM);
}
RETURN(th);
}
static unsigned long last_printed;
static int last_credits;
- CWARN("%.16s: too many transaction credits (%d > %d)\n",
- LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
- oh->ot_credits,
- osd_journal(dev)->j_max_transaction_buffers);
-
- osd_trans_dump_creds(env, th);
-
+ /* don't make noise on a tiny testing systems
+ * actual credits misuse will be caught anyway */
if (last_credits != oh->ot_credits &&
time_after(jiffies, last_printed +
- msecs_to_jiffies(60 * MSEC_PER_SEC))) {
+ msecs_to_jiffies(60 * MSEC_PER_SEC)) &&
+ osd_transaction_size(dev) > 512) {
+ osd_trans_dump_creds(env, th);
libcfs_debug_dumpstack(NULL);
last_credits = oh->ot_credits;
last_printed = jiffies;
if (unlikely(remove_agents != 0))
osd_process_scheduled_agent_removals(env, osd);
+ sb_end_write(osd_sb(osd));
+
RETURN(rc);
}
static void osd_object_release(const struct lu_env *env,
			       struct lu_object *l)
{
+	struct osd_object *o = osd_obj(l);
+	/* Nobody should be releasing a non-destroyed object with nlink == 0:
+	 * the API allows it, but ldiskfs dislikes it and then reports the
+	 * inode as deleted. */
+	if (unlikely(!o->oo_destroyed && o->oo_inode && o->oo_inode->i_nlink == 0))
+		LBUG();
}
/*
d ? d->id_ops->id_name : "plain");
}
-#define GRANT_FOR_LOCAL_OIDS 32 /* 128kB for last_rcvd, quota files, ... */
-
/*
* Concurrency: shouldn't matter.
*/
int osd_statfs(const struct lu_env *env, struct dt_device *d,
struct obd_statfs *sfs)
{
- struct osd_device *osd = osd_dt_dev(d);
- struct super_block *sb = osd_sb(osd);
- struct kstatfs *ksfs;
- int result = 0;
+ struct osd_device *osd = osd_dt_dev(d);
+ struct super_block *sb = osd_sb(osd);
+ struct kstatfs *ksfs;
+ __u64 reserved;
+ int result = 0;
if (unlikely(osd->od_mnt == NULL))
return -EINPROGRESS;
ksfs = &osd_oti_get(env)->oti_ksfs;
}
- spin_lock(&osd->od_osfs_lock);
result = sb->s_op->statfs(sb->s_root, ksfs);
- if (likely(result == 0)) { /* N.B. statfs can't really fail */
- statfs_pack(sfs, ksfs);
- if (unlikely(sb->s_flags & MS_RDONLY))
- sfs->os_state = OS_STATE_READONLY;
- if (LDISKFS_HAS_INCOMPAT_FEATURE(sb,
- LDISKFS_FEATURE_INCOMPAT_EXTENTS))
- sfs->os_maxbytes = sb->s_maxbytes;
- else
- sfs->os_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
- }
- spin_unlock(&osd->od_osfs_lock);
+ if (result)
+ goto out;
+
+ statfs_pack(sfs, ksfs);
+ if (unlikely(sb->s_flags & MS_RDONLY))
+ sfs->os_state = OS_STATE_READONLY;
+ if (LDISKFS_HAS_INCOMPAT_FEATURE(sb,
+ LDISKFS_FEATURE_INCOMPAT_EXTENTS))
+ sfs->os_maxbytes = sb->s_maxbytes;
+ else
+ sfs->os_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
- if (unlikely(env == NULL))
- OBD_FREE_PTR(ksfs);
+ /*
+ * Reserve some space so to avoid fragmenting the filesystem too much.
+ * Fragmentation not only impacts performance, but can also increase
+ * metadata overhead significantly, causing grant calculation to be
+ * wrong.
+ *
+ * Reserve 0.78% of total space, at least 8MB for small filesystems.
+ */
+ CLASSERT(OSD_STATFS_RESERVED > LDISKFS_MAX_BLOCK_SIZE);
+ reserved = OSD_STATFS_RESERVED >> sb->s_blocksize_bits;
+ if (likely(sfs->os_blocks >= reserved << OSD_STATFS_RESERVED_SHIFT))
+ reserved = sfs->os_blocks >> OSD_STATFS_RESERVED_SHIFT;
- /* Reserve a small amount of space for local objects like last_rcvd,
- * llog, quota files, ... */
- if (sfs->os_bavail <= GRANT_FOR_LOCAL_OIDS) {
- sfs->os_bavail = 0;
- } else {
- sfs->os_bavail -= GRANT_FOR_LOCAL_OIDS;
- /** Take out metadata overhead for indirect blocks */
- sfs->os_bavail -= sfs->os_bavail >> (sb->s_blocksize_bits - 3);
- }
+ sfs->os_blocks -= reserved;
+ sfs->os_bfree -= min(reserved, sfs->os_bfree);
+ sfs->os_bavail -= min(reserved, sfs->os_bavail);
- return result;
+out:
+ if (unlikely(env == NULL))
+ OBD_FREE_PTR(ksfs);
+ return result;
}
/**
*/
param->ddp_max_name_len = LDISKFS_NAME_LEN;
param->ddp_max_nlink = LDISKFS_LINK_MAX;
- param->ddp_block_shift = sb->s_blocksize_bits;
+ param->ddp_symlink_max = sb->s_blocksize;
param->ddp_mount_type = LDD_MT_LDISKFS;
if (LDISKFS_HAS_INCOMPAT_FEATURE(sb, LDISKFS_FEATURE_INCOMPAT_EXTENTS))
param->ddp_maxbytes = sb->s_maxbytes;
else
param->ddp_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
- /* Overhead estimate should be fairly accurate, so we really take a tiny
- * error margin which also avoids fragmenting the filesystem too much */
- param->ddp_grant_reserved = 2; /* end up to be 1.9% after conversion */
/* inode are statically allocated, so per-inode space consumption
* is the space consumed by the directory entry */
param->ddp_inodespace = PER_OBJ_USAGE;
- /* per-fragment overhead to be used by the client code */
- param->ddp_grant_frag = 6 * LDISKFS_BLOCK_SIZE(sb);
- param->ddp_mntopts = 0;
+ /* EXT_INIT_MAX_LEN is the theoretical maximum extent size (32k blocks
+ * = 128MB) which is unlikely to be hit in real life. Report a smaller
+ * maximum length to not under count the actual number of extents
+ * needed for writing a file. */
+ param->ddp_max_extent_blks = EXT_INIT_MAX_LEN >> 2;
+ /* worst-case extent insertion metadata overhead */
+ param->ddp_extent_tax = 6 * LDISKFS_BLOCK_SIZE(sb);
+ param->ddp_mntopts = 0;
if (test_opt(sb, XATTR_USER))
param->ddp_mntopts |= MNTOPT_USERXATTR;
if (test_opt(sb, POSIX_ACL))
#ifdef HAVE_DEV_SET_RDONLY
CERROR("*** setting %s read-only ***\n", osd_dt_dev(d)->od_svname);
+ if (sb->s_op->freeze_fs) {
+ rc = sb->s_op->freeze_fs(sb);
+ if (rc)
+ goto out;
+ }
+
if (jdev && (jdev != dev)) {
CDEBUG(D_IOCTL | D_HA, "set journal dev %lx rdonly\n",
(long)jdev);
}
CDEBUG(D_IOCTL | D_HA, "set dev %lx rdonly\n", (long)dev);
dev_set_rdonly(dev);
-#else
- CERROR("%s: %lx CANNOT BE SET READONLY: rc = %d\n",
- osd_dt_dev(d)->od_svname, (long)dev, rc);
+
+ if (sb->s_op->unfreeze_fs)
+ sb->s_op->unfreeze_fs(sb);
+
+out:
#endif
+ if (rc)
+ CERROR("%s: %lx CANNOT BE SET READONLY: rc = %d\n",
+ osd_dt_dev(d)->od_svname, (long)dev, rc);
+
RETURN(rc);
}
return t;
}
-
static void osd_inode_getattr(const struct lu_env *env,
struct inode *inode, struct lu_attr *attr)
{
attr->la_blocks = inode->i_blocks;
attr->la_uid = i_uid_read(inode);
attr->la_gid = i_gid_read(inode);
- attr->la_flags = LDISKFS_I(inode)->i_flags;
+ attr->la_flags = ll_inode_to_ext_flags(inode->i_flags);
attr->la_nlink = inode->i_nlink;
attr->la_rdev = inode->i_rdev;
attr->la_blksize = 1 << inode->i_blkbits;
{
struct osd_object *obj = osd_dt_obj(dt);
- if (!dt_object_exists(dt))
+ if (unlikely(!dt_object_exists(dt)))
+ return -ENOENT;
+ if (unlikely(obj->oo_destroyed))
return -ENOENT;
LASSERT(!dt_object_remote(dt));
spin_lock(&obj->oo_guard);
osd_inode_getattr(env, obj->oo_inode, attr);
+ if (obj->oo_lma_flags & LUSTRE_ORPHAN_FL)
+ attr->la_flags |= LUSTRE_ORPHAN_FL;
spin_unlock(&obj->oo_guard);
+
return 0;
}
osd_trans_declare_op(env, oh, OSD_OT_ATTR_SET,
osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
+ osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET,
+ osd_dto_credits_noquota[DTO_XATTR_SET]);
+
if (attr == NULL || obj->oo_inode == NULL)
RETURN(rc);
if (bits == 0)
return 0;
- if (bits & LA_ATIME)
- inode->i_atime = *osd_inode_time(env, inode, attr->la_atime);
- if (bits & LA_CTIME)
- inode->i_ctime = *osd_inode_time(env, inode, attr->la_ctime);
- if (bits & LA_MTIME)
- inode->i_mtime = *osd_inode_time(env, inode, attr->la_mtime);
- if (bits & LA_SIZE) {
- LDISKFS_I(inode)->i_disksize = attr->la_size;
- i_size_write(inode, attr->la_size);
- }
+ if (bits & LA_ATIME)
+ inode->i_atime = *osd_inode_time(env, inode, attr->la_atime);
+ if (bits & LA_CTIME)
+ inode->i_ctime = *osd_inode_time(env, inode, attr->la_ctime);
+ if (bits & LA_MTIME)
+ inode->i_mtime = *osd_inode_time(env, inode, attr->la_mtime);
+ if (bits & LA_SIZE) {
+ LDISKFS_I(inode)->i_disksize = attr->la_size;
+ i_size_write(inode, attr->la_size);
+ }
-#if 0
- /* OSD should not change "i_blocks" which is used by quota.
- * "i_blocks" should be changed by ldiskfs only. */
- if (bits & LA_BLOCKS)
- inode->i_blocks = attr->la_blocks;
-#endif
+ /* OSD should not change "i_blocks" which is used by quota.
+ * "i_blocks" should be changed by ldiskfs only. */
if (bits & LA_MODE)
inode->i_mode = (inode->i_mode & S_IFMT) |
(attr->la_mode & ~S_IFMT);
if (bits & LA_RDEV)
inode->i_rdev = attr->la_rdev;
- if (bits & LA_FLAGS) {
- /* always keep S_NOCMTIME */
- inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
- S_NOCMTIME;
- }
- return 0;
+ if (bits & LA_FLAGS) {
+ /* always keep S_NOCMTIME */
+ inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
+ S_NOCMTIME;
+ }
+ return 0;
}
static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr)
spin_lock(&obj->oo_guard);
rc = osd_inode_setattr(env, inode, attr);
spin_unlock(&obj->oo_guard);
+ if (rc != 0)
+ GOTO(out, rc);
- if (!rc)
- ll_dirty_inode(inode, I_DIRTY_DATASYNC);
+ ll_dirty_inode(inode, I_DIRTY_DATASYNC);
+
+ if (!(attr->la_valid & LA_FLAGS))
+ GOTO(out, rc);
+
+ /* Let's check if there are extra flags need to be set into LMA */
+ if (attr->la_flags & LUSTRE_LMA_FL_MASKS) {
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
+
+ rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
+ if (rc != 0)
+ GOTO(out, rc);
+
+ lma->lma_incompat |=
+ lustre_to_lma_flags(attr->la_flags);
+ lustre_lma_swab(lma);
+ rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA,
+ lma, sizeof(*lma), XATTR_REPLACE);
+ if (rc != 0) {
+ struct osd_device *osd = osd_obj2dev(obj);
+ CWARN("%s: set "DFID" lma flags %u failed: rc = %d\n",
+ osd_name(osd), PFID(lu_object_fid(&dt->do_lu)),
+ lma->lma_incompat, rc);
+ } else {
+ obj->oo_lma_flags =
+ attr->la_flags & LUSTRE_LMA_FL_MASKS;
+ }
+ osd_trans_exec_check(env, handle, OSD_OT_XATTR_SET);
+ }
+out:
osd_trans_exec_check(env, handle, OSD_OT_ATTR_SET);
return rc;
if (result)
return;
- if (attr->la_valid != 0) {
- result = osd_inode_setattr(info->oti_env, inode, attr);
- /*
- * The osd_inode_setattr() should always succeed here. The
- * only error that could be returned is EDQUOT when we are
- * trying to change the UID or GID of the inode. However, this
- * should not happen since quota enforcement is no longer
- * enabled on ldiskfs (lquota takes care of it).
- */
+ if (attr->la_valid != 0) {
+ result = osd_inode_setattr(info->oti_env, inode, attr);
+ /*
+ * The osd_inode_setattr() should always succeed here. The
+ * only error that could be returned is EDQUOT when we are
+ * trying to change the UID or GID of the inode. However, this
+ * should not happen since quota enforcement is no longer
+ * enabled on ldiskfs (lquota takes care of it).
+ */
LASSERTF(result == 0, "%d\n", result);
ll_dirty_inode(inode, I_DIRTY_DATASYNC);
- }
+ }
- attr->la_valid = valid;
+ attr->la_valid = valid;
}
/**
osd_trans_exec_op(env, th, OSD_OT_INSERT);
osd_id_gen(id, obj->oo_inode->i_ino, obj->oo_inode->i_generation);
- rc = osd_oi_insert(info, osd, fid, id, oh->ot_handle, OI_CHECK_FLD);
+ rc = osd_oi_insert(info, osd, fid, id, oh->ot_handle,
+ OI_CHECK_FLD, NULL);
osd_trans_exec_check(env, th, OSD_OT_INSERT);
return rc;
struct osd_thandle *oh;
int rc = 0;
- if (!dt_object_exists(dt))
+ if (!dt_object_exists(dt) || obj->oo_destroyed)
return -ENOENT;
LINVRNT(osd_invariant(obj));
static int osd_xattr_get(const struct lu_env *env, struct dt_object *dt,
struct lu_buf *buf, const char *name)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct inode *inode = obj->oo_inode;
- struct osd_thread_info *info = osd_oti_get(env);
- struct dentry *dentry = &info->oti_obj_dentry;
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct inode *inode = obj->oo_inode;
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct dentry *dentry = &info->oti_obj_dentry;
+ bool cache_xattr = false;
+ int rc;
- /* version get is not real XATTR but uses xattr API */
- if (strcmp(name, XATTR_NAME_VERSION) == 0) {
- /* for version we are just using xattr API but change inode
- * field instead */
+ /* version get is not real XATTR but uses xattr API */
+ if (strcmp(name, XATTR_NAME_VERSION) == 0) {
+ /* for version we are just using xattr API but change inode
+ * field instead */
if (buf->lb_len == 0)
return sizeof(dt_obj_version_t);
osd_object_version_get(env, dt, buf->lb_buf);
return sizeof(dt_obj_version_t);
- }
+ }
if (!dt_object_exists(dt))
return -ENOENT;
LASSERT(inode->i_op != NULL);
LASSERT(inode->i_op->getxattr != NULL);
- return __osd_xattr_get(inode, dentry, name, buf->lb_buf, buf->lb_len);
-}
+ if (strcmp(name, XATTR_NAME_LOV) == 0 ||
+ strcmp(name, XATTR_NAME_DEFAULT_LMV) == 0)
+ cache_xattr = true;
+
+ if (cache_xattr) {
+ rc = osd_oxc_get(obj, name, buf);
+ if (rc != -ENOENT)
+ return rc;
+ }
+ rc = __osd_xattr_get(inode, dentry, name, buf->lb_buf, buf->lb_len);
+ if (cache_xattr) {
+ if (rc == -ENOENT || rc == -ENODATA)
+ osd_oxc_add(obj, name, NULL, 0);
+ else if (rc > 0 && buf->lb_buf != NULL)
+ osd_oxc_add(obj, name, buf->lb_buf, rc);
+ }
+
+ return rc;
+}
static int osd_declare_xattr_set(const struct lu_env *env,
struct dt_object *dt,
int rc;
ENTRY;
- LASSERT(handle != NULL);
+ LASSERT(handle != NULL);
- /* version set is not real XATTR */
- if (strcmp(name, XATTR_NAME_VERSION) == 0) {
- /* for version we are just using xattr API but change inode
- * field instead */
- LASSERT(buf->lb_len == sizeof(dt_obj_version_t));
- osd_object_version_set(env, dt, buf->lb_buf);
- return sizeof(dt_obj_version_t);
- }
+ /* version set is not real XATTR */
+ if (strcmp(name, XATTR_NAME_VERSION) == 0) {
+ /* for version we are just using xattr API but change inode
+ * field instead */
+ LASSERT(buf->lb_len == sizeof(dt_obj_version_t));
+ osd_object_version_set(env, dt, buf->lb_buf);
+ return sizeof(dt_obj_version_t);
+ }
CDEBUG(D_INODE, DFID" set xattr '%s' with size %zu\n",
PFID(lu_object_fid(&dt->do_lu)), name, buf->lb_len);
fs_flags);
osd_trans_exec_check(env, handle, OSD_OT_XATTR_SET);
+ if (rc == 0 &&
+ (strcmp(name, XATTR_NAME_LOV) == 0 ||
+ strcmp(name, XATTR_NAME_DEFAULT_LMV) == 0))
+ osd_oxc_add(obj, name, buf->lb_buf, buf->lb_len);
+
return rc;
}
dentry->d_sb = inode->i_sb;
rc = inode->i_op->removexattr(dentry, name);
osd_trans_exec_check(env, handle, OSD_OT_XATTR_SET);
+
+ if (rc == 0 &&
+ (strcmp(name, XATTR_NAME_LOV) == 0 ||
+ strcmp(name, XATTR_NAME_DEFAULT_LMV) == 0))
+ osd_oxc_del(obj, name);
+
return rc;
}
RETURN(rc);
}
+/* ->do_invalidate() method: a no-op for the ldiskfs OSD, always succeeds. */
+static int osd_invalidate(const struct lu_env *env, struct dt_object *dt)
+{
+	return 0;
+}
+
/*
* Index operations.
*/
.do_xattr_del = osd_xattr_del,
.do_xattr_list = osd_xattr_list,
.do_object_sync = osd_object_sync,
+ .do_invalidate = osd_invalidate,
};
/**
.do_xattr_del = osd_xattr_del,
.do_xattr_list = osd_xattr_list,
.do_object_sync = osd_object_sync,
+ .do_invalidate = osd_invalidate,
};
static const struct dt_object_operations osd_obj_otable_it_ops = {
}
bh = osd_ldiskfs_find_entry(dir, &dentry->d_name, &de, NULL, hlock);
- if (bh) {
+ if (!IS_ERR(bh)) {
/* If this is not the ".." entry, it might be a remote DNE
* entry and we need to check if the FID is for a remote
* MDT. If the FID is not in the directory entry (e.g.
le32_to_cpu(de->inode));
}
}
- rc = ldiskfs_delete_entry(oh->ot_handle, dir, de, bh);
- brelse(bh);
- } else {
- rc = -ENOENT;
- }
+ rc = ldiskfs_delete_entry(oh->ot_handle, dir, de, bh);
+ brelse(bh);
+ } else {
+ rc = PTR_ERR(bh);
+ }
if (hlock != NULL)
ldiskfs_htree_unlock(hlock);
else
child = osd_child_dentry_get(info->oti_env, pobj, name, strlen(name));
child->d_fsdata = (void *)ldp;
ll_vfs_dq_init(pobj->oo_inode);
- rc = osd_ldiskfs_add_entry(oth->ot_handle, child, cinode, hlock);
+ rc = osd_ldiskfs_add_entry(info, oth->ot_handle, child,
+ cinode, hlock);
if (rc == 0 && OBD_FAIL_CHECK(OBD_FAIL_LFSCK_BAD_TYPE)) {
struct ldiskfs_dir_entry_2 *de;
struct buffer_head *bh;
bh = osd_ldiskfs_find_entry(pobj->oo_inode, &child->d_name, &de,
NULL, hlock);
- if (bh != NULL) {
+ if (!IS_ERR(bh)) {
rc1 = ldiskfs_journal_get_write_access(oth->ot_handle,
bh);
if (rc1 == 0) {
LDISKFS_FT_DIR;
ldiskfs_handle_dirty_metadata(oth->ot_handle,
NULL, bh);
- brelse(bh);
}
+ brelse(bh);
}
}
osd_consistency_check(struct osd_thread_info *oti, struct osd_device *dev,
struct osd_idmap_cache *oic)
{
- struct osd_scrub *scrub = &dev->od_scrub;
- struct lu_fid *fid = &oic->oic_fid;
- struct osd_inode_id *id = &oti->oti_id;
- int once = 0;
- int rc;
+ struct osd_scrub *scrub = &dev->od_scrub;
+ struct lu_fid *fid = &oic->oic_fid;
+ struct osd_inode_id *id = &oic->oic_lid;
+ struct inode *inode = NULL;
+ int once = 0;
+ bool insert;
+ int rc;
ENTRY;
if (!fid_is_norm(fid) && !fid_is_igif(fid))
RETURN(0);
again:
- rc = osd_oi_lookup(oti, dev, fid, id, 0);
+ rc = osd_oi_lookup(oti, dev, fid, &oti->oti_id, 0);
if (rc == -ENOENT) {
- struct inode *inode;
+ __u32 gen = id->oii_gen;
- *id = oic->oic_lid;
- inode = osd_iget(oti, dev, &oic->oic_lid);
+ insert = true;
+ if (inode != NULL)
+ goto trigger;
+ inode = osd_iget(oti, dev, id);
/* The inode has been removed (by race maybe). */
if (IS_ERR(inode)) {
rc = PTR_ERR(inode);
RETURN(rc == -ESTALE ? -ENOENT : rc);
}
- iput(inode);
/* The OI mapping is lost. */
- if (id->oii_gen != OSD_OII_NOGEN)
+ if (gen != OSD_OII_NOGEN)
goto trigger;
+ iput(inode);
/* The inode may has been reused by others, we do not know,
* leave it to be handled by subsequent osd_fid_lookup(). */
RETURN(0);
- } else if (rc != 0 || osd_id_eq(id, &oic->oic_lid)) {
+ } else if (rc != 0 || osd_id_eq(id, &oti->oti_id)) {
RETURN(rc);
+ } else {
+ insert = false;
}
trigger:
if (thread_is_running(&scrub->os_thread)) {
- rc = osd_oii_insert(dev, oic, rc == -ENOENT);
+ if (inode == NULL) {
+ inode = osd_iget(oti, dev, id);
+ /* The inode has been removed (by race maybe). */
+ if (IS_ERR(inode)) {
+ rc = PTR_ERR(inode);
+
+ RETURN(rc == -ESTALE ? -ENOENT : rc);
+ }
+ }
+
+ rc = osd_oii_insert(dev, oic, insert);
/* There is race condition between osd_oi_lookup and OI scrub.
* The OI scrub finished just after osd_oi_lookup() failure.
* Under such case, it is unnecessary to trigger OI scrub again,
if (unlikely(rc == -EAGAIN))
goto again;
- RETURN(0);
+ if (!S_ISDIR(inode->i_mode))
+ rc = 0;
+ else
+ rc = osd_check_lmv(oti, dev, inode, oic);
+
+ iput(inode);
+ RETURN(rc);
}
if (!dev->od_noscrub && ++once == 1) {
rc = osd_scrub_start(dev, SS_AUTO_PARTIAL | SS_CLEAR_DRYRUN |
SS_CLEAR_FAILOUT);
- CDEBUG(D_LFSCK | D_CONSOLE, "%.16s: trigger OI scrub by RPC "
- "for "DFID", rc = %d [2]\n",
+ CDEBUG(D_LFSCK | D_CONSOLE | D_WARNING,
+ "%.16s: trigger partial OI scrub for RPC inconsistency "
+ "checking FID "DFID": rc = %d\n",
LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
PFID(fid), rc);
if (rc == 0 || rc == -EALREADY)
goto again;
}
- RETURN(0);
+ if (inode != NULL)
+ iput(inode);
+
+ RETURN(rc);
}
static int osd_fail_fid_lookup(struct osd_thread_info *oti,
}
bh = osd_ldiskfs_find_entry(dir, &dentry->d_name, &de, NULL, hlock);
- if (bh) {
+ if (!IS_ERR(bh)) {
struct osd_thread_info *oti = osd_oti_get(env);
struct osd_inode_id *id = &oti->oti_id;
struct osd_idmap_cache *oic = &oti->oti_cache;
if (rc != 0)
fid_zero(&oic->oic_fid);
} else {
- rc = -ENOENT;
+ rc = PTR_ERR(bh);
}
GOTO(out, rc);
* \retval 0 on success
* \retval 1 on buffer full
*/
-static int osd_ldiskfs_filldir(void *buf, const char *name, int namelen,
- loff_t offset, __u64 ino,
- unsigned d_type)
+#ifdef HAVE_FILLDIR_USE_CTX
+static int osd_ldiskfs_filldir(struct dir_context *buf,
+#else
+static int osd_ldiskfs_filldir(void *buf,
+#endif
+ const char *name, int namelen,
+ loff_t offset, __u64 ino, unsigned d_type)
{
- struct osd_it_ea *it = ((struct osd_filldir_cbs *)buf)->it;
+ struct osd_it_ea *it =
+ ((struct osd_filldir_cbs *)buf)->it;
struct osd_object *obj = it->oie_obj;
struct osd_it_ea_dirent *ent = it->oie_dirent;
struct lu_fid *fid = &ent->oied_fid;
struct ldiskfs_dentry_param *ldp;
int namelen = dentry->d_name.len;
int rc;
+ struct osd_thread_info *info = osd_oti_get(env);
ENTRY;
if (!LDISKFS_HAS_INCOMPAT_FEATURE(inode->i_sb,
osd_get_ldiskfs_dirent_param(ldp, fid);
dentry->d_fsdata = (void *)ldp;
ll_vfs_dq_init(dir);
- rc = osd_ldiskfs_add_entry(jh, dentry, inode, hlock);
+ rc = osd_ldiskfs_add_entry(info, jh, dentry, inode, hlock);
/* It is too bad, we cannot reinsert the name entry back.
* That means we lose it! */
if (rc != 0)
* For the whole directory, only dot/dotdot entry have no FID-in-dirent
* and needs to get FID from LMA when readdir, it will not affect the
* performance much. */
- if ((bh == NULL) || (le32_to_cpu(de->inode) != inode->i_ino) ||
+ if (IS_ERR(bh) || (le32_to_cpu(de->inode) != inode->i_ino) ||
(dot_dotdot != 0 && !osd_dot_dotdot_has_space(de, dot_dotdot))) {
*attr |= LUDA_IGNORE;
GOTO(out, rc);
out:
- brelse(bh);
+ if (!IS_ERR(bh))
+ brelse(bh);
if (hlock != NULL) {
ldiskfs_htree_unlock(hlock);
} else {
struct osd_thread_info *info = osd_oti_get(env);
struct lu_fid *fid = &info->oti_fid;
struct inode *inode;
- int rc = 0, force_over_128tb = 0;
+ int rc = 0, force_over_256tb = 0;
ENTRY;
if (o->od_mnt != NULL)
RETURN(-EINVAL);
}
#endif
-	if (opts != NULL && strstr(opts, "force_over_128tb") != NULL)
-		force_over_128tb = 1;
+	/* "force_over_128tb" is kept only to warn users of the old option;
+	 * the effective override is now "force_over_256tb". */
+	if (opts != NULL && strstr(opts, "force_over_128tb") != NULL) {
+		CWARN("force_over_128tb option is deprecated. "
+		      "Filesystems less than 256TB can be created without any "
+		      "force options. Use force_over_256tb option for "
+		      "filesystems greater than 256TB.\n");
+	}
+
+	if (opts != NULL && strstr(opts, "force_over_256tb") != NULL)
+		force_over_256tb = 1;
- __page = alloc_page(GFP_IOFS);
+ __page = alloc_page(GFP_KERNEL);
if (__page == NULL)
GOTO(out, rc = -ENOMEM);
page = (unsigned long)page_address(__page);
"noextents",
/* strip out option we processed in osd */
"bigendian_extents",
- "force_over_128tb",
+#if LUSTRE_VERSION_CODE >= OBD_OCD_VERSION(3,0,53,0)
+#warning "remove force_over_128 option"
+#else
+ "force_over_128tb (deprecated)",
+#endif
+ "force_over_256tb",
NULL
};
strcat(options, opts);
/* Glom up mount options */
if (*options != '\0')
strcat(options, ",");
- strlcat(options, "no_mbcache", PAGE_CACHE_SIZE);
+ strlcat(options, "no_mbcache", PAGE_SIZE);
type = get_fs_type("ldiskfs");
if (!type) {
GOTO(out, rc);
}
- if (ldiskfs_blocks_count(LDISKFS_SB(osd_sb(o))->s_es) > (8ULL << 32) &&
- force_over_128tb == 0) {
+ if (ldiskfs_blocks_count(LDISKFS_SB(osd_sb(o))->s_es) > (64ULL << 30) &&
+ force_over_256tb == 0) {
CERROR("%s: device %s LDISKFS does not support filesystems "
- "greater than 128TB and can cause data corruption. "
- "Use \"force_over_128tb\" mount option to override.\n",
+ "greater than 256TB and can cause data corruption. "
+ "Use \"force_over_256tb\" mount option to override.\n",
name, dev);
GOTO(out, rc = -EINVAL);
}