* Pravin Shelar <pravin.shelar@sun.com> : Added fid in dirent
*/
-#define DEBUG_SUBSYSTEM S_MDS
+#define DEBUG_SUBSYSTEM S_OSD
#include <linux/module.h>
#include <md_object.h>
#include <lustre_quota.h>
+#include <ldiskfs/xattr.h>
+
int ldiskfs_pdo = 1;
CFS_MODULE_PARM(ldiskfs_pdo, "i", int, 0644,
"ldiskfs with parallel directory operations");
int rc;
ENTRY;
- if (OBD_FAIL_CHECK(OBD_FAIL_OSD_COMPAT_INVALID_ENTRY))
- RETURN(0);
-
CLASSERT(LMA_OLD_SIZE >= sizeof(*lma));
rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
info->oti_mdt_attrs_old, LMA_OLD_SIZE);
}
id = &info->oti_id;
- if (!cfs_list_empty(&scrub->os_inconsistent_items)) {
+ if (!list_empty(&scrub->os_inconsistent_items)) {
/* Search order: 2. OI scrub pending list. */
result = osd_oii_lookup(dev, fid, id);
if (result == 0)
dt_txn_hook_commit(th);
/* call per-transaction callbacks if any */
- cfs_list_for_each_entry_safe(dcb, tmp, &oh->ot_dcb_list, dcb_linkage) {
+ list_for_each_entry_safe(dcb, tmp, &oh->ot_dcb_list, dcb_linkage) {
LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
"commit callback entry: magic=%x name='%s'\n",
dcb->dcb_magic, dcb->dcb_name);
- cfs_list_del_init(&dcb->dcb_linkage);
+ list_del_init(&dcb->dcb_linkage);
dcb->dcb_func(NULL, th, dcb, error);
}
atomic_set(&th->th_refc, 1);
th->th_alloc_size = sizeof(*oh);
oti->oti_dev = osd_dt_dev(d);
- CFS_INIT_LIST_HEAD(&oh->ot_dcb_list);
+ INIT_LIST_HEAD(&oh->ot_dcb_list);
osd_th_alloced(oh);
memset(oti->oti_declare_ops, 0,
int rc;
ENTRY;
- if (ss == NULL)
- RETURN(1);
+ LASSERT(ss != NULL);
+ LASSERT(ss->ss_server_fld != NULL);
rc = osd_fld_lookup(env, osd, seq, range);
if (rc != 0) {
*/
wait_event(iobuf->dr_wait,
atomic_read(&iobuf->dr_numreqs) == 0);
+ osd_fini_iobuf(oti->oti_dev, iobuf);
if (!rc)
rc = iobuf->dr_error;
LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
LASSERT(&dcb->dcb_func != NULL);
- cfs_list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);
+ list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);
return 0;
}
osd_index_fini(obj);
if (inode != NULL) {
struct qsd_instance *qsd = osd_obj2dev(obj)->od_quota_slave;
- qid_t uid = inode->i_uid;
- qid_t gid = inode->i_gid;
+ qid_t uid = i_uid_read(inode);
+ qid_t gid = i_gid_read(inode);
iput(inode);
obj->oo_inode = NULL;
struct dt_device_param *param)
{
struct super_block *sb = osd_sb(osd_dt_dev(dev));
+ int ea_overhead;
/*
* XXX should be taken from not-yet-existing fs abstraction layer.
if (test_opt(sb, POSIX_ACL))
param->ddp_mntopts |= MNTOPT_ACL;
+ /* LOD might calculate the max stripe count based on max_ea_size,
+ * so we need take account in the overhead as well,
+ * xattr_header + magic + xattr_entry_head */
+ ea_overhead = sizeof(struct ldiskfs_xattr_header) + sizeof(__u32) +
+ LDISKFS_XATTR_LEN(XATTR_NAME_MAX_LEN);
+
#if defined(LDISKFS_FEATURE_INCOMPAT_EA_INODE)
- if (LDISKFS_HAS_INCOMPAT_FEATURE(sb, LDISKFS_FEATURE_INCOMPAT_EA_INODE))
- param->ddp_max_ea_size = LDISKFS_XATTR_MAX_LARGE_EA_SIZE;
- else
+ if (LDISKFS_HAS_INCOMPAT_FEATURE(sb, LDISKFS_FEATURE_INCOMPAT_EA_INODE))
+ param->ddp_max_ea_size = LDISKFS_XATTR_MAX_LARGE_EA_SIZE -
+ ea_overhead;
+ else
#endif
- param->ddp_max_ea_size = sb->s_blocksize;
-
+ param->ddp_max_ea_size = sb->s_blocksize - ea_overhead;
}
/*
+/* Translate the VFS inode's on-disk state into a lu_attr for the OSD layer.
+ * NOTE(review): rewritten to use the i_uid_read()/i_gid_read() accessors so
+ * uid/gid are extracted from kuid_t/kgid_t on user-namespace aware kernels;
+ * all other fields are copied exactly as before (rest is re-indent only). */
static void osd_inode_getattr(const struct lu_env *env,
-			      struct inode *inode, struct lu_attr *attr)
-{
-	attr->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
-			  LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
-			  LA_FLAGS | LA_NLINK | LA_RDEV | LA_BLKSIZE |
-			  LA_TYPE;
-
-	attr->la_atime = LTIME_S(inode->i_atime);
-	attr->la_mtime = LTIME_S(inode->i_mtime);
-	attr->la_ctime = LTIME_S(inode->i_ctime);
-	attr->la_mode = inode->i_mode;
-	attr->la_size = i_size_read(inode);
-	attr->la_blocks = inode->i_blocks;
-	attr->la_uid = inode->i_uid;
-	attr->la_gid = inode->i_gid;
-	attr->la_flags = LDISKFS_I(inode)->i_flags;
-	attr->la_nlink = inode->i_nlink;
-	attr->la_rdev = inode->i_rdev;
-	attr->la_blksize = 1 << inode->i_blkbits;
-	attr->la_blkbits = inode->i_blkbits;
+			      struct inode *inode, struct lu_attr *attr)
+{
+	attr->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
+			  LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
+			  LA_FLAGS | LA_NLINK | LA_RDEV | LA_BLKSIZE |
+			  LA_TYPE;
+
+	attr->la_atime = LTIME_S(inode->i_atime);
+	attr->la_mtime = LTIME_S(inode->i_mtime);
+	attr->la_ctime = LTIME_S(inode->i_ctime);
+	attr->la_mode = inode->i_mode;
+	attr->la_size = i_size_read(inode);
+	attr->la_blocks = inode->i_blocks;
+	attr->la_uid = i_uid_read(inode);
+	attr->la_gid = i_gid_read(inode);
+	attr->la_flags = LDISKFS_I(inode)->i_flags;
+	attr->la_nlink = inode->i_nlink;
+	attr->la_rdev = inode->i_rdev;
+	attr->la_blksize = 1 << inode->i_blkbits;
+	attr->la_blkbits = inode->i_blkbits;
}
static int osd_attr_get(const struct lu_env *env,
struct osd_object *obj;
struct osd_thread_info *info = osd_oti_get(env);
struct lquota_id_info *qi = &info->oti_qi;
+ qid_t uid;
+ qid_t gid;
long long bspace;
int rc = 0;
bool allocated;
* credits for updating quota accounting files and to trigger quota
* space adjustment once the operation is completed.*/
if ((attr->la_valid & LA_UID) != 0 &&
- attr->la_uid != obj->oo_inode->i_uid) {
+ attr->la_uid != (uid = i_uid_read(obj->oo_inode))) {
qi->lqi_type = USRQUOTA;
/* inode accounting */
RETURN(rc);
/* and one less inode for the current uid */
- qi->lqi_id.qid_uid = obj->oo_inode->i_uid;
+ qi->lqi_id.qid_uid = uid;
qi->lqi_space = -1;
rc = osd_declare_qid(env, oh, qi, true, NULL);
if (rc == -EDQUOT || rc == -EINPROGRESS)
RETURN(rc);
/* and finally less blocks for the current owner */
- qi->lqi_id.qid_uid = obj->oo_inode->i_uid;
+ qi->lqi_id.qid_uid = uid;
qi->lqi_space = -bspace;
rc = osd_declare_qid(env, oh, qi, true, NULL);
if (rc == -EDQUOT || rc == -EINPROGRESS)
}
if (attr->la_valid & LA_GID &&
- attr->la_gid != obj->oo_inode->i_gid) {
+ attr->la_gid != (gid = i_gid_read(obj->oo_inode))) {
qi->lqi_type = GRPQUOTA;
/* inode accounting */
RETURN(rc);
/* and one less inode for the current gid */
- qi->lqi_id.qid_gid = obj->oo_inode->i_gid;
+ qi->lqi_id.qid_gid = gid;
qi->lqi_space = -1;
rc = osd_declare_qid(env, oh, qi, true, NULL);
if (rc == -EDQUOT || rc == -EINPROGRESS)
RETURN(rc);
/* and finally less blocks for the current owner */
- qi->lqi_id.qid_gid = obj->oo_inode->i_gid;
+ qi->lqi_id.qid_gid = gid;
qi->lqi_space = -bspace;
rc = osd_declare_qid(env, oh, qi, true, NULL);
if (rc == -EDQUOT || rc == -EINPROGRESS)
}
static int osd_inode_setattr(const struct lu_env *env,
- struct inode *inode, const struct lu_attr *attr)
+ struct inode *inode, const struct lu_attr *attr)
{
- __u64 bits;
+ __u64 bits = attr->la_valid;
- bits = attr->la_valid;
+ /* Only allow set size for regular file */
+ if (!S_ISREG(inode->i_mode))
+ bits &= ~(LA_SIZE | LA_BLOCKS);
+
+ if (bits == 0)
+ return 0;
if (bits & LA_ATIME)
inode->i_atime = *osd_inode_time(env, inode, attr->la_atime);
if (bits & LA_BLOCKS)
inode->i_blocks = attr->la_blocks;
#endif
- if (bits & LA_MODE)
- inode->i_mode = (inode->i_mode & S_IFMT) |
- (attr->la_mode & ~S_IFMT);
- if (bits & LA_UID)
- inode->i_uid = attr->la_uid;
- if (bits & LA_GID)
- inode->i_gid = attr->la_gid;
- if (bits & LA_NLINK)
+ if (bits & LA_MODE)
+ inode->i_mode = (inode->i_mode & S_IFMT) |
+ (attr->la_mode & ~S_IFMT);
+ if (bits & LA_UID)
+ i_uid_write(inode, attr->la_uid);
+ if (bits & LA_GID)
+ i_gid_write(inode, attr->la_gid);
+ if (bits & LA_NLINK)
set_nlink(inode, attr->la_nlink);
- if (bits & LA_RDEV)
- inode->i_rdev = attr->la_rdev;
+ if (bits & LA_RDEV)
+ inode->i_rdev = attr->la_rdev;
if (bits & LA_FLAGS) {
/* always keep S_NOCMTIME */
static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr)
{
- if ((attr->la_valid & LA_UID && attr->la_uid != inode->i_uid) ||
- (attr->la_valid & LA_GID && attr->la_gid != inode->i_gid)) {
+ if ((attr->la_valid & LA_UID && attr->la_uid != i_uid_read(inode)) ||
+ (attr->la_valid & LA_GID && attr->la_gid != i_gid_read(inode))) {
struct iattr iattr;
int rc;
iattr.ia_valid |= ATTR_UID;
if (attr->la_valid & LA_GID)
iattr.ia_valid |= ATTR_GID;
- iattr.ia_uid = attr->la_uid;
- iattr.ia_gid = attr->la_gid;
+ iattr.ia_uid = make_kuid(&init_user_ns, attr->la_uid);
+ iattr.ia_gid = make_kgid(&init_user_ns, attr->la_gid);
rc = ll_vfs_dq_transfer(inode, &iattr);
if (rc) {
osd_trans_declare_op(env, oh, OSD_OT_DELETE,
osd_dto_credits_noquota[DTO_INDEX_DELETE] + 3);
/* one less inode */
- rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, -1, oh,
- false, true, NULL, false);
+ rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
+ -1, oh, false, true, NULL, false);
if (rc)
RETURN(rc);
/* data to be truncated */
- rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
- true, true, NULL, false);
+ rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
+ 0, oh, true, true, NULL, false);
RETURN(rc);
}
osd_trans_exec_op(env, th, OSD_OT_REF_ADD);
+ CDEBUG(D_INODE, DFID" increase nlink %d\n",
+ PFID(lu_object_fid(&dt->do_lu)), inode->i_nlink);
/*
* The DIR_NLINK feature allows directories to exceed LDISKFS_LINK_MAX
* (65000) subdirectories by storing "1" in i_nlink if the link count
* This also has to properly handle the case of inodes with nlink == 0
* in case they are being linked into the PENDING directory
*/
+#ifdef I_LINKABLE
+ /* This is necessary to increment from i_nlink == 0 */
+ spin_lock(&inode->i_lock);
+ inode->i_state |= I_LINKABLE;
+ spin_unlock(&inode->i_lock);
+#endif
+
spin_lock(&obj->oo_guard);
ldiskfs_inc_count(oh->ot_handle, inode);
- LASSERT(inode->i_nlink <= LDISKFS_LINK_MAX);
+ if (!S_ISDIR(inode->i_mode))
+ LASSERT(inode->i_nlink <= LDISKFS_LINK_MAX);
spin_unlock(&obj->oo_guard);
ll_dirty_inode(inode, I_DIRTY_DATASYNC);
return 0;
}
+ CDEBUG(D_INODE, DFID" decrease nlink %d\n",
+ PFID(lu_object_fid(&dt->do_lu)), inode->i_nlink);
+
ldiskfs_dec_count(oh->ot_handle, inode);
spin_unlock(&obj->oo_guard);
if (fl & LU_XATTR_CREATE)
fs_flags |= XATTR_CREATE;
+ if (strcmp(name, XATTR_NAME_LMV) == 0) {
+ struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
+ int rc;
+
+ rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
+ if (rc != 0)
+ RETURN(rc);
+
+ lma->lma_incompat |= LMAI_STRIPED;
+ lustre_lma_swab(lma);
+ rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
+ sizeof(*lma), XATTR_REPLACE);
+ if (rc != 0)
+ RETURN(rc);
+ }
+
return __osd_xattr_set(info, inode, name, buf->lb_buf, buf->lb_len,
fs_flags);
}
case LC_ID_NONE:
RETURN(NULL);
case LC_ID_PLAIN:
- capa->lc_uid = obj->oo_inode->i_uid;
- capa->lc_gid = obj->oo_inode->i_gid;
+ capa->lc_uid = i_uid_read(obj->oo_inode);
+ capa->lc_gid = i_gid_read(obj->oo_inode);
capa->lc_flags = LC_ID_PLAIN;
break;
case LC_ID_CONVERT: {
__u32 d[4], s[4];
- s[0] = obj->oo_inode->i_uid;
+ s[0] = i_uid_read(obj->oo_inode);
cfs_get_random_bytes(&(s[1]), sizeof(__u32));
- s[2] = obj->oo_inode->i_gid;
+ s[2] = i_uid_read(obj->oo_inode);
cfs_get_random_bytes(&(s[3]), sizeof(__u32));
rc = capa_encrypt_id(d, s, key->lk_key, CAPA_HMAC_KEY_MAX_LEN);
if (unlikely(rc))
RETURN(oc);
}
-static int osd_object_sync(const struct lu_env *env, struct dt_object *dt)
+static int osd_object_sync(const struct lu_env *env, struct dt_object *dt,
+ __u64 start, __u64 end)
{
struct osd_object *obj = osd_dt_obj(dt);
struct inode *inode = obj->oo_inode;
file->f_mapping = inode->i_mapping;
file->f_op = inode->i_fop;
set_file_inode(file, inode);
-#ifndef HAVE_FILE_FSYNC_4ARGS
+
+#ifdef HAVE_FILE_FSYNC_4ARGS
+ rc = file->f_op->fsync(file, start, end, 0);
+#elif defined(HAVE_FILE_FSYNC_2ARGS)
mutex_lock(&inode->i_mutex);
-#endif
- rc = do_fsync(file, 0);
-#ifndef HAVE_FILE_FSYNC_4ARGS
+ rc = file->f_op->fsync(file, 0);
+ mutex_unlock(&inode->i_mutex);
+#else
+ mutex_lock(&inode->i_mutex);
+ rc = file->f_op->fsync(file, dentry, 0);
mutex_unlock(&inode->i_mutex);
#endif
+
RETURN(rc);
}
* recheck under lock.
*/
if (!osd_has_index(obj))
- result = osd_iam_container_init(env, obj, dir);
+ result = osd_iam_container_init(env, obj,
+ obj->oo_dir);
else
result = 0;
up_write(&obj->oo_ext_idx_sem);
inode = osd_dt_obj(dt)->oo_inode;
LASSERT(inode);
- rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
- true, true, NULL, false);
+ rc = osd_declare_inode_qid(env, i_uid_read(inode), i_gid_read(inode),
+ 0, oh, true, true, NULL, false);
RETURN(rc);
}
static int osd_remote_fid(const struct lu_env *env, struct osd_device *osd,
struct lu_fid *fid)
{
+ struct seq_server_site *ss = osd_seq_site(osd);
ENTRY;
/* FID seqs not in FLDB, must be local seq */
if (unlikely(!fid_seq_in_fldb(fid_seq(fid))))
RETURN(0);
+ /* If FLD is not being initialized yet, it only happens during the
+ * initialization, likely during mgs initialization, and we assume
+ * this is local FID. */
+ if (ss == NULL || ss->ss_server_fld == NULL)
+ RETURN(0);
+
+ /* Only check the local FLDB here */
if (osd_seq_exists(env, osd, fid_seq(fid)))
RETURN(0);
osd_trans_declare_op(env, oh, OSD_OT_INSERT,
osd_dto_credits_noquota[DTO_INDEX_INSERT]);
- if (osd_dt_obj(dt)->oo_inode == NULL) {
- const char *name = (const char *)key;
- /* Object is not being created yet. Only happens when
- * 1. declare directory create
- * 2. declare insert .
- * 3. declare insert ..
- */
- LASSERT(strcmp(name, dotdot) == 0 || strcmp(name, dot) == 0);
- } else {
+ if (osd_dt_obj(dt)->oo_inode != NULL) {
struct inode *inode = osd_dt_obj(dt)->oo_inode;
/* We ignore block quota on meta pool (MDTs), so needn't
* calculate how many blocks will be consumed by this index
* insert */
- rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0,
+ rc = osd_declare_inode_qid(env, i_uid_read(inode),
+ i_gid_read(inode), 0,
oh, true, true, NULL, false);
}
rc = osd_ea_add_rec(env, obj, child_inode, name, rec, th);
+ CDEBUG(D_INODE, "parent %lu insert %s:%lu rc = %d\n",
+ obj->oo_inode->i_ino, name, child_inode->i_ino, rc);
+
iput(child_inode);
if (child != NULL)
osd_object_put(env, child);
{
}
+/* Callback cookie for ->readdir()/->iterate(): wraps the OSD directory
+ * iterator so osd_ldiskfs_filldir() can recover it from the opaque buffer
+ * argument.  NOTE(review): on HAVE_DIR_CONTEXT kernels this appears to rely
+ * on 'ctx' being the first member (the cast back from the context pointer) —
+ * confirm against the kernel's dir_context actor calling convention. */
+struct osd_filldir_cbs {
+#ifdef HAVE_DIR_CONTEXT
+	struct dir_context ctx;
+#endif
+	struct osd_it_ea *it;
+};
/**
* It is called internally by ->readdir(). It fills the
* iterator's in-memory data structure with required
* \retval 0 on success
* \retval 1 on buffer full
*/
-static int osd_ldiskfs_filldir(char *buf, const char *name, int namelen,
+static int osd_ldiskfs_filldir(void *buf, const char *name, int namelen,
loff_t offset, __u64 ino,
unsigned d_type)
{
- struct osd_it_ea *it = (struct osd_it_ea *)buf;
+ struct osd_it_ea *it = ((struct osd_filldir_cbs *)buf)->it;
struct osd_object *obj = it->oie_obj;
struct osd_it_ea_dirent *ent = it->oie_dirent;
struct lu_fid *fid = &ent->oied_fid;
*
* \retval 0 on success
* \retval -ve on error
+ * \retval +1 reach the end of entry
*/
static int osd_ldiskfs_it_fill(const struct lu_env *env,
const struct dt_it *di)
struct osd_object *obj = it->oie_obj;
struct inode *inode = obj->oo_inode;
struct htree_lock *hlock = NULL;
- int result = 0;
+ struct file *filp = &it->oie_file;
+ int rc = 0;
+ struct osd_filldir_cbs buf = {
+#ifdef HAVE_DIR_CONTEXT
+ .ctx.actor = osd_ldiskfs_filldir,
+#endif
+ .it = it
+ };
ENTRY;
it->oie_dirent = it->oie_buf;
down_read(&obj->oo_ext_idx_sem);
}
- result = inode->i_fop->readdir(&it->oie_file, it,
- (filldir_t) osd_ldiskfs_filldir);
+#ifdef HAVE_DIR_CONTEXT
+ buf.ctx.pos = filp->f_pos;
+ rc = inode->i_fop->iterate(filp, &buf.ctx);
+ filp->f_pos = buf.ctx.pos;
+#else
+ rc = inode->i_fop->readdir(filp, &buf, osd_ldiskfs_filldir);
+#endif
if (hlock != NULL)
ldiskfs_htree_unlock(hlock);
/*If it does not get any dirent, it means it has been reached
*to the end of the dir */
it->oie_file.f_pos = ldiskfs_get_htree_eof(&it->oie_file);
+ if (rc == 0)
+ rc = 1;
} else {
it->oie_dirent = it->oie_buf;
it->oie_it_dirent = 1;
}
- RETURN(result);
+ RETURN(rc);
}
/**
LASSERT(de->rec_len >= de->name_len + sizeof(struct osd_fid_pack));
rc = ldiskfs_journal_get_write_access(jh, bh);
- if (rc != 0) {
- CERROR("%.16s: fail to write access for update dirent: "
- "name = %.*s, rc = %d\n",
- LDISKFS_SB(sb)->s_es->s_volume_name,
- ent->oied_namelen, ent->oied_name, rc);
+ if (rc != 0)
RETURN(rc);
- }
rec = (struct osd_fid_pack *)(de->name + de->name_len + 1);
fid_cpu_to_be((struct lu_fid *)rec->fp_area, fid);
rc = ldiskfs_journal_dirty_metadata(jh, bh);
- if (rc != 0)
- CERROR("%.16s: fail to dirty metadata for update dirent: "
- "name = %.*s, rc = %d\n",
- LDISKFS_SB(sb)->s_es->s_volume_name,
- ent->oied_namelen, ent->oied_name, rc);
RETURN(rc);
}
if (osd_dirent_has_space(de->rec_len, ent->oied_namelen,
dir->i_sb->s_blocksize)) {
rc = ldiskfs_journal_get_write_access(jh, bh);
- if (rc != 0) {
- CERROR("%.16s: fail to write access for reinsert "
- "dirent: name = %.*s, rc = %d\n",
- LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
- ent->oied_namelen, ent->oied_name, rc);
+ if (rc != 0)
RETURN(rc);
- }
de->name[de->name_len] = 0;
rec = (struct osd_fid_pack *)(de->name + de->name_len + 1);
de->file_type |= LDISKFS_DIRENT_LUFID;
rc = ldiskfs_journal_dirty_metadata(jh, bh);
- if (rc != 0)
- CERROR("%.16s: fail to dirty metadata for reinsert "
- "dirent: name = %.*s, rc = %d\n",
- LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
- ent->oied_namelen, ent->oied_name, rc);
RETURN(rc);
}
rc = ldiskfs_delete_entry(jh, dir, de, bh);
- if (rc != 0) {
- CERROR("%.16s: fail to delete entry for reinsert dirent: "
- "name = %.*s, rc = %d\n",
- LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
- ent->oied_namelen, ent->oied_name, rc);
+ if (rc != 0)
RETURN(rc);
- }
dentry = osd_child_dentry_by_inode(env, dir, ent->oied_name,
ent->oied_namelen);
/* It is too bad, we cannot reinsert the name entry back.
* That means we lose it! */
if (rc != 0)
- CERROR("%.16s: fail to insert entry for reinsert dirent: "
- "name = %.*s, rc = %d\n",
+ CDEBUG(D_LFSCK, "%.16s: fail to reinsert the dirent, "
+ "dir = %lu/%u, name = %.*s, "DFID": rc = %d\n",
LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
- ent->oied_namelen, ent->oied_name, rc);
+ dir->i_ino, dir->i_generation,
+ ent->oied_namelen, ent->oied_name, PFID(fid), rc);
RETURN(rc);
}
jh = osd_journal_start_sb(sb, LDISKFS_HT_MISC, credits);
if (IS_ERR(jh)) {
rc = PTR_ERR(jh);
- CERROR("%.16s: fail to start trans for dirent "
- "check_repair: credits %d, name %.*s, rc %d\n",
- devname, credits, ent->oied_namelen,
- ent->oied_name, rc);
+ CDEBUG(D_LFSCK, "%.16s: fail to start trans for dirent "
+ "check_repair, dir = %lu/%u, credits = %d, "
+ "name = %.*s: rc = %d\n",
+ devname, dir->i_ino, dir->i_generation, credits,
+ ent->oied_namelen, ent->oied_name, rc);
RETURN(rc);
}
if (rc == -ENOENT || rc == -ESTALE) {
*attr |= LUDA_IGNORE;
rc = 0;
+ } else {
+ CDEBUG(D_LFSCK, "%.16s: fail to iget for dirent "
+ "check_repair, dir = %lu/%u, name = %.*s: "
+ "rc = %d\n",
+ devname, dir->i_ino, dir->i_generation,
+ ent->oied_namelen, ent->oied_name, rc);
}
GOTO(out_journal, rc);
rc = osd_dirent_update(jh, sb, ent, fid, bh, de);
if (rc == 0)
*attr |= LUDA_REPAIR;
+ else
+ CDEBUG(D_LFSCK, "%.16s: fail to update FID "
+ "in the dirent, dir = %lu/%u, "
+ "name = %.*s, "DFID": rc = %d\n",
+ devname, dir->i_ino, dir->i_generation,
+ ent->oied_namelen, ent->oied_name,
+ PFID(fid), rc);
} else {
/* Do not repair under dryrun mode. */
if (*attr & LUDA_VERIFY_DRYRUN) {
fid, bh, de, hlock);
if (rc == 0)
*attr |= LUDA_REPAIR;
+ else
+ CDEBUG(D_LFSCK, "%.16s: fail to append FID "
+ "after the dirent, dir = %lu/%u, "
+ "name = %.*s, "DFID": rc = %d\n",
+ devname, dir->i_ino, dir->i_generation,
+ ent->oied_namelen, ent->oied_name,
+ PFID(fid), rc);
}
} else if (rc == -ENODATA) {
/* Do not repair under dryrun mode. */
rc = osd_ea_fid_set(info, inode, fid, 0, 0);
if (rc == 0)
*attr |= LUDA_REPAIR;
+ else
+ CDEBUG(D_LFSCK, "%.16s: fail to set LMA for "
+ "update dirent, dir = %lu/%u, "
+ "name = %.*s, "DFID": rc = %d\n",
+ devname, dir->i_ino, dir->i_generation,
+ ent->oied_namelen, ent->oied_name,
+ PFID(fid), rc);
} else {
lu_igif_build(fid, inode->i_ino, inode->i_generation);
/* It is probably IGIF object. Only aappend the
fid, bh, de, hlock);
if (rc == 0)
*attr |= LUDA_UPGRADE;
+ else
+ CDEBUG(D_LFSCK, "%.16s: fail to append IGIF "
+ "after the dirent, dir = %lu/%u, "
+ "name = %.*s, "DFID": rc = %d\n",
+ devname, dir->i_ino, dir->i_generation,
+ ent->oied_namelen, ent->oied_name,
+ PFID(fid), rc);
}
}
} else {
attr &= ~LU_DIRENT_ATTRS_MASK;
if (!fid_is_sane(fid)) {
- if (OBD_FAIL_CHECK(OBD_FAIL_FID_LOOKUP))
+ if (OBD_FAIL_CHECK(OBD_FAIL_FID_LOOKUP) &&
+ likely(it->oie_dirent->oied_namelen != 2 ||
+ it->oie_dirent->oied_name[0] != '.' ||
+ it->oie_dirent->oied_name[1] != '.'))
RETURN(-ENOENT);
rc = osd_ea_fid_get(env, obj, ino, fid, id);
}
/**
+ * Returns the record size at current position.
+ *
+ * This function will return record (lu_dirent) size in bytes.
+ *
+ * \param[in] env	execution environment
+ * \param[in] di	iterator's in memory structure
+ * \param[in] attr	attribute of the entry, only requires LUDA_TYPE to
+ *			calculate the lu_dirent size.
+ *
+ * \retval	record size (in bytes & in memory) of the current lu_dirent
+ *		entry.
+ */
+static int osd_it_ea_rec_size(const struct lu_env *env, const struct dt_it *di,
+			      __u32 attr)
+{
+	struct osd_it_ea *it = (struct osd_it_ea *)di;
+
+	return lu_dirent_calc_size(it->oie_dirent->oied_namelen, attr);
+}
+
+
+/**
* Returns a cookie for current position of the iterator head, so that
* user can use this cookie to load/start the iterator next time.
*
it->oie_file.f_pos = hash;
rc = osd_ldiskfs_it_fill(env, di);
+ if (rc > 0)
+ rc = -ENODATA;
+
if (rc == 0)
rc = +1;
* mode (i.e. to run 2.0 mds on 1.8 disk) (b11826)
*/
static const struct dt_index_operations osd_index_ea_ops = {
- .dio_lookup = osd_index_ea_lookup,
- .dio_declare_insert = osd_index_declare_ea_insert,
- .dio_insert = osd_index_ea_insert,
- .dio_declare_delete = osd_index_declare_ea_delete,
- .dio_delete = osd_index_ea_delete,
- .dio_it = {
- .init = osd_it_ea_init,
- .fini = osd_it_ea_fini,
- .get = osd_it_ea_get,
- .put = osd_it_ea_put,
- .next = osd_it_ea_next,
- .key = osd_it_ea_key,
- .key_size = osd_it_ea_key_size,
- .rec = osd_it_ea_rec,
- .store = osd_it_ea_store,
- .load = osd_it_ea_load
- }
+ .dio_lookup = osd_index_ea_lookup,
+ .dio_declare_insert = osd_index_declare_ea_insert,
+ .dio_insert = osd_index_ea_insert,
+ .dio_declare_delete = osd_index_declare_ea_delete,
+ .dio_delete = osd_index_ea_delete,
+ .dio_it = {
+ .init = osd_it_ea_init,
+ .fini = osd_it_ea_fini,
+ .get = osd_it_ea_get,
+ .put = osd_it_ea_put,
+ .next = osd_it_ea_next,
+ .key = osd_it_ea_key,
+ .key_size = osd_it_ea_key_size,
+ .rec = osd_it_ea_rec,
+ .rec_size = osd_it_ea_rec_size,
+ .store = osd_it_ea_store,
+ .load = osd_it_ea_load
+ }
};
static void *osd_key_init(const struct lu_context *ctx,
return osd_procfs_init(osd, name);
}
+/* Initialize the metadata sequence client used for FID allocation on this
+ * OSD.  No-op for OST-backed devices or when the client already exists;
+ * returns -ENODEV when the sequence site is not available and -ENOMEM on
+ * allocation failure.  On seq_client_init() failure the partially set up
+ * client is freed and od_cl_seq reset to NULL. */
+static int osd_fid_init(const struct lu_env *env, struct osd_device *osd)
+{
+	struct seq_server_site *ss = osd_seq_site(osd);
+	int rc;
+	ENTRY;
+
+	if (osd->od_is_ost || osd->od_cl_seq != NULL)
+		RETURN(0);
+
+	if (unlikely(ss == NULL))
+		RETURN(-ENODEV);
+
+	OBD_ALLOC_PTR(osd->od_cl_seq);
+	if (osd->od_cl_seq == NULL)
+		RETURN(-ENOMEM);
+
+	rc = seq_client_init(osd->od_cl_seq, NULL, LUSTRE_SEQ_METADATA,
+			     osd->od_svname, ss->ss_server_seq);
+
+	if (rc != 0) {
+		OBD_FREE_PTR(osd->od_cl_seq);
+		osd->od_cl_seq = NULL;
+	}
+
+	RETURN(rc);
+}
+
+/* Tear down the sequence client created by osd_fid_init().  Idempotent:
+ * safe to call when the client was never allocated (od_cl_seq == NULL). */
+static void osd_fid_fini(const struct lu_env *env, struct osd_device *osd)
+{
+	if (osd->od_cl_seq == NULL)
+		return;
+
+	seq_client_fini(osd->od_cl_seq);
+	OBD_FREE_PTR(osd->od_cl_seq);
+	osd->od_cl_seq = NULL;
+}
+
+
static int osd_shutdown(const struct lu_env *env, struct osd_device *o)
{
ENTRY;
o->od_quota_slave = NULL;
}
+ osd_fid_fini(env, o);
+
RETURN(0);
}
if (LDISKFS_HAS_INCOMPAT_FEATURE(o->od_mnt->mnt_sb,
LDISKFS_FEATURE_INCOMPAT_DIRDATA))
LDISKFS_SB(osd_sb(o))->s_mount_opt |= LDISKFS_MOUNT_DIRDATA;
+ else if (!o->od_is_ost)
+ CWARN("%s: device %s was upgraded from Lustre-1.x without "
+ "enabling the dirdata feature. If you do not want to "
+ "downgrade to Lustre-1.x again, you can enable it via "
+ "'tune2fs -O dirdata device'\n", name, dev);
#endif
inode = osd_sb(o)->s_root->d_inode;
lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
o->od_writethrough_cache = 1;
o->od_readcache_max_filesize = OSD_MAX_CACHE_SIZE;
- rc = osd_mount(env, o, cfg);
- if (rc)
- GOTO(out_capa, rc);
-
cplen = strlcpy(o->od_svname, lustre_cfg_string(cfg, 4),
sizeof(o->od_svname));
if (cplen >= sizeof(o->od_svname)) {
rc = -E2BIG;
- GOTO(out_mnt, rc);
+ GOTO(out_capa, rc);
}
if (server_name_is_ost(o->od_svname))
o->od_is_ost = 1;
+ rc = osd_mount(env, o, cfg);
+ if (rc != 0)
+ GOTO(out_capa, rc);
+
rc = osd_obj_map_init(env, o);
if (rc != 0)
GOTO(out_mnt, rc);
/* self-repair LMA by default */
o->od_lma_self_repair = 1;
- CFS_INIT_LIST_HEAD(&o->od_ios_list);
+ INIT_LIST_HEAD(&o->od_ios_list);
/* setup scrub, including OI files initialization */
rc = osd_scrub_setup(env, o);
if (rc < 0)
break;
case LCFG_PARAM:
LASSERT(&o->od_dt_dev);
- rc = class_process_proc_param(PARAM_OSD, lprocfs_osd_obd_vars,
- cfg, &o->od_dt_dev);
+ rc = class_process_proc_seq_param(PARAM_OSD,
+ lprocfs_osd_obd_vars,
+ cfg, &o->od_dt_dev);
if (rc > 0 || rc == -ENOSYS)
- rc = class_process_proc_param(PARAM_OST,
- lprocfs_osd_obd_vars,
- cfg, &o->od_dt_dev);
+ rc = class_process_proc_seq_param(PARAM_OST,
+ lprocfs_osd_obd_vars,
+ cfg, &o->od_dt_dev);
break;
default:
rc = -ENOSYS;
int result = 0;
ENTRY;
- if (osd->od_quota_slave != NULL)
+ if (osd->od_quota_slave != NULL) {
/* set up quota slave objects */
result = qsd_prepare(env, osd->od_quota_slave);
+ if (result != 0)
+ RETURN(result);
+ }
+
+ result = osd_fid_init(env, osd);
RETURN(result);
}
+/* obd_ops ->o_fid_alloc handler: allocate a new FID from the device's
+ * metadata sequence client (set up by osd_fid_init()).  'op_data' is part
+ * of the o_fid_alloc interface but unused here. */
+int osd_fid_alloc(const struct lu_env *env, struct obd_export *exp,
+		  struct lu_fid *fid, struct md_op_data *op_data)
+{
+	struct osd_device *osd = osd_dev(exp->exp_obd->obd_lu_dev);
+
+	return seq_client_alloc_fid(env, osd->od_cl_seq, fid);
+}
+
+
static const struct lu_object_operations osd_lu_obj_ops = {
.loo_object_init = osd_object_init,
.loo_object_delete = osd_object_delete,
static struct obd_ops osd_obd_device_ops = {
.o_owner = THIS_MODULE,
.o_connect = osd_obd_connect,
- .o_disconnect = osd_obd_disconnect
+ .o_disconnect = osd_obd_disconnect,
+ .o_fid_alloc = osd_fid_alloc,
};
static int __init osd_mod_init(void)
if (rc)
return rc;
- rc = class_register_type(&osd_obd_device_ops, NULL, NULL,
+ rc = class_register_type(&osd_obd_device_ops, NULL, true,
+ lprocfs_osd_module_vars,
#ifndef HAVE_ONLY_PROCFS_SEQ
- lprocfs_osd_module_vars,
+ NULL,
#endif
- LUSTRE_OSD_LDISKFS_NAME, &osd_device_type);
+ LUSTRE_OSD_LDISKFS_NAME, &osd_device_type);
if (rc)
lu_kmem_fini(ldiskfs_caches);
return rc;