[DTO_INDEX_INSERT] = 16,
[DTO_INDEX_DELETE] = 16,
/**
- * Unused now
+ * Used for OI scrub
*/
[DTO_INDEX_UPDATE] = 16,
/**
* NB: don't need any lock because no contention at this
* early stage */
inode->i_flags |= S_NOCMTIME;
+ inode->i_state |= I_LUSTRE_NOSCRUB;
obj->oo_inode = inode;
result = 0;
} else {
LASSERT(inode);
LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
+ /* Parallel control for OI scrub. In most cases there is no lock
+ * contention, so it will not affect unlink performance. */
+ cfs_mutex_lock(&inode->i_mutex);
if (S_ISDIR(inode->i_mode)) {
LASSERT(osd_inode_unlinked(inode) ||
inode->i_nlink == 1);
OSD_EXEC_OP(th, destroy);
result = osd_oi_delete(osd_oti_get(env), osd, fid, th);
+ cfs_mutex_unlock(&inode->i_mutex);
/* XXX: add to ext3 orphan list */
/* rc = ext3_orphan_add(handle_t *handle, struct inode *inode) */
LASSERT(dt_object_exists(dt));
LASSERT(inode->i_op != NULL && inode->i_op->setxattr != NULL);
- LASSERT(osd_write_locked(env, obj));
if (fl & LU_XATTR_REPLACE)
fs_flags |= XATTR_REPLACE;
const struct dt_it *di,
struct dt_rec *dtrec, __u32 attr)
{
- struct osd_it_iam *it = (struct osd_it_iam *)di;
- struct osd_thread_info *info = osd_oti_get(env);
- struct lu_fid *fid = &info->oti_fid;
- const struct osd_fid_pack *rec;
- struct lu_dirent *lde = (struct lu_dirent *)dtrec;
- char *name;
- int namelen;
- __u64 hash;
- int rc;
+ struct osd_it_iam *it = (struct osd_it_iam *)di;
+ struct osd_thread_info *info = osd_oti_get(env);
+ ENTRY;
- name = (char *)iam_it_key_get(&it->oi_it);
- if (IS_ERR(name))
- RETURN(PTR_ERR(name));
+ if (S_ISDIR(it->oi_obj->oo_inode->i_mode)) {
+ const struct osd_fid_pack *rec;
+ struct lu_fid *fid = &info->oti_fid;
+ struct lu_dirent *lde = (struct lu_dirent *)dtrec;
+ char *name;
+ int namelen;
+ __u64 hash;
+ int rc;
- namelen = iam_it_key_size(&it->oi_it);
+ name = (char *)iam_it_key_get(&it->oi_it);
+ if (IS_ERR(name))
+ RETURN(PTR_ERR(name));
- rec = (const struct osd_fid_pack *) iam_it_rec_get(&it->oi_it);
- if (IS_ERR(rec))
- RETURN(PTR_ERR(rec));
+ namelen = iam_it_key_size(&it->oi_it);
- rc = osd_fid_unpack(fid, rec);
- if (rc)
- RETURN(rc);
+ rec = (const struct osd_fid_pack *)iam_it_rec_get(&it->oi_it);
+ if (IS_ERR(rec))
+ RETURN(PTR_ERR(rec));
- hash = iam_it_store(&it->oi_it);
+ rc = osd_fid_unpack(fid, rec);
+ if (rc)
+ RETURN(rc);
- /* IAM does not store object type in IAM index (dir) */
- osd_it_pack_dirent(lde, fid, hash, name, namelen,
- 0, LUDA_FID);
+ hash = iam_it_store(&it->oi_it);
- return 0;
+ /* IAM does not store object type in IAM index (dir) */
+ osd_it_pack_dirent(lde, fid, hash, name, namelen,
+ 0, LUDA_FID);
+ } else {
+ iam_reccpy(&it->oi_it.ii_path.ip_leaf,
+ (struct iam_rec *)dtrec);
+ }
+
+ RETURN(0);
}
/**
static int osd_shutdown(const struct lu_env *env, struct osd_device *o)
{
- struct osd_thread_info *info = osd_oti_get(env);
-
- ENTRY;
+ ENTRY;
- if (o->od_oi_table != NULL)
- osd_oi_fini(info, o);
+ osd_scrub_cleanup(env, o);
- if (o->od_fsops) {
- fsfilt_put_ops(o->od_fsops);
- o->od_fsops = NULL;
- }
+ if (o->od_fsops) {
+ fsfilt_put_ops(o->od_fsops);
+ o->od_fsops = NULL;
+ }
- RETURN(0);
+ RETURN(0);
}
static int osd_mount(const struct lu_env *env,
l->ld_ops = &osd_lu_ops;
o->od_dt_dev.dd_ops = &osd_dt_ops;
cfs_spin_lock_init(&o->od_osfs_lock);
+ cfs_mutex_init(&o->od_otable_mutex);
o->od_osfs_age = cfs_time_shift_64(-1000);
o->od_capa_hash = init_capa_hash();
if (o->od_capa_hash == NULL) {
static int osd_prepare(const struct lu_env *env, struct lu_device *pdev,
struct lu_device *dev)
{
- struct osd_device *osd = osd_dev(dev);
- struct osd_thread_info *oti = osd_oti_get(env);
- int result;
-
- ENTRY;
+ struct osd_device *osd = osd_dev(dev);
+ int result;
+ ENTRY;
- /* 1. initialize oi before any file create or file open */
- result = osd_oi_init(oti, osd);
+ /* 1. set up scrub, including OI file initialization */
+ result = osd_scrub_setup(env, osd);
if (result < 0)
RETURN(result);