* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include <obd_support.h>
/* struct ptlrpc_thread */
#include <lustre_net.h>
-
-/* fid_is_local() */
#include <lustre_fid.h>
#include "osd_internal.h"
-#include "osd_igif.h"
/* llo_* api support */
#include <md_object.h>
-/* dt_acct_features */
-#include <lquota.h>
+#include <lustre_quota.h>
-#ifdef HAVE_LDISKFS_PDO
int ldiskfs_pdo = 1;
CFS_MODULE_PARM(ldiskfs_pdo, "i", int, 0644,
"ldiskfs with parallel directory operations");
-#else
-int ldiskfs_pdo = 0;
-#endif
static const char dot[] = ".";
static const char dotdot[] = "..";
static const struct dt_index_operations osd_index_iam_ops;
static const struct dt_index_operations osd_index_ea_ops;
+#ifdef OSD_TRACK_DECLARES
+/* Map each declared operation type to the operation expected to undo it
+ * when a transaction is rolled back.  Self-inverse operations map to
+ * themselves (attr_set, xattr_set, write); paired operations map to their
+ * inverse (create<->destroy, ref_add<->ref_del, insert<->delete).
+ * OSD_OT_MAX marks operations with no rollback counterpart. */
+int osd_trans_declare_op2rb[] = {
+ [OSD_OT_ATTR_SET] = OSD_OT_ATTR_SET,
+ [OSD_OT_PUNCH] = OSD_OT_MAX,
+ [OSD_OT_XATTR_SET] = OSD_OT_XATTR_SET,
+ [OSD_OT_CREATE] = OSD_OT_DESTROY,
+ [OSD_OT_DESTROY] = OSD_OT_CREATE,
+ [OSD_OT_REF_ADD] = OSD_OT_REF_DEL,
+ [OSD_OT_REF_DEL] = OSD_OT_REF_ADD,
+ [OSD_OT_WRITE] = OSD_OT_WRITE,
+ [OSD_OT_INSERT] = OSD_OT_DELETE,
+ [OSD_OT_DELETE] = OSD_OT_INSERT,
+ [OSD_OT_QUOTA] = OSD_OT_MAX,
+};
+#endif
+
static int osd_has_index(const struct osd_object *obj)
{
return obj->oo_dt.do_index_ops != NULL;
l = &mo->oo_dt.do_lu;
dt_object_init(&mo->oo_dt, NULL, d);
- if (osd_dev(d)->od_iop_mode)
- mo->oo_dt.do_ops = &osd_obj_ea_ops;
- else
- mo->oo_dt.do_ops = &osd_obj_ops;
-
+ mo->oo_dt.do_ops = &osd_obj_ea_ops;
l->lo_ops = &osd_lu_obj_ops;
- cfs_init_rwsem(&mo->oo_sem);
- cfs_init_rwsem(&mo->oo_ext_idx_sem);
- cfs_spin_lock_init(&mo->oo_guard);
+ init_rwsem(&mo->oo_sem);
+ init_rwsem(&mo->oo_ext_idx_sem);
+ spin_lock_init(&mo->oo_guard);
return l;
} else {
return NULL;
}
}
-static int osd_get_lma(struct inode *inode, struct dentry *dentry,
- struct lustre_mdt_attrs *lma)
+/*
+ * Fetch xattr \a name from \a inode into \a buf (at most \a len bytes),
+ * binding \a inode to the caller-provided scratch \a dentry so the
+ * inode operation ->getxattr() can be invoked.  Returns the attribute
+ * size on success or a negative errno (callers handle -ERANGE for a too
+ * small buffer and -ENODATA for a missing attribute).
+ */
+static inline int __osd_xattr_get(struct inode *inode, struct dentry *dentry,
+ const char *name, void *buf, int len)
+{
+ dentry->d_inode = inode;
+ return inode->i_op->getxattr(dentry, name, buf, len);
+}
+
+int osd_get_lma(struct osd_thread_info *info, struct inode *inode,
+ struct dentry *dentry, struct lustre_mdt_attrs *lma)
{
int rc;
- dentry->d_inode = inode;
- rc = inode->i_op->getxattr(dentry, XATTR_NAME_LMA, (void *)lma,
- sizeof(*lma));
+ rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA, (void *)lma,
+ sizeof(*lma));
+ if (rc == -ERANGE) {
+ /* try with old lma size */
+ rc = inode->i_op->getxattr(dentry, XATTR_NAME_LMA,
+ info->oti_mdt_attrs_old,
+ LMA_OLD_SIZE);
+ if (rc > 0)
+ memcpy(lma, info->oti_mdt_attrs_old, sizeof(*lma));
+ }
if (rc > 0) {
/* Check LMA compatibility */
if (lma->lma_incompat & ~cpu_to_le32(LMA_INCOMPAT_SUPP)) {
return inode;
}
-struct inode *osd_iget_fid(struct osd_thread_info *info, struct osd_device *dev,
- struct osd_inode_id *id, struct lu_fid *fid)
+static struct inode *
+osd_iget_fid(struct osd_thread_info *info, struct osd_device *dev,
+ struct osd_inode_id *id, struct lu_fid *fid)
{
struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
struct inode *inode;
if (IS_ERR(inode))
return inode;
- rc = osd_get_lma(inode, &info->oti_obj_dentry, lma);
+ rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
if (rc == 0) {
*fid = lma->lma_self_fid;
} else if (rc == -ENODATA) {
- LU_IGIF_BUILD(fid, inode->i_ino, inode->i_generation);
+ if (unlikely(inode == osd_sb(dev)->s_root->d_inode))
+ lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
+ else
+ lu_igif_build(fid, inode->i_ino, inode->i_generation);
} else {
iput(inode);
inode = ERR_PTR(rc);
if (IS_ERR(inode))
return inode;
- rc = osd_get_lma(inode, &info->oti_obj_dentry, lma);
+ rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
if (rc == -ENODATA)
return inode;
CDEBUG(D_LFSCK, "inconsistent obj: "DFID", %lu, "DFID"\n",
PFID(&lma->lma_self_fid), inode->i_ino, PFID(fid));
iput(inode);
- return ERR_PTR(EREMCHG);
+ return ERR_PTR(-EREMCHG);
}
return inode;
info = osd_oti_get(env);
LASSERT(info);
oic = &info->oti_cache;
- id = &oic->oic_lid;
if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOENT))
RETURN(-ENOENT);
/* Search order: 1. per-thread cache. */
if (lu_fid_eq(fid, &oic->oic_fid)) {
+ id = &oic->oic_lid;
goto iget;
- } else if (!cfs_list_empty(&scrub->os_inconsistent_items)) {
+ }
+
+ id = &info->oti_id;
+ if (!cfs_list_empty(&scrub->os_inconsistent_items)) {
/* Search order: 2. OI scrub pending list. */
result = osd_oii_lookup(dev, fid, id);
if (result == 0)
GOTO(out, result = 0);
/* Search order: 3. OI files. */
- result = osd_oi_lookup(info, dev, fid, id);
+ result = osd_oi_lookup(info, dev, fid, id, true);
if (result == -ENOENT) {
if (!fid_is_norm(fid) ||
!ldiskfs_test_bit(osd_oi_fid2idx(dev,fid),
obj->oo_inode = inode;
LASSERT(obj->oo_inode->i_sb == osd_sb(dev));
- if (dev->od_iop_mode) {
- obj->oo_compat_dot_created = 1;
- obj->oo_compat_dotdot_created = 1;
- }
+
+ obj->oo_compat_dot_created = 1;
+ obj->oo_compat_dotdot_created = 1;
if (!S_ISDIR(inode->i_mode) || !ldiskfs_pdo) /* done */
GOTO(out, result = 0);
LINVRNT(osd_invariant(obj));
+ if (fid_is_otable_it(&l->lo_header->loh_fid)) {
+ obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
+ l->lo_header->loh_attr |= LOHA_EXISTS;
+ return 0;
+ }
+
result = osd_fid_lookup(env, obj, lu_object_fid(l), conf);
obj->oo_dt.do_body_ops = &osd_body_ops_new;
- if (result == 0) {
- if (obj->oo_inode != NULL) {
- osd_object_init0(obj);
- } else if (fid_is_otable_it(&l->lo_header->loh_fid)) {
- obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
- /* LFSCK iterator object is special without inode */
- l->lo_header->loh_attr |= LOHA_EXISTS;
- }
- }
+ if (result == 0 && obj->oo_inode != NULL)
+ osd_object_init0(obj);
+
LINVRNT(osd_invariant(obj));
return result;
}
/**
* Helper function to convert time interval to microseconds packed in
- * long int (default time units for the counter in "stats" initialized
- * by lu_time_init() )
+ * long int.
*/
static long interval_to_usec(cfs_time_t start, cfs_time_t end)
{
/*
* Concurrency: doesn't access mutable data.
*/
-static int osd_param_is_sane(const struct osd_device *dev,
- const struct thandle *th)
+/* Inverted sense of the old osd_param_is_sane(): returns non-zero when the
+ * transaction declared more credits than one journal transaction can hold,
+ * so the (rare) failure path reads naturally at the call site. */
+static int osd_param_is_not_sane(const struct osd_device *dev,
+ const struct thandle *th)
{
- struct osd_thandle *oh;
- oh = container_of0(th, struct osd_thandle, ot_super);
- return oh->ot_credits <= osd_journal(dev)->j_max_transaction_buffers;
+ struct osd_thandle *oh = container_of(th, typeof(*oh), ot_super);
+
+ /* "not sane" == declared credits exceed the journal's per-transaction
+ * buffer limit */
+ return oh->ot_credits > osd_journal(dev)->j_max_transaction_buffers;
}
/*
oti->oti_dev = osd_dt_dev(d);
CFS_INIT_LIST_HEAD(&oh->ot_dcb_list);
osd_th_alloced(oh);
+
+ /* Reset the per-thread declare/credit tracking for the new transaction.
+ * Use sizeof(array), not OSD_OT_MAX: memset() takes a byte count, and
+ * these counters are integer-typed (they are printed with %u below), so
+ * clearing only OSD_OT_MAX bytes would leave most elements stale. */
+ memset(oti->oti_declare_ops, 0, sizeof(oti->oti_declare_ops));
+ memset(oti->oti_declare_ops_rb, 0, sizeof(oti->oti_declare_ops_rb));
+ memset(oti->oti_declare_ops_cred, 0, sizeof(oti->oti_declare_ops_cred));
+ oti->oti_rollback = false;
}
RETURN(th);
}
if (rc != 0)
GOTO(out, rc);
- if (!osd_param_is_sane(dev, th)) {
+ if (unlikely(osd_param_is_not_sane(dev, th))) {
+ static unsigned long last_printed;
+ static int last_credits;
+
CWARN("%.16s: too many transaction credits (%d > %d)\n",
LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
oh->ot_credits,
osd_journal(dev)->j_max_transaction_buffers);
- /* XXX Limit the credits to 'max_transaction_buffers', and
- * let the underlying filesystem to catch the error if
- * we really need so many credits.
- *
- * This should be removed when we can calculate the
- * credits precisely. */
- oh->ot_credits = osd_journal(dev)->j_max_transaction_buffers;
#ifdef OSD_TRACK_DECLARES
- CERROR(" attr_set: %d, punch: %d, xattr_set: %d,\n",
- oh->ot_declare_attr_set, oh->ot_declare_punch,
- oh->ot_declare_xattr_set);
- CERROR(" create: %d, ref_add: %d, ref_del: %d, write: %d\n",
- oh->ot_declare_create, oh->ot_declare_ref_add,
- oh->ot_declare_ref_del, oh->ot_declare_write);
- CERROR(" insert: %d, delete: %d, destroy: %d\n",
- oh->ot_declare_insert, oh->ot_declare_delete,
- oh->ot_declare_destroy);
+ CWARN(" create: %u/%u, delete: %u/%u, destroy: %u/%u\n",
+ oti->oti_declare_ops[OSD_OT_CREATE],
+ oti->oti_declare_ops_cred[OSD_OT_CREATE],
+ oti->oti_declare_ops[OSD_OT_DELETE],
+ oti->oti_declare_ops_cred[OSD_OT_DELETE],
+ oti->oti_declare_ops[OSD_OT_DESTROY],
+ oti->oti_declare_ops_cred[OSD_OT_DESTROY]);
+ CWARN(" attr_set: %u/%u, xattr_set: %u/%u\n",
+ oti->oti_declare_ops[OSD_OT_ATTR_SET],
+ oti->oti_declare_ops_cred[OSD_OT_ATTR_SET],
+ oti->oti_declare_ops[OSD_OT_XATTR_SET],
+ oti->oti_declare_ops_cred[OSD_OT_XATTR_SET]);
+ CWARN(" write: %u/%u, punch: %u/%u, quota %u/%u\n",
+ oti->oti_declare_ops[OSD_OT_WRITE],
+ oti->oti_declare_ops_cred[OSD_OT_WRITE],
+ oti->oti_declare_ops[OSD_OT_PUNCH],
+ oti->oti_declare_ops_cred[OSD_OT_PUNCH],
+ oti->oti_declare_ops[OSD_OT_QUOTA],
+ oti->oti_declare_ops_cred[OSD_OT_QUOTA]);
+ /* "delete" stats must read OSD_OT_DELETE (the rollback pair of insert,
+ * per osd_trans_declare_op2rb); OSD_OT_DESTROY is already reported in
+ * the first CWARN above. */
+ CWARN(" insert: %u/%u, delete: %u/%u\n",
+ oti->oti_declare_ops[OSD_OT_INSERT],
+ oti->oti_declare_ops_cred[OSD_OT_INSERT],
+ oti->oti_declare_ops[OSD_OT_DELETE],
+ oti->oti_declare_ops_cred[OSD_OT_DELETE]);
+ CWARN(" ref_add: %u/%u, ref_del: %u/%u\n",
+ oti->oti_declare_ops[OSD_OT_REF_ADD],
+ oti->oti_declare_ops_cred[OSD_OT_REF_ADD],
+ oti->oti_declare_ops[OSD_OT_REF_DEL],
+ oti->oti_declare_ops_cred[OSD_OT_REF_DEL]);
+
+ if (last_credits != oh->ot_credits &&
+ time_after(jiffies, last_printed + 60 * HZ)) {
+ libcfs_debug_dumpstack(NULL);
+ last_credits = oh->ot_credits;
+ last_printed = jiffies;
+ }
#endif
- }
+ /* XXX Limit the credits to 'max_transaction_buffers', and
+ * let the underlying filesystem to catch the error if
+ * we really need so many credits.
+ *
+ * This should be removed when we can calculate the
+ * credits precisely. */
+ oh->ot_credits = osd_journal(dev)->j_max_transaction_buffers;
+ }
/*
* XXX temporary stuff. Some abstraction layer should
osd_index_fini(obj);
if (inode != NULL) {
+ struct qsd_instance *qsd = osd_obj2dev(obj)->od_quota_slave;
+ qid_t uid = inode->i_uid;
+ qid_t gid = inode->i_gid;
+
iput(inode);
obj->oo_inode = NULL;
+
+ if (qsd != NULL) {
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct lquota_id_info *qi = &info->oti_qi;
+
+ /* Release granted quota to master if necessary */
+ qi->lqi_id.qid_uid = uid;
+ qsd_op_adjust(env, qsd, &qi->lqi_id, USRQUOTA);
+
+ qi->lqi_id.qid_uid = gid;
+ qsd_op_adjust(env, qsd, &qi->lqi_id, GRPQUOTA);
+ }
}
}
ksfs = &osd_oti_get(env)->oti_ksfs;
}
- cfs_spin_lock(&osd->od_osfs_lock);
+ spin_lock(&osd->od_osfs_lock);
/* cache 1 second */
if (cfs_time_before_64(osd->od_osfs_age, cfs_time_shift_64(-1))) {
result = sb->s_op->statfs(sb->s_root, ksfs);
}
}
- if (likely(result == 0))
- *sfs = osd->od_statfs;
- cfs_spin_unlock(&osd->od_osfs_lock);
+ if (likely(result == 0))
+ *sfs = osd->od_statfs;
+ spin_unlock(&osd->od_osfs_lock);
if (unlikely(env == NULL))
OBD_FREE_PTR(ksfs);
}
-/**
- * Helper function to get and fill the buffer with input values.
- */
-static struct lu_buf *osd_buf_get(const struct lu_env *env, void *area, ssize_t len)
-{
- struct lu_buf *buf;
-
- buf = &osd_oti_get(env)->oti_buf;
- buf->lb_buf = area;
- buf->lb_len = len;
- return buf;
-}
-
/*
* Concurrency: shouldn't matter.
*/
LINVRNT(osd_invariant(obj));
LASSERT(obj->oo_owner != env);
- cfs_down_read_nested(&obj->oo_sem, role);
+ down_read_nested(&obj->oo_sem, role);
LASSERT(obj->oo_owner == NULL);
oti->oti_r_locks++;
LINVRNT(osd_invariant(obj));
LASSERT(obj->oo_owner != env);
- cfs_down_write_nested(&obj->oo_sem, role);
+ down_write_nested(&obj->oo_sem, role);
LASSERT(obj->oo_owner == NULL);
obj->oo_owner = env;
LASSERT(oti->oti_r_locks > 0);
oti->oti_r_locks--;
- cfs_up_read(&obj->oo_sem);
+ up_read(&obj->oo_sem);
}
static void osd_object_write_unlock(const struct lu_env *env,
LASSERT(oti->oti_w_locks > 0);
oti->oti_w_locks--;
obj->oo_owner = NULL;
- cfs_up_write(&obj->oo_sem);
+ up_write(&obj->oo_sem);
}
static int osd_object_write_locked(const struct lu_env *env,
RETURN(-ESTALE);
}
- cfs_spin_lock(&capa_lock);
- for (i = 0; i < 2; i++) {
- if (keys[i].lk_keyid == capa->lc_keyid) {
- oti->oti_capa_key = keys[i];
- break;
- }
- }
- cfs_spin_unlock(&capa_lock);
+ spin_lock(&capa_lock);
+ for (i = 0; i < 2; i++) {
+ if (keys[i].lk_keyid == capa->lc_keyid) {
+ oti->oti_capa_key = keys[i];
+ break;
+ }
+ }
+ spin_unlock(&capa_lock);
if (i == 2) {
DEBUG_CAPA(D_ERROR, capa, "no matched capa key");
if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
return -EACCES;
- cfs_spin_lock(&obj->oo_guard);
- osd_inode_getattr(env, obj->oo_inode, attr);
- cfs_spin_unlock(&obj->oo_guard);
- return 0;
+ spin_lock(&obj->oo_guard);
+ osd_inode_getattr(env, obj->oo_inode, attr);
+ spin_unlock(&obj->oo_guard);
+ return 0;
}
static int osd_declare_attr_set(const struct lu_env *env,
oh = container_of0(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
- OSD_DECLARE_OP(oh, attr_set);
- oh->ot_credits += osd_dto_credits_noquota[DTO_ATTR_SET_BASE];
+ osd_trans_declare_op(env, oh, OSD_OT_ATTR_SET,
+ osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
if (attr == NULL || obj->oo_inode == NULL)
RETURN(rc);
if (bits & LA_GID)
inode->i_gid = attr->la_gid;
if (bits & LA_NLINK)
- inode->i_nlink = attr->la_nlink;
+ set_nlink(inode, attr->la_nlink);
if (bits & LA_RDEV)
inode->i_rdev = attr->la_rdev;
if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
return -EACCES;
- OSD_EXEC_OP(handle, attr_set);
+ osd_trans_exec_op(env, handle, OSD_OT_ATTR_SET);
inode = obj->oo_inode;
+ ll_vfs_dq_init(inode);
rc = osd_quota_transfer(inode, attr);
if (rc)
return rc;
- cfs_spin_lock(&obj->oo_guard);
- rc = osd_inode_setattr(env, inode, attr);
- cfs_spin_unlock(&obj->oo_guard);
+ spin_lock(&obj->oo_guard);
+ rc = osd_inode_setattr(env, inode, attr);
+ spin_unlock(&obj->oo_guard);
if (!rc)
inode->i_sb->s_op->dirty_inode(inode);
* NB: don't need any lock because no contention at this
* early stage */
inode->i_flags |= S_NOCMTIME;
- inode->i_state |= I_LUSTRE_NOSCRUB;
+
+ /* For new created object, it must be consistent,
+ * and it is unnecessary to scrub against it. */
+ ldiskfs_set_inode_state(inode, LDISKFS_STATE_LUSTRE_NOSCRUB);
obj->oo_inode = inode;
result = 0;
} else {
{
int result;
struct osd_thandle *oth;
- struct osd_device *osd = osd_obj2dev(obj);
__u32 mode = (attr->la_mode & (S_IFMT | S_IRWXUGO | S_ISVTX));
LASSERT(S_ISDIR(attr->la_mode));
oth = container_of(th, struct osd_thandle, ot_super);
LASSERT(oth->ot_handle->h_transaction != NULL);
result = osd_mkfile(info, obj, mode, hint, th);
- if (result == 0 && osd->od_iop_mode == 0) {
- LASSERT(obj->oo_inode != NULL);
- /*
- * XXX uh-oh... call low-level iam function directly.
- */
- result = iam_lvar_create(obj->oo_inode, OSD_NAME_LEN, 4,
- sizeof (struct osd_fid_pack),
- oth->ot_handle);
- }
return result;
}
* This inode should be marked dirty for i_rdev. Currently
* that is done in the osd_attr_init().
*/
- init_special_inode(obj->oo_inode, mode, attr->la_rdev);
+ init_special_inode(obj->oo_inode, obj->oo_inode->i_mode,
+ attr->la_rdev);
}
LINVRNT(osd_invariant(obj));
return result;
struct thandle *th)
{
int result;
- __u32 umask;
-
- /* we drop umask so that permissions we pass are not affected */
- umask = current->fs->umask;
- current->fs->umask = 0;
result = osd_create_type_f(dof->dof_type)(info, obj, attr, hint, dof,
th);
unlock_new_inode(obj->oo_inode);
}
- /* restore previous umask value */
- current->fs->umask = umask;
-
return result;
}
return osd_oi_insert(info, osd, fid, id, th);
}
+/*
+ * Resolve which server type and index owns \a fid and fill \a range.
+ *
+ * IGIF fids always belong to MDT0; IDIF fids carry the OST index inside
+ * the fid itself; other non-normal fids are treated as local to this MDT.
+ * Only normal fids require an actual FLD server lookup.
+ *
+ * Returns 0 on success or the negative errno from fld_server_lookup().
+ */
+int osd_fld_lookup(const struct lu_env *env, struct osd_device *osd,
+ const struct lu_fid *fid, struct lu_seq_range *range)
+{
+ struct seq_server_site *ss = osd_seq_site(osd);
+ int rc;
+
+ if (fid_is_igif(fid)) {
+ range->lsr_flags = LU_SEQ_RANGE_MDT;
+ range->lsr_index = 0;
+ return 0;
+ }
+
+ if (fid_is_idif(fid)) {
+ range->lsr_flags = LU_SEQ_RANGE_OST;
+ range->lsr_index = fid_idif_ost_idx(fid);
+ return 0;
+ }
+
+ if (!fid_is_norm(fid)) {
+ range->lsr_flags = LU_SEQ_RANGE_MDT;
+ if (ss != NULL)
+ /* FIXME: if ss is NULL the caller is not supposed to
+ * need lsr_index at all */
+ range->lsr_index = ss->ss_node_id;
+ return 0;
+ }
+
+ LASSERT(ss != NULL);
+ /* NOTE(review): -1 presumably acts as an "any range type" sentinel for
+ * the FLD lookup, which overwrites lsr_flags on success -- confirm
+ * against the lu_seq_range flag definitions. */
+ range->lsr_flags = -1;
+ rc = fld_server_lookup(env, ss->ss_server_fld, fid_seq(fid), range);
+ if (rc != 0) {
+ CERROR("%s can not find "DFID": rc = %d\n",
+ osd2lu_dev(osd)->ld_obd->obd_name, PFID(fid), rc);
+ }
+ return rc;
+}
+
+
static int osd_declare_object_create(const struct lu_env *env,
- struct dt_object *dt,
- struct lu_attr *attr,
- struct dt_allocation_hint *hint,
- struct dt_object_format *dof,
- struct thandle *handle)
+ struct dt_object *dt,
+ struct lu_attr *attr,
+ struct dt_allocation_hint *hint,
+ struct dt_object_format *dof,
+ struct thandle *handle)
{
+ struct lu_seq_range *range = &osd_oti_get(env)->oti_seq_range;
struct osd_thandle *oh;
int rc;
ENTRY;
- LASSERT(handle != NULL);
+ LASSERT(handle != NULL);
- oh = container_of0(handle, struct osd_thandle, ot_super);
- LASSERT(oh->ot_handle == NULL);
+ oh = container_of0(handle, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle == NULL);
- OSD_DECLARE_OP(oh, create);
- oh->ot_credits += osd_dto_credits_noquota[DTO_OBJECT_CREATE];
- /* XXX: So far, only normal fid needs be inserted into the oi,
- * things could be changed later. Revise following code then. */
- if (fid_is_norm(lu_object_fid(&dt->do_lu))) {
- OSD_DECLARE_OP(oh, insert);
- oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_INSERT];
+ osd_trans_declare_op(env, oh, OSD_OT_CREATE,
+ osd_dto_credits_noquota[DTO_OBJECT_CREATE]);
+ /* XXX: So far, only normal fid needs be inserted into the oi,
+ * things could be changed later. Revise following code then. */
+ if (fid_is_norm(lu_object_fid(&dt->do_lu)) &&
+ !fid_is_on_ost(osd_oti_get(env), osd_dt_dev(handle->th_dev),
+ lu_object_fid(&dt->do_lu))) {
/* Reuse idle OI block may cause additional one OI block
* to be changed. */
- oh->ot_credits += 1;
- }
- /* If this is directory, then we expect . and .. to be inserted as
- * well. The one directory block always needs to be created for the
- * directory, so we could use DTO_WRITE_BASE here (GDT, block bitmap,
- * block), there is no danger of needing a tree for the first block.
- */
- if (attr && S_ISDIR(attr->la_mode)) {
- OSD_DECLARE_OP(oh, insert);
- OSD_DECLARE_OP(oh, insert);
- oh->ot_credits += osd_dto_credits_noquota[DTO_WRITE_BASE];
- }
+ osd_trans_declare_op(env, oh, OSD_OT_INSERT,
+ osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);
+ }
+ /* If this is directory, then we expect . and .. to be inserted as
+ * well. The one directory block always needs to be created for the
+ * directory, so we could use DTO_WRITE_BASE here (GDT, block bitmap,
+ * block), there is no danger of needing a tree for the first block.
+ */
+ if (attr && S_ISDIR(attr->la_mode)) {
+ osd_trans_declare_op(env, oh, OSD_OT_INSERT,
+ osd_dto_credits_noquota[DTO_WRITE_BASE]);
+ osd_trans_declare_op(env, oh, OSD_OT_INSERT, 0);
+ }
if (!attr)
RETURN(0);
rc = osd_declare_inode_qid(env, attr->la_uid, attr->la_gid, 1, oh,
false, false, NULL, false);
+ if (rc != 0)
+ RETURN(rc);
+
+ /* It does fld look up inside declare, and the result will be
+ * added to fld cache, so the following fld lookup inside insert
+ * does not need send RPC anymore, so avoid send rpc with holding
+ * transaction */
+ if (fid_is_norm(lu_object_fid(&dt->do_lu)) &&
+ !fid_is_last_id(lu_object_fid(&dt->do_lu)))
+ osd_fld_lookup(env, osd_dt_dev(handle->th_dev),
+ lu_object_fid(&dt->do_lu), range);
+
+
RETURN(rc);
}
* 'tune2fs -O quota' will take care of creating them */
RETURN(-EPERM);
- OSD_EXEC_OP(th, create);
+ osd_trans_exec_op(env, th, OSD_OT_CREATE);
+ osd_trans_declare_rb(env, th, OSD_OT_REF_ADD);
result = __osd_object_create(info, obj, attr, hint, dof, th);
if (result == 0)
* Concurrency: must be locked
*/
static int osd_declare_object_destroy(const struct lu_env *env,
- struct dt_object *dt,
- struct thandle *th)
+ struct dt_object *dt,
+ struct thandle *th)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct inode *inode = obj->oo_inode;
- struct osd_thandle *oh;
- int rc;
- ENTRY;
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct inode *inode = obj->oo_inode;
+ struct osd_thandle *oh;
+ int rc;
+ ENTRY;
- oh = container_of0(th, struct osd_thandle, ot_super);
- LASSERT(oh->ot_handle == NULL);
- LASSERT(inode);
+ oh = container_of0(th, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle == NULL);
+ LASSERT(inode);
- OSD_DECLARE_OP(oh, destroy);
- OSD_DECLARE_OP(oh, delete);
- oh->ot_credits += osd_dto_credits_noquota[DTO_OBJECT_DELETE];
- /* XXX: So far, only normal fid needs to be inserted into the OI,
- * so only normal fid needs to be removed from the OI also. */
- if (fid_is_norm(lu_object_fid(&dt->do_lu))) {
- oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_DELETE];
- /* Recycle idle OI leaf may cause additional three OI blocks
- * to be changed. */
- oh->ot_credits += 3;
- }
+ osd_trans_declare_op(env, oh, OSD_OT_DELETE,
+ osd_dto_credits_noquota[DTO_OBJECT_DELETE]);
+ /* XXX: So far, only normal fid needs to be inserted into the OI,
+ * so only normal fid needs to be removed from the OI also.
+ * Recycle idle OI leaf may cause additional three OI blocks
+ * to be changed. */
+ osd_trans_declare_op(env, oh, OSD_OT_DESTROY,
+ fid_is_norm(lu_object_fid(&dt->do_lu)) ?
+ osd_dto_credits_noquota[DTO_INDEX_DELETE] + 3 : 0);
/* one less inode */
- rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, -1, oh,
+ rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, -1, oh,
false, true, NULL, false);
if (rc)
RETURN(rc);
/* data to be truncated */
- rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh, true,
- true, NULL, false);
- RETURN(rc);
+ rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
+ true, true, NULL, false);
+ RETURN(rc);
}
static int osd_object_destroy(const struct lu_env *env,
/* Parallel control for OI scrub. For most of cases, there is no
* lock contention. So it will not affect unlink performance. */
- cfs_mutex_lock(&inode->i_mutex);
- if (S_ISDIR(inode->i_mode)) {
- LASSERT(osd_inode_unlinked(inode) ||
- inode->i_nlink == 1);
- cfs_spin_lock(&obj->oo_guard);
- inode->i_nlink = 0;
- cfs_spin_unlock(&obj->oo_guard);
- inode->i_sb->s_op->dirty_inode(inode);
- } else {
- LASSERT(osd_inode_unlinked(inode));
- }
+ mutex_lock(&inode->i_mutex);
+ if (S_ISDIR(inode->i_mode)) {
+ LASSERT(osd_inode_unlinked(inode) ||
+ inode->i_nlink == 1);
+ spin_lock(&obj->oo_guard);
+ clear_nlink(inode);
+ spin_unlock(&obj->oo_guard);
+ inode->i_sb->s_op->dirty_inode(inode);
+ }
- OSD_EXEC_OP(th, destroy);
+ osd_trans_exec_op(env, th, OSD_OT_DESTROY);
result = osd_oi_delete(osd_oti_get(env), osd, fid, th);
- cfs_mutex_unlock(&inode->i_mutex);
+ mutex_unlock(&inode->i_mutex);
/* XXX: add to ext3 orphan list */
/* rc = ext3_orphan_add(handle_t *handle, struct inode *inode) */
RETURN(0);
}
-/**
- * Helper function for osd_xattr_set()
- */
-static int __osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
- const struct lu_buf *buf, const char *name, int fl)
-{
- struct osd_object *obj = osd_dt_obj(dt);
- struct inode *inode = obj->oo_inode;
- struct osd_thread_info *info = osd_oti_get(env);
- struct dentry *dentry = &info->oti_child_dentry;
- int fs_flags = 0;
- int rc;
-
- LASSERT(dt_object_exists(dt));
- LASSERT(inode->i_op != NULL && inode->i_op->setxattr != NULL);
-
- if (fl & LU_XATTR_REPLACE)
- fs_flags |= XATTR_REPLACE;
-
- if (fl & LU_XATTR_CREATE)
- fs_flags |= XATTR_CREATE;
+/*
+ * Set xattr \a name on \a inode to the \a buflen bytes at \a buf.
+ * \a fl takes the filesystem-level XATTR_{CREATE,REPLACE} flags; callers
+ * translate from LU_XATTR_* before calling (see osd_xattr_set()).
+ * The inode's quota context is initialized (ll_vfs_dq_init) before the
+ * modification, and the scratch dentry from \a info is bound to \a inode
+ * to invoke the inode operation ->setxattr().
+ */
+static inline int __osd_xattr_set(struct osd_thread_info *info,
+ struct inode *inode, const char *name,
+ const void *buf, int buflen, int fl)
+{
+ struct dentry *dentry = &info->oti_child_dentry;
- dentry->d_inode = inode;
- rc = inode->i_op->setxattr(dentry, name, buf->lb_buf,
- buf->lb_len, fs_flags);
- return rc;
+ ll_vfs_dq_init(inode);
+ dentry->d_inode = inode;
+ return inode->i_op->setxattr(dentry, name, buf, buflen, fl);
}
/**
static int osd_ea_fid_set(const struct lu_env *env, struct dt_object *dt,
const struct lu_fid *fid)
{
- struct osd_thread_info *info = osd_oti_get(env);
- struct lustre_mdt_attrs *mdt_attrs = &info->oti_mdt_attrs;
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct inode *inode = osd_dt_obj(dt)->oo_inode;
+ struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
+ int rc;
- lustre_lma_init(mdt_attrs, fid);
- lustre_lma_swab(mdt_attrs);
- return __osd_xattr_set(env, dt,
- osd_buf_get(env, mdt_attrs, sizeof *mdt_attrs),
- XATTR_NAME_LMA, LU_XATTR_CREATE);
+ lustre_lma_init(lma, fid);
+ lustre_lma_swab(lma);
+ rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma, sizeof(*lma),
+ XATTR_CREATE);
+ return rc;
}
/**
* its inmemory API.
*/
void osd_get_ldiskfs_dirent_param(struct ldiskfs_dentry_param *param,
- const struct dt_rec *fid)
+ const struct dt_rec *fid)
{
- param->edp_magic = LDISKFS_LUFID_MAGIC;
- param->edp_len = sizeof(struct lu_fid) + 1;
+ /* XXX: replace the check with "!fid_is_client_mdt_visible()" once
+ * FIDs in the OI file are introduced for local objects. */
+ if (!fid_is_norm((const struct lu_fid *)fid) &&
+ !fid_is_igif((const struct lu_fid *)fid)) {
+ /* edp_magic == 0 tells ldiskfs not to append a FID to the
+ * dirent for this (local/special) object */
+ param->edp_magic = 0;
+ return;
+ }
- fid_cpu_to_be((struct lu_fid *)param->edp_data,
- (struct lu_fid *)fid);
+ param->edp_magic = LDISKFS_LUFID_MAGIC;
+ param->edp_len = sizeof(struct lu_fid) + 1;
+ fid_cpu_to_be((struct lu_fid *)param->edp_data, (struct lu_fid *)fid);
}
/**
* 'tune2fs -O quota' will take care of creating them */
RETURN(-EPERM);
- OSD_EXEC_OP(th, create);
+ osd_trans_exec_op(env, th, OSD_OT_CREATE);
+ osd_trans_declare_rb(env, th, OSD_OT_REF_ADD);
result = __osd_object_create(info, obj, attr, hint, dof, th);
/* objects under osd root shld have igif fid, so dont add fid EA */
- if (result == 0 && fid_seq(fid) >= FID_SEQ_NORMAL)
+ /* For ost object, the fid will be stored during first write */
+ if (result == 0 && fid_seq(fid) >= FID_SEQ_NORMAL &&
+ !fid_is_on_ost(info, osd_dt_dev(th->th_dev), fid))
result = osd_ea_fid_set(env, dt, fid);
if (result == 0)
struct dt_object *dt,
struct thandle *handle)
{
- struct osd_thandle *oh;
+ struct osd_thandle *oh;
/* it's possible that object doesn't exist yet */
LASSERT(handle != NULL);
oh = container_of0(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
- OSD_DECLARE_OP(oh, ref_add);
- oh->ot_credits += osd_dto_credits_noquota[DTO_ATTR_SET_BASE];
+ osd_trans_declare_op(env, oh, OSD_OT_REF_ADD,
+ osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
- return 0;
+ return 0;
}
/*
LASSERT(osd_write_locked(env, obj));
LASSERT(th != NULL);
- OSD_EXEC_OP(th, ref_add);
+ osd_trans_exec_op(env, th, OSD_OT_REF_ADD);
- /*
- * DIR_NLINK feature is set for compatibility reasons if:
- * 1) nlinks > LDISKFS_LINK_MAX, or
- * 2) nlinks == 2, since this indicates i_nlink was previously 1.
- *
- * It is easier to always set this flag (rather than check and set),
- * since it has less overhead, and the superblock will be dirtied
- * at some point. Both e2fsprogs and any Lustre-supported ldiskfs
- * do not actually care whether this flag is set or not.
- */
- cfs_spin_lock(&obj->oo_guard);
- inode->i_nlink++;
- if (S_ISDIR(inode->i_mode) && inode->i_nlink > 1) {
- if (inode->i_nlink >= LDISKFS_LINK_MAX ||
- inode->i_nlink == 2)
- inode->i_nlink = 1;
- }
- LASSERT(inode->i_nlink <= LDISKFS_LINK_MAX);
- cfs_spin_unlock(&obj->oo_guard);
- inode->i_sb->s_op->dirty_inode(inode);
- LINVRNT(osd_invariant(obj));
+ /*
+ * DIR_NLINK feature is set for compatibility reasons if:
+ * 1) nlinks > LDISKFS_LINK_MAX, or
+ * 2) nlinks == 2, since this indicates i_nlink was previously 1.
+ *
+ * It is easier to always set this flag (rather than check and set),
+ * since it has less overhead, and the superblock will be dirtied
+ * at some point. Both e2fsprogs and any Lustre-supported ldiskfs
+ * do not actually care whether this flag is set or not.
+ */
+ spin_lock(&obj->oo_guard);
+ /* inc_nlink() from 0 may trigger the kernel's WARN_ON, so restore a
+ * zeroed link count with set_nlink() instead (kernel style: space
+ * after "if") */
+ if (inode->i_nlink == 0)
+ set_nlink(inode, 1);
+ else
+ inc_nlink(inode);
+ if (S_ISDIR(inode->i_mode) && inode->i_nlink > 1) {
+ if (inode->i_nlink >= LDISKFS_LINK_MAX ||
+ inode->i_nlink == 2)
+ set_nlink(inode, 1);
+ }
+ LASSERT(inode->i_nlink <= LDISKFS_LINK_MAX);
+ spin_unlock(&obj->oo_guard);
+ inode->i_sb->s_op->dirty_inode(inode);
+ LINVRNT(osd_invariant(obj));

+ return 0;
}
static int osd_declare_object_ref_del(const struct lu_env *env,
oh = container_of0(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
- OSD_DECLARE_OP(oh, ref_del);
- oh->ot_credits += osd_dto_credits_noquota[DTO_ATTR_SET_BASE];
+ osd_trans_declare_op(env, oh, OSD_OT_REF_DEL,
+ osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
- return 0;
+ return 0;
}
/*
LASSERT(osd_write_locked(env, obj));
LASSERT(th != NULL);
- OSD_EXEC_OP(th, ref_del);
-
- cfs_spin_lock(&obj->oo_guard);
- LASSERT(inode->i_nlink > 0);
- inode->i_nlink--;
- /* If this is/was a many-subdir directory (nlink > LDISKFS_LINK_MAX)
- * then the nlink count is 1. Don't let it be set to 0 or the directory
- * inode will be deleted incorrectly. */
- if (S_ISDIR(inode->i_mode) && inode->i_nlink == 0)
- inode->i_nlink++;
- cfs_spin_unlock(&obj->oo_guard);
- inode->i_sb->s_op->dirty_inode(inode);
- LINVRNT(osd_invariant(obj));
+ osd_trans_exec_op(env, th, OSD_OT_REF_DEL);
+
+ spin_lock(&obj->oo_guard);
+ LASSERT(inode->i_nlink > 0);
+ drop_nlink(inode);
+ /* If this is/was a many-subdir directory (nlink > LDISKFS_LINK_MAX)
+ * then the nlink count is 1. Don't let it be set to 0 or the directory
+ * inode will be deleted incorrectly. */
+ if (S_ISDIR(inode->i_mode) && inode->i_nlink == 0)
+ set_nlink(inode, 1);
+ spin_unlock(&obj->oo_guard);
+ inode->i_sb->s_op->dirty_inode(inode);
+ LINVRNT(osd_invariant(obj));
- return 0;
+ return 0;
}
/*
if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
return -EACCES;
- dentry->d_inode = inode;
- return inode->i_op->getxattr(dentry, name, buf->lb_buf, buf->lb_len);
+ return __osd_xattr_get(inode, dentry, name, buf->lb_buf, buf->lb_len);
}
oh = container_of0(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
- OSD_DECLARE_OP(oh, xattr_set);
- if (strcmp(name, XATTR_NAME_VERSION) == 0)
- oh->ot_credits += osd_dto_credits_noquota[DTO_ATTR_SET_BASE];
- else
- oh->ot_credits += osd_dto_credits_noquota[DTO_XATTR_SET];
+ osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET,
+ strcmp(name, XATTR_NAME_VERSION) == 0 ?
+ osd_dto_credits_noquota[DTO_ATTR_SET_BASE] :
+ osd_dto_credits_noquota[DTO_XATTR_SET]);
return 0;
}
const struct lu_buf *buf, const char *name, int fl,
struct thandle *handle, struct lustre_capa *capa)
{
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct inode *inode = obj->oo_inode;
+ struct osd_thread_info *info = osd_oti_get(env);
+ int fs_flags = 0;
+
LASSERT(handle != NULL);
/* version set is not real XATTR */
if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
return -EACCES;
- OSD_EXEC_OP(handle, xattr_set);
- return __osd_xattr_set(env, dt, buf, name, fl);
+ osd_trans_exec_op(env, handle, OSD_OT_XATTR_SET);
+ if (fl & LU_XATTR_REPLACE)
+ fs_flags |= XATTR_REPLACE;
+
+ if (fl & LU_XATTR_CREATE)
+ fs_flags |= XATTR_CREATE;
+
+ return __osd_xattr_set(info, inode, name, buf->lb_buf, buf->lb_len,
+ fs_flags);
}
/*
oh = container_of0(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
- OSD_DECLARE_OP(oh, xattr_set);
- oh->ot_credits += osd_dto_credits_noquota[DTO_XATTR_SET];
+ osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET,
+ osd_dto_credits_noquota[DTO_XATTR_SET]);
- return 0;
+ return 0;
}
/*
LASSERT(dt_object_exists(dt));
LASSERT(inode->i_op != NULL && inode->i_op->removexattr != NULL);
- LASSERT(osd_write_locked(env, obj));
LASSERT(handle != NULL);
if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
return -EACCES;
- OSD_EXEC_OP(handle, xattr_set);
+ osd_trans_exec_op(env, handle, OSD_OT_XATTR_SET);
+ ll_vfs_dq_init(inode);
dentry->d_inode = inode;
rc = inode->i_op->removexattr(dentry, name);
return rc;
RETURN(oc);
}
- cfs_spin_lock(&capa_lock);
- *key = dev->od_capa_keys[1];
- cfs_spin_unlock(&capa_lock);
+ spin_lock(&capa_lock);
+ *key = dev->od_capa_keys[1];
+ spin_unlock(&capa_lock);
capa->lc_keyid = key->lk_keyid;
capa->lc_expiry = cfs_time_current_sec() + dev->od_capa_timeout;
int result;
int skip_iam = 0;
struct osd_object *obj = osd_dt_obj(dt);
- struct osd_device *osd = osd_obj2dev(obj);
LINVRNT(osd_invariant(obj));
LASSERT(dt_object_exists(dt));
if (osd_object_is_root(obj)) {
dt->do_index_ops = &osd_index_ea_ops;
result = 0;
- } else if (feat == &dt_directory_features && osd->od_iop_mode) {
+ } else if (feat == &dt_directory_features) {
dt->do_index_ops = &osd_index_ea_ops;
if (S_ISDIR(obj->oo_inode->i_mode))
result = 0;
} else if (unlikely(feat == &dt_otable_features)) {
dt->do_index_ops = &osd_otable_ops;
return 0;
- } else if (feat == &dt_acct_features) {
+ } else if (unlikely(feat == &dt_acct_features)) {
dt->do_index_ops = &osd_acct_index_ops;
result = 0;
skip_iam = 1;
OBD_ALLOC_PTR(dir);
if (dir != NULL) {
- cfs_spin_lock(&obj->oo_guard);
- if (obj->oo_dir == NULL)
- obj->oo_dir = dir;
- else
- /*
- * Concurrent thread allocated container data.
- */
- OBD_FREE_PTR(dir);
- cfs_spin_unlock(&obj->oo_guard);
- /*
- * Now, that we have container data, serialize its
- * initialization.
- */
- cfs_down_write(&obj->oo_ext_idx_sem);
- /*
- * recheck under lock.
- */
- if (!osd_has_index(obj))
- result = osd_iam_container_init(env, obj, dir);
- else
- result = 0;
- cfs_up_write(&obj->oo_ext_idx_sem);
+ spin_lock(&obj->oo_guard);
+ if (obj->oo_dir == NULL)
+ obj->oo_dir = dir;
+ else
+ /*
+ * Concurrent thread allocated container data.
+ */
+ OBD_FREE_PTR(dir);
+ spin_unlock(&obj->oo_guard);
+			/*
+			 * Now that we have container data, serialize its
+			 * initialization.
+			 */
+ down_write(&obj->oo_ext_idx_sem);
+ /*
+ * recheck under lock.
+ */
+ if (!osd_has_index(obj))
+ result = osd_iam_container_init(env, obj, dir);
+ else
+ result = 0;
+ up_write(&obj->oo_ext_idx_sem);
} else {
result = -ENOMEM;
}
oh = container_of0(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
- OSD_DECLARE_OP(oh, delete);
- oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_DELETE];
+ osd_trans_declare_op(env, oh, OSD_OT_DELETE,
+ osd_dto_credits_noquota[DTO_INDEX_DELETE]);
- return 0;
+ return 0;
}
/**
if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_DELETE))
RETURN(-EACCES);
- OSD_EXEC_OP(handle, delete);
+ osd_trans_exec_op(env, handle, OSD_OT_DELETE);
ipd = osd_idx_ipd_get(env, bag);
if (unlikely(ipd == NULL))
}
static int osd_index_declare_ea_delete(const struct lu_env *env,
- struct dt_object *dt,
- const struct dt_key *key,
- struct thandle *handle)
+ struct dt_object *dt,
+ const struct dt_key *key,
+ struct thandle *handle)
{
- struct osd_thandle *oh;
+ struct osd_thandle *oh;
struct inode *inode;
int rc;
ENTRY;
- LASSERT(dt_object_exists(dt));
- LASSERT(handle != NULL);
+ LASSERT(dt_object_exists(dt));
+ LASSERT(handle != NULL);
- oh = container_of0(handle, struct osd_thandle, ot_super);
- LASSERT(oh->ot_handle == NULL);
+ oh = container_of0(handle, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle == NULL);
- OSD_DECLARE_OP(oh, delete);
- oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_DELETE];
+ osd_trans_declare_op(env, oh, OSD_OT_DELETE,
+ osd_dto_credits_noquota[DTO_INDEX_DELETE]);
inode = osd_dt_obj(dt)->oo_inode;
LASSERT(inode);
LASSERT(dt_object_exists(dt));
LASSERT(handle != NULL);
- OSD_EXEC_OP(handle, delete);
+ osd_trans_exec_op(env, handle, OSD_OT_DELETE);
oh = container_of(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle != NULL);
if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_DELETE))
RETURN(-EACCES);
+ ll_vfs_dq_init(dir);
dentry = osd_child_dentry_get(env, obj,
(char *)key, strlen((char *)key));
ldiskfs_htree_lock(hlock, obj->oo_hl_head,
dir, LDISKFS_HLOCK_DEL);
} else {
- cfs_down_write(&obj->oo_ext_idx_sem);
+ down_write(&obj->oo_ext_idx_sem);
}
bh = osd_ldiskfs_find_entry(dir, dentry, &de, hlock);
if (hlock != NULL)
ldiskfs_htree_unlock(hlock);
else
- cfs_up_write(&obj->oo_ext_idx_sem);
+ up_write(&obj->oo_ext_idx_sem);
LASSERT(osd_invariant(obj));
RETURN(rc);
oh = container_of0(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
- OSD_DECLARE_OP(oh, insert);
- oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_INSERT];
+ osd_trans_declare_op(env, oh, OSD_OT_INSERT,
+ osd_dto_credits_noquota[DTO_INDEX_INSERT]);
- return 0;
+ return 0;
}
/**
if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_INSERT))
RETURN(-EACCES);
- OSD_EXEC_OP(th, insert);
+ osd_trans_exec_op(env, th, OSD_OT_INSERT);
ipd = osd_idx_ipd_get(env, bag);
if (unlikely(ipd == NULL))
oth = container_of(th, struct osd_thandle, ot_super);
LASSERT(oth->ot_handle != NULL);
LASSERT(oth->ot_handle->h_transaction != NULL);
+ LASSERT(pobj->oo_inode);
- child = osd_child_dentry_get(info->oti_env, pobj, name, strlen(name));
-
- /* XXX: remove fid_is_igif() check here.
- * IGIF check is just to handle insertion of .. when it is 'ROOT',
- * it is IGIF now but needs FID in dir entry as well for readdir
- * to work.
- * LU-838 should fix that and remove fid_is_igif() check */
- if (fid_is_igif((struct lu_fid *)fid) ||
- fid_is_norm((struct lu_fid *)fid)) {
- ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
- osd_get_ldiskfs_dirent_param(ldp, fid);
- child->d_fsdata = (void *)ldp;
- } else {
- child->d_fsdata = NULL;
- }
- rc = osd_ldiskfs_add_entry(oth->ot_handle, child, cinode, hlock);
+ ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
+ if (unlikely(pobj->oo_inode ==
+ osd_sb(osd_obj2dev(pobj))->s_root->d_inode))
+ ldp->edp_magic = 0;
+ else
+ osd_get_ldiskfs_dirent_param(ldp, fid);
+ child = osd_child_dentry_get(info->oti_env, pobj, name, strlen(name));
+ child->d_fsdata = (void *)ldp;
+ ll_vfs_dq_init(pobj->oo_inode);
+ rc = osd_ldiskfs_add_entry(oth->ot_handle, child, cinode, hlock);
- RETURN(rc);
+ RETURN(rc);
}
/**
result = 0;
}
} else if(strcmp(name, dotdot) == 0) {
- dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
- dot_dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp2;
-
- if (!dir->oo_compat_dot_created)
- return -EINVAL;
- if (!fid_is_igif((struct lu_fid *)dot_fid)) {
- osd_get_ldiskfs_dirent_param(dot_ldp, dot_fid);
- osd_get_ldiskfs_dirent_param(dot_dot_ldp, dot_dot_fid);
- } else {
- dot_ldp = NULL;
- dot_dot_ldp = NULL;
- }
- /* in case of rename, dotdot is already created */
- if (dir->oo_compat_dotdot_created) {
- return __osd_ea_add_rec(info, dir, parent_dir, name,
- dot_dot_fid, NULL, th);
- }
-
- result = ldiskfs_add_dot_dotdot(oth->ot_handle, parent_dir,
- inode, dot_ldp, dot_dot_ldp);
- if (result == 0)
- dir->oo_compat_dotdot_created = 1;
- }
+ if (!dir->oo_compat_dot_created)
+ return -EINVAL;
+
+ dot_dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp2;
+ osd_get_ldiskfs_dirent_param(dot_dot_ldp, dot_dot_fid);
+ /* in case of rename, dotdot is already created */
+ if (dir->oo_compat_dotdot_created)
+ return __osd_ea_add_rec(info, dir, parent_dir, name,
+ dot_dot_fid, NULL, th);
+
+ dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
+ dot_ldp->edp_magic = 0;
+ result = ldiskfs_add_dot_dotdot(oth->ot_handle, parent_dir,
+ inode, dot_ldp, dot_dot_ldp);
+ if (result == 0)
+ dir->oo_compat_dotdot_created = 1;
+ }
- return result;
+ return result;
}
ldiskfs_htree_lock(hlock, pobj->oo_hl_head,
pobj->oo_inode, 0);
} else {
- cfs_down_write(&pobj->oo_ext_idx_sem);
+ down_write(&pobj->oo_ext_idx_sem);
}
rc = osd_add_dot_dotdot(info, pobj, cinode, name,
(struct dt_rec *)lu_object_fid(&pobj->oo_dt.do_lu),
ldiskfs_htree_lock(hlock, pobj->oo_hl_head,
pobj->oo_inode, LDISKFS_HLOCK_ADD);
} else {
- cfs_down_write(&pobj->oo_ext_idx_sem);
+ down_write(&pobj->oo_ext_idx_sem);
}
rc = __osd_ea_add_rec(info, pobj, cinode, name, fid,
if (hlock != NULL)
ldiskfs_htree_unlock(hlock);
else
- cfs_up_write(&pobj->oo_ext_idx_sem);
+ up_write(&pobj->oo_ext_idx_sem);
return rc;
}
-static int
+static void
osd_consistency_check(struct osd_thread_info *oti, struct osd_device *dev,
struct osd_idmap_cache *oic)
{
ENTRY;
if (!fid_is_norm(fid) && !fid_is_igif(fid))
- RETURN(0);
+ RETURN_EXIT;
again:
- rc = osd_oi_lookup(oti, dev, fid, id);
+ rc = osd_oi_lookup(oti, dev, fid, id, true);
if (rc != 0 && rc != -ENOENT)
- RETURN(rc);
+ RETURN_EXIT;
if (rc == 0 && osd_id_eq(id, &oic->oic_lid))
- RETURN(0);
+ RETURN_EXIT;
if (thread_is_running(&scrub->os_thread)) {
rc = osd_oii_insert(dev, oic, rc == -ENOENT);
if (unlikely(rc == -EAGAIN))
goto again;
- RETURN(rc);
+ RETURN_EXIT;
}
if (!dev->od_noscrub && ++once == 1) {
goto again;
}
- RETURN(0);
+ EXIT;
}
/**
struct htree_lock *hlock = NULL;
int ino;
int rc;
+ ENTRY;
LASSERT(dir->i_op != NULL && dir->i_op->lookup != NULL);
ldiskfs_htree_lock(hlock, obj->oo_hl_head,
dir, LDISKFS_HLOCK_LOOKUP);
} else {
- cfs_down_read(&obj->oo_ext_idx_sem);
+ down_read(&obj->oo_ext_idx_sem);
}
bh = osd_ldiskfs_find_entry(dir, dentry, &de, hlock);
if (hlock != NULL)
ldiskfs_htree_unlock(hlock);
else
- cfs_up_read(&obj->oo_ext_idx_sem);
+ up_read(&obj->oo_ext_idx_sem);
return rc;
}
}
static int osd_index_declare_ea_insert(const struct lu_env *env,
- struct dt_object *dt,
- const struct dt_rec *rec,
- const struct dt_key *key,
- struct thandle *handle)
+ struct dt_object *dt,
+ const struct dt_rec *rec,
+ const struct dt_key *key,
+ struct thandle *handle)
{
- struct osd_thandle *oh;
- struct inode *inode;
- int rc;
+ struct osd_thandle *oh;
+ struct inode *inode;
+ struct lu_fid *fid = (struct lu_fid *)rec;
+ int rc;
ENTRY;
- LASSERT(dt_object_exists(dt));
- LASSERT(handle != NULL);
+ LASSERT(dt_object_exists(dt));
+ LASSERT(handle != NULL);
- oh = container_of0(handle, struct osd_thandle, ot_super);
- LASSERT(oh->ot_handle == NULL);
+ oh = container_of0(handle, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle == NULL);
- OSD_DECLARE_OP(oh, insert);
- oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_INSERT];
+ osd_trans_declare_op(env, oh, OSD_OT_INSERT,
+ osd_dto_credits_noquota[DTO_INDEX_INSERT]);
inode = osd_dt_obj(dt)->oo_inode;
LASSERT(inode);
* insert */
rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
true, true, NULL, false);
+ if (fid == NULL)
+ RETURN(0);
+
+	/* The declare phase performs the fld lookup and adds the result
+	 * to the fld cache, so the later fld lookup inside insert does
+	 * not need to send an RPC anymore; this avoids sending an RPC
+	 * while holding the transaction open. */
+ LASSERTF(fid_is_sane(fid), "fid is insane"DFID"\n", PFID(fid));
+ osd_fld_lookup(env, osd_dt_dev(handle->th_dev), fid,
+ &osd_oti_get(env)->oti_seq_range);
+
RETURN(rc);
}
LASSERT(dt_object_exists(dt));
LASSERT(th != NULL);
+ osd_trans_exec_op(env, th, OSD_OT_INSERT);
+
if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_INSERT))
RETURN(-EACCES);
}
};
+
/**
* Creates or initializes iterator context.
*
unsigned d_type)
{
struct osd_it_ea *it = (struct osd_it_ea *)buf;
+ struct osd_object *obj = it->oie_obj;
struct osd_it_ea_dirent *ent = it->oie_dirent;
struct lu_fid *fid = &ent->oied_fid;
struct osd_fid_pack *rec;
OSD_IT_EA_BUFSIZE)
RETURN(1);
- if (d_type & LDISKFS_DIRENT_LUFID) {
- rec = (struct osd_fid_pack*) (name + namelen + 1);
-
- if (osd_fid_unpack(fid, rec) != 0)
- fid_zero(fid);
+ /* "." is just the object itself. */
+ if (namelen == 1 && name[0] == '.') {
+ *fid = obj->oo_dt.do_lu.lo_header->loh_fid;
+ } else if (d_type & LDISKFS_DIRENT_LUFID) {
+ rec = (struct osd_fid_pack*) (name + namelen + 1);
+ if (osd_fid_unpack(fid, rec) != 0)
+ fid_zero(fid);
+ } else {
+ fid_zero(fid);
+ }
+ d_type &= ~LDISKFS_DIRENT_LUFID;
- d_type &= ~LDISKFS_DIRENT_LUFID;
- } else {
- fid_zero(fid);
- }
+	/* Do NOT export the local root. */
+ if (unlikely(osd_sb(osd_obj2dev(obj))->s_root->d_inode->i_ino == ino)) {
+ ino = obj->oo_inode->i_ino;
+ *fid = obj->oo_dt.do_lu.lo_header->loh_fid;
+ }
ent->oied_ino = ino;
ent->oied_off = offset;
ldiskfs_htree_lock(hlock, obj->oo_hl_head,
inode, LDISKFS_HLOCK_READDIR);
} else {
- cfs_down_read(&obj->oo_ext_idx_sem);
+ down_read(&obj->oo_ext_idx_sem);
}
result = inode->i_fop->readdir(&it->oie_file, it,
if (hlock != NULL)
ldiskfs_htree_unlock(hlock);
else
- cfs_up_read(&obj->oo_ext_idx_sem);
+ up_read(&obj->oo_ext_idx_sem);
if (it->oie_rd_dirent == 0) {
result = -EIO;
if (!fid_is_sane(fid)) {
rc = osd_ea_fid_get(env, obj, ino, fid, &oic->oic_lid);
- if (rc != 0)
+ if (rc != 0) {
+ fid_zero(&oic->oic_fid);
RETURN(rc);
+ }
} else {
osd_id_gen(&oic->oic_lid, ino, OSD_OII_NOGEN);
}
if (o->od_mnt != NULL)
RETURN(0);
+ if (strlen(dev) >= sizeof(o->od_mntdev))
+ RETURN(-E2BIG);
+ strcpy(o->od_mntdev, dev);
+
o->od_fsops = fsfilt_get_ops(mt_str(LDD_MT_LDISKFS));
if (o->od_fsops == NULL) {
CERROR("Can't find fsfilt_ldiskfs\n");
GOTO(out, rc = -EINVAL);
}
- if (lmd_flags & LMD_FLG_IAM) {
- o->od_iop_mode = 0;
- LCONSOLE_WARN("%s: OSD: IAM mode enabled\n", name);
- } else
- o->od_iop_mode = 1;
+ ldiskfs_set_inode_state(osd_sb(o)->s_root->d_inode,
+ LDISKFS_STATE_LUSTRE_NO_OI);
if (lmd_flags & LMD_FLG_NOSCRUB)
o->od_noscrub = 1;
rc = osd_shutdown(env, osd_dev(d));
- osd_compat_fini(osd_dev(d));
+ osd_obj_map_fini(osd_dev(d));
shrink_dcache_sb(osd_sb(osd_dev(d)));
osd_sync(env, lu2dt_dev(d));
l->ld_ops = &osd_lu_ops;
o->od_dt_dev.dd_ops = &osd_dt_ops;
- cfs_spin_lock_init(&o->od_osfs_lock);
- cfs_mutex_init(&o->od_otable_mutex);
+ spin_lock_init(&o->od_osfs_lock);
+ mutex_init(&o->od_otable_mutex);
o->od_osfs_age = cfs_time_shift_64(-1000);
o->od_capa_hash = init_capa_hash();
strncpy(o->od_svname, lustre_cfg_string(cfg, 4),
sizeof(o->od_svname) - 1);
- rc = osd_compat_init(o);
+ rc = osd_obj_map_init(o);
if (rc != 0)
GOTO(out_scrub, rc);
out_site:
lu_site_fini(&o->od_site);
out_compat:
- osd_compat_fini(o);
+ osd_obj_map_fini(o);
out_scrub:
osd_scrub_cleanup(env, o);
out_mnt:
rc = dt_device_init(&o->od_dt_dev, t);
if (rc == 0) {
+ /* Because the ctx might be revived in dt_device_init,
+ * refill the env here */
+ lu_env_refill((struct lu_env *)env);
rc = osd_device_init0(env, o, cfg);
if (rc)
dt_device_fini(&o->od_dt_dev);
static int osd_recovery_complete(const struct lu_env *env,
struct lu_device *d)
{
- RETURN(0);
+ struct osd_device *osd = osd_dev(d);
+ int rc = 0;
+ ENTRY;
+
+ if (osd->od_quota_slave == NULL)
+ RETURN(0);
+
+	/* Start the qsd instance on recovery completion; this notifies the
+	 * quota slave code that we are about to process new requests. */
+ rc = qsd_start(env, osd->od_quota_slave);
+ RETURN(rc);
}
/*
*exp = class_conn2export(&conn);
- cfs_spin_lock(&osd->od_osfs_lock);
+ spin_lock(&osd->od_osfs_lock);
osd->od_connects++;
- cfs_spin_unlock(&osd->od_osfs_lock);
+ spin_unlock(&osd->od_osfs_lock);
RETURN(0);
}
ENTRY;
/* Only disconnect the underlying layers on the final disconnect. */
- cfs_spin_lock(&osd->od_osfs_lock);
+ spin_lock(&osd->od_osfs_lock);
osd->od_connects--;
if (osd->od_connects == 0)
release = 1;
- cfs_spin_unlock(&osd->od_osfs_lock);
+ spin_unlock(&osd->od_osfs_lock);
rc = class_disconnect(exp); /* bz 9811 */