* Use is subject to license terms.
*/
/*
- * Copyright (c) 2011, 2012 Whamcloud, Inc.
+ * Copyright (c) 2012, 2013, Intel Corporation.
* Use is subject to license terms.
*/
/*
* Author: Johann Lombardi <johann@whamcloud.com>
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_OSD
#include <lustre_ver.h>
#include <libcfs/libcfs.h>
-#include <lustre_fsfilt.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <obd.h>
static struct dt_object_operations osd_obj_ops;
static struct lu_object_operations osd_lu_obj_ops;
extern struct dt_body_operations osd_body_ops;
+static struct dt_object_operations osd_obj_otable_it_ops;
-extern cfs_mem_cache_t *osd_object_kmem;
+extern struct kmem_cache *osd_object_kmem;
static void
osd_object_sa_fini(struct osd_object *obj)
if (!cfs_list_empty(&obj->oo_sa_linkage))
return;
- cfs_down(&oh->ot_sa_lock);
+ down(&oh->ot_sa_lock);
+ write_lock(&obj->oo_attr_lock);
if (likely(cfs_list_empty(&obj->oo_sa_linkage)))
cfs_list_add(&obj->oo_sa_linkage, &oh->ot_sa_list);
- cfs_up(&oh->ot_sa_lock);
+ write_unlock(&obj->oo_attr_lock);
+ up(&oh->ot_sa_lock);
}
/*
{
struct osd_object *obj;
- cfs_down(&oh->ot_sa_lock);
+ down(&oh->ot_sa_lock);
while (!cfs_list_empty(&oh->ot_sa_list)) {
obj = cfs_list_entry(oh->ot_sa_list.next,
struct osd_object, oo_sa_linkage);
sa_spill_rele(obj->oo_sa_hdl);
+ write_lock(&obj->oo_attr_lock);
cfs_list_del_init(&obj->oo_sa_linkage);
+ write_unlock(&obj->oo_attr_lock);
}
- cfs_up(&oh->ot_sa_lock);
+ up(&oh->ot_sa_lock);
}
/*
la->la_uid = osa->uid;
la->la_gid = osa->gid;
la->la_nlink = osa->nlink;
- la->la_flags = osa->flags;
+ la->la_flags = attrs_zfs2fs(osa->flags);
la->la_size = osa->size;
if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode)) {
{
struct osd_object *mo;
- OBD_SLAB_ALLOC_PTR_GFP(mo, osd_object_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(mo, osd_object_kmem, __GFP_IO);
if (mo != NULL) {
struct lu_object *l;
mo->oo_dt.do_ops = &osd_obj_ops;
l->lo_ops = &osd_lu_obj_ops;
CFS_INIT_LIST_HEAD(&mo->oo_sa_linkage);
- cfs_init_rwsem(&mo->oo_sem);
- cfs_sema_init(&mo->oo_guard, 1);
- cfs_rwlock_init(&mo->oo_attr_lock);
+ init_rwsem(&mo->oo_sem);
+ sema_init(&mo->oo_guard, 1);
+ rwlock_init(&mo->oo_attr_lock);
return l;
} else {
return NULL;
*/
obj->oo_dt.do_lu.lo_header->loh_attr |= obj->oo_attr.la_mode & S_IFMT;
- cfs_mb();
+ smp_mb();
obj->oo_dt.do_lu.lo_header->loh_attr |= LOHA_EXISTS;
RETURN(0);
}
+/* Check the object's LMA (Lustre Metadata Attributes) xattr for incompat
+ * feature flags outside LMA_INCOMPAT_SUPP. Returns 0 when the LMA is
+ * acceptable or has not been written yet, -EOPNOTSUPP on unsupported
+ * incompat flags (or when the OBD_FAIL_OSD_LMA_INCOMPAT fault-injection
+ * point fires), or a negative errno from the xattr read. */
+static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
+{
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct lu_buf buf;
+ int rc;
+ struct lustre_mdt_attrs *lma;
+ ENTRY;
+
+ /* the per-thread scratch buffer doubles as LMA storage */
+ CLASSERT(sizeof(info->oti_buf) >= sizeof(*lma));
+ lma = (struct lustre_mdt_attrs *)info->oti_buf;
+ buf.lb_buf = lma;
+ buf.lb_len = sizeof(info->oti_buf);
+
+ rc = osd_xattr_get(env, &obj->oo_dt, &buf, XATTR_NAME_LMA, BYPASS_CAPA);
+ if (rc > 0) {
+ rc = 0;
+ lustre_lma_swab(lma);
+ if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
+ CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
+ CWARN("%s: unsupported incompat LMA feature(s) %#x for "
+ "fid = "DFID"\n", osd_obj2dev(obj)->od_svname,
+ lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
+ PFID(lu_object_fid(&obj->oo_dt.do_lu)));
+ rc = -EOPNOTSUPP;
+ }
+ } else if (rc == -ENODATA) {
+ /* LMA xattr has not been initialized yet -- not an error */
+ rc = 0;
+ }
+
+ RETURN(rc);
+}
+
/*
* Concurrency: no concurrent access is possible that early in object
* life-cycle.
LASSERT(osd_invariant(obj));
+ if (fid_is_otable_it(&l->lo_header->loh_fid)) {
+ obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
+ l->lo_header->loh_attr |= LOHA_EXISTS;
+ RETURN(0);
+ }
+
rc = osd_fid_lookup(env, osd, lu_object_fid(l), &oid);
if (rc == 0) {
LASSERT(obj->oo_db == NULL);
rc = __osd_obj2dbuf(env, osd->od_objset.os, oid,
&obj->oo_db, osd_obj_tag);
- if (rc == 0) {
- LASSERT(obj->oo_db);
- rc = osd_object_init0(env, obj);
- } else {
+ if (rc != 0) {
CERROR("%s: lookup "DFID"/"LPX64" failed: rc = %d\n",
osd->od_svname, PFID(lu_object_fid(l)), oid, rc);
+ GOTO(out, rc);
}
+ LASSERT(obj->oo_db);
+ rc = osd_object_init0(env, obj);
+ if (rc != 0)
+ GOTO(out, rc);
+
+ rc = osd_check_lma(env, obj);
+ if (rc != 0)
+ GOTO(out, rc);
} else if (rc == -ENOENT) {
rc = 0;
}
LASSERT(osd_invariant(obj));
+out:
RETURN(rc);
}
struct osd_device *osd = osd_obj2dev(obj);
struct osd_thandle *oh;
uint64_t zapid;
+ int rc;
ENTRY;
LASSERT(th != NULL);
dmu_tx_hold_bonus(oh->ot_tx, osd->od_igrp_oid);
dmu_tx_hold_zap(oh->ot_tx, osd->od_igrp_oid, 0, buf);
- RETURN(0);
+ /* one less inode */
+ rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
+ obj->oo_attr.la_gid, -1, oh, false, NULL, false);
+ if (rc)
+ RETURN(rc);
+
+ /* data to be truncated */
+ rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
+ obj->oo_attr.la_gid, 0, oh, true, NULL, false);
+ RETURN(rc);
}
int __osd_object_free(udmu_objset_t *uos, uint64_t oid, dmu_tx_t *tx)
{
LASSERT(uos->objects != 0);
+/* the per-objset object counter is shared state; update under uos->lock */
- cfs_spin_lock(&uos->lock);
+ spin_lock(&uos->lock);
uos->objects--;
- cfs_spin_unlock(&uos->lock);
+ spin_unlock(&uos->lock);
return -dmu_object_free(uos->os, oid, tx);
}
LASSERT(osd_invariant(obj));
- cfs_down_read(&obj->oo_sem);
+ down_read(&obj->oo_sem);
}
static void osd_object_write_lock(const struct lu_env *env,
LASSERT(osd_invariant(obj));
- cfs_down_write(&obj->oo_sem);
+ down_write(&obj->oo_sem);
}
static void osd_object_read_unlock(const struct lu_env *env,
struct osd_object *obj = osd_dt_obj(dt);
LASSERT(osd_invariant(obj));
- cfs_up_read(&obj->oo_sem);
+ up_read(&obj->oo_sem);
}
static void osd_object_write_unlock(const struct lu_env *env,
struct osd_object *obj = osd_dt_obj(dt);
LASSERT(osd_invariant(obj));
- cfs_up_write(&obj->oo_sem);
+ up_write(&obj->oo_sem);
}
static int osd_object_write_locked(const struct lu_env *env,
LASSERT(osd_invariant(obj));
- if (cfs_down_write_trylock(&obj->oo_sem)) {
+ if (down_write_trylock(&obj->oo_sem)) {
rc = 0;
- cfs_up_write(&obj->oo_sem);
+ up_write(&obj->oo_sem);
}
return rc;
}
LASSERT(osd_invariant(obj));
LASSERT(obj->oo_db);
- cfs_read_lock(&obj->oo_attr_lock);
+ read_lock(&obj->oo_attr_lock);
*attr = obj->oo_attr;
- cfs_read_unlock(&obj->oo_attr_lock);
+ read_unlock(&obj->oo_attr_lock);
/* with ZFS_DEBUG zrl_add_debug() called by DB_DNODE_ENTER()
* from within sa_object_size() can block on a mutex, so
* we can't call sa_object_size() holding rwlock */
sa_object_size(obj->oo_sa_hdl, &blksize, &blocks);
+ /* we do not control size of indices, so always calculate
+ * it from number of blocks reported by DMU */
+ if (S_ISDIR(attr->la_mode))
+ attr->la_size = 512 * blocks;
/* Block size may be not set; suggest maximal I/O transfers. */
if (blksize == 0)
blksize = 1ULL << SPA_MAXBLOCKSHIFT;
return 0;
}
+/* Simple wrapper on top of the qsd API which implements the quota transfer
+ * needed by osd setattr: move one inode plus @bspace blocks of usage from
+ * @orig_id to @new_id for quota type @qtype. As a reminder, only the root
+ * user can change ownership of a file, that's why EDQUOT & EINPROGRESS
+ * errors are discarded. Returns 0 on success or if @qsd is not set up,
+ * otherwise a negative errno from qsd_op_begin(). */
+static inline int qsd_transfer(const struct lu_env *env,
+ struct qsd_instance *qsd,
+ struct lquota_trans *trans, int qtype,
+ __u64 orig_id, __u64 new_id, __u64 bspace,
+ struct lquota_id_info *qi)
+{
+ int rc;
+
+ if (unlikely(qsd == NULL))
+ return 0;
+
+ LASSERT(qtype >= 0 && qtype < MAXQUOTAS);
+ qi->lqi_type = qtype;
+
+ /* inode accounting */
+ qi->lqi_is_blk = false;
+
+ /* one more inode for the new owner ... */
+ qi->lqi_id.qid_uid = new_id;
+ qi->lqi_space = 1;
+ rc = qsd_op_begin(env, qsd, trans, qi, NULL);
+ if (rc == -EDQUOT || rc == -EINPROGRESS)
+ rc = 0;
+ if (rc)
+ return rc;
+
+ /* and one less inode for the current id */
+ qi->lqi_id.qid_uid = orig_id;
+ qi->lqi_space = -1;
+ /* can't get EDQUOT when reducing usage */
+ rc = qsd_op_begin(env, qsd, trans, qi, NULL);
+ if (rc == -EINPROGRESS)
+ rc = 0;
+ if (rc)
+ return rc;
+
+ /* block accounting */
+ qi->lqi_is_blk = true;
+
+ /* more blocks for the new owner ... */
+ qi->lqi_id.qid_uid = new_id;
+ qi->lqi_space = bspace;
+ rc = qsd_op_begin(env, qsd, trans, qi, NULL);
+ if (rc == -EDQUOT || rc == -EINPROGRESS)
+ rc = 0;
+ if (rc)
+ return rc;
+
+ /* and finally less blocks for the current owner */
+ qi->lqi_id.qid_uid = orig_id;
+ qi->lqi_space = -bspace;
+ /* can't get EDQUOT when reducing usage */
+ rc = qsd_op_begin(env, qsd, trans, qi, NULL);
+ if (rc == -EINPROGRESS)
+ rc = 0;
+ return rc;
+}
+
static int osd_declare_attr_set(const struct lu_env *env,
struct dt_object *dt,
const struct lu_attr *attr,
struct thandle *handle)
{
+ struct osd_thread_info *info = osd_oti_get(env);
char *buf = osd_oti_get(env)->oti_str;
struct osd_object *obj = osd_dt_obj(dt);
struct osd_device *osd = osd_obj2dev(obj);
struct osd_thandle *oh;
+ uint64_t bspace;
+ uint32_t blksize;
+ int rc;
ENTRY;
if (!dt_object_exists(dt)) {
oh = container_of0(handle, struct osd_thandle, ot_super);
LASSERT(obj->oo_sa_hdl != NULL);
+ LASSERT(oh->ot_tx != NULL);
dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);
+ if (oh->ot_tx->tx_err != 0)
+ RETURN(-oh->ot_tx->tx_err);
+
+ sa_object_size(obj->oo_sa_hdl, &blksize, &bspace);
+ bspace = toqb(bspace * blksize);
if (attr && attr->la_valid & LA_UID) {
/* account for user inode tracking ZAP update */
dmu_tx_hold_bonus(oh->ot_tx, osd->od_iusr_oid);
dmu_tx_hold_zap(oh->ot_tx, osd->od_iusr_oid, TRUE, buf);
+
+ /* quota enforcement for user */
+ if (attr->la_uid != obj->oo_attr.la_uid) {
+ rc = qsd_transfer(env, osd->od_quota_slave,
+ &oh->ot_quota_trans, USRQUOTA,
+ obj->oo_attr.la_uid, attr->la_uid,
+ bspace, &info->oti_qi);
+ if (rc)
+ RETURN(rc);
+ }
}
if (attr && attr->la_valid & LA_GID) {
/* account for user inode tracking ZAP update */
dmu_tx_hold_bonus(oh->ot_tx, osd->od_igrp_oid);
dmu_tx_hold_zap(oh->ot_tx, osd->od_igrp_oid, TRUE, buf);
+
+ /* quota enforcement for group */
+ if (attr->la_gid != obj->oo_attr.la_gid) {
+ rc = qsd_transfer(env, osd->od_quota_slave,
+ &oh->ot_quota_trans, GRPQUOTA,
+ obj->oo_attr.la_gid, attr->la_gid,
+ bspace, &info->oti_qi);
+ if (rc)
+ RETURN(rc);
+ }
}
RETURN(0);
obj->oo_attr.la_gid, rc);
}
- cfs_write_lock(&obj->oo_attr_lock);
+ write_lock(&obj->oo_attr_lock);
cnt = 0;
if (la->la_valid & LA_ATIME) {
osa->atime[0] = obj->oo_attr.la_atime = la->la_atime;
&osa->rdev, 8);
}
if (la->la_valid & LA_FLAGS) {
- osa->flags = obj->oo_attr.la_flags = la->la_flags;
+ osa->flags = attrs_fs2zfs(la->la_flags);
+ /* many flags are not supported by zfs, so ensure a good cached
+ * copy */
+ obj->oo_attr.la_flags = attrs_zfs2fs(osa->flags);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(uos), NULL,
&osa->flags, 8);
}
&osa->gid, 8);
}
obj->oo_attr.la_valid |= la->la_valid;
- cfs_write_unlock(&obj->oo_attr_lock);
+ write_unlock(&obj->oo_attr_lock);
rc = osd_object_sa_bulk_update(obj, bulk, cnt, oh);
*/
static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
- struct dt_object *parent, cfs_umode_t child_mode)
+ struct dt_object *parent, struct dt_object *child,
+ umode_t child_mode)
{
LASSERT(ah);
memset(ah, 0, sizeof(*ah));
+ /* remember the parent: object creation uses it to set up the new
+ * dnode's parent, following the ZFS on-disk format */
+ ah->dah_parent = parent;
ah->dah_mode = child_mode;
}
struct osd_device *osd = osd_obj2dev(obj);
struct osd_thandle *oh;
uint64_t zapid;
+ int rc;
ENTRY;
LASSERT(dof);
dmu_tx_hold_sa_create(oh->ot_tx, ZFS_SA_BASE_ATTR_SIZE);
- RETURN(0);
+ __osd_xattr_declare_set(env, obj, sizeof(struct lustre_mdt_attrs),
+ XATTR_NAME_LMA, oh);
+
+ rc = osd_declare_quota(env, osd, attr->la_uid, attr->la_gid, 1, oh,
+ false, NULL, false);
+ RETURN(rc);
}
-int __osd_attr_init(const struct lu_env *env, udmu_objset_t *uos,
- uint64_t oid, dmu_tx_t *tx, struct lu_attr *la)
+int __osd_attr_init(const struct lu_env *env, udmu_objset_t *uos, uint64_t oid,
+ dmu_tx_t *tx, struct lu_attr *la, uint64_t parent)
{
sa_bulk_attr_t *bulk;
sa_handle_t *sa_hdl;
struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
uint64_t gen;
- uint64_t parent;
uint64_t crtime[2];
timestruc_t now;
int cnt;
gen = dmu_tx_get_txg(tx);
ZFS_TIME_ENCODE(&now, crtime);
- /* XXX: this should be real id of parent for ZPL access, but we have no
- * such info in OSD, probably it can be part of dt_object_format */
- parent = 0;
osa->atime[0] = la->la_atime;
osa->ctime[0] = la->la_ctime;
osa->gid = la->la_gid;
osa->rdev = la->la_rdev;
osa->nlink = la->la_nlink;
- osa->flags = la->la_flags;
+ osa->flags = attrs_fs2zfs(la->la_flags);
osa->size = la->la_size;
/* Now add in all of the "SA" attributes */
* to a transaction group.
*/
int __osd_object_create(const struct lu_env *env, udmu_objset_t *uos,
- dmu_buf_t **dbp, dmu_tx_t *tx,
- struct lu_attr *la, void *tag)
+ dmu_buf_t **dbp, dmu_tx_t *tx, struct lu_attr *la,
+ uint64_t parent, void *tag)
{
uint64_t oid;
int rc;
LASSERT(tag);
- cfs_spin_lock(&uos->lock);
+ spin_lock(&uos->lock);
uos->objects++;
- cfs_spin_unlock(&uos->lock);
+ spin_unlock(&uos->lock);
/* Assert that the transaction has been assigned to a
transaction group. */
la->la_size = 0;
la->la_nlink = 1;
- return __osd_attr_init(env, uos, oid, tx, la);
+ return __osd_attr_init(env, uos, oid, tx, la, parent);
}
/*
* a conversion from the different internal ZAP hash formats being used. */
int __osd_zap_create(const struct lu_env *env, udmu_objset_t *uos,
dmu_buf_t **zap_dbp, dmu_tx_t *tx,
- struct lu_attr *la, void *tag, zap_flags_t flags)
+ struct lu_attr *la, uint64_t parent,
+ void *tag, zap_flags_t flags)
{
uint64_t oid;
int rc;
LASSERT(tag);
- cfs_spin_lock(&uos->lock);
+ spin_lock(&uos->lock);
uos->objects++;
- cfs_spin_unlock(&uos->lock);
+ spin_unlock(&uos->lock);
/* Assert that the transaction has been assigned to a
transaction group. */
la->la_size = 2;
la->la_nlink = 1;
- return __osd_attr_init(env, uos, oid, tx, la);
+ return __osd_attr_init(env, uos, oid, tx, la, parent);
}
static dmu_buf_t *osd_mkidx(const struct lu_env *env, struct osd_device *osd,
- struct lu_attr *la, struct osd_thandle *oh)
+ struct lu_attr *la, uint64_t parent,
+ struct osd_thandle *oh)
{
dmu_buf_t *db;
int rc;
* binary keys */
LASSERT(S_ISREG(la->la_mode));
rc = __osd_zap_create(env, &osd->od_objset, &db, oh->ot_tx, la,
- osd_obj_tag, ZAP_FLAG_UINT64_KEY);
+ parent, osd_obj_tag, ZAP_FLAG_UINT64_KEY);
if (rc)
return ERR_PTR(rc);
return db;
}
static dmu_buf_t *osd_mkdir(const struct lu_env *env, struct osd_device *osd,
- struct lu_attr *la, struct osd_thandle *oh)
+ struct lu_attr *la, uint64_t parent,
+ struct osd_thandle *oh)
{
dmu_buf_t *db;
int rc;
LASSERT(S_ISDIR(la->la_mode));
rc = __osd_zap_create(env, &osd->od_objset, &db, oh->ot_tx, la,
- osd_obj_tag, 0);
+ parent, osd_obj_tag, 0);
if (rc)
return ERR_PTR(rc);
return db;
}
static dmu_buf_t* osd_mkreg(const struct lu_env *env, struct osd_device *osd,
- struct lu_attr *la, struct osd_thandle *oh)
+ struct lu_attr *la, uint64_t parent,
+ struct osd_thandle *oh)
{
dmu_buf_t *db;
int rc;
LASSERT(S_ISREG(la->la_mode));
rc = __osd_object_create(env, &osd->od_objset, &db, oh->ot_tx, la,
- osd_obj_tag);
+ parent, osd_obj_tag);
if (rc)
return ERR_PTR(rc);
}
static dmu_buf_t *osd_mksym(const struct lu_env *env, struct osd_device *osd,
- struct lu_attr *la, struct osd_thandle *oh)
+ struct lu_attr *la, uint64_t parent,
+ struct osd_thandle *oh)
{
dmu_buf_t *db;
int rc;
LASSERT(S_ISLNK(la->la_mode));
rc = __osd_object_create(env, &osd->od_objset, &db, oh->ot_tx, la,
- osd_obj_tag);
+ parent, osd_obj_tag);
if (rc)
return ERR_PTR(rc);
return db;
}
static dmu_buf_t *osd_mknod(const struct lu_env *env, struct osd_device *osd,
- struct lu_attr *la, struct osd_thandle *oh)
+ struct lu_attr *la, uint64_t parent,
+ struct osd_thandle *oh)
{
dmu_buf_t *db;
int rc;
la->la_valid |= LA_RDEV;
rc = __osd_object_create(env, &osd->od_objset, &db, oh->ot_tx, la,
- osd_obj_tag);
+ parent, osd_obj_tag);
if (rc)
return ERR_PTR(rc);
return db;
}
-typedef dmu_buf_t *(*osd_obj_type_f)(const struct lu_env *env, struct osd_device *osd,
- struct lu_attr *la, struct osd_thandle *oh);
+typedef dmu_buf_t *(*osd_obj_type_f)(const struct lu_env *env,
+ struct osd_device *osd,
+ struct lu_attr *la,
+ uint64_t parent,
+ struct osd_thandle *oh);
static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
{
/*
* Primitives for directory (i.e. ZAP) handling
*/
+/* Store @fid in a freshly-initialized LMA xattr (XATTR_NAME_LMA) on @obj,
+ * within the transaction described by @oh. LU_XATTR_CREATE makes the set
+ * fail if the xattr already exists. Returns 0 or a negative errno. */
+static inline int osd_init_lma(const struct lu_env *env, struct osd_object *obj,
+ const struct lu_fid *fid, struct osd_thandle *oh)
+{
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
+ struct lu_buf buf;
+ int rc;
+
+ lustre_lma_init(lma, fid, 0, 0);
+ /* convert to on-disk byte order before writing */
+ lustre_lma_swab(lma);
+ buf.lb_buf = lma;
+ buf.lb_len = sizeof(*lma);
+
+ rc = osd_xattr_set_internal(env, obj, &buf, XATTR_NAME_LMA,
+ LU_XATTR_CREATE, oh, BYPASS_CAPA);
+
+ return rc;
+}
/*
* Concurrency: @dt is write locked.
/* concurrent create declarations should not see
* the object inconsistent (db, attr, etc).
* in regular cases acquisition should be cheap */
- cfs_down(&obj->oo_guard);
+ down(&obj->oo_guard);
LASSERT(osd_invariant(obj));
LASSERT(!dt_object_exists(dt));
LASSERT(obj->oo_db == NULL);
- db = osd_create_type_f(dof->dof_type)(env, osd, attr, oh);
+ /* to follow ZFS on-disk format we need
+ * to initialize parent dnode properly */
+ zapid = 0;
+ if (hint && hint->dah_parent)
+ zapid = osd_dt_obj(hint->dah_parent)->oo_db->db_object;
+
+ db = osd_create_type_f(dof->dof_type)(env, osd, attr, zapid, oh);
if (IS_ERR(db))
- GOTO(out, rc = PTR_ERR(th));
+ GOTO(out, rc = PTR_ERR(db));
zde->zde_pad = 0;
zde->zde_dnode = db->db_object;
LASSERT(ergo(rc == 0, dt_object_exists(dt)));
LASSERT(osd_invariant(obj));
+ rc = osd_init_lma(env, obj, fid, oh);
+ if (rc) {
+ CERROR("%s: can not set LMA on "DFID": rc = %d\n",
+ osd->od_svname, PFID(fid), rc);
+ /* ignore errors during LMA initialization */
+ rc = 0;
+ }
+
out:
- cfs_up(&obj->oo_guard);
+ up(&obj->oo_guard);
RETURN(rc);
}
oh = container_of0(handle, struct osd_thandle, ot_super);
- cfs_write_lock(&obj->oo_attr_lock);
+ write_lock(&obj->oo_attr_lock);
nlink = ++obj->oo_attr.la_nlink;
- cfs_write_unlock(&obj->oo_attr_lock);
+ write_unlock(&obj->oo_attr_lock);
rc = osd_object_sa_update(obj, SA_ZPL_LINKS(uos), &nlink, 8, oh);
return rc;
oh = container_of0(handle, struct osd_thandle, ot_super);
LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
- cfs_write_lock(&obj->oo_attr_lock);
+ write_lock(&obj->oo_attr_lock);
nlink = --obj->oo_attr.la_nlink;
- cfs_write_unlock(&obj->oo_attr_lock);
+ write_unlock(&obj->oo_attr_lock);
rc = osd_object_sa_update(obj, SA_ZPL_LINKS(uos), &nlink, 8, oh);
return rc;
RETURN(rc);
}
- cfs_spin_lock(&capa_lock);
+ spin_lock(&capa_lock);
for (i = 0; i < 2; i++) {
if (keys[i].lk_keyid == capa->lc_keyid) {
oti->oti_capa_key = keys[i];
break;
}
}
- cfs_spin_unlock(&capa_lock);
+ spin_unlock(&capa_lock);
if (i == 2) {
DEBUG_CAPA(D_ERROR, capa, "no matched capa key");
RETURN(oc);
}
- cfs_spin_lock(&capa_lock);
+ spin_lock(&capa_lock);
*key = dev->od_capa_keys[1];
- cfs_spin_unlock(&capa_lock);
+ spin_unlock(&capa_lock);
capa->lc_keyid = key->lk_keyid;
capa->lc_expiry = cfs_time_current_sec() + dev->od_capa_timeout;
.loo_object_invariant = osd_object_invariant,
};
+/* attr_get for the otable-it pseudo object: it carries no real attributes,
+ * so report an attr set with no valid fields */
+static int osd_otable_it_attr_get(const struct lu_env *env,
+ struct dt_object *dt,
+ struct lu_attr *attr,
+ struct lustre_capa *capa)
+{
+ attr->la_valid = 0;
+ return 0;
+}
+
+/* minimal dt_object_operations installed on objects whose FID is the
+ * special otable-it FID (see fid_is_otable_it handling at init time) */
+static struct dt_object_operations osd_obj_otable_it_ops = {
+ .do_attr_get = osd_otable_it_attr_get,
+ .do_index_try = osd_index_try,
+};
+