* Use is subject to license terms.
*/
/*
- * Copyright (c) 2011, 2012 Whamcloud, Inc.
+ * Copyright (c) 2012, Intel Corporation.
* Use is subject to license terms.
*/
/*
if (!cfs_list_empty(&obj->oo_sa_linkage))
return;
- cfs_down(&oh->ot_sa_lock);
+ down(&oh->ot_sa_lock);
+ write_lock(&obj->oo_attr_lock);
if (likely(cfs_list_empty(&obj->oo_sa_linkage)))
cfs_list_add(&obj->oo_sa_linkage, &oh->ot_sa_list);
- cfs_up(&oh->ot_sa_lock);
+ write_unlock(&obj->oo_attr_lock);
+ up(&oh->ot_sa_lock);
}
/*
{
struct osd_object *obj;
- cfs_down(&oh->ot_sa_lock);
+ down(&oh->ot_sa_lock);
while (!cfs_list_empty(&oh->ot_sa_list)) {
obj = cfs_list_entry(oh->ot_sa_list.next,
struct osd_object, oo_sa_linkage);
sa_spill_rele(obj->oo_sa_hdl);
+ write_lock(&obj->oo_attr_lock);
cfs_list_del_init(&obj->oo_sa_linkage);
+ write_unlock(&obj->oo_attr_lock);
}
- cfs_up(&oh->ot_sa_lock);
+ up(&oh->ot_sa_lock);
}
/*
mo->oo_dt.do_ops = &osd_obj_ops;
l->lo_ops = &osd_lu_obj_ops;
CFS_INIT_LIST_HEAD(&mo->oo_sa_linkage);
- cfs_init_rwsem(&mo->oo_sem);
- cfs_sema_init(&mo->oo_guard, 1);
- cfs_rwlock_init(&mo->oo_attr_lock);
+ init_rwsem(&mo->oo_sem);
+ sema_init(&mo->oo_guard, 1);
+ rwlock_init(&mo->oo_attr_lock);
return l;
} else {
return NULL;
LASSERT(osd_invariant(obj));
+ if (fid_is_otable_it(&l->lo_header->loh_fid)) {
+ obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
+ l->lo_header->loh_attr |= LOHA_EXISTS;
+ RETURN(0);
+ }
+
rc = osd_fid_lookup(env, osd, lu_object_fid(l), &oid);
if (rc == 0) {
LASSERT(obj->oo_db == NULL);
osd->od_svname, PFID(lu_object_fid(l)), oid, rc);
}
} else if (rc == -ENOENT) {
- if (fid_is_otable_it(&l->lo_header->loh_fid)) {
- obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
- /* LFSCK iterator object is special without inode */
- l->lo_header->loh_attr |= LOHA_EXISTS;
- }
rc = 0;
}
LASSERT(osd_invariant(obj));
+/* Free DMU object @oid inside transaction @tx and drop it from the
+ * per-objset object count.  Returns 0 or a negative errno (dmu_object_free()
+ * returns positive errors, hence the negation). */
int __osd_object_free(udmu_objset_t *uos, uint64_t oid, dmu_tx_t *tx)
{
LASSERT(uos->objects != 0);
- cfs_spin_lock(&uos->lock);
+ /* uos->lock serializes updates of the shared uos->objects counter */
+ spin_lock(&uos->lock);
uos->objects--;
- cfs_spin_unlock(&uos->lock);
+ spin_unlock(&uos->lock);
return -dmu_object_free(uos->os, oid, tx);
}
LASSERT(osd_invariant(obj));
- cfs_down_read(&obj->oo_sem);
+ down_read(&obj->oo_sem);
}
static void osd_object_write_lock(const struct lu_env *env,
LASSERT(osd_invariant(obj));
- cfs_down_write(&obj->oo_sem);
+ down_write(&obj->oo_sem);
}
static void osd_object_read_unlock(const struct lu_env *env,
struct osd_object *obj = osd_dt_obj(dt);
LASSERT(osd_invariant(obj));
- cfs_up_read(&obj->oo_sem);
+ up_read(&obj->oo_sem);
}
static void osd_object_write_unlock(const struct lu_env *env,
struct osd_object *obj = osd_dt_obj(dt);
LASSERT(osd_invariant(obj));
- cfs_up_write(&obj->oo_sem);
+ up_write(&obj->oo_sem);
}
static int osd_object_write_locked(const struct lu_env *env,
LASSERT(osd_invariant(obj));
- if (cfs_down_write_trylock(&obj->oo_sem)) {
+ if (down_write_trylock(&obj->oo_sem)) {
rc = 0;
- cfs_up_write(&obj->oo_sem);
+ up_write(&obj->oo_sem);
}
return rc;
}
LASSERT(osd_invariant(obj));
LASSERT(obj->oo_db);
- cfs_read_lock(&obj->oo_attr_lock);
+ read_lock(&obj->oo_attr_lock);
*attr = obj->oo_attr;
- cfs_read_unlock(&obj->oo_attr_lock);
+ read_unlock(&obj->oo_attr_lock);
/* with ZFS_DEBUG zrl_add_debug() called by DB_DNODE_ENTER()
* from within sa_object_size() can block on a mutex, so
* we can't call sa_object_size() holding rwlock */
sa_object_size(obj->oo_sa_hdl, &blksize, &blocks);
+ /* we do not control size of indices, so always calculate
+ * it from number of blocks reported by DMU */
+ if (S_ISDIR(attr->la_mode))
+ attr->la_size = 512 * blocks;
/* Block size may be not set; suggest maximal I/O transfers. */
if (blksize == 0)
blksize = 1ULL << SPA_MAXBLOCKSHIFT;
/* and one less inode for the current id */
qi->lqi_id.qid_uid = orig_id;;
qi->lqi_space = -1;
+ /* can't get EDQUOT when reducing usage */
rc = qsd_op_begin(env, qsd, trans, qi, NULL);
- if (rc == -EDQUOT || rc == -EINPROGRESS)
+ if (rc == -EINPROGRESS)
rc = 0;
if (rc)
return rc;
qi->lqi_id.qid_uid = orig_id;
qi->lqi_space = -bspace;
rc = qsd_op_begin(env, qsd, trans, qi, NULL);
- if (rc == -EDQUOT || rc == -EINPROGRESS)
+ /* can't get EDQUOT when reducing usage */
+ if (rc == -EINPROGRESS)
rc = 0;
return rc;
}
obj->oo_attr.la_gid, rc);
}
- cfs_write_lock(&obj->oo_attr_lock);
+ write_lock(&obj->oo_attr_lock);
cnt = 0;
if (la->la_valid & LA_ATIME) {
osa->atime[0] = obj->oo_attr.la_atime = la->la_atime;
&osa->gid, 8);
}
obj->oo_attr.la_valid |= la->la_valid;
- cfs_write_unlock(&obj->oo_attr_lock);
+ write_unlock(&obj->oo_attr_lock);
rc = osd_object_sa_bulk_update(obj, bulk, cnt, oh);
*/
static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
- struct dt_object *parent, cfs_umode_t child_mode)
+ struct dt_object *parent, struct dt_object *child,
+ cfs_umode_t child_mode)
{
LASSERT(ah);
struct osd_device *osd = osd_obj2dev(obj);
struct osd_thandle *oh;
uint64_t zapid;
+ int rc;
ENTRY;
LASSERT(dof);
dmu_tx_hold_sa_create(oh->ot_tx, ZFS_SA_BASE_ATTR_SIZE);
- RETURN(osd_declare_quota(env, osd, attr->la_uid, attr->la_gid, 1, oh,
- false, NULL, false));
+ __osd_xattr_declare_set(env, obj, sizeof(struct lustre_mdt_attrs),
+ XATTR_NAME_LMA, oh);
+
+ rc = osd_declare_quota(env, osd, attr->la_uid, attr->la_gid, 1, oh,
+ false, NULL, false);
+ RETURN(rc);
}
int __osd_attr_init(const struct lu_env *env, udmu_objset_t *uos,
int rc;
LASSERT(tag);
- cfs_spin_lock(&uos->lock);
+ spin_lock(&uos->lock);
uos->objects++;
- cfs_spin_unlock(&uos->lock);
+ spin_unlock(&uos->lock);
/* Assert that the transaction has been assigned to a
transaction group. */
LASSERT(tag);
- cfs_spin_lock(&uos->lock);
+ spin_lock(&uos->lock);
uos->objects++;
- cfs_spin_unlock(&uos->lock);
+ spin_unlock(&uos->lock);
/* Assert that the transaction has been assigned to a
transaction group. */
/*
* Primitives for directory (i.e. ZAP) handling
*/
+/*
+ * Store the initial LMA (Lustre Metadata Attributes) extended attribute on
+ * a freshly created object, recording its FID so the object can later be
+ * identified from disk.  Runs inside transaction @oh.
+ *
+ * Returns 0 on success or a negative errno from the xattr update.
+ */
+static inline int osd_init_lma(const struct lu_env *env, struct osd_object *obj,
+ const struct lu_fid *fid, struct osd_thandle *oh)
+{
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
+ struct lu_buf buf;
+ int rc;
+
+ /* fill LMA with @fid, then swab to on-disk byte order */
+ lustre_lma_init(lma, fid);
+ lustre_lma_swab(lma);
+ buf.lb_buf = lma;
+ buf.lb_len = sizeof(*lma);
+
+ /* LU_XATTR_CREATE: the EA must not already exist on a new object */
+ rc = osd_xattr_set_internal(env, obj, &buf, XATTR_NAME_LMA,
+ LU_XATTR_CREATE, oh, BYPASS_CAPA);
+
+ return rc;
+}
/*
* Concurrency: @dt is write locked.
/* concurrent create declarations should not see
* the object inconsistent (db, attr, etc).
* in regular cases acquisition should be cheap */
- cfs_down(&obj->oo_guard);
+ down(&obj->oo_guard);
LASSERT(osd_invariant(obj));
LASSERT(!dt_object_exists(dt));
LASSERT(ergo(rc == 0, dt_object_exists(dt)));
LASSERT(osd_invariant(obj));
+ rc = osd_init_lma(env, obj, fid, oh);
+ if (rc) {
+ CERROR("%s: can not set LMA on "DFID": rc = %d\n",
+ osd->od_svname, PFID(fid), rc);
+ /* ignore errors during LMA initialization */
+ rc = 0;
+ }
+
out:
- cfs_up(&obj->oo_guard);
+ up(&obj->oo_guard);
RETURN(rc);
}
oh = container_of0(handle, struct osd_thandle, ot_super);
- cfs_write_lock(&obj->oo_attr_lock);
+ write_lock(&obj->oo_attr_lock);
nlink = ++obj->oo_attr.la_nlink;
- cfs_write_unlock(&obj->oo_attr_lock);
+ write_unlock(&obj->oo_attr_lock);
rc = osd_object_sa_update(obj, SA_ZPL_LINKS(uos), &nlink, 8, oh);
return rc;
oh = container_of0(handle, struct osd_thandle, ot_super);
LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
- cfs_write_lock(&obj->oo_attr_lock);
+ write_lock(&obj->oo_attr_lock);
nlink = --obj->oo_attr.la_nlink;
- cfs_write_unlock(&obj->oo_attr_lock);
+ write_unlock(&obj->oo_attr_lock);
rc = osd_object_sa_update(obj, SA_ZPL_LINKS(uos), &nlink, 8, oh);
return rc;
RETURN(rc);
}
- cfs_spin_lock(&capa_lock);
+ spin_lock(&capa_lock);
for (i = 0; i < 2; i++) {
if (keys[i].lk_keyid == capa->lc_keyid) {
oti->oti_capa_key = keys[i];
break;
}
}
- cfs_spin_unlock(&capa_lock);
+ spin_unlock(&capa_lock);
if (i == 2) {
DEBUG_CAPA(D_ERROR, capa, "no matched capa key");
RETURN(oc);
}
- cfs_spin_lock(&capa_lock);
+ spin_lock(&capa_lock);
*key = dev->od_capa_keys[1];
- cfs_spin_unlock(&capa_lock);
+ spin_unlock(&capa_lock);
capa->lc_keyid = key->lk_keyid;
capa->lc_expiry = cfs_time_current_sec() + dev->od_capa_timeout;