lu_device_get(&d->dd_lu_dev);
oh->ot_dev_link = lu_ref_add(&d->dd_lu_dev.ld_reference,
"osd-tx", th);
-
- /*
- * XXX: current rule is that we first start tx,
- * then lock object(s), but we can't use
- * this rule for data (due to locking specifics
- * in ldiskfs). also in long-term we'd like to
- * use usually-used (locks;tx) ordering. so,
- * UGLY thing is that we'll use one ordering for
- * data (ofd) and reverse ordering for metadata
- * (mdd). then at some point we'll fix the latter
- */
- if (dev->od_is_md) {
- LASSERT(oti->oti_r_locks == 0);
- LASSERT(oti->oti_w_locks == 0);
- }
-
oti->oti_txns++;
rc = 0;
} else {
if (oh->ot_handle != NULL) {
handle_t *hdl = oh->ot_handle;
- hdl->h_sync = th->th_sync;
-
/*
* add commit callback
* notice we don't do this in osd_trans_start()
LASSERT(oti->oti_txns == 1);
oti->oti_txns--;
- /*
- * XXX: current rule is that we first start tx,
- * then lock object(s), but we can't use
- * this rule for data (due to locking specifics
- * in ldiskfs). also in long-term we'd like to
- * use usually-used (locks;tx) ordering. so,
- * UGLY thing is that we'll use one ordering for
- * data (ofd) and reverse ordering for metadata
- * (mdd). then at some point we'll fix the latter
- */
- if (osd_dt_dev(th->th_dev)->od_is_md) {
- LASSERT(oti->oti_r_locks == 0);
- LASSERT(oti->oti_w_locks == 0);
- }
rc = dt_txn_hook_stop(env, th);
if (rc != 0)
CERROR("Failure in transaction hook: %d\n", rc);
+
+ /* hook functions might modify th_sync, so set h_sync only after they run */
+ hdl->h_sync = th->th_sync;
+
oh->ot_handle = NULL;
OSD_CHECK_SLOW_TH(oh, oti->oti_dev,
rc = ldiskfs_journal_stop(hdl));
param->ddp_max_name_len = LDISKFS_NAME_LEN;
param->ddp_max_nlink = LDISKFS_LINK_MAX;
param->ddp_block_shift = sb->s_blocksize_bits;
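+ /* report the backend filesystem type to upper layers */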
+ param->ddp_mount_type = LDD_MT_LDISKFS;
param->ddp_mntopts = 0;
if (test_opt(sb, XATTR_USER))
param->ddp_mntopts |= MNTOPT_USERXATTR;
LASSERT(dt_object_exists(dt));
LASSERT(inode->i_op != NULL && inode->i_op->getxattr != NULL);
- LASSERT(osd_read_locked(env, obj) || osd_write_locked(env, obj));
if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
return -EACCES;
}
LINVRNT(osd_invariant(obj));
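+ /* quota global indexes need dedicated handling; see osd_quota_migration() */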
+ if (is_quota_glb_feat(feat))
+ result = osd_quota_migration(env, dt, feat);
+
return result;
}
struct thandle *handle,
struct lustre_capa *capa)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct osd_thandle *oh;
- struct iam_path_descr *ipd;
- struct iam_container *bag = &obj->oo_dir->od_container;
- int rc;
+ struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_thandle *oh;
+ struct iam_path_descr *ipd;
+ struct iam_container *bag = &obj->oo_dir->od_container;
+ int rc;
ENTRY;
LASSERT(oh->ot_handle != NULL);
LASSERT(oh->ot_handle->h_transaction != NULL);
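+ /* quota index keys are raw 64-bit IDs stored little-endian on disk */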
+ if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
+ /* swab quota uid/gid provided by caller */
+ oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
+ key = (const struct dt_key *)&oti->oti_quota_id;
+ }
+
rc = iam_delete(oh->ot_handle, bag, (const struct iam_key *)key, ipd);
osd_ipd_put(env, bag, ipd);
LINVRNT(osd_invariant(obj));
/* got ipd now we can start iterator. */
iam_it_init(it, bag, 0, ipd);
+ if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
+ /* swab quota uid/gid provided by caller */
+ oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
+ key = (const struct dt_key *)&oti->oti_quota_id;
+ }
+
rc = iam_it_get(it, (struct iam_key *)key);
if (rc >= 0) {
if (S_ISDIR(obj->oo_inode->i_mode))
iam_rec = (struct iam_rec *) rec;
iam_reccpy(&it->ii_path.ip_leaf, (struct iam_rec *)iam_rec);
+
if (S_ISDIR(obj->oo_inode->i_mode))
osd_fid_unpack((struct lu_fid *) rec,
(struct osd_fid_pack *)iam_rec);
+ else if (fid_is_quota(lu_object_fid(&dt->do_lu)))
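+ /* convert the on-disk quota record to its in-memory format */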
+ osd_quota_unpack(obj, rec);
}
+
iam_it_put(it);
iam_it_fini(it);
osd_ipd_put(env, bag, ipd);
cfs_cap_t save = cfs_curproc_cap_pack();
#endif
struct osd_thread_info *oti = osd_oti_get(env);
- struct iam_rec *iam_rec = (struct iam_rec *)oti->oti_ldp;
+ struct iam_rec *iam_rec;
int rc;
ENTRY;
else
cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
#endif
- if (S_ISDIR(obj->oo_inode->i_mode))
- osd_fid_pack((struct osd_fid_pack *)iam_rec, rec, &oti->oti_fid);
- else
- iam_rec = (struct iam_rec *) rec;
+ if (S_ISDIR(obj->oo_inode->i_mode)) {
+ iam_rec = (struct iam_rec *)oti->oti_ldp;
+ osd_fid_pack((struct osd_fid_pack *)iam_rec, rec, &oti->oti_fid);
+ } else if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
+ /* swab quota uid/gid into the on-disk key format */
+ oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
+ key = (const struct dt_key *)&oti->oti_quota_id;
+ /* pack quota record */
+ rec = osd_quota_pack(obj, rec, &oti->oti_quota_rec);
+ iam_rec = (struct iam_rec *)rec;
+ } else {
+ iam_rec = (struct iam_rec *)rec;
+ }
+
rc = iam_insert(oh->ot_handle, bag, (const struct iam_key *)key,
iam_rec, ipd);
#ifdef HAVE_QUOTA_SUPPORT
static int osd_it_iam_get(const struct lu_env *env,
struct dt_it *di, const struct dt_key *key)
{
- struct osd_it_iam *it = (struct osd_it_iam *)di;
+ struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_it_iam *it = (struct osd_it_iam *)di;
+
+ if (fid_is_quota(lu_object_fid(&it->oi_obj->oo_dt.do_lu))) {
+ /* swab quota uid/gid */
+ oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
+ key = (struct dt_key *)&oti->oti_quota_id;
+ }
return iam_it_get(&it->oi_it, (const struct iam_key *)key);
}
static struct dt_key *osd_it_iam_key(const struct lu_env *env,
const struct dt_it *di)
{
- struct osd_it_iam *it = (struct osd_it_iam *)di;
+ struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_it_iam *it = (struct osd_it_iam *)di;
+ struct osd_object *obj = it->oi_obj;
+ struct dt_key *key;
+
- return (struct dt_key *)iam_it_key_get(&it->oi_it);
+ key = (struct dt_key *)iam_it_key_get(&it->oi_it);
+
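+ /* iam_it_key_get() may return an ERR_PTR, hence the IS_ERR() check */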
+ if (!IS_ERR(key) && fid_is_quota(lu_object_fid(&obj->oo_dt.do_lu))) {
+ /* swab quota uid/gid */
+ oti->oti_quota_id = le64_to_cpu(*((__u64 *)key));
+ key = (struct dt_key *)&oti->oti_quota_id;
+ }
+
+ return key;
}
/**
/* IAM does not store object type in IAM index (dir) */
osd_it_pack_dirent(lde, fid, hash, name, namelen,
0, LUDA_FID);
+ } else if (fid_is_quota(lu_object_fid(&it->oi_obj->oo_dt.do_lu))) {
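+ /* quota entry: copy the record out, then convert it in place */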
+ iam_reccpy(&it->oi_it.ii_path.ip_leaf,
+ (struct iam_rec *)dtrec);
+ osd_quota_unpack(it->oi_obj, dtrec);
} else {
iam_reccpy(&it->oi_it.ii_path.ip_leaf,
(struct iam_rec *)dtrec);
OBD_PAGE_ALLOC(__page, CFS_ALLOC_STD);
if (__page == NULL)
- RETURN(-ENOMEM);
+ GOTO(out, rc = -ENOMEM);
str = lustre_cfg_string(cfg, 2);
s_flags = simple_strtoul(str, NULL, 0);
out:
if (__page)
OBD_PAGE_FREE(__page);
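+ /* on error, drop the fsfilt ops reference taken earlier during mount */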
+ if (rc)
+ fsfilt_put_ops(o->od_fsops);
RETURN(rc);
}
break;
case LCFG_CLEANUP:
lu_dev_del_linkage(d->ld_site, d);
- err = 0;
- break;
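+ /* perform a real device shutdown instead of just returning success */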
+ err = osd_shutdown(env, o);
+ break;
default:
err = -ENOSYS;
}
struct lu_device *dev)
{
struct osd_device *osd = osd_dev(dev);
- int result;
+ int result = 0;
ENTRY;
- /* 2. setup quota slave instance */
- osd->od_quota_slave = qsd_init(env, osd->od_svname, &osd->od_dt_dev,
- osd->od_proc_entry);
- if (IS_ERR(osd->od_quota_slave)) {
- result = PTR_ERR(osd->od_quota_slave);
- osd->od_quota_slave = NULL;
- RETURN(result);
- }
-
#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 3, 55, 0)
/* Unfortunately, the current MDD implementation relies on some specific
* code to be executed in the OSD layer. Since OFD now also uses the OSD
#warning "all is_md checks must be removed from osd-ldiskfs"
#endif
- if (!osd->od_is_md)
- RETURN(0);
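+ /* local objects setup is still MDT-only; see the is_md warning above */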
+ if (osd->od_is_md) {
+ /* 1. setup local objects */
+ result = llo_local_objects_setup(env, lu2md_dev(pdev),
+ lu2dt_dev(dev));
+ if (result)
+ RETURN(result);
+ }
- /* 3. setup local objects */
- result = llo_local_objects_setup(env, lu2md_dev(pdev), lu2dt_dev(dev));
- RETURN(result);
+ /* 2. setup quota slave instance */
+ osd->od_quota_slave = qsd_init(env, osd->od_svname, &osd->od_dt_dev,
+ osd->od_proc_entry);
+ if (IS_ERR(osd->od_quota_slave)) {
+ result = PTR_ERR(osd->od_quota_slave);
+ osd->od_quota_slave = NULL;
+ }
+
+ RETURN(result);
}
static const struct lu_object_operations osd_lu_obj_ops = {