return 0;
}
-static inline int osd_qid_type(struct osd_thandle *oh, int i)
-{
- return (oh->ot_id_type & (1 << i)) ? GRPQUOTA : USRQUOTA;
-}
-
-static inline void osd_qid_set_type(struct osd_thandle *oh, int i, int type)
-{
- oh->ot_id_type |= ((type == GRPQUOTA) ? (1 << i) : 0);
-}
-
-void osd_declare_qid(struct dt_object *dt, struct osd_thandle *oh,
- int type, uid_t id, struct inode *inode)
-{
-#ifdef CONFIG_QUOTA
- int i, allocated = 0;
- struct osd_object *obj;
-
- LASSERT(dt != NULL);
- LASSERT(oh != NULL);
- LASSERTF(oh->ot_id_cnt <= OSD_MAX_UGID_CNT, "count=%u",
- oh->ot_id_cnt);
-
- /* id entry is allocated in the quota file */
- if (inode && inode->i_dquot[type] && inode->i_dquot[type]->dq_off)
- allocated = 1;
-
- for (i = 0; i < oh->ot_id_cnt; i++) {
- if (oh->ot_id_array[i] == id && osd_qid_type(oh, i) == type)
- return;
- }
-
- if (unlikely(i >= OSD_MAX_UGID_CNT)) {
- CERROR("more than %d uid/gids for a transaction?\n", i);
- return;
- }
-
- oh->ot_id_array[i] = id;
- osd_qid_set_type(oh, i, type);
- oh->ot_id_cnt++;
- obj = osd_dt_obj(dt);
- oh->ot_credits += (allocated || id == 0) ?
- 1 : LDISKFS_QUOTA_INIT_BLOCKS(osd_sb(osd_obj2dev(obj)));
-#endif
-}
-
/*
* OSD object methods.
*/
th = ERR_PTR(-ENOMEM);
OBD_ALLOC_GFP(oh, sizeof *oh, CFS_ALLOC_IO);
if (oh != NULL) {
+ oh->ot_quota_trans = &oti->oti_quota_trans;
+ memset(oh->ot_quota_trans, 0, sizeof(*oh->ot_quota_trans));
th = &oh->ot_super;
th->th_dev = d;
th->th_result = 0;
lu_device_get(&d->dd_lu_dev);
oh->ot_dev_link = lu_ref_add(&d->dd_lu_dev.ld_reference,
"osd-tx", th);
-
- /*
- * XXX: current rule is that we first start tx,
- * then lock object(s), but we can't use
- * this rule for data (due to locking specifics
- * in ldiskfs). also in long-term we'd like to
- * use usually-used (locks;tx) ordering. so,
- * UGLY thing is that we'll use one ordering for
- * data (ofd) and reverse ordering for metadata
- * (mdd). then at some point we'll fix the latter
- */
- if (dev->od_is_md) {
- LASSERT(oti->oti_r_locks == 0);
- LASSERT(oti->oti_w_locks == 0);
- }
-
oti->oti_txns++;
rc = 0;
} else {
struct osd_thandle *oh;
struct osd_thread_info *oti = osd_oti_get(env);
struct osd_iobuf *iobuf = &oti->oti_iobuf;
-
+ struct qsd_instance *qsd = oti->oti_dev->od_quota_slave;
ENTRY;
oh = container_of0(th, struct osd_thandle, ot_super);
+ if (qsd != NULL)
+ /* inform the quota slave device that the transaction is
+ * stopping */
+ qsd_op_end(env, qsd, oh->ot_quota_trans);
+ oh->ot_quota_trans = NULL;
+
if (oh->ot_handle != NULL) {
handle_t *hdl = oh->ot_handle;
- hdl->h_sync = th->th_sync;
-
/*
* add commit callback
* notice we don't do this in osd_trans_start()
LASSERT(oti->oti_txns == 1);
oti->oti_txns--;
- /*
- * XXX: current rule is that we first start tx,
- * then lock object(s), but we can't use
- * this rule for data (due to locking specifics
- * in ldiskfs). also in long-term we'd like to
- * use usually-used (locks;tx) ordering. so,
- * UGLY thing is that we'll use one ordering for
- * data (ofd) and reverse ordering for metadata
- * (mdd). then at some point we'll fix the latter
- */
- if (osd_dt_dev(th->th_dev)->od_is_md) {
- LASSERT(oti->oti_r_locks == 0);
- LASSERT(oti->oti_w_locks == 0);
- }
rc = dt_txn_hook_stop(env, th);
if (rc != 0)
CERROR("Failure in transaction hook: %d\n", rc);
+
+ /* hook functions might modify th_sync */
+ hdl->h_sync = th->th_sync;
+
oh->ot_handle = NULL;
OSD_CHECK_SLOW_TH(oh, oti->oti_dev,
rc = ldiskfs_journal_stop(hdl));
param->ddp_max_name_len = LDISKFS_NAME_LEN;
param->ddp_max_nlink = LDISKFS_LINK_MAX;
param->ddp_block_shift = sb->s_blocksize_bits;
+ param->ddp_mount_type = LDD_MT_LDISKFS;
param->ddp_mntopts = 0;
if (test_opt(sb, XATTR_USER))
param->ddp_mntopts |= MNTOPT_USERXATTR;
const struct lu_attr *attr,
struct thandle *handle)
{
- struct osd_thandle *oh;
- struct osd_object *obj;
+ struct osd_thandle *oh;
+ struct osd_object *obj;
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct lquota_id_info *qi = &info->oti_qi;
+ long long bspace;
+ int rc = 0;
+ bool allocated;
+ ENTRY;
- LASSERT(dt != NULL);
- LASSERT(handle != NULL);
+ LASSERT(dt != NULL);
+ LASSERT(handle != NULL);
- obj = osd_dt_obj(dt);
- LASSERT(osd_invariant(obj));
+ obj = osd_dt_obj(dt);
+ LASSERT(osd_invariant(obj));
- oh = container_of0(handle, struct osd_thandle, ot_super);
- LASSERT(oh->ot_handle == NULL);
+ oh = container_of0(handle, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle == NULL);
- OSD_DECLARE_OP(oh, attr_set);
- oh->ot_credits += osd_dto_credits_noquota[DTO_ATTR_SET_BASE];
+ OSD_DECLARE_OP(oh, attr_set);
+ oh->ot_credits += osd_dto_credits_noquota[DTO_ATTR_SET_BASE];
- if (attr && attr->la_valid & LA_UID) {
- if (obj->oo_inode)
- osd_declare_qid(dt, oh, USRQUOTA, obj->oo_inode->i_uid,
- obj->oo_inode);
- osd_declare_qid(dt, oh, USRQUOTA, attr->la_uid, NULL);
- }
- if (attr && attr->la_valid & LA_GID) {
- if (obj->oo_inode)
- osd_declare_qid(dt, oh, GRPQUOTA, obj->oo_inode->i_gid,
- obj->oo_inode);
- osd_declare_qid(dt, oh, GRPQUOTA, attr->la_gid, NULL);
- }
+ if (attr == NULL || obj->oo_inode == NULL)
+ RETURN(rc);
- return 0;
+ bspace = obj->oo_inode->i_blocks;
+ bspace <<= obj->oo_inode->i_sb->s_blocksize_bits;
+ bspace = toqb(bspace);
+
+ /* Changing ownership is always performed by super user, it should not
+ * fail with EDQUOT.
+ *
+ * We still need to call osd_declare_qid() to calculate the journal
+ * credits for updating quota accounting files and to trigger quota
+ * space adjustment once the operation is completed. */
+ if ((attr->la_valid & LA_UID) != 0 &&
+ attr->la_uid != obj->oo_inode->i_uid) {
+ qi->lqi_type = USRQUOTA;
+
+ /* inode accounting */
+ qi->lqi_is_blk = false;
+
+ /* one more inode for the new owner ... */
+ qi->lqi_id.qid_uid = attr->la_uid;
+ qi->lqi_space = 1;
+ allocated = (attr->la_uid == 0) ? true : false;
+ rc = osd_declare_qid(env, oh, qi, allocated, NULL);
+ if (rc == -EDQUOT || rc == -EINPROGRESS)
+ rc = 0;
+ if (rc)
+ RETURN(rc);
+
+ /* and one less inode for the current uid */
+ qi->lqi_id.qid_uid = obj->oo_inode->i_uid;
+ qi->lqi_space = -1;
+ rc = osd_declare_qid(env, oh, qi, true, NULL);
+ if (rc == -EDQUOT || rc == -EINPROGRESS)
+ rc = 0;
+ if (rc)
+ RETURN(rc);
+
+ /* block accounting */
+ qi->lqi_is_blk = true;
+
+ /* more blocks for the new owner ... */
+ qi->lqi_id.qid_uid = attr->la_uid;
+ qi->lqi_space = bspace;
+ allocated = (attr->la_uid == 0) ? true : false;
+ rc = osd_declare_qid(env, oh, qi, allocated, NULL);
+ if (rc == -EDQUOT || rc == -EINPROGRESS)
+ rc = 0;
+ if (rc)
+ RETURN(rc);
+
+ /* and finally less blocks for the current owner */
+ qi->lqi_id.qid_uid = obj->oo_inode->i_uid;
+ qi->lqi_space = -bspace;
+ rc = osd_declare_qid(env, oh, qi, true, NULL);
+ if (rc == -EDQUOT || rc == -EINPROGRESS)
+ rc = 0;
+ if (rc)
+ RETURN(rc);
+ }
+
+ if (attr->la_valid & LA_GID &&
+ attr->la_gid != obj->oo_inode->i_gid) {
+ qi->lqi_type = GRPQUOTA;
+
+ /* inode accounting */
+ qi->lqi_is_blk = false;
+
+ /* one more inode for the new group owner ... */
+ qi->lqi_id.qid_gid = attr->la_gid;
+ qi->lqi_space = 1;
+ allocated = (attr->la_gid == 0) ? true : false;
+ rc = osd_declare_qid(env, oh, qi, allocated, NULL);
+ if (rc == -EDQUOT || rc == -EINPROGRESS)
+ rc = 0;
+ if (rc)
+ RETURN(rc);
+
+ /* and one less inode for the current gid */
+ qi->lqi_id.qid_gid = obj->oo_inode->i_gid;
+ qi->lqi_space = -1;
+ rc = osd_declare_qid(env, oh, qi, true, NULL);
+ if (rc == -EDQUOT || rc == -EINPROGRESS)
+ rc = 0;
+ if (rc)
+ RETURN(rc);
+
+ /* block accounting */
+ qi->lqi_is_blk = true;
+
+ /* more blocks for the new group owner ... */
+ qi->lqi_id.qid_gid = attr->la_gid;
+ qi->lqi_space = bspace;
+ allocated = (attr->la_gid == 0) ? true : false;
+ rc = osd_declare_qid(env, oh, qi, allocated, NULL);
+ if (rc == -EDQUOT || rc == -EINPROGRESS)
+ rc = 0;
+ if (rc)
+ RETURN(rc);
+
+ /* and finally less blocks for the current owner */
+ qi->lqi_id.qid_gid = obj->oo_inode->i_gid;
+ qi->lqi_space = -bspace;
+ rc = osd_declare_qid(env, oh, qi, true, NULL);
+ if (rc == -EDQUOT || rc == -EINPROGRESS)
+ rc = 0;
+ if (rc)
+ RETURN(rc);
+ }
+
+ RETURN(rc);
}
static int osd_inode_setattr(const struct lu_env *env,
static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
- struct dt_object *parent, cfs_umode_t child_mode)
+ struct dt_object *parent, struct dt_object *child,
+ cfs_umode_t child_mode)
{
LASSERT(ah);
struct dt_object_format *dof,
struct thandle *handle)
{
- struct osd_thandle *oh;
+ struct osd_thandle *oh;
+ int rc;
+ ENTRY;
LASSERT(handle != NULL);
oh->ot_credits += osd_dto_credits_noquota[DTO_WRITE_BASE];
}
- if (attr) {
- osd_declare_qid(dt, oh, USRQUOTA, attr->la_uid, NULL);
- osd_declare_qid(dt, oh, GRPQUOTA, attr->la_gid, NULL);
- }
- return 0;
+ if (!attr)
+ RETURN(0);
+
+ rc = osd_declare_inode_qid(env, attr->la_uid, attr->la_gid, 1, oh,
+ false, false, NULL, false);
+ RETURN(rc);
}
static int osd_object_create(const struct lu_env *env, struct dt_object *dt,
struct osd_object *obj = osd_dt_obj(dt);
struct inode *inode = obj->oo_inode;
struct osd_thandle *oh;
-
+ int rc;
ENTRY;
oh = container_of0(th, struct osd_thandle, ot_super);
oh->ot_credits += 3;
}
- osd_declare_qid(dt, oh, USRQUOTA, inode->i_uid, inode);
- osd_declare_qid(dt, oh, GRPQUOTA, inode->i_gid, inode);
-
- RETURN(0);
+ /* one less inode */
+ rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, -1, oh,
+ false, true, NULL, false);
+ if (rc)
+ RETURN(rc);
+ /* data to be truncated */
+ rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh, true,
+ true, NULL, false);
+ RETURN(rc);
}
static int osd_object_destroy(const struct lu_env *env,
LASSERT(dt_object_exists(dt));
LASSERT(inode->i_op != NULL && inode->i_op->getxattr != NULL);
- LASSERT(osd_read_locked(env, obj) || osd_write_locked(env, obj));
if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
return -EACCES;
}
LINVRNT(osd_invariant(obj));
+ if (is_quota_glb_feat(feat))
+ result = osd_quota_migration(env, dt, feat);
+
return result;
}
struct thandle *handle,
struct lustre_capa *capa)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct osd_thandle *oh;
- struct iam_path_descr *ipd;
- struct iam_container *bag = &obj->oo_dir->od_container;
- int rc;
+ struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_thandle *oh;
+ struct iam_path_descr *ipd;
+ struct iam_container *bag = &obj->oo_dir->od_container;
+ int rc;
ENTRY;
LASSERT(oh->ot_handle != NULL);
LASSERT(oh->ot_handle->h_transaction != NULL);
+ if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
+ /* swab quota uid/gid provided by caller */
+ oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
+ key = (const struct dt_key *)&oti->oti_quota_id;
+ }
+
rc = iam_delete(oh->ot_handle, bag, (const struct iam_key *)key, ipd);
osd_ipd_put(env, bag, ipd);
LINVRNT(osd_invariant(obj));
struct thandle *handle)
{
struct osd_thandle *oh;
+ struct inode *inode;
+ int rc;
+ ENTRY;
LASSERT(dt_object_exists(dt));
LASSERT(handle != NULL);
OSD_DECLARE_OP(oh, delete);
oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_DELETE];
- LASSERT(osd_dt_obj(dt)->oo_inode);
- osd_declare_qid(dt, oh, USRQUOTA, osd_dt_obj(dt)->oo_inode->i_uid,
- osd_dt_obj(dt)->oo_inode);
- osd_declare_qid(dt, oh, GRPQUOTA, osd_dt_obj(dt)->oo_inode->i_gid,
- osd_dt_obj(dt)->oo_inode);
+ inode = osd_dt_obj(dt)->oo_inode;
+ LASSERT(inode);
- return 0;
+ rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
+ true, true, NULL, false);
+ RETURN(rc);
}
static inline int osd_get_fid_from_dentry(struct ldiskfs_dir_entry_2 *de,
/* got ipd now we can start iterator. */
iam_it_init(it, bag, 0, ipd);
+ if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
+ /* swab quota uid/gid provided by caller */
+ oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
+ key = (const struct dt_key *)&oti->oti_quota_id;
+ }
+
rc = iam_it_get(it, (struct iam_key *)key);
if (rc >= 0) {
if (S_ISDIR(obj->oo_inode->i_mode))
iam_rec = (struct iam_rec *) rec;
iam_reccpy(&it->ii_path.ip_leaf, (struct iam_rec *)iam_rec);
+
if (S_ISDIR(obj->oo_inode->i_mode))
osd_fid_unpack((struct lu_fid *) rec,
(struct osd_fid_pack *)iam_rec);
+ else if (fid_is_quota(lu_object_fid(&dt->do_lu)))
+ osd_quota_unpack(obj, rec);
}
+
iam_it_put(it);
iam_it_fini(it);
osd_ipd_put(env, bag, ipd);
cfs_cap_t save = cfs_curproc_cap_pack();
#endif
struct osd_thread_info *oti = osd_oti_get(env);
- struct iam_rec *iam_rec = (struct iam_rec *)oti->oti_ldp;
+ struct iam_rec *iam_rec;
int rc;
ENTRY;
else
cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
#endif
- if (S_ISDIR(obj->oo_inode->i_mode))
- osd_fid_pack((struct osd_fid_pack *)iam_rec, rec, &oti->oti_fid);
- else
- iam_rec = (struct iam_rec *) rec;
+ if (S_ISDIR(obj->oo_inode->i_mode)) {
+ iam_rec = (struct iam_rec *)oti->oti_ldp;
+ osd_fid_pack((struct osd_fid_pack *)iam_rec, rec, &oti->oti_fid);
+ } else if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
+ /* pack quota uid/gid */
+ oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
+ key = (const struct dt_key *)&oti->oti_quota_id;
+ /* pack quota record */
+ rec = osd_quota_pack(obj, rec, &oti->oti_quota_rec);
+ iam_rec = (struct iam_rec *)rec;
+ } else {
+ iam_rec = (struct iam_rec *)rec;
+ }
+
rc = iam_insert(oh->ot_handle, bag, (const struct iam_key *)key,
iam_rec, ipd);
#ifdef HAVE_QUOTA_SUPPORT
struct thandle *handle)
{
struct osd_thandle *oh;
+ struct inode *inode;
+ int rc;
+ ENTRY;
LASSERT(dt_object_exists(dt));
LASSERT(handle != NULL);
OSD_DECLARE_OP(oh, insert);
oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_INSERT];
- LASSERT(osd_dt_obj(dt)->oo_inode);
- osd_declare_qid(dt, oh, USRQUOTA, osd_dt_obj(dt)->oo_inode->i_uid,
- osd_dt_obj(dt)->oo_inode);
- osd_declare_qid(dt, oh, GRPQUOTA, osd_dt_obj(dt)->oo_inode->i_gid,
- osd_dt_obj(dt)->oo_inode);
+ inode = osd_dt_obj(dt)->oo_inode;
+ LASSERT(inode);
- return 0;
+ /* We ignore block quota on meta pool (MDTs), so needn't
+ * calculate how many blocks will be consumed by this index
+ * insert */
+ rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
+ true, true, NULL, false);
+ RETURN(rc);
}
/**
static int osd_it_iam_get(const struct lu_env *env,
struct dt_it *di, const struct dt_key *key)
{
- struct osd_it_iam *it = (struct osd_it_iam *)di;
+ struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_it_iam *it = (struct osd_it_iam *)di;
+
+ if (fid_is_quota(lu_object_fid(&it->oi_obj->oo_dt.do_lu))) {
+ /* swab quota uid/gid */
+ oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
+ key = (struct dt_key *)&oti->oti_quota_id;
+ }
return iam_it_get(&it->oi_it, (const struct iam_key *)key);
}
static struct dt_key *osd_it_iam_key(const struct lu_env *env,
const struct dt_it *di)
{
- struct osd_it_iam *it = (struct osd_it_iam *)di;
+ struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_it_iam *it = (struct osd_it_iam *)di;
+ struct osd_object *obj = it->oi_obj;
+ struct dt_key *key;
+
+ key = (struct dt_key *)iam_it_key_get(&it->oi_it);
- return (struct dt_key *)iam_it_key_get(&it->oi_it);
+ if (!IS_ERR(key) && fid_is_quota(lu_object_fid(&obj->oo_dt.do_lu))) {
+ /* swab quota uid/gid */
+ oti->oti_quota_id = le64_to_cpu(*((__u64 *)key));
+ key = (struct dt_key *)&oti->oti_quota_id;
+ }
+
+ return key;
}
/**
/* IAM does not store object type in IAM index (dir) */
osd_it_pack_dirent(lde, fid, hash, name, namelen,
0, LUDA_FID);
+ } else if (fid_is_quota(lu_object_fid(&it->oi_obj->oo_dt.do_lu))) {
+ iam_reccpy(&it->oi_it.ii_path.ip_leaf,
+ (struct iam_rec *)dtrec);
+ osd_quota_unpack(it->oi_obj, dtrec);
} else {
iam_reccpy(&it->oi_it.ii_path.ip_leaf,
(struct iam_rec *)dtrec);
OBD_PAGE_ALLOC(__page, CFS_ALLOC_STD);
if (__page == NULL)
- RETURN(-ENOMEM);
+ GOTO(out, rc = -ENOMEM);
str = lustre_cfg_string(cfg, 2);
s_flags = simple_strtoul(str, NULL, 0);
out:
if (__page)
OBD_PAGE_FREE(__page);
+ if (rc)
+ fsfilt_put_ops(o->od_fsops);
RETURN(rc);
}
break;
case LCFG_CLEANUP:
lu_dev_del_linkage(d->ld_site, d);
- err = 0;
- break;
+ err = osd_shutdown(env, o);
+ break;
default:
err = -ENOSYS;
}
struct lu_device *dev)
{
struct osd_device *osd = osd_dev(dev);
- int result;
+ int result = 0;
ENTRY;
- /* 2. setup quota slave instance */
- osd->od_quota_slave = qsd_init(env, osd->od_svname, &osd->od_dt_dev,
- osd->od_proc_entry);
- if (IS_ERR(osd->od_quota_slave)) {
- result = PTR_ERR(osd->od_quota_slave);
- osd->od_quota_slave = NULL;
- RETURN(result);
- }
-
#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 3, 55, 0)
/* Unfortunately, the current MDD implementation relies on some specific
* code to be executed in the OSD layer. Since OFD now also uses the OSD
#warning "all is_md checks must be removed from osd-ldiskfs"
#endif
- if (!osd->od_is_md)
- RETURN(0);
+ if (osd->od_is_md) {
+ /* 1. setup local objects */
+ result = llo_local_objects_setup(env, lu2md_dev(pdev),
+ lu2dt_dev(dev));
+ if (result)
+ RETURN(result);
+ }
- /* 3. setup local objects */
- result = llo_local_objects_setup(env, lu2md_dev(pdev), lu2dt_dev(dev));
- RETURN(result);
+ /* 2. setup quota slave instance */
+ osd->od_quota_slave = qsd_init(env, osd->od_svname, &osd->od_dt_dev,
+ osd->od_proc_entry);
+ if (IS_ERR(osd->od_quota_slave)) {
+ result = PTR_ERR(osd->od_quota_slave);
+ osd->od_quota_slave = NULL;
+ }
+
+ RETURN(result);
}
static const struct lu_object_operations osd_lu_obj_ops = {