/* llo_* api support */
#include <md_object.h>
-/* dt_acct_features */
-#include <lquota.h>
+#include <lustre_quota.h>
#ifdef HAVE_LDISKFS_PDO
int ldiskfs_pdo = 1;
static const struct lu_object_operations osd_lu_obj_ops;
static const struct dt_object_operations osd_obj_ops;
static const struct dt_object_operations osd_obj_ea_ops;
+static const struct dt_object_operations osd_obj_otable_it_ops;
static const struct dt_index_operations osd_index_iam_ops;
static const struct dt_index_operations osd_index_ea_ops;
return osd_invariant(osd_obj(l));
}
-#ifdef HAVE_QUOTA_SUPPORT
-static inline void
-osd_push_ctxt(const struct lu_env *env, struct osd_ctxt *save)
-{
- struct md_ucred *uc = md_ucred(env);
- struct cred *tc;
-
- LASSERT(uc != NULL);
-
- save->oc_uid = current_fsuid();
- save->oc_gid = current_fsgid();
- save->oc_cap = current_cap();
- if ((tc = prepare_creds())) {
- tc->fsuid = uc->mu_fsuid;
- tc->fsgid = uc->mu_fsgid;
- commit_creds(tc);
- }
- /* XXX not suboptimal */
- cfs_curproc_cap_unpack(uc->mu_cap);
-}
-
-static inline void
-osd_pop_ctxt(struct osd_ctxt *save)
-{
- struct cred *tc;
-
- if ((tc = prepare_creds())) {
- tc->fsuid = save->oc_uid;
- tc->fsgid = save->oc_gid;
- tc->cap_effective = save->oc_cap;
- commit_creds(tc);
- }
-}
-#endif
-
/*
* Concurrency: doesn't matter
*/
return 0;
}
-static inline int osd_qid_type(struct osd_thandle *oh, int i)
-{
- return (oh->ot_id_type & (1 << i)) ? GRPQUOTA : USRQUOTA;
-}
-
-static inline void osd_qid_set_type(struct osd_thandle *oh, int i, int type)
-{
- oh->ot_id_type |= ((type == GRPQUOTA) ? (1 << i) : 0);
-}
-
-void osd_declare_qid(struct dt_object *dt, struct osd_thandle *oh,
- int type, uid_t id, struct inode *inode)
-{
-#ifdef CONFIG_QUOTA
- int i, allocated = 0;
- struct osd_object *obj;
-
- LASSERT(dt != NULL);
- LASSERT(oh != NULL);
- LASSERTF(oh->ot_id_cnt <= OSD_MAX_UGID_CNT, "count=%u",
- oh->ot_id_cnt);
-
- /* id entry is allocated in the quota file */
- if (inode && inode->i_dquot[type] && inode->i_dquot[type]->dq_off)
- allocated = 1;
-
- for (i = 0; i < oh->ot_id_cnt; i++) {
- if (oh->ot_id_array[i] == id && osd_qid_type(oh, i) == type)
- return;
- }
-
- if (unlikely(i >= OSD_MAX_UGID_CNT)) {
- CERROR("more than %d uid/gids for a transaction?\n", i);
- return;
- }
-
- oh->ot_id_array[i] = id;
- osd_qid_set_type(oh, i, type);
- oh->ot_id_cnt++;
- obj = osd_dt_obj(dt);
- oh->ot_credits += (allocated || id == 0) ?
- 1 : LDISKFS_QUOTA_INIT_BLOCKS(osd_sb(osd_obj2dev(obj)));
-#endif
-}
-
/*
* OSD object methods.
*/
iput(inode);
inode = ERR_PTR(-ESTALE);
} else if (is_bad_inode(inode)) {
- CWARN("%s: bad inode: ino = %u\n",
- dev->od_dt_dev.dd_lu_dev.ld_obd->obd_name, id->oii_ino);
+ CWARN("%.16s: bad inode: ino = %u\n",
+ LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name, id->oii_ino);
iput(inode);
inode = ERR_PTR(-ENOENT);
} else {
return inode;
rc = osd_get_lma(inode, &info->oti_obj_dentry, lma);
+ if (rc == -ENODATA)
+ return inode;
+
if (rc != 0) {
- if (rc == -ENODATA) {
- CDEBUG(D_LFSCK, "inconsistent obj: NULL, %lu, "DFID"\n",
- inode->i_ino, PFID(fid));
- rc = -EREMCHG;
- }
iput(inode);
return ERR_PTR(rc);
}
iput(inode);
return ERR_PTR(EREMCHG);
}
+
return inode;
}
struct inode *inode;
struct osd_scrub *scrub;
struct scrub_file *sf;
- int result;
+ int result;
int verify = 0;
ENTRY;
if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOENT))
RETURN(-ENOENT);
- if (fid_is_norm(fid)) {
- /* Search order: 1. per-thread cache. */
- if (lu_fid_eq(fid, &oic->oic_fid)) {
+ /* Search order: 1. per-thread cache. */
+ if (lu_fid_eq(fid, &oic->oic_fid)) {
+ goto iget;
+ } else if (!cfs_list_empty(&scrub->os_inconsistent_items)) {
+ /* Search order: 2. OI scrub pending list. */
+ result = osd_oii_lookup(dev, fid, id);
+ if (result == 0)
goto iget;
- } else if (!cfs_list_empty(&scrub->os_inconsistent_items)) {
- /* Search order: 2. OI scrub pending list. */
- result = osd_oii_lookup(dev, fid, id);
- if (result == 0)
- goto iget;
- }
-
- if (sf->sf_flags & SF_INCONSISTENT)
- verify = 1;
}
- fid_zero(&oic->oic_fid);
+ if (sf->sf_flags & SF_INCONSISTENT)
+ verify = 1;
+
+       /*
+        * Objects are created as locking anchors or place holders for objects
+        * yet to be created. No need to call osd_oi_lookup() here because a
+        * FID should never be re-used. If it really is a duplicate FID for
+        * some unexpected reason, we will be able to detect it later by
+        * calling do_create->osd_oi_insert().
+        */
+ if (conf != NULL && (conf->loc_flags & LOC_F_NEW) != 0)
+ GOTO(out, result = 0);
+
/* Search order: 3. OI files. */
result = osd_oi_lookup(info, dev, fid, id);
- if (result != 0 && result != -ENOENT)
- GOTO(out, result);
-
- /* If fid wasn't found in oi, inode-less object is created,
- * for which lu_object_exists() returns false. This is used
- * in a (frequent) case when objects are created as locking
- * anchors or place holders for objects yet to be created. */
- if (conf != NULL && conf->loc_flags & LOC_F_NEW) {
- if (unlikely(result == 0))
- GOTO(out, result = -EEXIST);
- else
- GOTO(out, result = 0);
- }
-
if (result == -ENOENT) {
if (!fid_is_norm(fid) ||
!ldiskfs_test_bit(osd_oi_fid2idx(dev,fid),
goto trigger;
}
+ if (result != 0)
+ GOTO(out, result);
+
iget:
if (verify == 0)
inode = osd_iget(info, dev, id);
if (IS_ERR(inode)) {
result = PTR_ERR(inode);
if (result == -ENOENT || result == -ESTALE) {
+ fid_zero(&oic->oic_fid);
result = 0;
} else if (result == -EREMCHG) {
trigger:
if (thread_is_running(&scrub->os_thread)) {
result = -EINPROGRESS;
- } else if (!scrub->os_no_scrub) {
+ } else if (!dev->od_noscrub) {
result = osd_scrub_start(dev);
- LCONSOLE_ERROR("Trigger OI scrub by RPC for "
- DFID", rc = %d\n",
- PFID(fid), result);
+ LCONSOLE_ERROR("%.16s: trigger OI scrub by RPC "
+ "for "DFID", rc = %d [1]\n",
+ LDISKFS_SB(osd_sb(dev))->s_es->\
+ s_volume_name,PFID(fid), result);
if (result == 0 || result == -EALREADY)
result = -EINPROGRESS;
else
LINVRNT(osd_invariant(obj));
result = osd_fid_lookup(env, obj, lu_object_fid(l), conf);
- obj->oo_dt.do_body_ops = &osd_body_ops_new;
- if (result == 0) {
- if (obj->oo_inode != NULL)
- osd_object_init0(obj);
- }
- LINVRNT(osd_invariant(obj));
- return result;
+ obj->oo_dt.do_body_ops = &osd_body_ops_new;
+ if (result == 0) {
+ if (obj->oo_inode != NULL) {
+ osd_object_init0(obj);
+ } else if (fid_is_otable_it(&l->lo_header->loh_fid)) {
+ obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
+ /* LFSCK iterator object is special without inode */
+ l->lo_header->loh_attr |= LOHA_EXISTS;
+ }
+ }
+ LINVRNT(osd_invariant(obj));
+ return result;
}
/*
/* call per-transaction callbacks if any */
cfs_list_for_each_entry_safe(dcb, tmp, &oh->ot_dcb_list, dcb_linkage) {
+ LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
+ "commit callback entry: magic=%x name='%s'\n",
+ dcb->dcb_magic, dcb->dcb_name);
cfs_list_del_init(&dcb->dcb_linkage);
dcb->dcb_func(NULL, th, dcb, error);
}
th = ERR_PTR(-ENOMEM);
OBD_ALLOC_GFP(oh, sizeof *oh, CFS_ALLOC_IO);
if (oh != NULL) {
+ oh->ot_quota_trans = &oti->oti_quota_trans;
+ memset(oh->ot_quota_trans, 0, sizeof(*oh->ot_quota_trans));
th = &oh->ot_super;
th->th_dev = d;
th->th_result = 0;
GOTO(out, rc);
if (!osd_param_is_sane(dev, th)) {
- CWARN("%s: too many transaction credits (%d > %d)\n",
- d->dd_lu_dev.ld_obd->obd_name, oh->ot_credits,
- osd_journal(dev)->j_max_transaction_buffers);
+ CWARN("%.16s: too many transaction credits (%d > %d)\n",
+ LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
+ oh->ot_credits,
+ osd_journal(dev)->j_max_transaction_buffers);
/* XXX Limit the credits to 'max_transaction_buffers', and
* let the underlying filesystem to catch the error if
* we really need so many credits.
lu_device_get(&d->dd_lu_dev);
oh->ot_dev_link = lu_ref_add(&d->dd_lu_dev.ld_reference,
"osd-tx", th);
-
- /*
- * XXX: current rule is that we first start tx,
- * then lock object(s), but we can't use
- * this rule for data (due to locking specifics
- * in ldiskfs). also in long-term we'd like to
- * use usually-used (locks;tx) ordering. so,
- * UGLY thing is that we'll use one ordering for
- * data (ofd) and reverse ordering for metadata
- * (mdd). then at some point we'll fix the latter
- */
- if (lu_device_is_md(&d->dd_lu_dev)) {
- LASSERT(oti->oti_r_locks == 0);
- LASSERT(oti->oti_w_locks == 0);
- }
-
oti->oti_txns++;
rc = 0;
} else {
struct osd_thandle *oh;
struct osd_thread_info *oti = osd_oti_get(env);
struct osd_iobuf *iobuf = &oti->oti_iobuf;
-
+ struct qsd_instance *qsd = oti->oti_dev->od_quota_slave;
ENTRY;
oh = container_of0(th, struct osd_thandle, ot_super);
+ if (qsd != NULL)
+ /* inform the quota slave device that the transaction is
+ * stopping */
+ qsd_op_end(env, qsd, oh->ot_quota_trans);
+ oh->ot_quota_trans = NULL;
+
if (oh->ot_handle != NULL) {
handle_t *hdl = oh->ot_handle;
- hdl->h_sync = th->th_sync;
-
/*
* add commit callback
* notice we don't do this in osd_trans_start()
LASSERT(oti->oti_txns == 1);
oti->oti_txns--;
- /*
- * XXX: current rule is that we first start tx,
- * then lock object(s), but we can't use
- * this rule for data (due to locking specifics
- * in ldiskfs). also in long-term we'd like to
- * use usually-used (locks;tx) ordering. so,
- * UGLY thing is that we'll use one ordering for
- * data (ofd) and reverse ordering for metadata
- * (mdd). then at some point we'll fix the latter
- */
- if (lu_device_is_md(&th->th_dev->dd_lu_dev)) {
- LASSERT(oti->oti_r_locks == 0);
- LASSERT(oti->oti_w_locks == 0);
- }
rc = dt_txn_hook_stop(env, th);
if (rc != 0)
CERROR("Failure in transaction hook: %d\n", rc);
+
+ /* hook functions might modify th_sync */
+ hdl->h_sync = th->th_sync;
+
oh->ot_handle = NULL;
OSD_CHECK_SLOW_TH(oh, oti->oti_dev,
rc = ldiskfs_journal_stop(hdl));
static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
{
-       struct osd_thandle *oh = container_of0(th, struct osd_thandle,
-       ot_super);
+       struct osd_thandle *oh = container_of0(th, struct osd_thandle,
+       ot_super);
-       cfs_list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);
+       /* Only properly initialized callbacks may be registered; the
+        * function pointer itself must be checked -- the address of the
+        * dcb_func member is never NULL, so asserting on it is useless. */
+       LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
+       LASSERT(dcb->dcb_func != NULL);
+       cfs_list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);
-       return 0;
+       return 0;
}
/*
osd_index_fini(obj);
if (inode != NULL) {
+ struct qsd_instance *qsd = osd_obj2dev(obj)->od_quota_slave;
+ qid_t uid = inode->i_uid;
+ qid_t gid = inode->i_gid;
+
iput(inode);
obj->oo_inode = NULL;
+
+ if (qsd != NULL) {
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct lquota_id_info *qi = &info->oti_qi;
+
+ /* Release granted quota to master if necessary */
+ qi->lqi_id.qid_uid = uid;
+ qsd_adjust_quota(env, qsd, &qi->lqi_id, USRQUOTA);
+
+ qi->lqi_id.qid_uid = gid;
+ qsd_adjust_quota(env, qsd, &qi->lqi_id, GRPQUOTA);
+ }
}
}
d = o->oo_dir->od_container.ic_descr;
else
d = NULL;
- return (*p)(env, cookie, LUSTRE_OSD_NAME"-object@%p(i:%p:%lu/%u)[%s]",
+ return (*p)(env, cookie,
+ LUSTRE_OSD_LDISKFS_NAME"-object@%p(i:%p:%lu/%u)[%s]",
o, o->oo_inode,
o->oo_inode ? o->oo_inode->i_ino : 0UL,
o->oo_inode ? o->oo_inode->i_generation : 0,
struct kstatfs *ksfs;
int result = 0;
+ if (unlikely(osd->od_mnt == NULL))
+ return -EINPROGRESS;
+
/* osd_lproc.c call this without env, allocate ksfs for that case */
if (unlikely(env == NULL)) {
OBD_ALLOC_PTR(ksfs);
ksfs = &osd_oti_get(env)->oti_ksfs;
}
- cfs_spin_lock(&osd->od_osfs_lock);
- /* cache 1 second */
- if (cfs_time_before_64(osd->od_osfs_age, cfs_time_shift_64(-1))) {
- result = ll_do_statfs(sb, ksfs);
- if (likely(result == 0)) { /* N.B. statfs can't really fail */
- osd->od_osfs_age = cfs_time_current_64();
- statfs_pack(&osd->od_statfs, ksfs);
- }
- }
+ cfs_spin_lock(&osd->od_osfs_lock);
+ /* cache 1 second */
+ if (cfs_time_before_64(osd->od_osfs_age, cfs_time_shift_64(-1))) {
+ result = sb->s_op->statfs(sb->s_root, ksfs);
+ if (likely(result == 0)) { /* N.B. statfs can't really fail */
+ osd->od_osfs_age = cfs_time_current_64();
+ statfs_pack(&osd->od_statfs, ksfs);
+ if (sb->s_flags & MS_RDONLY)
+ sfs->os_state = OS_STATE_READONLY;
+ }
+ }
if (likely(result == 0))
*sfs = osd->od_statfs;
return result;
}
+/**
+ * Estimate space needed for file creations. We assume the largest filename
+ * which is 2^64 - 1, hence a filename of 20 chars.
+ * This is 28 bytes per object which is 28MB for 1M objects ... not so bad.
+ */
+#ifdef __LDISKFS_DIR_REC_LEN
+#define PER_OBJ_USAGE __LDISKFS_DIR_REC_LEN(20)
+#else
+#define PER_OBJ_USAGE LDISKFS_DIR_REC_LEN(20)
+#endif
+
/*
* Concurrency: doesn't access mutable data.
*/
/*
* XXX should be taken from not-yet-existing fs abstraction layer.
*/
+ param->ddp_mnt = osd_dt_dev(dev)->od_mnt;
param->ddp_max_name_len = LDISKFS_NAME_LEN;
param->ddp_max_nlink = LDISKFS_LINK_MAX;
param->ddp_block_shift = sb->s_blocksize_bits;
+ param->ddp_mount_type = LDD_MT_LDISKFS;
+ param->ddp_maxbytes = sb->s_maxbytes;
+ /* Overhead estimate should be fairly accurate, so we really take a tiny
+ * error margin which also avoids fragmenting the filesystem too much */
+ param->ddp_grant_reserved = 2; /* end up to be 1.9% after conversion */
+ /* inode are statically allocated, so per-inode space consumption
+ * is the space consumed by the directory entry */
+ param->ddp_inodespace = PER_OBJ_USAGE;
+ /* per-fragment overhead to be used by the client code */
+ param->ddp_grant_frag = 6 * LDISKFS_BLOCK_SIZE(sb);
param->ddp_mntopts = 0;
if (test_opt(sb, XATTR_USER))
param->ddp_mntopts |= MNTOPT_USERXATTR;
*/
static int osd_sync(const struct lu_env *env, struct dt_device *d)
{
- CDEBUG(D_HA, "syncing OSD %s\n", LUSTRE_OSD_NAME);
+ CDEBUG(D_HA, "syncing OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
return ldiskfs_force_commit(osd_sb(osd_dt_dev(d)));
}
struct super_block *s = osd_sb(osd_dt_dev(d));
ENTRY;
- CDEBUG(D_HA, "async commit OSD %s\n", LUSTRE_OSD_NAME);
+ CDEBUG(D_HA, "async commit OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
RETURN(s->s_op->sync_fs(s, 0));
}
int rc;
ENTRY;
- CERROR("*** setting device %s read-only ***\n", LUSTRE_OSD_NAME);
+ CERROR("*** setting %s read-only ***\n", osd_dt_dev(d)->od_svname);
rc = __lvfs_set_rdonly(sb->s_bdev, LDISKFS_SB(sb)->journal_bdev);
RETURN(rc);
}
/**
- * Concurrency: serialization provided by callers.
- */
-static void osd_init_quota_ctxt(const struct lu_env *env, struct dt_device *d,
- struct dt_quota_ctxt *ctxt, void *data)
-{
- struct obd_device *obd = (void *)ctxt;
- struct vfsmount *mnt = (struct vfsmount *)data;
- ENTRY;
-
- obd->u.obt.obt_sb = mnt->mnt_root->d_inode->i_sb;
- OBD_SET_CTXT_MAGIC(&obd->obd_lvfs_ctxt);
- obd->obd_lvfs_ctxt.pwdmnt = mnt;
- obd->obd_lvfs_ctxt.pwd = mnt->mnt_root;
- obd->obd_lvfs_ctxt.fs = get_ds();
-
- EXIT;
-}
-
-/**
* Note: we do not count into QUOTA here.
* If we mount with --data_journal we may need more.
*/
.dt_ro = osd_ro,
.dt_commit_async = osd_commit_async,
.dt_init_capa_ctxt = osd_init_capa_ctxt,
- .dt_init_quota_ctxt= osd_init_quota_ctxt,
};
static void osd_object_read_lock(const struct lu_env *env,
}
static struct timespec *osd_inode_time(const struct lu_env *env,
- struct inode *inode, __u64 seconds)
+ struct inode *inode, __u64 seconds)
{
- struct osd_thread_info *oti = osd_oti_get(env);
- struct timespec *t = &oti->oti_time;
+ struct osd_thread_info *oti = osd_oti_get(env);
+ struct timespec *t = &oti->oti_time;
- t->tv_sec = seconds;
- t->tv_nsec = 0;
- *t = timespec_trunc(*t, get_sb_time_gran(inode->i_sb));
- return t;
+ t->tv_sec = seconds;
+ t->tv_nsec = 0;
+ *t = timespec_trunc(*t, inode->i_sb->s_time_gran);
+ return t;
}
attr->la_flags = LDISKFS_I(inode)->i_flags;
attr->la_nlink = inode->i_nlink;
attr->la_rdev = inode->i_rdev;
- attr->la_blksize = ll_inode_blksize(inode);
- attr->la_blkbits = inode->i_blkbits;
+ attr->la_blksize = 1 << inode->i_blkbits;
+ attr->la_blkbits = inode->i_blkbits;
}
static int osd_attr_get(const struct lu_env *env,
const struct lu_attr *attr,
struct thandle *handle)
{
- struct osd_thandle *oh;
- struct osd_object *obj;
+ struct osd_thandle *oh;
+ struct osd_object *obj;
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct lquota_id_info *qi = &info->oti_qi;
+ long long bspace;
+ int rc = 0;
+ bool allocated;
+ ENTRY;
- LASSERT(dt != NULL);
- LASSERT(handle != NULL);
+ LASSERT(dt != NULL);
+ LASSERT(handle != NULL);
- obj = osd_dt_obj(dt);
- LASSERT(osd_invariant(obj));
+ obj = osd_dt_obj(dt);
+ LASSERT(osd_invariant(obj));
- oh = container_of0(handle, struct osd_thandle, ot_super);
- LASSERT(oh->ot_handle == NULL);
+ oh = container_of0(handle, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle == NULL);
- OSD_DECLARE_OP(oh, attr_set);
- oh->ot_credits += osd_dto_credits_noquota[DTO_ATTR_SET_BASE];
+ OSD_DECLARE_OP(oh, attr_set);
+ oh->ot_credits += osd_dto_credits_noquota[DTO_ATTR_SET_BASE];
- if (attr && attr->la_valid & LA_UID) {
- if (obj->oo_inode)
- osd_declare_qid(dt, oh, USRQUOTA, obj->oo_inode->i_uid,
- obj->oo_inode);
- osd_declare_qid(dt, oh, USRQUOTA, attr->la_uid, NULL);
- }
- if (attr && attr->la_valid & LA_GID) {
- if (obj->oo_inode)
- osd_declare_qid(dt, oh, GRPQUOTA, obj->oo_inode->i_gid,
- obj->oo_inode);
- osd_declare_qid(dt, oh, GRPQUOTA, attr->la_gid, NULL);
- }
+ if (attr == NULL || obj->oo_inode == NULL)
+ RETURN(rc);
- return 0;
+ bspace = obj->oo_inode->i_blocks;
+ bspace <<= obj->oo_inode->i_sb->s_blocksize_bits;
+ bspace = toqb(bspace);
+
+       /* Changing ownership is always performed by the super user, it should not
+ * fail with EDQUOT.
+ *
+ * We still need to call the osd_declare_qid() to calculate the journal
+ * credits for updating quota accounting files and to trigger quota
+ * space adjustment once the operation is completed.*/
+ if ((attr->la_valid & LA_UID) != 0 &&
+ attr->la_uid != obj->oo_inode->i_uid) {
+ qi->lqi_type = USRQUOTA;
+
+ /* inode accounting */
+ qi->lqi_is_blk = false;
+
+ /* one more inode for the new owner ... */
+ qi->lqi_id.qid_uid = attr->la_uid;
+ qi->lqi_space = 1;
+ allocated = (attr->la_uid == 0) ? true : false;
+ rc = osd_declare_qid(env, oh, qi, allocated, NULL);
+ if (rc == -EDQUOT || rc == -EINPROGRESS)
+ rc = 0;
+ if (rc)
+ RETURN(rc);
+
+ /* and one less inode for the current uid */
+ qi->lqi_id.qid_uid = obj->oo_inode->i_uid;
+ qi->lqi_space = -1;
+ rc = osd_declare_qid(env, oh, qi, true, NULL);
+ if (rc == -EDQUOT || rc == -EINPROGRESS)
+ rc = 0;
+ if (rc)
+ RETURN(rc);
+
+ /* block accounting */
+ qi->lqi_is_blk = true;
+
+ /* more blocks for the new owner ... */
+ qi->lqi_id.qid_uid = attr->la_uid;
+ qi->lqi_space = bspace;
+ allocated = (attr->la_uid == 0) ? true : false;
+ rc = osd_declare_qid(env, oh, qi, allocated, NULL);
+ if (rc == -EDQUOT || rc == -EINPROGRESS)
+ rc = 0;
+ if (rc)
+ RETURN(rc);
+
+ /* and finally less blocks for the current owner */
+ qi->lqi_id.qid_uid = obj->oo_inode->i_uid;
+ qi->lqi_space = -bspace;
+ rc = osd_declare_qid(env, oh, qi, true, NULL);
+ if (rc == -EDQUOT || rc == -EINPROGRESS)
+ rc = 0;
+ if (rc)
+ RETURN(rc);
+ }
+
+ if (attr->la_valid & LA_GID &&
+ attr->la_gid != obj->oo_inode->i_gid) {
+ qi->lqi_type = GRPQUOTA;
+
+ /* inode accounting */
+ qi->lqi_is_blk = false;
+
+ /* one more inode for the new group owner ... */
+ qi->lqi_id.qid_gid = attr->la_gid;
+ qi->lqi_space = 1;
+ allocated = (attr->la_gid == 0) ? true : false;
+ rc = osd_declare_qid(env, oh, qi, allocated, NULL);
+ if (rc == -EDQUOT || rc == -EINPROGRESS)
+ rc = 0;
+ if (rc)
+ RETURN(rc);
+
+ /* and one less inode for the current gid */
+ qi->lqi_id.qid_gid = obj->oo_inode->i_gid;
+ qi->lqi_space = -1;
+ rc = osd_declare_qid(env, oh, qi, true, NULL);
+ if (rc == -EDQUOT || rc == -EINPROGRESS)
+ rc = 0;
+ if (rc)
+ RETURN(rc);
+
+ /* block accounting */
+ qi->lqi_is_blk = true;
+
+ /* more blocks for the new owner ... */
+ qi->lqi_id.qid_gid = attr->la_gid;
+ qi->lqi_space = bspace;
+ allocated = (attr->la_gid == 0) ? true : false;
+ rc = osd_declare_qid(env, oh, qi, allocated, NULL);
+ if (rc == -EDQUOT || rc == -EINPROGRESS)
+ rc = 0;
+ if (rc)
+ RETURN(rc);
+
+ /* and finally less blocks for the current owner */
+ qi->lqi_id.qid_gid = obj->oo_inode->i_gid;
+ qi->lqi_space = -bspace;
+ rc = osd_declare_qid(env, oh, qi, true, NULL);
+ if (rc == -EDQUOT || rc == -EINPROGRESS)
+ rc = 0;
+ if (rc)
+ RETURN(rc);
+ }
+
+ RETURN(rc);
}
static int osd_inode_setattr(const struct lu_env *env,
OSD_EXEC_OP(handle, attr_set);
inode = obj->oo_inode;
- if (LDISKFS_HAS_RO_COMPAT_FEATURE(inode->i_sb,
- LDISKFS_FEATURE_RO_COMPAT_QUOTA)) {
- rc = osd_quota_transfer(inode, attr);
- if (rc)
- return rc;
- } else {
-#ifdef HAVE_QUOTA_SUPPORT
- if ((attr->la_valid & LA_UID && attr->la_uid != inode->i_uid) ||
- (attr->la_valid & LA_GID && attr->la_gid != inode->i_gid)) {
- struct osd_ctxt *save = &osd_oti_get(env)->oti_ctxt;
- struct iattr iattr;
- int rc;
-
- iattr.ia_valid = 0;
- if (attr->la_valid & LA_UID)
- iattr.ia_valid |= ATTR_UID;
- if (attr->la_valid & LA_GID)
- iattr.ia_valid |= ATTR_GID;
- iattr.ia_uid = attr->la_uid;
- iattr.ia_gid = attr->la_gid;
- osd_push_ctxt(env, save);
- rc = ll_vfs_dq_transfer(inode, &iattr) ? -EDQUOT : 0;
- osd_pop_ctxt(save);
- if (rc != 0)
- return rc;
- }
-#endif
- }
+ ll_vfs_dq_init(inode);
+
+ rc = osd_quota_transfer(inode, attr);
+ if (rc)
+ return rc;
+
cfs_spin_lock(&obj->oo_guard);
rc = osd_inode_setattr(env, inode, attr);
cfs_spin_unlock(&obj->oo_guard);
struct osd_thandle *oth;
struct dt_object *parent = NULL;
struct inode *inode;
-#ifdef HAVE_QUOTA_SUPPORT
- struct osd_ctxt *save = &info->oti_ctxt;
-#endif
LINVRNT(osd_invariant(obj));
LASSERT(obj->oo_inode == NULL);
if (hint && hint->dah_parent)
parent = hint->dah_parent;
-#ifdef HAVE_QUOTA_SUPPORT
- osd_push_ctxt(info->oti_env, save);
-#endif
inode = ldiskfs_create_inode(oth->ot_handle,
parent ? osd_dt_obj(parent)->oo_inode :
osd_sb(osd)->s_root->d_inode,
mode);
-#ifdef HAVE_QUOTA_SUPPORT
- osd_pop_ctxt(save);
-#endif
if (!IS_ERR(inode)) {
/* Do not update file c/mtime in ldiskfs.
* NB: don't need any lock because no contention at this
static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
- struct dt_object *parent, cfs_umode_t child_mode)
+ struct dt_object *parent, struct dt_object *child,
+ cfs_umode_t child_mode)
{
LASSERT(ah);
if ((valid & LA_MTIME) && (attr->la_mtime == LTIME_S(inode->i_mtime)))
attr->la_valid &= ~LA_MTIME;
- if (LDISKFS_HAS_RO_COMPAT_FEATURE(inode->i_sb,
- LDISKFS_FEATURE_RO_COMPAT_QUOTA)) {
- result = osd_quota_transfer(inode, attr);
- if (result)
- return;
- } else {
-#ifdef HAVE_QUOTA_SUPPORT
- attr->la_valid &= ~(LA_UID | LA_GID);
-#endif
- }
+ result = osd_quota_transfer(inode, attr);
+ if (result)
+ return;
if (attr->la_valid != 0) {
result = osd_inode_setattr(info->oti_env, inode, attr);
struct osd_thread_info *info = osd_oti_get(env);
struct osd_inode_id *id = &info->oti_id;
struct osd_device *osd = osd_obj2dev(obj);
- struct md_ucred *uc = md_ucred(env);
LASSERT(obj->oo_inode != NULL);
- LASSERT(uc != NULL);
osd_id_gen(id, obj->oo_inode->i_ino, obj->oo_inode->i_generation);
return osd_oi_insert(info, osd, fid, id, th);
struct dt_object_format *dof,
struct thandle *handle)
{
- struct osd_thandle *oh;
+ struct osd_thandle *oh;
+ int rc;
+ ENTRY;
LASSERT(handle != NULL);
if (fid_is_norm(lu_object_fid(&dt->do_lu))) {
OSD_DECLARE_OP(oh, insert);
oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_INSERT];
+               /* Reusing an idle OI block may cause one additional OI
+                * block to be changed. */
+ oh->ot_credits += 1;
}
/* If this is directory, then we expect . and .. to be inserted as
* well. The one directory block always needs to be created for the
oh->ot_credits += osd_dto_credits_noquota[DTO_WRITE_BASE];
}
- if (attr) {
- osd_declare_qid(dt, oh, USRQUOTA, attr->la_uid, NULL);
- osd_declare_qid(dt, oh, GRPQUOTA, attr->la_gid, NULL);
- }
- return 0;
+ if (!attr)
+ RETURN(0);
+
+ rc = osd_declare_inode_qid(env, attr->la_uid, attr->la_gid, 1, oh,
+ false, false, NULL, false);
+ RETURN(rc);
}
static int osd_object_create(const struct lu_env *env, struct dt_object *dt,
struct osd_object *obj = osd_dt_obj(dt);
struct inode *inode = obj->oo_inode;
struct osd_thandle *oh;
-
+ int rc;
ENTRY;
oh = container_of0(th, struct osd_thandle, ot_super);
OSD_DECLARE_OP(oh, destroy);
OSD_DECLARE_OP(oh, delete);
oh->ot_credits += osd_dto_credits_noquota[DTO_OBJECT_DELETE];
- oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_DELETE];
-
- osd_declare_qid(dt, oh, USRQUOTA, inode->i_uid, inode);
- osd_declare_qid(dt, oh, GRPQUOTA, inode->i_gid, inode);
+ /* XXX: So far, only normal fid needs to be inserted into the OI,
+ * so only normal fid needs to be removed from the OI also. */
+ if (fid_is_norm(lu_object_fid(&dt->do_lu))) {
+ oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_DELETE];
+               /* Recycling an idle OI leaf may cause three additional OI
+                * blocks to be changed. */
+ oh->ot_credits += 3;
+ }
- RETURN(0);
+ /* one less inode */
+ rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, -1, oh,
+ false, true, NULL, false);
+ if (rc)
+ RETURN(rc);
+ /* data to be truncated */
+ rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh, true,
+ true, NULL, false);
+ RETURN(rc);
}
static int osd_object_destroy(const struct lu_env *env,
if (fl & LU_XATTR_CREATE)
fs_flags |= XATTR_CREATE;
+ ll_vfs_dq_init(inode);
dentry->d_inode = inode;
rc = inode->i_op->setxattr(dentry, name, buf->lb_buf,
buf->lb_len, fs_flags);
LASSERT(dt_object_exists(dt));
LASSERT(inode->i_op != NULL && inode->i_op->getxattr != NULL);
- LASSERT(osd_read_locked(env, obj) || osd_write_locked(env, obj));
if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
return -EACCES;
const struct lu_buf *buf, const char *name,
int fl, struct thandle *handle)
{
- struct osd_thandle *oh;
-
- LASSERT(handle != NULL);
+ struct osd_thandle *oh;
- if (strcmp(name, XATTR_NAME_VERSION) == 0) {
- /* no credits for version */
- return 0;
- }
+ LASSERT(handle != NULL);
- oh = container_of0(handle, struct osd_thandle, ot_super);
- LASSERT(oh->ot_handle == NULL);
+ oh = container_of0(handle, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle == NULL);
- OSD_DECLARE_OP(oh, xattr_set);
- oh->ot_credits += osd_dto_credits_noquota[DTO_XATTR_SET];
+ OSD_DECLARE_OP(oh, xattr_set);
+ if (strcmp(name, XATTR_NAME_VERSION) == 0)
+ oh->ot_credits += osd_dto_credits_noquota[DTO_ATTR_SET_BASE];
+ else
+ oh->ot_credits += osd_dto_credits_noquota[DTO_XATTR_SET];
- return 0;
+ return 0;
}
/*
OSD_EXEC_OP(handle, xattr_set);
+ ll_vfs_dq_init(inode);
dentry->d_inode = inode;
rc = inode->i_op->removexattr(dentry, name);
return rc;
static int osd_object_sync(const struct lu_env *env, struct dt_object *dt)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct inode *inode = obj->oo_inode;
- struct osd_thread_info *info = osd_oti_get(env);
- struct dentry *dentry = &info->oti_obj_dentry;
- struct file *file = &info->oti_file;
- int rc;
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct inode *inode = obj->oo_inode;
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct dentry *dentry = &info->oti_obj_dentry;
+ struct file *file = &info->oti_file;
+ int rc;
- ENTRY;
+ ENTRY;
- dentry->d_inode = inode;
- file->f_dentry = dentry;
- file->f_mapping = inode->i_mapping;
- file->f_op = inode->i_fop;
- LOCK_INODE_MUTEX(inode);
- rc = file->f_op->fsync(file, dentry, 0);
- UNLOCK_INODE_MUTEX(inode);
- RETURN(rc);
+ dentry->d_inode = inode;
+ file->f_dentry = dentry;
+ file->f_mapping = inode->i_mapping;
+ file->f_op = inode->i_fop;
+ mutex_lock(&inode->i_mutex);
+ rc = file->f_op->fsync(file, dentry, 0);
+ mutex_unlock(&inode->i_mutex);
+ RETURN(rc);
}
static int osd_data_get(const struct lu_env *env, struct dt_object *dt,
return result;
result = iam_container_setup(bag);
- if (result != 0)
- goto out;
-
- if (osd_obj2dev(obj)->od_iop_mode) {
- u32 ptr = bag->ic_descr->id_ops->id_root_ptr(bag);
-
- bag->ic_root_bh = ldiskfs_bread(NULL, obj->oo_inode,
- ptr, 0, &result);
- }
-
- out:
if (result == 0)
obj->oo_dt.do_index_ops = &osd_index_iam_ops;
else
}
LINVRNT(osd_invariant(obj));
+ if (is_quota_glb_feat(feat))
+ result = osd_quota_migration(env, dt, feat);
+
return result;
}
+/*
+ * ->do_attr_get() method for the special OTable-it (OI scrub/LFSCK
+ * iterator) object.  Such an object has no backing inode (it is marked
+ * LOHA_EXISTS without one during osd_object_init()), so there are no
+ * attributes to report: mark no fields of @attr valid and succeed.
+ */
+static int osd_otable_it_attr_get(const struct lu_env *env,
+                                 struct dt_object *dt,
+                                 struct lu_attr *attr,
+                                 struct lustre_capa *capa)
+{
+       attr->la_valid = 0;
+       return 0;
+}
+
static const struct dt_object_operations osd_obj_ops = {
.do_read_lock = osd_object_read_lock,
.do_write_lock = osd_object_write_lock,
.do_data_get = osd_data_get,
};
+/* Minimal object operations for the inode-less OTable-it (OI scrub/LFSCK
+ * iterator) object: only attr_get and index_try are meaningful for it. */
+static const struct dt_object_operations osd_obj_otable_it_ops = {
+       .do_attr_get    = osd_otable_it_attr_get,
+       .do_index_try   = osd_index_try,
+};
+
static int osd_index_declare_iam_delete(const struct lu_env *env,
struct dt_object *dt,
const struct dt_key *key,
struct thandle *handle,
struct lustre_capa *capa)
{
- struct osd_object *obj = osd_dt_obj(dt);
- struct osd_thandle *oh;
- struct iam_path_descr *ipd;
- struct iam_container *bag = &obj->oo_dir->od_container;
- int rc;
+ struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_thandle *oh;
+ struct iam_path_descr *ipd;
+ struct iam_container *bag = &obj->oo_dir->od_container;
+ int rc;
ENTRY;
LASSERT(oh->ot_handle != NULL);
LASSERT(oh->ot_handle->h_transaction != NULL);
+ if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
+ /* swab quota uid/gid provided by caller */
+ oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
+ key = (const struct dt_key *)&oti->oti_quota_id;
+ }
+
rc = iam_delete(oh->ot_handle, bag, (const struct iam_key *)key, ipd);
osd_ipd_put(env, bag, ipd);
LINVRNT(osd_invariant(obj));
struct thandle *handle)
{
struct osd_thandle *oh;
+ struct inode *inode;
+ int rc;
+ ENTRY;
LASSERT(dt_object_exists(dt));
LASSERT(handle != NULL);
OSD_DECLARE_OP(oh, delete);
oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_DELETE];
- LASSERT(osd_dt_obj(dt)->oo_inode);
- osd_declare_qid(dt, oh, USRQUOTA, osd_dt_obj(dt)->oo_inode->i_uid,
- osd_dt_obj(dt)->oo_inode);
- osd_declare_qid(dt, oh, GRPQUOTA, osd_dt_obj(dt)->oo_inode->i_gid,
- osd_dt_obj(dt)->oo_inode);
+ inode = osd_dt_obj(dt)->oo_inode;
+ LASSERT(inode);
- return 0;
+ rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
+ true, true, NULL, false);
+ RETURN(rc);
}
static inline int osd_get_fid_from_dentry(struct ldiskfs_dir_entry_2 *de,
if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_DELETE))
RETURN(-EACCES);
+ ll_vfs_dq_init(dir);
dentry = osd_child_dentry_get(env, obj,
(char *)key, strlen((char *)key));
/* got ipd now we can start iterator. */
iam_it_init(it, bag, 0, ipd);
+ if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
+ /* swab quota uid/gid provided by caller */
+ oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
+ key = (const struct dt_key *)&oti->oti_quota_id;
+ }
+
rc = iam_it_get(it, (struct iam_key *)key);
if (rc >= 0) {
if (S_ISDIR(obj->oo_inode->i_mode))
iam_rec = (struct iam_rec *) rec;
iam_reccpy(&it->ii_path.ip_leaf, (struct iam_rec *)iam_rec);
+
if (S_ISDIR(obj->oo_inode->i_mode))
osd_fid_unpack((struct lu_fid *) rec,
(struct osd_fid_pack *)iam_rec);
+ else if (fid_is_quota(lu_object_fid(&dt->do_lu)))
+ osd_quota_unpack(obj, rec);
}
+
iam_it_put(it);
iam_it_fini(it);
osd_ipd_put(env, bag, ipd);
struct iam_path_descr *ipd;
struct osd_thandle *oh;
struct iam_container *bag = &obj->oo_dir->od_container;
-#ifdef HAVE_QUOTA_SUPPORT
- cfs_cap_t save = cfs_curproc_cap_pack();
-#endif
struct osd_thread_info *oti = osd_oti_get(env);
- struct iam_rec *iam_rec = (struct iam_rec *)oti->oti_ldp;
+ struct iam_rec *iam_rec;
int rc;
ENTRY;
oh = container_of0(th, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle != NULL);
LASSERT(oh->ot_handle->h_transaction != NULL);
-#ifdef HAVE_QUOTA_SUPPORT
- if (ignore_quota)
- cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
- else
- cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
-#endif
- if (S_ISDIR(obj->oo_inode->i_mode))
- osd_fid_pack((struct osd_fid_pack *)iam_rec, rec, &oti->oti_fid);
- else
- iam_rec = (struct iam_rec *) rec;
+ if (S_ISDIR(obj->oo_inode->i_mode)) {
+ iam_rec = (struct iam_rec *)oti->oti_ldp;
+ osd_fid_pack((struct osd_fid_pack *)iam_rec, rec, &oti->oti_fid);
+ } else if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
+ /* pack quota uid/gid */
+ oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
+ key = (const struct dt_key *)&oti->oti_quota_id;
+ /* pack quota record */
+ rec = osd_quota_pack(obj, rec, &oti->oti_quota_rec);
+ iam_rec = (struct iam_rec *)rec;
+ } else {
+ iam_rec = (struct iam_rec *)rec;
+ }
+
rc = iam_insert(oh->ot_handle, bag, (const struct iam_key *)key,
iam_rec, ipd);
-#ifdef HAVE_QUOTA_SUPPORT
- cfs_curproc_cap_unpack(save);
-#endif
osd_ipd_put(env, bag, ipd);
LINVRNT(osd_invariant(obj));
RETURN(rc);
} else {
child->d_fsdata = NULL;
}
+ LASSERT(pobj->oo_inode);
+ ll_vfs_dq_init(pobj->oo_inode);
rc = osd_ldiskfs_add_entry(oth->ot_handle, child, cinode, hlock);
RETURN(rc);
int rc;
ENTRY;
+ if (!fid_is_norm(fid) && !fid_is_igif(fid))
+ RETURN(0);
+
again:
rc = osd_oi_lookup(oti, dev, fid, id);
if (rc != 0 && rc != -ENOENT)
RETURN(rc);
}
- if (!scrub->os_no_scrub && ++once == 1) {
+ if (!dev->od_noscrub && ++once == 1) {
CDEBUG(D_LFSCK, "Trigger OI scrub by RPC for "DFID"\n",
PFID(fid));
rc = osd_scrub_start(dev);
- CDEBUG(D_LFSCK, "Trigger OI scrub by RPC for "DFID", rc = %d\n",
- PFID(fid), rc);
+ LCONSOLE_ERROR("%.16s: trigger OI scrub by RPC for "DFID
+ ", rc = %d [2]\n",
+ LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
+ PFID(fid), rc);
if (rc == 0)
goto again;
}
- RETURN(rc = -EREMCHG);
+ RETURN(0);
}
/**
rc = osd_ea_fid_get(env, obj, ino, fid, &oic->oic_lid);
else
osd_id_gen(&oic->oic_lid, ino, OSD_OII_NOGEN);
-
- if (rc != 0 || !fid_is_norm(fid))
+ if (rc != 0) {
+ fid_zero(&oic->oic_fid);
GOTO(out, rc);
+ }
oic->oic_fid = *fid;
if ((scrub->os_pos_current <= ino) &&
(sf->sf_flags & SF_INCONSISTENT ||
ldiskfs_test_bit(osd_oi_fid2idx(dev, fid),
sf->sf_oi_bitmap)))
- rc = osd_consistency_check(oti, dev, oic);
+ osd_consistency_check(oti, dev, oic);
} else {
rc = -ENOENT;
}
struct lu_object *luch;
struct lu_object *lo;
- luch = lu_object_find(env, ludev, fid, NULL);
+ /*
+ * at this point topdev might not exist yet
+ * (i.e. MGS is preparing profiles). so we can
+ * not rely on topdev and instead lookup with
+ * our device passed as topdev. this can't work
+ * if the object isn't cached yet (as osd doesn't
+ * allocate lu_header). IOW, the object must be
+ * in the cache, otherwise lu_object_alloc() crashes
+ * -bzzz
+ */
+ luch = lu_object_find_at(env, ludev, fid, NULL);
if (!IS_ERR(luch)) {
if (lu_object_exists(luch)) {
lo = lu_object_locate(luch->lo_header, ludev->ld_type);
struct thandle *handle)
{
struct osd_thandle *oh;
+ struct inode *inode;
+ int rc;
+ ENTRY;
LASSERT(dt_object_exists(dt));
LASSERT(handle != NULL);
OSD_DECLARE_OP(oh, insert);
oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_INSERT];
- LASSERT(osd_dt_obj(dt)->oo_inode);
- osd_declare_qid(dt, oh, USRQUOTA, osd_dt_obj(dt)->oo_inode->i_uid,
- osd_dt_obj(dt)->oo_inode);
- osd_declare_qid(dt, oh, GRPQUOTA, osd_dt_obj(dt)->oo_inode->i_gid,
- osd_dt_obj(dt)->oo_inode);
+ inode = osd_dt_obj(dt)->oo_inode;
+ LASSERT(inode);
- return 0;
+ /* We ignore block quota on meta pool (MDTs), so needn't
+ * calculate how many blocks will be consumed by this index
+ * insert */
+ rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
+ true, true, NULL, false);
+ RETURN(rc);
}
/**
struct lu_fid *fid = (struct lu_fid *) rec;
const char *name = (const char *)key;
struct osd_object *child;
-#ifdef HAVE_QUOTA_SUPPORT
- cfs_cap_t save = cfs_curproc_cap_pack();
-#endif
int rc;
ENTRY;
child = osd_object_find(env, dt, fid);
if (!IS_ERR(child)) {
-#ifdef HAVE_QUOTA_SUPPORT
- if (ignore_quota)
- cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
- else
- cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
-#endif
rc = osd_ea_add_rec(env, obj, child->oo_inode, name, rec, th);
-#ifdef HAVE_QUOTA_SUPPORT
- cfs_curproc_cap_unpack(save);
-#endif
osd_object_put(env, child);
} else {
rc = PTR_ERR(child);
static int osd_it_iam_get(const struct lu_env *env,
struct dt_it *di, const struct dt_key *key)
{
- struct osd_it_iam *it = (struct osd_it_iam *)di;
+ struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_it_iam *it = (struct osd_it_iam *)di;
+
+ /* quota indexes store their 64-bit uid/gid keys in little-endian on
+ * disk; convert the caller's CPU-order ID into a per-thread scratch
+ * buffer so the caller's key is never modified in place */
+ if (fid_is_quota(lu_object_fid(&it->oi_obj->oo_dt.do_lu))) {
+ /* swab quota uid/gid */
+ oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
+ key = (struct dt_key *)&oti->oti_quota_id;
+ }
return iam_it_get(&it->oi_it, (const struct iam_key *)key);
}
static struct dt_key *osd_it_iam_key(const struct lu_env *env,
const struct dt_it *di)
{
- struct osd_it_iam *it = (struct osd_it_iam *)di;
+ struct osd_thread_info *oti = osd_oti_get(env);
+ struct osd_it_iam *it = (struct osd_it_iam *)di;
+ struct osd_object *obj = it->oi_obj;
+ struct dt_key *key;
+
+ key = (struct dt_key *)iam_it_key_get(&it->oi_it);
- return (struct dt_key *)iam_it_key_get(&it->oi_it);
+ /* mirror of osd_it_iam_get(): quota keys are little-endian on disk,
+ * so convert back to CPU order in the per-thread buffer before
+ * handing the key to the caller */
+ if (!IS_ERR(key) && fid_is_quota(lu_object_fid(&obj->oo_dt.do_lu))) {
+ /* swab quota uid/gid */
+ oti->oti_quota_id = le64_to_cpu(*((__u64 *)key));
+ key = (struct dt_key *)&oti->oti_quota_id;
+ }
+
+ return key;
}
/**
/* IAM does not store object type in IAM index (dir) */
osd_it_pack_dirent(lde, fid, hash, name, namelen,
0, LUDA_FID);
+ } else if (fid_is_quota(lu_object_fid(&it->oi_obj->oo_dt.do_lu))) {
+ iam_reccpy(&it->oi_it.ii_path.ip_leaf,
+ (struct iam_rec *)dtrec);
+ osd_quota_unpack(it->oi_obj, dtrec);
} else {
iam_reccpy(&it->oi_it.ii_path.ip_leaf,
(struct iam_rec *)dtrec);
it->oie_file.f_pos = 0;
it->oie_file.f_dentry = obj_dentry;
if (attr & LUDA_64BITHASH)
- it->oie_file.f_flags = O_64BITHASH;
+ it->oie_file.f_mode |= FMODE_64BITHASH;
else
- it->oie_file.f_flags = O_32BITHASH;
+ it->oie_file.f_mode |= FMODE_32BITHASH;
it->oie_file.f_mapping = obj->oo_inode->i_mapping;
it->oie_file.f_op = obj->oo_inode->i_fop;
it->oie_file.private_data = NULL;
it->oie_it_dirent++;
RETURN(0);
} else {
- if (it->oie_file.f_pos == LDISKFS_HTREE_EOF)
+ if (it->oie_file.f_pos == ldiskfs_get_htree_eof(&it->oie_file))
rc = +1;
else
rc = osd_ldiskfs_it_fill(env, di);
it->oie_dirent->oied_name,
it->oie_dirent->oied_namelen,
it->oie_dirent->oied_type, attr);
-
- if (!fid_is_norm(fid))
- RETURN(0);
-
oic->oic_fid = *fid;
if ((scrub->os_pos_current <= ino) &&
(sf->sf_flags & SF_INCONSISTENT ||
ldiskfs_test_bit(osd_oi_fid2idx(dev, fid), sf->sf_oi_bitmap)))
- rc = osd_consistency_check(oti, dev, oic);
+ osd_consistency_check(oti, dev, oic);
RETURN(rc);
}
return -EACCES;
rc = osd_ea_lookup_rec(env, obj, rec, key);
-
if (rc == 0)
rc = +1;
RETURN(rc);
+/*
+ * Mount the backing ldiskfs filesystem for this OSD device.
+ *
+ * lustre_cfg slots as read below: 0 = service/obd name, 1 = block device
+ * path, 2 = "<s_flags>[:<lmd_flags>]", 3 = extra mount options.
+ * NOTE(review): slot layout inferred from the lustre_cfg_string() calls
+ * here — confirm against the caller that builds this config.
+ *
+ * Returns 0 on success (or if already mounted), negative errno on
+ * failure; o->od_mnt is left NULL on failure.
+ */
static int osd_mount(const struct lu_env *env,
struct osd_device *o, struct lustre_cfg *cfg)
{
- struct lustre_mount_info *lmi;
- const char *dev = lustre_cfg_string(cfg, 0);
- struct lustre_disk_data *ldd;
- struct lustre_sb_info *lsi;
- int rc = 0;
-
+ const char *name = lustre_cfg_string(cfg, 0);
+ const char *dev = lustre_cfg_string(cfg, 1);
+ const char *opts;
+ unsigned long page, s_flags, lmd_flags = 0;
+ struct page *__page;
+ struct file_system_type *type;
+ char *options = NULL;
+ char *str;
+ int rc = 0;
ENTRY;
+ /* setup is idempotent: a second call on a mounted device is a no-op */
+ if (o->od_mnt != NULL)
+ RETURN(0);
+
o->od_fsops = fsfilt_get_ops(mt_str(LDD_MT_LDISKFS));
if (o->od_fsops == NULL) {
CERROR("Can't find fsfilt_ldiskfs\n");
RETURN(-ENOTSUPP);
}
- if (o->od_mount != NULL) {
- CERROR("Already mounted (%s)\n", dev);
- RETURN(-EEXIST);
- }
+ /* one whole page serves as the scratch buffer for the mount-options
+ * string assembled below */
+ OBD_PAGE_ALLOC(__page, CFS_ALLOC_STD);
+ if (__page == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ /* slot 2 is "<s_flags>" optionally followed by ":<lmd_flags>" */
+ str = lustre_cfg_string(cfg, 2);
+ s_flags = simple_strtoul(str, NULL, 0);
+ str = strstr(str, ":");
+ if (str)
+ lmd_flags = simple_strtoul(str + 1, NULL, 0);
+ opts = lustre_cfg_string(cfg, 3);
+ page = (unsigned long)cfs_page_address(__page);
+ options = (char *)page;
+ *options = '\0';
+ if (opts == NULL)
+ strcat(options, "user_xattr,acl")
+ else
+ strcat(options, opts);
- /* get mount */
- lmi = server_get_mount(dev);
- if (lmi == NULL) {
- CERROR("Cannot get mount info for %s!\n", dev);
- RETURN(-EFAULT);
- }
+ /* Glom up mount options */
+ if (*options != '\0')
+ strcat(options, ",");
+ strlcat(options, "no_mbcache", CFS_PAGE_SIZE);
- LASSERT(lmi != NULL);
- /* save lustre_mount_info in dt_device */
- o->od_mount = lmi;
- o->od_mnt = lmi->lmi_mnt;
+ type = get_fs_type("ldiskfs");
+ if (!type) {
+ CERROR("%s: cannot find ldiskfs module\n", name);
+ GOTO(out, rc = -ENODEV);
+ }
+
+ o->od_mnt = vfs_kern_mount(type, s_flags, dev, options);
+ /* the mounted superblock pins the filesystem module on its own, so
+ * the reference taken by get_fs_type() can be dropped here */
+ cfs_module_put(type->owner);
+
+ if (IS_ERR(o->od_mnt)) {
+ rc = PTR_ERR(o->od_mnt);
+ CERROR("%s: can't mount %s: %d\n", name, dev, rc);
+ o->od_mnt = NULL;
+ GOTO(out, rc);
+ }
- lsi = s2lsi(lmi->lmi_sb);
- ldd = lsi->lsi_ldd;
+ if (lvfs_check_rdonly(o->od_mnt->mnt_sb->s_bdev)) {
+ CERROR("%s: underlying device %s is marked as read-only. "
+ "Setup failed\n", name, dev);
+ mntput(o->od_mnt);
+ o->od_mnt = NULL;
+ GOTO(out, rc = -EROFS);
+ }
- if (ldd->ldd_flags & LDD_F_IAM_DIR) {
+ /* OSD requires a journal to provide transaction semantics */
+ if (!LDISKFS_HAS_COMPAT_FEATURE(o->od_mnt->mnt_sb,
+ LDISKFS_FEATURE_COMPAT_HAS_JOURNAL)) {
+ CERROR("%s: device %s is mounted w/o journal\n", name, dev);
+ mntput(o->od_mnt);
+ o->od_mnt = NULL;
+ GOTO(out, rc = -EINVAL);
+ }
+
+ if (lmd_flags & LMD_FLG_IAM) {
o->od_iop_mode = 0;
- LCONSOLE_WARN("%s: OSD: IAM mode enabled\n", dev);
+ LCONSOLE_WARN("%s: OSD: IAM mode enabled\n", name);
} else
o->od_iop_mode = 1;
+ if (lmd_flags & LMD_FLG_NOSCRUB)
+ o->od_noscrub = 1;
- if (ldd->ldd_flags & LDD_F_SV_TYPE_OST) {
- rc = osd_compat_init(o);
- if (rc)
- CERROR("%s: can't initialize compats: %d\n", dev, rc);
- }
+out:
+ /* the scratch page is released on both success and failure paths */
+ if (__page)
+ OBD_PAGE_FREE(__page);
+ if (rc)
+ fsfilt_put_ops(o->od_fsops);
RETURN(rc);
}
int rc;
ENTRY;
+ rc = osd_shutdown(env, osd_dev(d));
+
osd_compat_fini(osd_dev(d));
shrink_dcache_sb(osd_sb(osd_dev(d)));
RETURN (ERR_PTR(rc));
}
- if (osd_dev(d)->od_mount)
- server_put_mount(osd_dev(d)->od_mount->lmi_name,
- osd_dev(d)->od_mount->lmi_mnt);
- osd_dev(d)->od_mount = NULL;
+ if (osd_dev(d)->od_mnt) {
+ mntput(osd_dev(d)->od_mnt);
+ osd_dev(d)->od_mnt = NULL;
+ }
RETURN(NULL);
}
+/*
+ * One-time initialization of an osd_device: mount the backing ldiskfs
+ * filesystem, set up OI scrub, compat structures, the lu_site, procfs
+ * entries and the quota slave instance.  On any failure everything set
+ * up so far is torn down again through the out_* labels (goto-based
+ * cleanup in reverse order of construction).
+ *
+ * \param env  lustre environment
+ * \param o    the osd device being initialized
+ * \param cfg  setup parameters; slot 4 carries the service name
+ *
+ * \retval 0 on success, negative errno on failure
+ */
+static int osd_device_init0(const struct lu_env *env,
+ struct osd_device *o,
+ struct lustre_cfg *cfg)
+{
+ struct lu_device *l = osd2lu_dev(o);
+ struct osd_thread_info *info;
+ int rc;
+
+ /* if the module was re-loaded, env can lose its keys */
+ rc = lu_env_refill((struct lu_env *) env);
+ if (rc)
+ GOTO(out, rc);
+ info = osd_oti_get(env);
+ LASSERT(info);
+
+ l->ld_ops = &osd_lu_ops;
+ o->od_dt_dev.dd_ops = &osd_dt_ops;
+
+ cfs_spin_lock_init(&o->od_osfs_lock);
+ cfs_mutex_init(&o->od_otable_mutex);
+ o->od_osfs_age = cfs_time_shift_64(-1000);
+
+ o->od_capa_hash = init_capa_hash();
+ if (o->od_capa_hash == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ /* cache defaults; tunable later via procfs */
+ o->od_read_cache = 1;
+ o->od_writethrough_cache = 1;
+ o->od_readcache_max_filesize = OSD_MAX_CACHE_SIZE;
+
+ rc = osd_mount(env, o, cfg);
+ if (rc)
+ GOTO(out_capa, rc);
+
+ /* setup scrub, including OI files initialization */
+ rc = osd_scrub_setup(env, o);
+ if (rc < 0)
+ GOTO(out_mnt, rc);
+
+ /* od_svname was zeroed by OBD_ALLOC_PTR, so copying at most
+ * sizeof - 1 bytes leaves the string NUL-terminated */
+ strncpy(o->od_svname, lustre_cfg_string(cfg, 4),
+ sizeof(o->od_svname) - 1);
+
+ rc = osd_compat_init(o);
+ if (rc != 0)
+ GOTO(out_scrub, rc);
+
+ rc = lu_site_init(&o->od_site, l);
+ if (rc)
+ GOTO(out_compat, rc);
+ o->od_site.ls_bottom_dev = l;
+
+ rc = lu_site_init_finish(&o->od_site);
+ if (rc)
+ GOTO(out_site, rc);
+
+ rc = osd_procfs_init(o, o->od_svname);
+ if (rc != 0) {
+ CERROR("%s: can't initialize procfs: rc = %d\n",
+ o->od_svname, rc);
+ GOTO(out_site, rc);
+ }
+
+ LASSERT(l->ld_site->ls_linkage.next && l->ld_site->ls_linkage.prev);
+
+ /* initialize quota slave instance */
+ o->od_quota_slave = qsd_init(env, o->od_svname, &o->od_dt_dev,
+ o->od_proc_entry);
+ if (IS_ERR(o->od_quota_slave)) {
+ rc = PTR_ERR(o->od_quota_slave);
+ o->od_quota_slave = NULL;
+ GOTO(out_procfs, rc);
+ }
+
+ RETURN(0);
+out_procfs:
+ osd_procfs_fini(o);
+out_site:
+ lu_site_fini(&o->od_site);
+out_compat:
+ osd_compat_fini(o);
+out_scrub:
+ osd_scrub_cleanup(env, o);
+out_mnt:
+ osd_oi_fini(info, o);
+ osd_shutdown(env, o);
+ mntput(o->od_mnt);
+ o->od_mnt = NULL;
+out_capa:
+ cleanup_capa_hash(o->od_capa_hash);
+out:
+ RETURN(rc);
+}
+
+/*
+ * Allocate and fully initialize an osd_device; actual setup is
+ * delegated to osd_device_init0().  Returns the embedded lu_device on
+ * success or ERR_PTR on failure (the partially-built device is freed).
+ */
static struct lu_device *osd_device_alloc(const struct lu_env *env,
struct lu_device_type *t,
struct lustre_cfg *cfg)
{
- struct lu_device *l;
- struct osd_device *o;
-
- OBD_ALLOC_PTR(o);
- if (o != NULL) {
- int result;
-
- result = dt_device_init(&o->od_dt_dev, t);
- if (result == 0) {
- l = osd2lu_dev(o);
- l->ld_ops = &osd_lu_ops;
- o->od_dt_dev.dd_ops = &osd_dt_ops;
- cfs_spin_lock_init(&o->od_osfs_lock);
- cfs_mutex_init(&o->od_otable_mutex);
- o->od_osfs_age = cfs_time_shift_64(-1000);
- o->od_capa_hash = init_capa_hash();
- if (o->od_capa_hash == NULL) {
- dt_device_fini(&o->od_dt_dev);
- l = ERR_PTR(-ENOMEM);
- }
- } else
- l = ERR_PTR(result);
+ struct osd_device *o;
+ int rc;
- if (IS_ERR(l))
- OBD_FREE_PTR(o);
- } else
- l = ERR_PTR(-ENOMEM);
- return l;
+ OBD_ALLOC_PTR(o);
+ if (o == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ rc = dt_device_init(&o->od_dt_dev, t);
+ if (rc == 0) {
+ /* dt_device_init() succeeded: undo it if full setup fails */
+ rc = osd_device_init0(env, o, cfg);
+ if (rc)
+ dt_device_fini(&o->od_dt_dev);
+ }
+
+ if (unlikely(rc != 0))
+ OBD_FREE_PTR(o);
+
+ return rc == 0 ? osd2lu_dev(o) : ERR_PTR(rc);
}
static struct lu_device *osd_device_free(const struct lu_env *env,
ENTRY;
cleanup_capa_hash(o->od_capa_hash);
+ /* XXX: make osd top device in order to release reference */
+ d->ld_site->ls_top_dev = d;
+ lu_site_purge(env, d->ld_site, -1);
+ if (!cfs_hash_is_empty(d->ld_site->ls_obj_hash)) {
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL);
+ lu_site_print(env, d->ld_site, &msgdata, lu_cdebug_printer);
+ }
+ lu_site_fini(&o->od_site);
dt_device_fini(&o->od_dt_dev);
OBD_FREE_PTR(o);
RETURN(NULL);
err = osd_mount(env, o, cfg);
break;
case LCFG_CLEANUP:
- err = osd_shutdown(env, o);
- break;
+ lu_dev_del_linkage(d->ld_site, d);
+ err = osd_shutdown(env, o);
+ break;
default:
err = -ENOSYS;
}
+/*
+ * Called when recovery completes; nothing to do unless a quota slave
+ * instance exists, in which case it is started now.
+ */
static int osd_recovery_complete(const struct lu_env *env,
struct lu_device *d)
{
- RETURN(0);
+ struct osd_device *osd = osd_dev(d);
+ int rc = 0;
+ ENTRY;
+
+ if (osd->od_quota_slave == NULL)
+ RETURN(0);
+
+ /* start qsd instance on recovery completion, this notifies the quota
+ * slave code that we are about to process new requests now */
+ rc = qsd_start(env, osd->od_quota_slave);
+ RETURN(rc);
+}
+
+/*
+ * We use exports to track all OSD users: every connecting layer gets
+ * an export and bumps od_connects.  The counter is what lets
+ * osd_obd_disconnect() know when the last user is gone.
+ */
+static int osd_obd_connect(const struct lu_env *env, struct obd_export **exp,
+ struct obd_device *obd, struct obd_uuid *cluuid,
+ struct obd_connect_data *data, void *localdata)
+{
+ struct osd_device *osd = osd_dev(obd->obd_lu_dev);
+ struct lustre_handle conn;
+ int rc;
+ ENTRY;
+
+ CDEBUG(D_CONFIG, "connect #%d\n", osd->od_connects);
+
+ rc = class_connect(&conn, obd, cluuid);
+ if (rc)
+ RETURN(rc);
+
+ *exp = class_conn2export(&conn);
+
+ /* od_osfs_lock also serializes the connect counter updates */
+ cfs_spin_lock(&osd->od_osfs_lock);
+ osd->od_connects++;
+ cfs_spin_unlock(&osd->od_osfs_lock);
+
+ RETURN(0);
+}
+
+/*
+ * Once the last export (the self-export is not counted) has
+ * disappeared, the OSD can be released: the final disconnect triggers
+ * class_manual_cleanup() on the obd device.
+ */
+static int osd_obd_disconnect(struct obd_export *exp)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct osd_device *osd = osd_dev(obd->obd_lu_dev);
+ int rc, release = 0;
+ ENTRY;
+
+ /* Only disconnect the underlying layers on the final disconnect. */
+ cfs_spin_lock(&osd->od_osfs_lock);
+ osd->od_connects--;
+ if (osd->od_connects == 0)
+ release = 1;
+ cfs_spin_unlock(&osd->od_osfs_lock);
+
+ rc = class_disconnect(exp); /* bz 9811 */
+
+ if (rc == 0 && release)
+ class_manual_cleanup(obd);
+ RETURN(rc);
}
+/*
+ * Prepare the OSD for use by the layer above: create legacy local
+ * objects when sitting under an MD stack, then let the quota slave set
+ * up its on-disk objects.  Scrub/quota-slave creation itself moved to
+ * osd_device_init0().
+ */
static int osd_prepare(const struct lu_env *env, struct lu_device *pdev,
struct lu_device *dev)
{
struct osd_device *osd = osd_dev(dev);
- int result;
+ int result = 0;
ENTRY;
- /* 1. setup scrub, including OI files initialization */
- result = osd_scrub_setup(env, osd);
- if (result < 0)
- RETURN(result);
-
- /* 2. setup quota slave instance */
- osd->od_quota_slave = qsd_init(env, osd->od_svname, &osd->od_dt_dev,
- osd->od_proc_entry);
- if (IS_ERR(osd->od_quota_slave)) {
- result = PTR_ERR(osd->od_quota_slave);
- osd->od_quota_slave = NULL;
- RETURN(result);
+ if (dev->ld_site && lu_device_is_md(dev->ld_site->ls_top_dev)) {
+ /* MDT/MDD still use old infrastructure to create
+ * special files */
+ result = llo_local_objects_setup(env, lu2md_dev(pdev),
+ lu2dt_dev(dev));
+ if (result)
+ RETURN(result);
}
- if (!lu_device_is_md(pdev))
- RETURN(0);
+ if (osd->od_quota_slave != NULL)
+ /* set up quota slave objects */
+ result = qsd_prepare(env, osd->od_quota_slave);
- /* 3. setup local objects */
- result = llo_local_objects_setup(env, lu2md_dev(pdev), lu2dt_dev(dev));
- RETURN(result);
+ RETURN(result);
}
static const struct lu_object_operations osd_lu_obj_ops = {
.ldto_device_fini = osd_device_fini
};
-static struct lu_device_type osd_device_type = {
+struct lu_device_type osd_device_type = {
.ldt_tags = LU_DEVICE_DT,
- .ldt_name = LUSTRE_OSD_NAME,
+ .ldt_name = LUSTRE_OSD_LDISKFS_NAME,
.ldt_ops = &osd_device_type_ops,
.ldt_ctx_tags = LCT_LOCAL,
};
* lprocfs legacy support.
*/
static struct obd_ops osd_obd_device_ops = {
- .o_owner = THIS_MODULE
+ .o_owner = THIS_MODULE,
+ .o_connect = osd_obd_connect,
+ .o_disconnect = osd_obd_disconnect
};
static int __init osd_mod_init(void)
osd_oi_mod_init();
lprocfs_osd_init_vars(&lvars);
return class_register_type(&osd_obd_device_ops, NULL, lvars.module_vars,
- LUSTRE_OSD_NAME, &osd_device_type);
+ LUSTRE_OSD_LDISKFS_NAME, &osd_device_type);
}
static void __exit osd_mod_exit(void)
{
- class_unregister_type(LUSTRE_OSD_NAME);
+ class_unregister_type(LUSTRE_OSD_LDISKFS_NAME);
}
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre Object Storage Device ("LUSTRE_OSD_NAME")");
+MODULE_DESCRIPTION("Lustre Object Storage Device ("LUSTRE_OSD_LDISKFS_NAME")");
MODULE_LICENSE("GPL");
cfs_module(osd, "0.1.0", osd_mod_init, osd_mod_exit);