#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/user_namespace.h>
-#ifdef HAVE_UIDGID_HEADER
-# include <linux/uidgid.h>
-#endif
+#include <linux/uidgid.h>
/* prerequisite for linux/xattr.h */
#include <linux/types.h>
i = oti->oti_ins_cache_size * 2;
if (i == 0)
i = OSD_INS_CACHE_SIZE;
- OBD_ALLOC(idc, sizeof(*idc) * i);
+ OBD_ALLOC_PTR_ARRAY(idc, i);
if (idc == NULL)
return ERR_PTR(-ENOMEM);
if (oti->oti_ins_cache != NULL) {
memcpy(idc, oti->oti_ins_cache,
oti->oti_ins_cache_used * sizeof(*idc));
- OBD_FREE(oti->oti_ins_cache,
- oti->oti_ins_cache_used * sizeof(*idc));
+ OBD_FREE_PTR_ARRAY(oti->oti_ins_cache,
+ oti->oti_ins_cache_used);
}
oti->oti_ins_cache = idc;
oti->oti_ins_cache_size = i;
*/
/* LASSERT(current->journal_info == NULL); */
- inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
+ inode = osd_ldiskfs_iget(osd_sb(dev), id->oii_ino);
if (IS_ERR(inode)) {
CDEBUG(D_INODE, "no inode: ino = %u, rc = %ld\n",
id->oii_ino, PTR_ERR(inode));
struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
struct inode *parent = child->d_parent->d_inode;
struct lu_fid *fid = NULL;
+ char fidstr[FID_LEN + 1] = "unknown";
rc2 = osd_get_lma(info, parent, child->d_parent, loa);
if (!rc2) {
}
if (fid != NULL)
- CWARN("%s: directory (inode: %lu, FID: "DFID") %s "
- "maximum entry limit\n",
- osd_name(osd), parent->i_ino, PFID(fid),
- rc == -ENOSPC ? "has reached" : "is approaching");
- else
- CWARN("%s: directory (inode: %lu, FID: unknown) %s "
- "maximum entry limit\n",
- osd_name(osd), parent->i_ino,
- rc == -ENOSPC ? "has reached" : "is approaching");
+ snprintf(fidstr, sizeof(fidstr), DFID, PFID(fid));
+
+ /* below message is checked in sanity.sh test_129 */
+ if (rc == -ENOSPC) {
+ CWARN("%s: directory (inode: %lu, FID: %s) has reached max size limit\n",
+ osd_name(osd), parent->i_ino, fidstr);
+ } else {
+ rc = 0; /* ignore such error now */
+ CWARN("%s: directory (inode: %lu, FID: %s) is approaching max size limit\n",
+ osd_name(osd), parent->i_ino, fidstr);
+ }
- /* ignore such error now */
- if (rc == -ENOBUFS)
- rc = 0;
}
return rc;
*/
again:
- inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
+ inode = osd_ldiskfs_iget(osd_sb(dev), id->oii_ino);
if (IS_ERR(inode)) {
rc = PTR_ERR(inode);
if (!trusted && (rc == -ENOENT || rc == -ESTALE))
}
struct osd_check_lmv_buf {
-#ifdef HAVE_DIR_CONTEXT
/* please keep it as first member */
struct dir_context ctx;
-#endif
struct osd_thread_info *oclb_info;
struct osd_device *oclb_dev;
struct osd_idmap_cache *oclb_oic;
};
/**
- * It is called internally by ->readdir() to filter out the
+ * It is called internally by ->iterate*() to filter out the
* local slave object's FID of the striped directory.
*
* \retval 1 found the local slave's FID
const struct file_operations *fops;
struct lmv_mds_md_v1 *lmv1;
struct osd_check_lmv_buf oclb = {
-#ifdef HAVE_DIR_CONTEXT
.ctx.actor = osd_stripe_dir_filldir,
-#endif
.oclb_info = oti,
.oclb_dev = dev,
.oclb_oic = oic,
dentry->d_sb = inode->i_sb;
filp->f_pos = 0;
filp->f_path.dentry = dentry;
- filp->f_mode = FMODE_64BITHASH;
+ filp->f_flags |= O_NOATIME;
+ filp->f_mode = FMODE_64BITHASH | FMODE_NONOTIFY;
filp->f_mapping = inode->i_mapping;
filp->f_op = fops;
filp->private_data = NULL;
- set_file_inode(filp, inode);
+ filp->f_cred = current_cred();
+ filp->f_inode = inode;
+ rc = osd_security_file_alloc(filp);
+ if (rc)
+ goto out;
do {
oclb.oclb_items = 0;
-#ifdef HAVE_DIR_CONTEXT
- oclb.ctx.pos = filp->f_pos;
-#ifdef HAVE_ITERATE_SHARED
- rc = fops->iterate_shared(filp, &oclb.ctx);
-#else
- rc = fops->iterate(filp, &oclb.ctx);
-#endif
- filp->f_pos = oclb.ctx.pos;
-#else
- rc = fops->readdir(filp, &oclb, osd_stripe_dir_filldir);
-#endif
+ rc = iterate_dir(filp, &oclb.ctx);
} while (rc >= 0 && oclb.oclb_items > 0 && !oclb.oclb_found &&
filp->f_pos != LDISKFS_HTREE_EOF_64BIT);
fops->release(inode, filp);
goto trigger;
}
+ /* -ESTALE is returned if inode of OST object doesn't exist */
+ if (result == -ESTALE &&
+ fid_is_on_ost(info, dev, fid, OI_CHECK_FLD)) {
+ GOTO(out, result = 0);
+ }
+
if (result)
GOTO(out, result);
LASSERT(!updated);
+ /*
+ * if two OST objects map to the same inode, and inode mode is
+ * (S_IFREG | S_ISUID | S_ISGID | S_ISVTX | 0666), which means it's
+ * reserved by precreate, and not written yet, in this case, don't
+ * set inode for the object whose FID mismatch, so that it can create
+ * inode and not block precreate.
+ */
+ if (fid_is_on_ost(info, dev, fid, OI_CHECK_FLD) &&
+ inode->i_mode == (S_IFREG | S_ISUID | S_ISGID | S_ISVTX | 0666)) {
+ obj->oo_inode = NULL;
+ GOTO(out, result = 0);
+ }
+
result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
/*
* "result == -ENOENT" means the cached OI mapping has been removed
LINVRNT(osd_invariant(obj));
+ if (OBD_FAIL_PRECHECK(OBD_FAIL_MDS_LLOG_UMOUNT_RACE) &&
+ cfs_fail_val == 2) {
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct osd_idmap_cache *oic = &info->oti_cache;
+ /* invalidate thread cache */
+ memset(&oic->oic_fid, 0, sizeof(oic->oic_fid));
+ }
if (fid_is_otable_it(&l->lo_header->loh_fid)) {
obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
l->lo_header->loh_attr |= LOHA_EXISTS;
result = 0;
}
}
+ obj->oo_dirent_count = LU_DIRENT_COUNT_UNSET;
LINVRNT(osd_invariant(obj));
return result;
size_t namelen = strlen(name);
int rc;
- ENTRY;
-
rcu_read_lock();
list_for_each_entry_rcu(tmp, &obj->oo_xattr_list, oxe_list) {
if (namelen == tmp->oxe_namelen &&
GOTO(out, rc = -ERANGE);
memcpy(buf->lb_buf, &oxe->oxe_buf[namelen + 1], rc);
- EXIT;
out:
rcu_read_unlock();
dt_object_fini(&obj->oo_dt);
if (obj->oo_hl_head != NULL)
ldiskfs_htree_lock_head_free(obj->oo_hl_head);
+ /* obj doesn't contain an lu_object_header, so we don't need call_rcu */
OBD_FREE_PTR(obj);
if (unlikely(h)) {
lu_object_header_fini(h);
- OBD_FREE_PTR(h);
+ OBD_FREE_PRE(h, sizeof(*h), "kfreed");
+ kfree_rcu(h, loh_rcu);
}
}
static void osd_trans_commit_cb(struct super_block *sb,
struct ldiskfs_journal_cb_entry *jcb, int error)
{
- struct osd_thandle *oh = container_of0(jcb, struct osd_thandle, ot_jcb);
+ struct osd_thandle *oh = container_of(jcb, struct osd_thandle, ot_jcb);
struct thandle *th = &oh->ot_super;
struct lu_device *lud = &th->th_dev->dd_lu_dev;
struct dt_txn_commit_cb *dcb, *tmp;
OBD_FREE_PTR(oh);
}
-#ifndef HAVE_SB_START_WRITE
-# define sb_start_write(sb) do {} while (0)
-# define sb_end_write(sb) do {} while (0)
-#endif
-
static struct thandle *osd_trans_create(const struct lu_env *env,
struct dt_device *d)
{
struct osd_thread_info *oti = osd_oti_get(env);
struct osd_thandle *oh;
- oh = container_of0(th, struct osd_thandle, ot_super);
+ oh = container_of(th, struct osd_thandle, ot_super);
LASSERT(oh != NULL);
CWARN(" create: %u/%u/%u, destroy: %u/%u/%u\n",
LASSERT(current->journal_info == NULL);
- oh = container_of0(th, struct osd_thandle, ot_super);
+ oh = container_of(th, struct osd_thandle, ot_super);
LASSERT(oh != NULL);
LASSERT(oh->ot_handle == NULL);
*/
if (last_credits != oh->ot_credits &&
time_after(jiffies, last_printed +
- msecs_to_jiffies(60 * MSEC_PER_SEC)) &&
+ cfs_time_seconds(60)) &&
osd_transaction_size(dev) > 512) {
CWARN("%s: credits %u > trans_max %u\n", osd_name(dev),
oh->ot_credits, osd_transaction_size(dev));
struct osd_device *osd = osd_dt_dev(th->th_dev);
struct qsd_instance *qsd = osd_def_qsd(osd);
struct lquota_trans *qtrans;
- struct list_head truncates = LIST_HEAD_INIT(truncates);
+ LIST_HEAD(truncates);
int rc = 0, remove_agents = 0;
ENTRY;
- oh = container_of0(th, struct osd_thandle, ot_super);
+ oh = container_of(th, struct osd_thandle, ot_super);
remove_agents = oh->ot_remove_agents;
OBD_FREE_PTR(oh);
}
- osd_trunc_unlock_all(&truncates);
+ osd_trunc_unlock_all(env, &truncates);
/* inform the quota slave device that the transaction is stopping */
qsd_op_end(env, qsd, qtrans);
static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
{
- struct osd_thandle *oh = container_of0(th, struct osd_thandle,
- ot_super);
+ struct osd_thandle *oh = container_of(th, struct osd_thandle,
+ ot_super);
LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
LASSERT(&dcb->dcb_func != NULL);
if (!inode)
return;
+ if (osd_has_index(obj) && obj->oo_dt.do_index_ops == &osd_index_iam_ops)
+ ldiskfs_set_inode_flag(inode, LDISKFS_INODE_JOURNAL_DATA);
+
uid = i_uid_read(inode);
gid = i_gid_read(inode);
projid = i_projid_read(inode);
* Concurrency: shouldn't matter.
*/
int osd_statfs(const struct lu_env *env, struct dt_device *d,
- struct obd_statfs *sfs)
+ struct obd_statfs *sfs, struct obd_statfs_info *info)
{
struct osd_device *osd = osd_dt_dev(d);
struct super_block *sb = osd_sb(osd);
goto out;
statfs_pack(sfs, ksfs);
- if (unlikely(sb->s_flags & MS_RDONLY))
- sfs->os_state |= OS_STATE_READONLY;
+ if (unlikely(sb->s_flags & SB_RDONLY))
+ sfs->os_state |= OS_STATFS_READONLY;
+
+ sfs->os_state |= osd->od_nonrotational ? OS_STATFS_NONROT : 0;
+
if (ldiskfs_has_feature_extents(sb))
sfs->os_maxbytes = sb->s_maxbytes;
else
*
* Reserve 0.78% of total space, at least 8MB for small filesystems.
*/
- CLASSERT(OSD_STATFS_RESERVED > LDISKFS_MAX_BLOCK_SIZE);
+ BUILD_BUG_ON(OSD_STATFS_RESERVED <= LDISKFS_MAX_BLOCK_SIZE);
reserved = OSD_STATFS_RESERVED >> sb->s_blocksize_bits;
if (likely(sfs->os_blocks >= reserved << OSD_STATFS_RESERVED_SHIFT))
reserved = sfs->os_blocks >> OSD_STATFS_RESERVED_SHIFT;
*/
param->ddp_inodespace = PER_OBJ_USAGE;
/*
- * EXT_INIT_MAX_LEN is the theoretical maximum extent size (32k blocks
- * = 128MB) which is unlikely to be hit in real life. Report a smaller
- * maximum length to not under count the actual number of extents
- * needed for writing a file.
+ * EXT_INIT_MAX_LEN is the theoretical maximum extent size (32k blocks
+ * is 128MB) which is unlikely to be hit in real life. Report a smaller
+ * maximum length to not under-count the actual number of extents
+ * needed for writing a file if there are sub-optimal block allocations.
*/
- param->ddp_max_extent_blks = EXT_INIT_MAX_LEN >> 2;
+ param->ddp_max_extent_blks = EXT_INIT_MAX_LEN >> 1;
/* worst-case extent insertion metadata overhead */
param->ddp_extent_tax = 6 * LDISKFS_BLOCK_SIZE(sb);
param->ddp_mntopts = 0;
#endif
param->ddp_max_ea_size = sb->s_blocksize - ea_overhead;
- if (param->ddp_max_ea_size > OBD_MAX_EA_SIZE - ea_overhead)
- param->ddp_max_ea_size = OBD_MAX_EA_SIZE - ea_overhead;
+ if (param->ddp_max_ea_size > OBD_MAX_EA_SIZE)
+ param->ddp_max_ea_size = OBD_MAX_EA_SIZE;
/*
* Preferred RPC size for efficient disk IO. 4MB shows good
RETURN(rc);
}
-/* Our own copy of the set readonly functions if present, or NU if not. */
-static int (*priv_dev_set_rdonly)(struct block_device *bdev);
-static int (*priv_dev_check_rdonly)(struct block_device *bdev);
-/* static int (*priv_dev_clear_rdonly)(struct block_device *bdev); */
+/* Our private copy of the kernel's file security-allocation hook, or
+ * NULL when unavailable.  NOTE(review): presumably resolved via
+ * kallsyms at module setup -- confirm where this pointer is assigned.
+ */
+static int (*priv_security_file_alloc)(struct file *file);
+
+/*
+ * Set up the security state on a private struct file (used with
+ * iterate_dir() on manually-built files) by calling through
+ * priv_security_file_alloc when it was resolved; no-op otherwise.
+ *
+ * \retval 0 on success (or when the hook is unavailable)
+ * \retval negative errno from the security layer
+ */
+int osd_security_file_alloc(struct file *file)
+{
+	if (priv_security_file_alloc)
+		return priv_security_file_alloc(file);
+	return 0;
+}
/*
* Concurrency: shouldn't matter.
ENTRY;
- if (priv_dev_set_rdonly) {
- struct block_device *jdev = LDISKFS_SB(sb)->journal_bdev;
-
- rc = 0;
- CERROR("*** setting %s read-only ***\n",
- osd_dt_dev(d)->od_svname);
-
- if (sb->s_op->freeze_fs) {
- rc = sb->s_op->freeze_fs(sb);
- if (rc)
- goto out;
- }
-
- if (jdev && (jdev != dev)) {
- CDEBUG(D_IOCTL | D_HA, "set journal dev %lx rdonly\n",
- (long)jdev);
- priv_dev_set_rdonly(jdev);
- }
- CDEBUG(D_IOCTL | D_HA, "set dev %lx rdonly\n", (long)dev);
- priv_dev_set_rdonly(dev);
-
- if (sb->s_op->unfreeze_fs)
- sb->s_op->unfreeze_fs(sb);
- }
-
-out:
- if (rc)
- CERROR("%s: %lx CANNOT BE SET READONLY: rc = %d\n",
- osd_dt_dev(d)->od_svname, (long)dev, rc);
+ CERROR("%s: %lx CANNOT BE SET READONLY: rc = %d\n",
+ osd_dt_dev(d)->od_svname, (long)dev, rc);
RETURN(rc);
}
return obj->oo_owner == env;
}
-static struct timespec *osd_inode_time(const struct lu_env *env,
- struct inode *inode, __u64 seconds)
-{
- struct osd_thread_info *oti = osd_oti_get(env);
- struct timespec *t = &oti->oti_time;
-
- t->tv_sec = seconds;
- t->tv_nsec = 0;
- *t = timespec_trunc(*t, inode->i_sb->s_time_gran);
- return t;
-}
-
static void osd_inode_getattr(const struct lu_env *env,
struct inode *inode, struct lu_attr *attr)
{
attr->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
LA_PROJID | LA_FLAGS | LA_NLINK | LA_RDEV |
- LA_BLKSIZE | LA_TYPE;
+ LA_BLKSIZE | LA_TYPE | LA_BTIME;
attr->la_atime = inode->i_atime.tv_sec;
attr->la_mtime = inode->i_mtime.tv_sec;
attr->la_ctime = inode->i_ctime.tv_sec;
+ attr->la_btime = LDISKFS_I(inode)->i_crtime.tv_sec;
attr->la_mode = inode->i_mode;
attr->la_size = i_size_read(inode);
attr->la_blocks = inode->i_blocks;
attr->la_flags |= LUSTRE_PROJINHERIT_FL;
}
+/**
+ * Count the number of entries in a directory.
+ *
+ * Walks the directory through its index iterator and caches the result
+ * in obj->oo_dirent_count so later calls return the cached value.
+ * NOTE(review): the walk is done without the directory lock, so the
+ * count can be approximate under concurrent insert/delete -- the
+ * insert/delete paths adjust or invalidate the cached value.
+ *
+ * \param[in]	env	execution environment
+ * \param[in]	dt	directory object; must be S_ISDIR with a
+ *			namespace-visible FID
+ * \param[out]	count	returned number of entries
+ *
+ * \retval	0	on success (an uninitialized directory counts as 0)
+ * \retval	-ve	iterator init/load/next failure
+ */
+static int osd_dirent_count(const struct lu_env *env, struct dt_object *dt,
+			    u64 *count)
+{
+	struct osd_object *obj = osd_dt_obj(dt);
+	const struct dt_it_ops *iops;
+	struct dt_it *it;
+	int rc;
+
+	ENTRY;
+
+	LASSERT(S_ISDIR(obj->oo_inode->i_mode));
+	LASSERT(fid_is_namespace_visible(lu_object_fid(&obj->oo_dt.do_lu)));
+
+	/* fast path: return the cached count if we already have one */
+	if (obj->oo_dirent_count != LU_DIRENT_COUNT_UNSET) {
+		*count = obj->oo_dirent_count;
+		RETURN(0);
+	}
+
+	/* directory not initialized yet */
+	if (!dt->do_index_ops) {
+		*count = 0;
+		RETURN(0);
+	}
+
+	iops = &dt->do_index_ops->dio_it;
+	it = iops->init(env, dt, LUDA_64BITHASH);
+	if (IS_ERR(it))
+		RETURN(PTR_ERR(it));
+
+	rc = iops->load(env, it, 0);
+	if (rc < 0) {
+		/* -ENODATA means no entries at all: report an empty dir */
+		if (rc == -ENODATA) {
+			rc = 0;
+			*count = 0;
+		}
+		GOTO(out, rc);
+	}
+	if (rc > 0)
+		rc = iops->next(env, it);
+
+	for (*count = 0; rc == 0 || rc == -ESTALE; rc = iops->next(env, it)) {
+		/* skip entries invalidated underneath the iterator */
+		if (rc == -ESTALE)
+			continue;
+
+		/* NOTE(review): zero-length key presumably marks a dummy
+		 * entry -- confirm against the dio_it implementation.
+		 */
+		if (iops->key_size(env, it) == 0)
+			continue;
+
+		(*count)++;
+	}
+	/* rc == 1: reached end of directory; the count is complete, cache it */
+	if (rc == 1) {
+		obj->oo_dirent_count = *count;
+		rc = 0;
+	}
+out:
+	iops->put(env, it);
+	iops->fini(env, it);
+
+	RETURN(rc);
+}
+
static int osd_attr_get(const struct lu_env *env, struct dt_object *dt,
struct lu_attr *attr)
{
struct osd_object *obj = osd_dt_obj(dt);
+ int rc = 0;
if (unlikely(!dt_object_exists(dt)))
return -ENOENT;
attr->la_valid |= LA_FLAGS;
attr->la_flags |= LUSTRE_ORPHAN_FL;
}
+ if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL) {
+ attr->la_valid |= LA_FLAGS;
+ attr->la_flags |= LUSTRE_ENCRYPT_FL;
+ }
spin_unlock(&obj->oo_guard);
- return 0;
+ if (S_ISDIR(obj->oo_inode->i_mode) &&
+ fid_is_namespace_visible(lu_object_fid(&dt->do_lu)))
+ rc = osd_dirent_count(env, dt, &attr->la_dirent_count);
+
+ return rc;
}
static int osd_declare_attr_qid(const struct lu_env *env,
obj = osd_dt_obj(dt);
LASSERT(osd_invariant(obj));
- oh = container_of0(handle, struct osd_thandle, ot_super);
+ oh = container_of(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
osd_trans_declare_op(env, oh, OSD_OT_ATTR_SET,
RETURN(rc);
gid = i_gid_read(obj->oo_inode);
+ CDEBUG(D_QUOTA, "declare uid %d -> %d gid %d -> %d\n", uid,
+ attr->la_uid, gid, attr->la_gid);
enforce = (attr->la_valid & LA_GID) && (attr->la_gid != gid);
- rc = osd_declare_attr_qid(env, obj, oh, bspace,
- i_gid_read(obj->oo_inode),
+ rc = osd_declare_attr_qid(env, obj, oh, bspace, gid,
attr->la_gid, enforce, GRPQUOTA,
ignore_edquot);
if (rc)
return 0;
if (bits & LA_ATIME)
- inode->i_atime = *osd_inode_time(env, inode, attr->la_atime);
+ inode->i_atime = osd_inode_time(inode, attr->la_atime);
if (bits & LA_CTIME)
- inode->i_ctime = *osd_inode_time(env, inode, attr->la_ctime);
+ inode->i_ctime = osd_inode_time(inode, attr->la_ctime);
if (bits & LA_MTIME)
- inode->i_mtime = *osd_inode_time(env, inode, attr->la_mtime);
+ inode->i_mtime = osd_inode_time(inode, attr->la_mtime);
if (bits & LA_SIZE) {
spin_lock(&inode->i_lock);
LDISKFS_I(inode)->i_disksize = attr->la_size;
}
#ifdef HAVE_PROJECT_QUOTA
-static int osd_transfer_project(struct inode *inode, __u32 projid)
+static int osd_transfer_project(struct inode *inode, __u32 projid,
+ struct thandle *handle)
{
struct super_block *sb = inode->i_sb;
struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
raw_inode = ldiskfs_raw_inode(&iloc);
if (!LDISKFS_FITS_IN_INODE(raw_inode, ei, i_projid)) {
- err = -EOVERFLOW;
- brelse(iloc.bh);
- return err;
+ struct osd_thandle *oh =
+ container_of0(handle, struct osd_thandle,
+ ot_super);
+ /**
+ * try to expand inode size automatically.
+ */
+ ldiskfs_mark_inode_dirty(oh->ot_handle, inode);
+ if (!LDISKFS_FITS_IN_INODE(raw_inode, ei, i_projid)) {
+ err = -EOVERFLOW;
+ brelse(iloc.bh);
+ return err;
+ }
}
brelse(iloc.bh);
}
#endif
-static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr)
+static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr,
+ struct thandle *handle)
{
int rc;
(attr->la_valid & LA_GID && attr->la_gid != i_gid_read(inode))) {
struct iattr iattr;
- ll_vfs_dq_init(inode);
+ CDEBUG(D_QUOTA,
+ "executing dquot_transfer inode %ld uid %d -> %d gid %d -> %d\n",
+ inode->i_ino, i_uid_read(inode), attr->la_uid,
+ i_gid_read(inode), attr->la_gid);
+
+ dquot_initialize(inode);
iattr.ia_valid = 0;
if (attr->la_valid & LA_UID)
iattr.ia_valid |= ATTR_UID;
iattr.ia_uid = make_kuid(&init_user_ns, attr->la_uid);
iattr.ia_gid = make_kgid(&init_user_ns, attr->la_gid);
- rc = ll_vfs_dq_transfer(inode, &iattr);
+ rc = dquot_transfer(inode, &iattr);
if (rc) {
CERROR("%s: quota transfer failed: rc = %d. Is quota "
"enforcement enabled on the ldiskfs "
if (attr->la_valid & LA_PROJID &&
attr->la_projid != i_projid_read(inode)) {
#ifdef HAVE_PROJECT_QUOTA
- rc = osd_transfer_project(inode, attr->la_projid);
+ rc = osd_transfer_project(inode, attr->la_projid, handle);
#else
rc = -ENOTSUPP;
#endif
if (unlikely(ipd == NULL))
RETURN(-ENOMEM);
- oh = container_of0(handle, struct osd_thandle, ot_super);
+ oh = container_of(handle, struct osd_thandle, ot_super);
rc = iam_update(oh->ot_handle, bag,
(const struct iam_key *)fid1,
(const struct iam_rec *)id, ipd);
inode = obj->oo_inode;
- rc = osd_quota_transfer(inode, attr);
+ rc = osd_quota_transfer(inode, attr, handle);
if (rc)
return rc;
if (rc != 0)
GOTO(out, rc);
- ll_dirty_inode(inode, I_DIRTY_DATASYNC);
+ osd_dirty_inode(inode, I_DIRTY_DATASYNC);
+
+ osd_trans_exec_check(env, handle, OSD_OT_ATTR_SET);
if (!(attr->la_valid & LA_FLAGS))
GOTO(out, rc);
lma->lma_incompat |=
lustre_to_lma_flags(attr->la_flags);
lustre_lma_swab(lma);
+
+ osd_trans_exec_op(env, handle, OSD_OT_XATTR_SET);
+
rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA,
lma, sizeof(*lma), XATTR_REPLACE);
if (rc != 0) {
osd_trans_exec_check(env, handle, OSD_OT_XATTR_SET);
}
out:
- osd_trans_exec_check(env, handle, OSD_OT_ATTR_SET);
return rc;
}
oth = container_of(th, struct osd_thandle, ot_super);
LASSERT(oth->ot_handle->h_transaction != NULL);
+ if (fid_is_namespace_visible(lu_object_fid(&obj->oo_dt.do_lu)))
+ obj->oo_dirent_count = 0;
result = osd_mkfile(info, obj, mode, hint, th, attr);
return result;
}
static void osd_attr_init(struct osd_thread_info *info, struct osd_object *obj,
- struct lu_attr *attr, struct dt_object_format *dof)
+ struct lu_attr *attr, struct dt_object_format *dof,
+ struct thandle *handle)
{
struct inode *inode = obj->oo_inode;
__u64 valid = attr->la_valid;
if ((valid & LA_MTIME) && (attr->la_mtime == inode->i_mtime.tv_sec))
attr->la_valid &= ~LA_MTIME;
- result = osd_quota_transfer(inode, attr);
+ result = osd_quota_transfer(inode, attr, handle);
if (result)
return;
* enabled on ldiskfs (lquota takes care of it).
*/
LASSERTF(result == 0, "%d\n", result);
- ll_dirty_inode(inode, I_DIRTY_DATASYNC);
+ osd_dirty_inode(inode, I_DIRTY_DATASYNC);
}
attr->la_valid = valid;
}
if (likely(result == 0)) {
- osd_attr_init(info, obj, attr, dof);
+ osd_attr_init(info, obj, attr, dof, th);
osd_object_init0(obj);
}
LASSERT(obj->oo_inode != NULL);
- oh = container_of0(th, struct osd_thandle, ot_super);
+ oh = container_of(th, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle);
osd_trans_exec_op(env, th, OSD_OT_INSERT);
osd_id_gen(id, obj->oo_inode->i_ino, obj->oo_inode->i_generation);
rc = osd_oi_insert(info, osd, fid, id, oh->ot_handle,
OI_CHECK_FLD, NULL);
+ if (CFS_FAIL_CHECK(OBD_FAIL_OSD_DUPLICATE_MAP) && osd->od_is_ost) {
+ struct lu_fid next_fid = *fid;
+
+ /* insert next object in advance, and map to the same inode */
+ next_fid.f_oid++;
+ if (next_fid.f_oid != 0) {
+ osd_trans_exec_op(env, th, OSD_OT_INSERT);
+ osd_oi_insert(info, osd, &next_fid, id, oh->ot_handle,
+ OI_CHECK_FLD, NULL);
+ osd_trans_exec_check(env, th, OSD_OT_INSERT);
+ }
+ }
+
osd_trans_exec_check(env, th, OSD_OT_INSERT);
return rc;
LASSERT(handle != NULL);
- oh = container_of0(handle, struct osd_thandle, ot_super);
+ oh = container_of(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
/*
*/
osd_trans_declare_op(env, oh, OSD_OT_INSERT,
osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);
+ if (CFS_FAIL_CHECK(OBD_FAIL_OSD_DUPLICATE_MAP))
+ osd_trans_declare_op(env, oh, OSD_OT_INSERT,
+ osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);
/* will help to find FID->ino mapping at dt_insert() */
rc = osd_idc_find_and_init(env, osd_obj2dev(osd_dt_obj(dt)),
if (inode == NULL)
RETURN(-ENOENT);
- oh = container_of0(th, struct osd_thandle, ot_super);
+ oh = container_of(th, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
osd_trans_declare_op(env, oh, OSD_OT_DESTROY,
ENTRY;
- oh = container_of0(th, struct osd_thandle, ot_super);
+ oh = container_of(th, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle);
LASSERT(inode);
LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
spin_lock(&obj->oo_guard);
clear_nlink(inode);
spin_unlock(&obj->oo_guard);
- ll_dirty_inode(inode, I_DIRTY_DATASYNC);
+ osd_dirty_inode(inode, I_DIRTY_DATASYNC);
}
osd_trans_exec_op(env, th, OSD_OT_DESTROY);
* debugging if we need to determine where this symlink came from.
*/
if (S_ISLNK(type)) {
- CLASSERT(LDISKFS_N_BLOCKS * 4 >= FID_LEN + 1);
+ BUILD_BUG_ON(LDISKFS_N_BLOCKS * 4 < FID_LEN + 1);
rc = snprintf((char *)LDISKFS_I(local)->i_data,
LDISKFS_N_BLOCKS * 4, DFID, PFID(fid));
#ifdef HAVE_PROJECT_QUOTA
if (LDISKFS_I(pobj->oo_inode)->i_flags & LUSTRE_PROJINHERIT_FL &&
i_projid_read(pobj->oo_inode) != 0) {
- rc = osd_transfer_project(local, 0);
+ rc = osd_transfer_project(local, 0, th);
if (rc) {
CERROR("%s: quota transfer failed: rc = %d. Is project "
"quota enforcement enabled on the ldiskfs "
struct osd_thread_info *info = osd_oti_get(env);
struct osd_obj_orphan *oor, *tmp;
struct osd_inode_id id;
- struct list_head list;
+ LIST_HEAD(list);
struct inode *inode;
struct lu_fid fid;
handle_t *jh;
__u32 ino;
- INIT_LIST_HEAD(&list);
-
spin_lock(&osd->od_osfs_lock);
list_for_each_entry_safe(oor, tmp, &osd->od_orphan_list, oor_list) {
- if (oor->oor_env == env) {
- list_del(&oor->oor_list);
- list_add(&oor->oor_list, &list);
- }
+ if (oor->oor_env == env)
+ list_move(&oor->oor_list, &list);
}
spin_unlock(&osd->od_osfs_lock);
/* it's possible that object doesn't exist yet */
LASSERT(handle != NULL);
- oh = container_of0(handle, struct osd_thandle, ot_super);
+ oh = container_of(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
osd_trans_declare_op(env, oh, OSD_OT_REF_ADD,
LASSERT(osd_is_write_locked(env, obj));
LASSERT(th != NULL);
- oh = container_of0(th, struct osd_thandle, ot_super);
+ oh = container_of(th, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle != NULL);
osd_trans_exec_op(env, th, OSD_OT_REF_ADD);
}
spin_unlock(&obj->oo_guard);
- ll_dirty_inode(inode, I_DIRTY_DATASYNC);
+ osd_dirty_inode(inode, I_DIRTY_DATASYNC);
LINVRNT(osd_invariant(obj));
osd_trans_exec_check(env, th, OSD_OT_REF_ADD);
LASSERT(!dt_object_remote(dt));
LASSERT(handle != NULL);
- oh = container_of0(handle, struct osd_thandle, ot_super);
+ oh = container_of(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
osd_trans_declare_op(env, oh, OSD_OT_REF_DEL,
LASSERT(osd_is_write_locked(env, obj));
LASSERT(th != NULL);
- oh = container_of0(th, struct osd_thandle, ot_super);
+ oh = container_of(th, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle != NULL);
osd_trans_exec_op(env, th, OSD_OT_REF_DEL);
ldiskfs_dec_count(oh->ot_handle, inode);
spin_unlock(&obj->oo_guard);
- ll_dirty_inode(inode, I_DIRTY_DATASYNC);
+ osd_dirty_inode(inode, I_DIRTY_DATASYNC);
LINVRNT(osd_invariant(obj));
osd_trans_exec_check(env, th, OSD_OT_REF_DEL);
LASSERT(handle != NULL);
- oh = container_of0(handle, struct osd_thandle, ot_super);
+ oh = container_of(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
if (strcmp(name, XATTR_NAME_LMA) == 0) {
RETURN(fl);
/* Remove old PFID EA entry firstly. */
- ll_vfs_dq_init(inode);
- rc = osd_removexattr(dentry, inode, XATTR_NAME_FID);
+ dquot_initialize(inode);
+ rc = ll_vfs_removexattr(dentry, inode, XATTR_NAME_FID);
if (rc == -ENODATA) {
if ((fl & LU_XATTR_REPLACE) && !(fl & LU_XATTR_CREATE))
RETURN(rc);
ENTRY;
- oh = container_of0(handle, struct osd_thandle, ot_super);
+ oh = container_of(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle != NULL);
rc = linkea_init_with_rec(&ldata);
* Version is set after all inode operations are finished,
* so we should mark it dirty here
*/
- ll_dirty_inode(inode, I_DIRTY_DATASYNC);
+ osd_dirty_inode(inode, I_DIRTY_DATASYNC);
RETURN(0);
}
LASSERT(!dt_object_remote(dt));
LASSERT(handle != NULL);
- oh = container_of0(handle, struct osd_thandle, ot_super);
+ oh = container_of(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET,
obj->oo_pfid_in_lma = 0;
}
} else {
- ll_vfs_dq_init(inode);
+ dquot_initialize(inode);
dentry->d_inode = inode;
dentry->d_sb = inode->i_sb;
- rc = osd_removexattr(dentry, inode, name);
+ rc = ll_vfs_removexattr(dentry, inode, name);
}
osd_trans_exec_check(env, handle, OSD_OT_XATTR_SET);
file->f_path.dentry = dentry;
file->f_mapping = inode->i_mapping;
file->f_op = inode->i_fop;
- set_file_inode(file, inode);
+ file->f_inode = inode;
- rc = ll_vfs_fsync_range(file, start, end, 0);
+ rc = vfs_fsync_range(file, start, end, 0);
RETURN(rc);
}
{
struct osd_thandle *oh;
- oh = container_of0(handle, struct osd_thandle, ot_super);
+ oh = container_of(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
/* Recycle may cause additional three blocks to be changed. */
if (unlikely(ipd == NULL))
RETURN(-ENOMEM);
- oh = container_of0(handle, struct osd_thandle, ot_super);
+ oh = container_of(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle != NULL);
LASSERT(oh->ot_handle->h_transaction != NULL);
LASSERT(!dt_object_remote(dt));
LASSERT(handle != NULL);
- oh = container_of0(handle, struct osd_thandle, ot_super);
+ oh = container_of(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
credits = osd_dto_credits_noquota[DTO_INDEX_DELETE];
LASSERT(oh->ot_handle != NULL);
LASSERT(oh->ot_handle->h_transaction != NULL);
- ll_vfs_dq_init(dir);
+ dquot_initialize(dir);
dentry = osd_child_dentry_get(env, obj,
(char *)key, strlen((char *)key));
} else {
rc = PTR_ERR(bh);
}
+
+ if (!rc && fid_is_namespace_visible(lu_object_fid(&dt->do_lu)) &&
+ obj->oo_dirent_count != LU_DIRENT_COUNT_UNSET) {
+ /* NB, dirent count may not be accurate, because it's counted
+ * without lock.
+ */
+ if (obj->oo_dirent_count)
+ obj->oo_dirent_count--;
+ else
+ obj->oo_dirent_count = LU_DIRENT_COUNT_UNSET;
+ }
if (hlock != NULL)
ldiskfs_htree_unlock(hlock);
else
up_write(&obj->oo_ext_idx_sem);
-
GOTO(out, rc);
out:
LASSERT(osd_invariant(obj));
LASSERT(handle != NULL);
- oh = container_of0(handle, struct osd_thandle, ot_super);
+ oh = container_of(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
osd_trans_declare_op(env, oh, OSD_OT_INSERT,
if (unlikely(ipd == NULL))
RETURN(-ENOMEM);
- oh = container_of0(th, struct osd_thandle, ot_super);
+ oh = container_of(th, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle != NULL);
LASSERT(oh->ot_handle->h_transaction != NULL);
if (S_ISDIR(obj->oo_inode->i_mode)) {
osd_get_ldiskfs_dirent_param(ldp, fid);
child = osd_child_dentry_get(info->oti_env, pobj, name, strlen(name));
child->d_fsdata = (void *)ldp;
- ll_vfs_dq_init(pobj->oo_inode);
+ dquot_initialize(pobj->oo_inode);
rc = osd_ldiskfs_add_entry(info, osd_obj2dev(pobj), oth->ot_handle,
child, cinode, hlock);
if (rc == 0 && OBD_FAIL_CHECK(OBD_FAIL_LFSCK_BAD_TYPE)) {
hlock, th);
}
}
+ if (!rc && fid_is_namespace_visible(lu_object_fid(&pobj->oo_dt.do_lu))
+ && pobj->oo_dirent_count != LU_DIRENT_COUNT_UNSET)
+ pobj->oo_dirent_count++;
+
if (hlock != NULL)
ldiskfs_htree_unlock(hlock);
else
LASSERT(fid != NULL);
LASSERT(rec1->rec_type != 0);
- oh = container_of0(handle, struct osd_thandle, ot_super);
+ oh = container_of(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
credits = osd_dto_credits_noquota[DTO_INDEX_INSERT];
iput(child_inode);
LASSERT(osd_invariant(obj));
osd_trans_exec_check(env, th, OSD_OT_INSERT);
+
RETURN(rc);
}
file->f_path.dentry = obj_dentry;
file->f_mapping = obj->oo_inode->i_mapping;
file->f_op = obj->oo_inode->i_fop;
- set_file_inode(file, obj->oo_inode);
+ file->f_inode = obj->oo_inode;
lu_object_get(lo);
RETURN((struct dt_it *)oie);
}
struct osd_filldir_cbs {
-#ifdef HAVE_DIR_CONTEXT
struct dir_context ctx;
-#endif
struct osd_it_ea *it;
};
/**
- * It is called internally by ->readdir(). It fills the
+ * It is called internally by ->iterate*(). It fills the
* iterator's in-memory data structure with required
* information i.e. name, namelen, rec_size etc.
*
}
/**
- * Calls ->readdir() to load a directory entry at a time
+ * Calls ->iterate*() to load a directory entry at a time
* and stored it in iterator's in-memory data structure.
*
* \param di iterator's in memory structure
struct file *filp = &it->oie_file;
int rc = 0;
struct osd_filldir_cbs buf = {
-#ifdef HAVE_DIR_CONTEXT
.ctx.actor = osd_ldiskfs_filldir,
-#endif
.it = it
};
down_read(&obj->oo_ext_idx_sem);
}
-#ifdef HAVE_DIR_CONTEXT
- buf.ctx.pos = filp->f_pos;
-#ifdef HAVE_ITERATE_SHARED
- rc = inode->i_fop->iterate_shared(filp, &buf.ctx);
-#else
- rc = inode->i_fop->iterate(filp, &buf.ctx);
-#endif
- filp->f_pos = buf.ctx.pos;
-#else
- rc = inode->i_fop->readdir(filp, &buf, osd_ldiskfs_filldir);
-#endif
+ filp->f_cred = current_cred();
+ rc = osd_security_file_alloc(filp);
+ if (rc)
+ RETURN(rc);
+
+ filp->f_flags |= O_NOATIME;
+ filp->f_mode |= FMODE_NONOTIFY;
+ rc = iterate_dir(filp, &buf.ctx);
+ if (rc)
+ RETURN(rc);
if (hlock != NULL)
ldiskfs_htree_unlock(hlock);
}
/**
- * It calls osd_ldiskfs_it_fill() which will use ->readdir()
+ * It calls osd_ldiskfs_it_fill() which will use ->iterate*()
* to load a directory entry at a time and stored it in
* iterator's in-memory data structure.
*
return it->oie_dirent->oied_namelen;
}
+/*
+ * Compat shim: on kernels where ldiskfs provides LDISKFS_DIR_ENTRY_LEN
+ * together with its dirent-pointer form LDISKFS_DIR_ENTRY_LEN_, remap
+ * LDISKFS_DIR_REC_LEN(de) onto the latter so callers here can keep
+ * passing a struct ldiskfs_dir_entry_2 pointer.  NOTE(review):
+ * presumably tracks the upstream ext4 rename of the rec-len helpers --
+ * confirm against the target kernel's ext4/ldiskfs headers.
+ */
+#if defined LDISKFS_DIR_ENTRY_LEN && defined LDISKFS_DIR_ENTRY_LEN_
+#undef LDISKFS_DIR_REC_LEN
+#define LDISKFS_DIR_REC_LEN(de) LDISKFS_DIR_ENTRY_LEN_((de))
+#endif
+
static inline bool osd_dotdot_has_space(struct ldiskfs_dir_entry_2 *de)
{
if (LDISKFS_DIR_REC_LEN(de) >=
ldp = (struct ldiskfs_dentry_param *)osd_oti_get(env)->oti_ldp;
osd_get_ldiskfs_dirent_param(ldp, fid);
dentry->d_fsdata = (void *)ldp;
- ll_vfs_dq_init(dir);
+ dquot_initialize(dir);
rc = osd_ldiskfs_add_entry(info, dev, jh, dentry, inode, hlock);
/*
* It is too bad, we cannot reinsert the name entry back.
}
/**
- * It calls osd_ldiskfs_it_fill() which will use ->readdir()
+ * It calls osd_ldiskfs_it_fill() which will use ->iterate*()
* to load a directory entry at a time and stored it i inn,
* in iterator's in-memory data structure.
*
if (info->oti_dio_pages) {
int i;
for (i = 0; i < PTLRPC_MAX_BRW_PAGES; i++) {
- if (info->oti_dio_pages[i])
- __free_page(info->oti_dio_pages[i]);
+ struct page *page = info->oti_dio_pages[i];
+ if (page) {
+ LASSERT(PagePrivate2(page));
+ LASSERT(PageLocked(page));
+ ClearPagePrivate2(page);
+ unlock_page(page);
+ __free_page(page);
+ }
}
- OBD_FREE(info->oti_dio_pages,
- sizeof(struct page *) * PTLRPC_MAX_BRW_PAGES);
+ OBD_FREE_PTR_ARRAY(info->oti_dio_pages, PTLRPC_MAX_BRW_PAGES);
}
if (info->oti_inode != NULL)
lu_buf_free(&info->oti_big_buf);
if (idc != NULL) {
LASSERT(info->oti_ins_cache_size > 0);
- OBD_FREE(idc, sizeof(*idc) * info->oti_ins_cache_size);
+ OBD_FREE_PTR_ARRAY(idc, info->oti_ins_cache_size);
info->oti_ins_cache = NULL;
info->oti_ins_cache_size = 0;
}
static int osd_fid_init(const struct lu_env *env, struct osd_device *osd)
{
struct seq_server_site *ss = osd_seq_site(osd);
- int rc;
+ int rc = 0;
ENTRY;
if (osd->od_cl_seq == NULL)
RETURN(-ENOMEM);
- rc = seq_client_init(osd->od_cl_seq, NULL, LUSTRE_SEQ_METADATA,
- osd->od_svname, ss->ss_server_seq);
- if (rc != 0) {
- OBD_FREE_PTR(osd->od_cl_seq);
- osd->od_cl_seq = NULL;
- RETURN(rc);
- }
+ seq_client_init(osd->od_cl_seq, NULL, LUSTRE_SEQ_METADATA,
+ osd->od_svname, ss->ss_server_seq);
if (ss->ss_node_id == 0) {
/*
struct osd_thread_info *info = osd_oti_get(env);
struct lu_fid *fid = &info->oti_fid;
struct inode *inode;
- int rc = 0, force_over_512tb = 0;
+ int rc = 0, force_over_1024tb = 0;
ENTRY;
#endif
#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 0, 53, 0)
if (opts != NULL && strstr(opts, "force_over_128tb") != NULL) {
- CWARN("force_over_128tb option is deprecated. "
- "Filesystems less than 512TB can be created without any "
- "force options. Use force_over_512tb option for "
- "filesystems greater than 512TB.\n");
+ CWARN("force_over_128tb option is deprecated. Filesystems smaller than 1024TB can be created without any force option. Use force_over_1024tb option for filesystems larger than 1024TB.\n");
}
#endif
#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 1, 53, 0)
if (opts != NULL && strstr(opts, "force_over_256tb") != NULL) {
- CWARN("force_over_256tb option is deprecated. "
- "Filesystems less than 512TB can be created without any "
- "force options. Use force_over_512tb option for "
- "filesystems greater than 512TB.\n");
+ CWARN("force_over_256tb option is deprecated. Filesystems smaller than 1024TB can be created without any force options. Use force_over_1024tb option for filesystems larger than 1024TB.\n");
+ }
+#endif
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 53, 0)
+ if (opts != NULL && strstr(opts, "force_over_512tb") != NULL) {
+ CWARN("force_over_512tb option is deprecated. Filesystems smaller than 1024TB can be created without any force options. Use force_over_1024tb option for filesystems larger than 1024TB.\n");
}
#endif
- if (opts != NULL && strstr(opts, "force_over_512tb") != NULL)
- force_over_512tb = 1;
+ if (opts != NULL && strstr(opts, "force_over_1024tb") != NULL)
+ force_over_1024tb = 1;
__page = alloc_page(GFP_KERNEL);
if (__page == NULL)
*options = '\0';
if (opts != NULL) {
/* strip out the options for back compatiblity */
- static char *sout[] = {
+ static const char * const sout[] = {
"mballoc",
"iopen",
"noiopen",
"force_over_128tb",
"force_over_256tb",
"force_over_512tb",
+ "force_over_1024tb",
NULL
};
- strcat(options, opts);
+ /* strlcat bounds by destination size; strncat(_, _, PAGE_SIZE)
+ * would bound only the appended bytes and could overflow the
+ * page-sized options buffer. */
+ strlcat(options, opts, PAGE_SIZE);
for (rc = 0, str = options; sout[rc]; ) {
char *op = strstr(str, sout[rc]);
;
}
} else {
- strncat(options, "user_xattr,acl", 14);
+ /* use strlcat (dest-size bound) for consistency with the other
+ * option appends; strncat's size argument is not the buffer size */
+ strlcat(options, "user_xattr,acl", PAGE_SIZE);
}
/* Glom up mount options */
if (*options != '\0')
- strcat(options, ",");
- strlcat(options, "no_mbcache,nodelalloc", PAGE_SIZE);
+ /* keep strlcat: its size argument is the total destination size,
+ * unlike strncat whose size bounds only the appended characters
+ * and can overflow the PAGE_SIZE options buffer */
+ strlcat(options, ",", PAGE_SIZE);
+ strlcat(options, "no_mbcache,nodelalloc", PAGE_SIZE);
type = get_fs_type("ldiskfs");
if (!type) {
}
if (ldiskfs_blocks_count(LDISKFS_SB(osd_sb(o))->s_es) <<
- osd_sb(o)->s_blocksize_bits > 512ULL << 40 &&
- force_over_512tb == 0) {
- CERROR("%s: device %s LDISKFS does not support filesystems "
- "greater than 512TB and can cause data corruption. "
- "Use \"force_over_512tb\" mount option to override.\n",
+ osd_sb(o)->s_blocksize_bits > 1024ULL << 40 &&
+ force_over_1024tb == 0) {
+ CERROR("%s: device %s LDISKFS has not been tested on filesystems larger than 1024TB and may cause data corruption. Use 'force_over_1024tb' mount option to override.\n",
name, dev);
GOTO(out_mnt, rc = -EINVAL);
}
if (lmd_flags & LMD_FLG_DEV_RDONLY) {
- if (priv_dev_set_rdonly) {
- priv_dev_set_rdonly(osd_sb(o)->s_bdev);
- o->od_dt_dev.dd_rdonly = 1;
- LCONSOLE_WARN("%s: set dev_rdonly on this device\n",
- name);
- } else {
- LCONSOLE_WARN("%s: not support dev_rdonly on this device",
- name);
-
- GOTO(out_mnt, rc = -EOPNOTSUPP);
- }
- } else if (priv_dev_check_rdonly &&
- priv_dev_check_rdonly(osd_sb(o)->s_bdev)) {
- CERROR("%s: underlying device %s is marked as "
- "read-only. Setup failed\n", name, dev);
+ /* console messages must be newline-terminated */
+ LCONSOLE_WARN("%s: not support dev_rdonly on this device\n",
+ name);
- GOTO(out_mnt, rc = -EROFS);
+ GOTO(out_mnt, rc = -EOPNOTSUPP);
}
if (!ldiskfs_has_feature_journal(o->od_mnt->mnt_sb)) {
osd_index_backup(env, o, false);
osd_shutdown(env, o);
osd_procfs_fini(o);
+ if (o->od_oi_table != NULL)
+ osd_oi_fini(osd_oti_get(env), o);
osd_obj_map_fini(o);
osd_umount(env, o);
o->od_read_cache = 1;
o->od_writethrough_cache = 1;
o->od_readcache_max_filesize = OSD_MAX_CACHE_SIZE;
+ o->od_readcache_max_iosize = OSD_READCACHE_MAX_IO_MB << 20;
+ o->od_writethrough_max_iosize = OSD_WRITECACHE_MAX_IO_MB << 20;
o->od_auto_scrub_interval = AS_DEFAULT;
cplen = strlcpy(o->od_svname, lustre_cfg_string(cfg, 4),
if (rc != 0)
GOTO(out, rc);
+ /* Can only check block device after mount */
+ o->od_nonrotational =
+ blk_queue_nonrot(bdev_get_queue(osd_sb(o)->s_bdev));
+
rc = osd_obj_map_init(env, o);
if (rc != 0)
GOTO(out_mnt, rc);
/* XXX: make osd top device in order to release reference */
d->ld_site->ls_top_dev = d;
lu_site_purge(env, d->ld_site, -1);
- if (!cfs_hash_is_empty(d->ld_site->ls_obj_hash)) {
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL);
- lu_site_print(env, d->ld_site, &msgdata, lu_cdebug_printer);
- }
+ lu_site_print(env, d->ld_site, &d->ld_site->ls_obj_hash.nelems,
+ D_ERROR, lu_cdebug_printer);
lu_site_fini(&o->od_site);
dt_device_fini(&o->od_dt_dev);
OBD_FREE_PTR(o);
RETURN(result);
}
-static int osd_fid_alloc(const struct lu_env *env, struct obd_export *exp,
- struct lu_fid *fid, struct md_op_data *op_data)
+/**
+ * Implementation of lu_device_operations::ldo_fid_alloc() for OSD
+ *
+ * Allocate FID.
+ *
+ * see include/lu_object.h for the details.
+ */
+static int osd_fid_alloc(const struct lu_env *env, struct lu_device *d,
+ struct lu_fid *fid, struct lu_object *parent,
+ const struct lu_name *name)
{
- struct osd_device *osd = osd_dev(exp->exp_obd->obd_lu_dev);
+ struct osd_device *osd = osd_dev(d);
return seq_client_alloc_fid(env, osd->od_cl_seq, fid);
}
.ldo_process_config = osd_process_config,
.ldo_recovery_complete = osd_recovery_complete,
.ldo_prepare = osd_prepare,
+ .ldo_fid_alloc = osd_fid_alloc,
};
static const struct lu_device_type_operations osd_device_type_ops = {
struct osd_device *osd = osd_dev(obd->obd_lu_dev);
struct super_block *sb = osd_sb(osd);
- return (osd->od_mnt == NULL || sb->s_flags & MS_RDONLY);
+ return (osd->od_mnt == NULL || sb->s_flags & SB_RDONLY);
}
/*
* lprocfs legacy support.
*/
-static struct obd_ops osd_obd_device_ops = {
+static const struct obd_ops osd_obd_device_ops = {
.o_owner = THIS_MODULE,
.o_connect = osd_obd_connect,
.o_disconnect = osd_obd_disconnect,
- .o_fid_alloc = osd_fid_alloc,
.o_health_check = osd_health_check,
};
struct kobject *kobj;
int rc;
- CLASSERT(BH_DXLock < sizeof(((struct buffer_head *)0)->b_state) * 8);
+ BUILD_BUG_ON(BH_DXLock >=
+ sizeof(((struct buffer_head *)0)->b_state) * 8);
#if !defined(CONFIG_DEBUG_MUTEXES) && !defined(CONFIG_DEBUG_SPINLOCK)
/* please, try to keep osd_thread_info smaller than a page */
- CLASSERT(sizeof(struct osd_thread_info) <= PAGE_SIZE);
+ BUILD_BUG_ON(sizeof(struct osd_thread_info) > PAGE_SIZE);
#endif
osd_oi_mod_init();
return rc;
#ifdef CONFIG_KALLSYMS
- priv_dev_set_rdonly = (void *)kallsyms_lookup_name("dev_set_rdonly");
- priv_dev_check_rdonly =
- (void *)kallsyms_lookup_name("dev_check_rdonly");
+ priv_security_file_alloc =
+ (void *)kallsyms_lookup_name("security_file_alloc");
#endif
rc = class_register_type(&osd_obd_device_ops, NULL, true, NULL,