*/
/* LASSERT(current->journal_info == NULL); */
- inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
+ inode = osd_ldiskfs_iget(osd_sb(dev), id->oii_ino);
if (IS_ERR(inode)) {
CDEBUG(D_INODE, "no inode: ino = %u, rc = %ld\n",
id->oii_ino, PTR_ERR(inode));
}
if (fid != NULL)
- CWARN("%s: directory (inode: %lu, FID: "DFID") %s "
- "maximum entry limit\n",
+ /* below message is checked in sanity.sh test_129 */
+ CWARN("%s: directory (inode: %lu, FID: "DFID") %s maximum entry limit\n",
osd_name(osd), parent->i_ino, PFID(fid),
rc == -ENOSPC ? "has reached" : "is approaching");
else
- CWARN("%s: directory (inode: %lu, FID: unknown) %s "
- "maximum entry limit\n",
+ /* below message is checked in sanity.sh test_129 */
+ CWARN("%s: directory (inode: %lu, FID: unknown) %s maximum entry limit\n",
osd_name(osd), parent->i_ino,
rc == -ENOSPC ? "has reached" : "is approaching");
*/
again:
- inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
+ inode = osd_ldiskfs_iget(osd_sb(dev), id->oii_ino);
if (IS_ERR(inode)) {
rc = PTR_ERR(inode);
if (!trusted && (rc == -ENOENT || rc == -ESTALE))
oclb.oclb_items = 0;
#ifdef HAVE_DIR_CONTEXT
oclb.ctx.pos = filp->f_pos;
-#ifdef HAVE_ITERATE_SHARED
rc = fops->iterate_shared(filp, &oclb.ctx);
-#else
- rc = fops->iterate(filp, &oclb.ctx);
-#endif
filp->f_pos = oclb.ctx.pos;
#else
rc = fops->readdir(filp, &oclb, osd_stripe_dir_filldir);
goto trigger;
}
+	/* -ESTALE is returned if the inode of the OST object doesn't exist */
+ if (result == -ESTALE &&
+ fid_is_on_ost(info, dev, fid, OI_CHECK_FLD)) {
+ GOTO(out, result = 0);
+ }
+
if (result)
GOTO(out, result);
LASSERT(!updated);
+	/*
+	 * if two OST objects map to the same inode, and the inode mode is
+	 * (S_IFREG | S_ISUID | S_ISGID | S_ISVTX | 0666), which means it's
+	 * reserved by precreate and not written yet, then don't set the
+	 * inode for the object whose FID mismatches, so that it can create
+	 * its own inode and not block precreate.
+	 */
+ if (fid_is_on_ost(info, dev, fid, OI_CHECK_FLD) &&
+ inode->i_mode == (S_IFREG | S_ISUID | S_ISGID | S_ISVTX | 0666)) {
+ obj->oo_inode = NULL;
+ GOTO(out, result = 0);
+ }
+
result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
/*
* "result == -ENOENT" means the cached OI mapping has been removed
*/
if (last_credits != oh->ot_credits &&
time_after(jiffies, last_printed +
- msecs_to_jiffies(60 * MSEC_PER_SEC)) &&
+ cfs_time_seconds(60)) &&
osd_transaction_size(dev) > 512) {
CWARN("%s: credits %u > trans_max %u\n", osd_name(dev),
oh->ot_credits, osd_transaction_size(dev));
static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
{
struct osd_object *obj = osd_obj(l);
+ struct qsd_instance *qsd = osd_def_qsd(osd_obj2dev(obj));
struct inode *inode = obj->oo_inode;
+ __u64 projid;
+ qid_t uid;
+ qid_t gid;
LINVRNT(osd_invariant(obj));
*/
osd_index_fini(obj);
- if (inode != NULL) {
- struct qsd_instance *qsd = osd_def_qsd(osd_obj2dev(obj));
- qid_t uid = i_uid_read(inode);
- qid_t gid = i_gid_read(inode);
- obj->oo_inode = NULL;
- iput(inode);
- if (!obj->oo_header && qsd) {
- struct osd_thread_info *info = osd_oti_get(env);
- struct lquota_id_info *qi = &info->oti_qi;
+ if (!inode)
+ return;
- /* Release granted quota to master if necessary */
- qi->lqi_id.qid_uid = uid;
- qsd_op_adjust(env, qsd, &qi->lqi_id, USRQUOTA);
+ uid = i_uid_read(inode);
+ gid = i_gid_read(inode);
+ projid = i_projid_read(inode);
- qi->lqi_id.qid_uid = gid;
- qsd_op_adjust(env, qsd, &qi->lqi_id, GRPQUOTA);
+ obj->oo_inode = NULL;
+ iput(inode);
- qi->lqi_id.qid_uid = i_projid_read(inode);
- qsd_op_adjust(env, qsd, &qi->lqi_id, PRJQUOTA);
- }
+	/* do not rebalance quota if the caller needs to release memory,
+	 * otherwise qsd_refresh_usage() may start a new ldiskfs
+	 * transaction and risk a deadlock - LU-12178 */
+ if (current->flags & (PF_MEMALLOC | PF_KSWAPD))
+ return;
+
+ if (!obj->oo_header && qsd) {
+ struct osd_thread_info *info = osd_oti_get(env);
+ struct lquota_id_info *qi = &info->oti_qi;
+
+ /* Release granted quota to master if necessary */
+ qi->lqi_id.qid_uid = uid;
+ qsd_op_adjust(env, qsd, &qi->lqi_id, USRQUOTA);
+
+ qi->lqi_id.qid_uid = gid;
+ qsd_op_adjust(env, qsd, &qi->lqi_id, GRPQUOTA);
+
+ qi->lqi_id.qid_uid = projid;
+ qsd_op_adjust(env, qsd, &qi->lqi_id, PRJQUOTA);
}
}
* Concurrency: shouldn't matter.
*/
int osd_statfs(const struct lu_env *env, struct dt_device *d,
- struct obd_statfs *sfs)
+ struct obd_statfs *sfs, struct obd_statfs_info *info)
{
struct osd_device *osd = osd_dt_dev(d);
struct super_block *sb = osd_sb(osd);
goto out;
statfs_pack(sfs, ksfs);
- if (unlikely(sb->s_flags & MS_RDONLY))
+ if (unlikely(sb->s_flags & SB_RDONLY))
sfs->os_state |= OS_STATE_READONLY;
+
+ sfs->os_state |= osd->od_nonrotational ? OS_STATE_NONROT : 0;
+
if (ldiskfs_has_feature_extents(sb))
sfs->os_maxbytes = sb->s_maxbytes;
else
*/
param->ddp_inodespace = PER_OBJ_USAGE;
/*
- * EXT_INIT_MAX_LEN is the theoretical maximum extent size (32k blocks
- * = 128MB) which is unlikely to be hit in real life. Report a smaller
- * maximum length to not under count the actual number of extents
- * needed for writing a file.
+ * EXT_INIT_MAX_LEN is the theoretical maximum extent size (32k blocks
+ * is 128MB) which is unlikely to be hit in real life. Report a smaller
+ * maximum length to not under-count the actual number of extents
+ * needed for writing a file if there are sub-optimal block allocations.
*/
- param->ddp_max_extent_blks = EXT_INIT_MAX_LEN >> 2;
+ param->ddp_max_extent_blks = EXT_INIT_MAX_LEN >> 1;
/* worst-case extent insertion metadata overhead */
param->ddp_extent_tax = 6 * LDISKFS_BLOCK_SIZE(sb);
param->ddp_mntopts = 0;
#endif
param->ddp_max_ea_size = sb->s_blocksize - ea_overhead;
- if (param->ddp_max_ea_size > OSD_MAX_EA_SIZE)
- param->ddp_max_ea_size = OSD_MAX_EA_SIZE;
+ if (param->ddp_max_ea_size > OBD_MAX_EA_SIZE)
+ param->ddp_max_ea_size = OBD_MAX_EA_SIZE;
/*
* Preferred RPC size for efficient disk IO. 4MB shows good
}
}
+static struct super_block *osd_mnt_sb_get(const struct dt_device *d)
+{
+ return osd_sb(osd_dt_dev(d));
+}
+
/*
* Concurrency: shouldn't matter.
*/
.dt_trans_stop = osd_trans_stop,
.dt_trans_cb_add = osd_trans_cb_add,
.dt_conf_get = osd_conf_get,
+ .dt_mnt_sb_get = osd_mnt_sb_get,
.dt_sync = osd_sync,
.dt_ro = osd_ro,
.dt_commit_async = osd_commit_async,
return obj->oo_owner == env;
}
-static struct timespec *osd_inode_time(const struct lu_env *env,
- struct inode *inode, __u64 seconds)
-{
- struct osd_thread_info *oti = osd_oti_get(env);
- struct timespec *t = &oti->oti_time;
-
- t->tv_sec = seconds;
- t->tv_nsec = 0;
- *t = timespec_trunc(*t, inode->i_sb->s_time_gran);
- return t;
-}
-
static void osd_inode_getattr(const struct lu_env *env,
struct inode *inode, struct lu_attr *attr)
{
return 0;
if (bits & LA_ATIME)
- inode->i_atime = *osd_inode_time(env, inode, attr->la_atime);
+ inode->i_atime = osd_inode_time(inode, attr->la_atime);
if (bits & LA_CTIME)
- inode->i_ctime = *osd_inode_time(env, inode, attr->la_ctime);
+ inode->i_ctime = osd_inode_time(inode, attr->la_ctime);
if (bits & LA_MTIME)
- inode->i_mtime = *osd_inode_time(env, inode, attr->la_mtime);
+ inode->i_mtime = osd_inode_time(inode, attr->la_mtime);
if (bits & LA_SIZE) {
spin_lock(&inode->i_lock);
LDISKFS_I(inode)->i_disksize = attr->la_size;
(attr->la_valid & LA_GID && attr->la_gid != i_gid_read(inode))) {
struct iattr iattr;
- ll_vfs_dq_init(inode);
+ dquot_initialize(inode);
iattr.ia_valid = 0;
if (attr->la_valid & LA_UID)
iattr.ia_valid |= ATTR_UID;
iattr.ia_uid = make_kuid(&init_user_ns, attr->la_uid);
iattr.ia_gid = make_kgid(&init_user_ns, attr->la_gid);
- rc = ll_vfs_dq_transfer(inode, &iattr);
+ rc = dquot_transfer(inode, &iattr);
if (rc) {
CERROR("%s: quota transfer failed: rc = %d. Is quota "
"enforcement enabled on the ldiskfs "
osd_id_gen(id, obj->oo_inode->i_ino, obj->oo_inode->i_generation);
rc = osd_oi_insert(info, osd, fid, id, oh->ot_handle,
OI_CHECK_FLD, NULL);
+ if (CFS_FAIL_CHECK(OBD_FAIL_OSD_DUPLICATE_MAP) && osd->od_is_ost) {
+ struct lu_fid next_fid = *fid;
+
+ /* insert next object in advance, and map to the same inode */
+ next_fid.f_oid++;
+ if (next_fid.f_oid != 0) {
+ osd_trans_exec_op(env, th, OSD_OT_INSERT);
+ osd_oi_insert(info, osd, &next_fid, id, oh->ot_handle,
+ OI_CHECK_FLD, NULL);
+ osd_trans_exec_check(env, th, OSD_OT_INSERT);
+ }
+ }
+
osd_trans_exec_check(env, th, OSD_OT_INSERT);
return rc;
*/
osd_trans_declare_op(env, oh, OSD_OT_INSERT,
osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);
+ if (CFS_FAIL_CHECK(OBD_FAIL_OSD_DUPLICATE_MAP))
+ osd_trans_declare_op(env, oh, OSD_OT_INSERT,
+ osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);
/* will help to find FID->ino mapping at dt_insert() */
rc = osd_idc_find_and_init(env, osd_obj2dev(osd_dt_obj(dt)),
RETURN(fl);
/* Remove old PFID EA entry firstly. */
- ll_vfs_dq_init(inode);
+ dquot_initialize(inode);
rc = osd_removexattr(dentry, inode, XATTR_NAME_FID);
if (rc == -ENODATA) {
if ((fl & LU_XATTR_REPLACE) && !(fl & LU_XATTR_CREATE))
obj->oo_pfid_in_lma = 0;
}
} else {
- ll_vfs_dq_init(inode);
+ dquot_initialize(inode);
dentry->d_inode = inode;
dentry->d_sb = inode->i_sb;
rc = osd_removexattr(dentry, inode, name);
file->f_op = inode->i_fop;
set_file_inode(file, inode);
- rc = ll_vfs_fsync_range(file, start, end, 0);
+ rc = vfs_fsync_range(file, start, end, 0);
RETURN(rc);
}
LASSERT(oh->ot_handle != NULL);
LASSERT(oh->ot_handle->h_transaction != NULL);
- ll_vfs_dq_init(dir);
+ dquot_initialize(dir);
dentry = osd_child_dentry_get(env, obj,
(char *)key, strlen((char *)key));
osd_get_ldiskfs_dirent_param(ldp, fid);
child = osd_child_dentry_get(info->oti_env, pobj, name, strlen(name));
child->d_fsdata = (void *)ldp;
- ll_vfs_dq_init(pobj->oo_inode);
+ dquot_initialize(pobj->oo_inode);
rc = osd_ldiskfs_add_entry(info, osd_obj2dev(pobj), oth->ot_handle,
child, cinode, hlock);
if (rc == 0 && OBD_FAIL_CHECK(OBD_FAIL_LFSCK_BAD_TYPE)) {
#ifdef HAVE_DIR_CONTEXT
buf.ctx.pos = filp->f_pos;
-#ifdef HAVE_ITERATE_SHARED
rc = inode->i_fop->iterate_shared(filp, &buf.ctx);
-#else
- rc = inode->i_fop->iterate(filp, &buf.ctx);
-#endif
filp->f_pos = buf.ctx.pos;
#else
rc = inode->i_fop->readdir(filp, &buf, osd_ldiskfs_filldir);
ldp = (struct ldiskfs_dentry_param *)osd_oti_get(env)->oti_ldp;
osd_get_ldiskfs_dirent_param(ldp, fid);
dentry->d_fsdata = (void *)ldp;
- ll_vfs_dq_init(dir);
+ dquot_initialize(dir);
rc = osd_ldiskfs_add_entry(info, dev, jh, dentry, inode, hlock);
/*
* It is too bad, we cannot reinsert the name entry back.
"force_over_512tb",
NULL
};
- strcat(options, opts);
+ strncat(options, opts, PAGE_SIZE);
for (rc = 0, str = options; sout[rc]; ) {
char *op = strstr(str, sout[rc]);
;
}
} else {
- strncat(options, "user_xattr,acl", 14);
+ strncat(options, "user_xattr,acl", PAGE_SIZE);
}
/* Glom up mount options */
if (*options != '\0')
- strcat(options, ",");
- strlcat(options, "no_mbcache,nodelalloc", PAGE_SIZE);
+ strncat(options, ",", PAGE_SIZE);
+ strncat(options, "no_mbcache,nodelalloc", PAGE_SIZE);
type = get_fs_type("ldiskfs");
if (!type) {
o->od_read_cache = 1;
o->od_writethrough_cache = 1;
o->od_readcache_max_filesize = OSD_MAX_CACHE_SIZE;
+
o->od_auto_scrub_interval = AS_DEFAULT;
cplen = strlcpy(o->od_svname, lustre_cfg_string(cfg, 4),
if (rc != 0)
GOTO(out, rc);
+ /* Can only check block device after mount */
+ o->od_nonrotational =
+ blk_queue_nonrot(bdev_get_queue(osd_sb(o)->s_bdev));
+
rc = osd_obj_map_init(env, o);
if (rc != 0)
GOTO(out_mnt, rc);
struct osd_device *osd = osd_dev(obd->obd_lu_dev);
struct super_block *sb = osd_sb(osd);
- return (osd->od_mnt == NULL || sb->s_flags & MS_RDONLY);
+ return (osd->od_mnt == NULL || sb->s_flags & SB_RDONLY);
}
/*