sbi->ll_flags |= LL_SBI_AGL_ENABLED;
sbi->ll_flags |= LL_SBI_FAST_READ;
sbi->ll_flags |= LL_SBI_TINY_WRITE;
+ sbi->ll_flags |= LL_SBI_PARALLEL_DIO;
ll_sbi_set_encrypt(sbi, true);
/* root squash */
OBD_CONNECT2_PCC |
OBD_CONNECT2_CRUSH | OBD_CONNECT2_LSEEK |
OBD_CONNECT2_GETATTR_PFID |
- OBD_CONNECT2_DOM_LVB;
+ OBD_CONNECT2_DOM_LVB |
+ OBD_CONNECT2_REP_MBITS |
+ OBD_CONNECT2_ATOMIC_OPEN_LOCK;
#ifdef HAVE_LRU_RESIZE_SUPPORT
if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
OBD_CONNECT_BULK_MBITS | OBD_CONNECT_SHORTIO |
OBD_CONNECT_FLAGS2 | OBD_CONNECT_GRANT_SHRINK;
data->ocd_connect_flags2 = OBD_CONNECT2_LOCKAHEAD |
- OBD_CONNECT2_INC_XID | OBD_CONNECT2_LSEEK;
+ OBD_CONNECT2_INC_XID | OBD_CONNECT2_LSEEK |
+ OBD_CONNECT2_REP_MBITS;
if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_GRANT_PARAM))
data->ocd_connect_flags |= OBD_CONNECT_GRANT_PARAM;
GOTO(out_lock_cn_cb, err);
}
- err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
- sbi->ll_md_exp, &lmd);
+ err = md_get_lustre_md(sbi->ll_md_exp, &request->rq_pill,
+ sbi->ll_dt_exp, sbi->ll_md_exp, &lmd);
if (err) {
CERROR("failed to understand root inode md: rc = %d\n", err);
ptlrpc_req_finished(request);
const struct lu_fid *fid,
struct lustre_md *md)
{
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct mdt_body *body = md->body;
- struct inode *inode;
- ino_t ino;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ struct ll_inode_info *lli;
+ struct mdt_body *body = md->body;
+ struct inode *inode;
+ ino_t ino;
+
ENTRY;
+ LASSERT(md->lmv);
ino = cl_fid_build_ino(fid, sbi->ll_flags & LL_SBI_32BIT_API);
inode = iget_locked(sb, ino);
if (inode == NULL) {
RETURN(ERR_PTR(-ENOENT));
}
+ lli = ll_i2info(inode);
if (inode->i_state & I_NEW) {
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lmv_stripe_md *lsm = md->lmv;
-
inode->i_mode = (inode->i_mode & ~S_IFMT) |
(body->mbo_mode & S_IFMT);
LASSERTF(S_ISDIR(inode->i_mode), "Not slave inode "DFID"\n",
lli->lli_fid = *fid;
ll_lli_init(lli);
- LASSERT(lsm != NULL);
/* master object FID */
lli->lli_pfid = body->mbo_fid1;
CDEBUG(D_INODE, "lli %p slave "DFID" master "DFID"\n",
lli, PFID(fid), PFID(&lli->lli_pfid));
unlock_new_inode(inode);
+ } else {
+ /* in directory restripe/auto-split, a directory will be
+ * transformed to a stripe if it's plain, set its pfid here,
+ * otherwise ll_lock_cancel_bits() can't find the master inode.
+ */
+ lli->lli_pfid = body->mbo_fid1;
}
RETURN(inode);
}
up_write(&lli->lli_lsm_sem);
}
- } else if (lli->lli_default_lsm_md) {
- /* update default lsm if it changes */
+ return;
+ }
+
+ if (lli->lli_default_lsm_md) {
+ /* do nothing if default lsm isn't changed */
down_read(&lli->lli_lsm_sem);
if (lli->lli_default_lsm_md &&
- !lsm_md_eq(lli->lli_default_lsm_md, md->default_lmv)) {
- up_read(&lli->lli_lsm_sem);
- down_write(&lli->lli_lsm_sem);
- if (lli->lli_default_lsm_md)
- lmv_free_memmd(lli->lli_default_lsm_md);
- lli->lli_default_lsm_md = md->default_lmv;
- lsm_md_dump(D_INODE, md->default_lmv);
- md->default_lmv = NULL;
- up_write(&lli->lli_lsm_sem);
- } else {
+ lsm_md_eq(lli->lli_default_lsm_md, md->default_lmv)) {
up_read(&lli->lli_lsm_sem);
+ return;
}
- } else {
- /* init default lsm */
- down_write(&lli->lli_lsm_sem);
- lli->lli_default_lsm_md = md->default_lmv;
- lsm_md_dump(D_INODE, md->default_lmv);
- md->default_lmv = NULL;
- up_write(&lli->lli_lsm_sem);
+ up_read(&lli->lli_lsm_sem);
}
+
+ down_write(&lli->lli_lsm_sem);
+ if (lli->lli_default_lsm_md)
+ lmv_free_memmd(lli->lli_default_lsm_md);
+ lli->lli_default_lsm_md = md->default_lmv;
+ lsm_md_dump(D_INODE, md->default_lmv);
+ md->default_lmv = NULL;
+ up_write(&lli->lli_lsm_sem);
}
static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
if (md->default_lmv)
ll_update_default_lsm_md(inode, md);
+ /* after dir migration/restripe, a stripe may be turned into a
+ * directory; in that case, zero out its lli_pfid.
+ */
+ if (unlikely(fid_is_norm(&lli->lli_pfid)))
+ fid_zero(&lli->lli_pfid);
+
/*
* no striped information from request, lustre_md from req does not
* include stripeEA, see ll_md_setattr()
RETURN(rc);
}
- rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
- sbi->ll_md_exp, &md);
+ rc = md_get_lustre_md(sbi->ll_md_exp, &request->rq_pill, sbi->ll_dt_exp,
+ sbi->ll_md_exp, &md);
if (rc) {
ptlrpc_req_finished(request);
RETURN(rc);
anchor = &vvp_env_info(env)->vti_anchor;
cl_sync_io_init(anchor, 1);
clpage->cp_sync_io = anchor;
- cl_2queue_add(queue, clpage);
+ cl_2queue_add(queue, clpage, true);
rc = cl_io_submit_rw(env, io, CRT_WRITE, queue);
if (rc)
GOTO(queuefini1, rc);
* it is necessary due to possible time
* de-synchronization between MDT inode and OST objects
*/
- if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode) &&
- attr->ia_valid & ATTR_SIZE) {
+ if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) {
xvalid |= OP_XVALID_FLAGS;
flags = LUSTRE_ENCRYPT_FL;
/* Call to ll_io_zero_page is not necessary if
* In case of Direct IO, all we need is to set
* new size.
*/
- if (attr->ia_size & ~PAGE_MASK &&
+ if (attr->ia_valid & ATTR_SIZE &&
+ attr->ia_size & ~PAGE_MASK &&
!(attr->ia_valid & ATTR_FILE &&
attr->ia_file->f_flags & O_DIRECT)) {
pgoff_t offset =
return 0;
}
+/* Truncate and release every page cache page attached to @inode during
+ * final inode teardown, then assert that the mapping really is empty.
+ * Factored out so callers (e.g. ll_delete_inode) share the LU-118
+ * workaround below.
+ */
+void ll_truncate_inode_pages_final(struct inode *inode)
+{
+	struct address_space *mapping = &inode->i_data;
+	unsigned long nrpages;
+	unsigned long flags;
+
+	truncate_inode_pages_final(mapping);
+
+	/* Workaround for LU-118: Note nrpages may not be totally updated when
+	 * truncate_inode_pages() returns, as there can be a page in the process
+	 * of deletion (inside __delete_from_page_cache()) in the specified
+	 * range. Thus mapping->nrpages can be non-zero when this function
+	 * returns even after truncation of the whole mapping.  Only do this if
+	 * nrpages isn't already zero.
+	 */
+	nrpages = mapping->nrpages;
+	if (nrpages) {
+		/* take the i_pages lock so the re-read below observes any
+		 * in-flight __delete_from_page_cache() nrpages update
+		 */
+		ll_xa_lock_irqsave(&mapping->i_pages, flags);
+		nrpages = mapping->nrpages;
+		ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
+	} /* Workaround end */
+
+	LASSERTF(nrpages == 0, "%s: inode="DFID"(%p) nrpages=%lu, "
+		 "see https://jira.whamcloud.com/browse/LU-118\n",
+		 ll_i2sbi(inode)->ll_fsname,
+		 PFID(ll_inode2fid(inode)), inode, nrpages);
+}
+
int ll_read_inode2(struct inode *inode, void *opaque)
{
struct lustre_md *md = opaque;
void ll_delete_inode(struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct address_space *mapping = &inode->i_data;
- unsigned long nrpages;
- unsigned long flags;
-
ENTRY;
if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL) {
cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, inode->i_nlink ?
CL_FSYNC_LOCAL : CL_FSYNC_DISCARD, 1);
}
- truncate_inode_pages_final(mapping);
-
- /* Workaround for LU-118: Note nrpages may not be totally updated when
- * truncate_inode_pages() returns, as there can be a page in the process
- * of deletion (inside __delete_from_page_cache()) in the specified
- * range. Thus mapping->nrpages can be non-zero when this function
- * returns even after truncation of the whole mapping. Only do this if
- * npages isn't already zero.
- */
- nrpages = mapping->nrpages;
- if (nrpages) {
- ll_xa_lock_irqsave(&mapping->i_pages, flags);
- nrpages = mapping->nrpages;
- ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
- } /* Workaround end */
-
- LASSERTF(nrpages == 0, "%s: inode="DFID"(%p) nrpages=%lu, "
- "see https://jira.whamcloud.com/browse/LU-118\n",
- ll_i2sbi(inode)->ll_fsname,
- PFID(ll_inode2fid(inode)), inode, nrpages);
+ ll_truncate_inode_pages_final(inode);
ll_clear_inode(inode);
clear_inode(inode);
if (flags & LUSTRE_PROJINHERIT_FL)
fa.fsx_xflags = FS_XFLAG_PROJINHERIT;
- rc = ll_ioctl_check_project(inode, &fa);
+ rc = ll_ioctl_check_project(inode, fa.fsx_xflags,
+ fa.fsx_projid);
if (rc)
RETURN(rc);
* \param[in] sb super block for this file-system
* \param[in] open_req pointer to the original open request
*/
-void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req)
+void ll_open_cleanup(struct super_block *sb, struct req_capsule *pill)
{
struct mdt_body *body;
struct md_op_data *op_data;
struct obd_export *exp = ll_s2sbi(sb)->ll_md_exp;
ENTRY;
- body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
+ body = req_capsule_server_get(pill, &RMF_MDT_BODY);
OBD_ALLOC_PTR(op_data);
if (op_data == NULL) {
CWARN("%s: cannot allocate op_data to release open handle for "
EXIT;
}
-int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
+int ll_prep_inode(struct inode **inode, struct req_capsule *pill,
struct super_block *sb, struct lookup_intent *it)
{
struct ll_sb_info *sbi = NULL;
LASSERT(*inode || sb);
sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
- rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
+ rc = md_get_lustre_md(sbi->ll_md_exp, pill, sbi->ll_dt_exp,
sbi->ll_md_exp, &md);
if (rc != 0)
GOTO(out, rc);
if (rc != 0 && it != NULL && it->it_op & IT_OPEN) {
ll_intent_drop_lock(it);
- ll_open_cleanup(sb != NULL ? sb : (*inode)->i_sb, req);
+ ll_open_cleanup(sb != NULL ? sb : (*inode)->i_sb, pill);
}
return rc;