op_data->op_attr.ia_mtime = inode->i_mtime;
op_data->op_attr.ia_ctime = inode->i_ctime;
op_data->op_attr.ia_size = i_size_read(inode);
- op_data->op_attr.ia_valid |= ATTR_MODE | ATTR_ATIME | ATTR_ATIME_SET |
- ATTR_MTIME | ATTR_MTIME_SET |
- ATTR_CTIME | ATTR_CTIME_SET;
+ op_data->op_attr.ia_valid |= (ATTR_MODE | ATTR_ATIME | ATTR_ATIME_SET |
+ ATTR_MTIME | ATTR_MTIME_SET |
+ ATTR_CTIME);
+ op_data->op_xvalid |= OP_XVALID_CTIME_SET;
op_data->op_attr_blocks = inode->i_blocks;
op_data->op_attr_flags = ll_inode_to_ext_flags(inode->i_flags);
- op_data->op_handle = och->och_fh;
+ if (ll_file_test_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT))
+ op_data->op_attr_flags |= LUSTRE_PROJINHERIT_FL;
+ op_data->op_open_handle = och->och_open_handle;
if (och->och_flags & FMODE_WRITE &&
ll_file_test_and_clear_flag(ll_i2info(inode), LLIF_DATA_MODIFIED))
case MDS_CLOSE_LAYOUT_MERGE:
/* merge blocks from the victim inode */
op_data->op_attr_blocks += ((struct inode *)data)->i_blocks;
- op_data->op_attr.ia_valid |= ATTR_SIZE | ATTR_BLOCKS;
+ op_data->op_attr.ia_valid |= ATTR_SIZE;
+ op_data->op_xvalid |= OP_XVALID_BLOCKS;
case MDS_CLOSE_LAYOUT_SPLIT:
case MDS_CLOSE_LAYOUT_SWAP: {
struct split_param *sp = data;
LASSERT(data != NULL);
op_data->op_attr_blocks +=
ioc->lil_count * op_data->op_attr_blocks;
- op_data->op_attr.ia_valid |= ATTR_SIZE | ATTR_BLOCKS;
+ op_data->op_attr.ia_valid |= ATTR_SIZE;
+ op_data->op_xvalid |= OP_XVALID_BLOCKS;
op_data->op_bias |= MDS_CLOSE_RESYNC_DONE;
op_data->op_lease_handle = och->och_lease_handle;
op_data->op_bias |= MDS_HSM_RELEASE;
op_data->op_data_version = *(__u64 *)data;
op_data->op_lease_handle = och->och_lease_handle;
- op_data->op_attr.ia_valid |= ATTR_SIZE | ATTR_BLOCKS;
+ op_data->op_attr.ia_valid |= ATTR_SIZE;
+ op_data->op_xvalid |= OP_XVALID_BLOCKS;
break;
default:
break;
}
+ if (!(op_data->op_attr.ia_valid & ATTR_SIZE))
+ op_data->op_xvalid |= OP_XVALID_LAZYSIZE;
+ if (!(op_data->op_xvalid & OP_XVALID_BLOCKS))
+ op_data->op_xvalid |= OP_XVALID_LAZYBLOCKS;
+
rc = md_close(md_exp, op_data, och->och_mod, &req);
if (rc != 0 && rc != -EINTR)
CERROR("%s: inode "DFID" mdc close failed: rc = %d\n",
out:
md_clear_open_replay_data(md_exp, och);
- och->och_fh.cookie = DEAD_HANDLE_MAGIC;
+ och->och_open_handle.cookie = DEAD_HANDLE_MAGIC;
OBD_FREE_PTR(och);
ptlrpc_req_finished(req); /* This is close request */
RETURN(rc);
}
+/* Page-cache filler used by ll_dom_finish_open() via read_cache_page().
+ *
+ * @data points at a struct niobuf_local describing (at most) one page
+ * worth of inline file data received with the open reply; copy it into
+ * @page, zero-fill the tail when the buffer is shorter than a page,
+ * mark the page uptodate and unlock it.  Always returns 0.
+ */
+static inline int ll_dom_readpage(void *data, struct page *page)
+{
+ struct niobuf_local *lnb = data;
+ void *kaddr;
+
+ kaddr = ll_kmap_atomic(page, KM_USER0);
+ memcpy(kaddr, lnb->lnb_data, lnb->lnb_len);
+ if (lnb->lnb_len < PAGE_SIZE)
+ /* zero the remainder so stale data never reaches userspace */
+ memset(kaddr + lnb->lnb_len, 0,
+ PAGE_SIZE - lnb->lnb_len);
+ flush_dcache_page(page);
+ SetPageUptodate(page);
+ ll_kunmap_atomic(kaddr, KM_USER0);
+ unlock_page(page);
+
+ return 0;
+}
+
+/* Fill the page cache with file data returned inline in an open reply.
+ *
+ * For Data-on-MDT files the server may pack the file content into the
+ * open reply (RMF_NIOBUF_INLINE).  When the granted intent lock covers
+ * the data (ldlm_has_dom()), copy that buffer page by page into the
+ * inode's page cache so subsequent reads need no extra RPC.
+ *
+ * NOTE(review): LDLM_LOCK_PUT() moved under the NULL check —
+ * ldlm_handle2lock() can return NULL for a stale handle, and the
+ * unconditional put dereferenced a NULL pointer in that case.
+ */
+void ll_dom_finish_open(struct inode *inode, struct ptlrpc_request *req,
+ struct lookup_intent *it)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct cl_object *obj = lli->lli_clob;
+ struct address_space *mapping = inode->i_mapping;
+ struct page *vmpage;
+ struct niobuf_remote *rnb;
+ char *data;
+ struct lustre_handle lockh;
+ struct ldlm_lock *lock;
+ unsigned long index, start;
+ struct niobuf_local lnb;
+ bool dom_lock = false;
+
+ ENTRY;
+
+ if (obj == NULL)
+ RETURN_EXIT;
+
+ if (it->it_lock_mode != 0) {
+ lockh.cookie = it->it_lock_handle;
+ lock = ldlm_handle2lock(&lockh);
+ if (lock != NULL) {
+ dom_lock = ldlm_has_dom(lock);
+ LDLM_LOCK_PUT(lock);
+ }
+ }
+ if (!dom_lock)
+ RETURN_EXIT;
+
+ if (!req_capsule_has_field(&req->rq_pill, &RMF_NIOBUF_INLINE,
+ RCL_SERVER))
+ RETURN_EXIT;
+
+ rnb = req_capsule_server_get(&req->rq_pill, &RMF_NIOBUF_INLINE);
+ if (rnb == NULL || rnb->rnb_len == 0)
+ RETURN_EXIT;
+
+ CDEBUG(D_INFO, "Get data buffer along with open, len %i, i_size %llu\n",
+ rnb->rnb_len, i_size_read(inode));
+
+ data = (char *)rnb + sizeof(*rnb);
+
+ lnb.lnb_file_offset = rnb->rnb_offset;
+ start = lnb.lnb_file_offset / PAGE_SIZE;
+ index = 0;
+ LASSERT(lnb.lnb_file_offset % PAGE_SIZE == 0);
+ lnb.lnb_page_offset = 0;
+ do {
+ /* each iteration hands at most one page of inline data
+ * to ll_dom_readpage() through read_cache_page() */
+ lnb.lnb_data = data + (index << PAGE_SHIFT);
+ lnb.lnb_len = rnb->rnb_len - (index << PAGE_SHIFT);
+ if (lnb.lnb_len > PAGE_SIZE)
+ lnb.lnb_len = PAGE_SIZE;
+
+ vmpage = read_cache_page(mapping, index + start,
+ ll_dom_readpage, &lnb);
+ if (IS_ERR(vmpage)) {
+ CWARN("%s: cannot fill page %lu for "DFID
+ " with data: rc = %li\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ index + start, PFID(lu_object_fid(&obj->co_lu)),
+ PTR_ERR(vmpage));
+ break;
+ }
+ put_page(vmpage);
+ index++;
+ } while (rnb->rnb_len > (index << PAGE_SHIFT));
+ EXIT;
+}
+
static int ll_intent_file_open(struct dentry *de, void *lmm, int lmmsize,
struct lookup_intent *itp)
{
}
rc = ll_prep_inode(&de->d_inode, req, NULL, itp);
- if (!rc && itp->it_lock_mode)
+
+ if (!rc && itp->it_lock_mode) {
+ ll_dom_finish_open(de->d_inode, req, itp);
ll_set_lock_data(sbi->ll_md_exp, de->d_inode, itp, NULL);
+ }
out:
ptlrpc_req_finished(req);
struct mdt_body *body;
body = req_capsule_server_get(&it->it_request->rq_pill, &RMF_MDT_BODY);
- och->och_fh = body->mbo_handle;
+ och->och_open_handle = body->mbo_open_handle;
och->och_fid = body->mbo_fid1;
och->och_lease_handle.cookie = it->it_lock_handle;
och->och_magic = OBD_CLIENT_HANDLE_MAGIC;
fd = ll_file_data_get();
if (fd == NULL)
- GOTO(out_openerr, rc = -ENOMEM);
+ GOTO(out_nofiledata, rc = -ENOMEM);
fd->fd_file = file;
if (S_ISDIR(inode->i_mode))
if (file->f_flags & O_TRUNC)
oit.it_flags |= FMODE_WRITE;
- /* kernel only call f_op->open in dentry_open. filp_open calls
- * dentry_open after call to open_namei that checks permissions.
- * Only nfsd_open call dentry_open directly without checking
- * permissions and because of that this code below is safe. */
- if (oit.it_flags & (FMODE_WRITE | FMODE_READ))
- oit.it_flags |= MDS_OPEN_OWNEROVERRIDE;
+ /* kernel only call f_op->open in dentry_open. filp_open calls
+ * dentry_open after call to open_namei that checks permissions.
+ * Only nfsd_open call dentry_open directly without checking
+ * permissions and because of that this code below is safe.
+ */
+ if (oit.it_flags & (FMODE_WRITE | FMODE_READ))
+ oit.it_flags |= MDS_OPEN_OWNEROVERRIDE;
/* We do not want O_EXCL here, presumably we opened the file
* already? XXX - NFS implications? */
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN, 1);
}
+out_nofiledata:
if (it && it_disposition(it, DISP_ENQ_OPEN_REF)) {
ptlrpc_req_finished(it->it_request);
it_clear_disposition(it, DISP_ENQ_OPEN_REF);
* if it has an open lock in cache already.
*/
static int ll_lease_och_acquire(struct inode *inode, struct file *file,
- struct lustre_handle *old_handle)
+ struct lustre_handle *old_open_handle)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
*och_p = NULL;
}
- *old_handle = fd->fd_och->och_fh;
+ *old_open_handle = fd->fd_och->och_open_handle;
EXIT;
out_unlock:
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct md_op_data *op_data;
struct ptlrpc_request *req = NULL;
- struct lustre_handle old_handle = { 0 };
+ struct lustre_handle old_open_handle = { 0 };
struct obd_client_handle *och = NULL;
int rc;
int rc2;
if (!(fmode & file->f_mode) || (file->f_mode & FMODE_EXEC))
RETURN(ERR_PTR(-EPERM));
- rc = ll_lease_och_acquire(inode, file, &old_handle);
+ rc = ll_lease_och_acquire(inode, file, &old_open_handle);
if (rc)
RETURN(ERR_PTR(rc));
}
GOTO(out, rc = PTR_ERR(op_data));
/* To tell the MDT this openhandle is from the same owner */
- op_data->op_handle = old_handle;
+ op_data->op_open_handle = old_open_handle;
it.it_flags = fmode | open_flags;
it.it_flags |= MDS_OPEN_LOCK | MDS_OPEN_BY_FID | MDS_OPEN_LEASE;
if (rc)
GOTO(out, rc);
- op_data->op_handle = och->och_lease_handle;
+ op_data->op_lease_handle = och->och_lease_handle;
rc = md_file_resync(sbi->ll_md_exp, op_data);
if (rc)
GOTO(out, rc);
if (args->via_io_subtype == IO_NORMAL) {
iov_iter_advance(args->u.normal.via_iter, io->ci_nob);
- pos += io->ci_nob;
+
+ /* CLIO is too complicated. See LU-11069. */
+ if (cl_io_is_append(io))
+ pos = io->u.ci_rw.rw_iocb.ki_pos;
+ else
+ pos += io->ci_nob;
+
args->u.normal.via_iocb->ki_pos = pos;
#ifdef HAVE_KIOCB_KI_LEFT
args->u.normal.via_iocb->ki_left = count;
* and will write it out. This saves a lot of processing time.
*
* All writes here are within one page, so exclusion is handled by the page
- * lock on the vm page. Exception is appending, which requires locking the
- * full file to handle size issues. We do not do tiny writes for writes which
- * touch multiple pages because it's very unlikely multiple sequential pages
+ * lock on the vm page. We do not do tiny writes for writes which touch
* multiple pages because it's very unlikely multiple sequential pages
* are already dirty.
*
* We limit these to < PAGE_SIZE because PAGE_SIZE writes are relatively common
* and are unlikely to be to already dirty pages.
*
- * Attribute updates are important here, we do it in ll_tiny_write_end.
+ * Attribute updates are important here, we do them in ll_tiny_write_end.
*/
static ssize_t ll_do_tiny_write(struct kiocb *iocb, struct iov_iter *iter)
{
ssize_t count = iov_iter_count(iter);
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
- struct ll_inode_info *lli = ll_i2info(inode);
- struct range_lock range;
ssize_t result = 0;
- bool append = false;
ENTRY;
- /* NB: we can't do direct IO for tiny writes because they use the page
- * cache, and we can't do sync writes because tiny writes can't flush
- * pages.
+ /* Restrict writes to single page and < PAGE_SIZE. See comment at top
+ * of function for why.
*/
- if (file->f_flags & (O_DIRECT | O_SYNC))
+ if (count >= PAGE_SIZE ||
+ (iocb->ki_pos & (PAGE_SIZE-1)) + count > PAGE_SIZE)
RETURN(0);
- /* It is relatively unlikely we will overwrite a full dirty page, so
- * limit tiny writes to < PAGE_SIZE
- */
- if (count >= PAGE_SIZE)
- RETURN(0);
-
- /* For append writes, we must take the range lock to protect size
- * and also move pos to current size before writing.
- */
- if (file->f_flags & O_APPEND) {
- struct lu_env *env;
- __u16 refcheck;
-
- append = true;
- range_lock_init(&range, 0, LUSTRE_EOF);
- result = range_lock(&lli->lli_write_tree, &range);
- if (result)
- RETURN(result);
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- GOTO(out, result = PTR_ERR(env));
- ll_merge_attr(env, inode);
- cl_env_put(env, &refcheck);
- iocb->ki_pos = i_size_read(inode);
- }
-
- /* Does this write touch multiple pages?
- *
- * This partly duplicates the PAGE_SIZE check above, but must come
- * after range locking for append writes because it depends on the
- * write position (ki_pos).
- */
- if ((iocb->ki_pos & (PAGE_SIZE-1)) + count > PAGE_SIZE)
- goto out;
-
result = __generic_file_write_iter(iocb, iter);
/* If the page is not already dirty, ll_tiny_write_begin returns
ll_file_set_flag(ll_i2info(inode), LLIF_DATA_MODIFIED);
}
-out:
- if (append)
- range_unlock(&lli->lli_write_tree, &range);
-
CDEBUG(D_VFSTRACE, "result: %zu, original count %zu\n", result, count);
RETURN(result);
{
struct vvp_io_args *args;
struct lu_env *env;
- ssize_t rc_tiny, rc_normal;
+ ssize_t rc_tiny = 0, rc_normal;
__u16 refcheck;
ENTRY;
- rc_tiny = ll_do_tiny_write(iocb, from);
+ /* NB: we can't do direct IO for tiny writes because they use the page
+ * cache, we can't do sync writes because tiny writes can't flush
+ * pages, and we can't do append writes because we can't guarantee the
+ * required DLM locks are held to protect file size.
+ */
+ if (ll_sbi_has_tiny_write(ll_i2sbi(file_inode(iocb->ki_filp))) &&
+ !(iocb->ki_filp->f_flags & (O_DIRECT | O_SYNC | O_APPEND)))
+ rc_tiny = ll_do_tiny_write(iocb, from);
/* In case of error, go on and try normal write - Only stop if tiny
* write completed I/O.
inode_lock(inode);
- rc = ll_setattr_raw(file_dentry(file), attr, true);
+ rc = ll_setattr_raw(file_dentry(file), attr, 0, true);
if (rc == -ENODATA)
rc = 0;
struct iattr ia = {
.ia_valid = ATTR_ATIME | ATTR_ATIME_SET |
ATTR_MTIME | ATTR_MTIME_SET |
- ATTR_CTIME | ATTR_CTIME_SET,
+ ATTR_CTIME,
.ia_atime = {
.tv_sec = lfu->lfu_atime_sec,
.tv_nsec = lfu->lfu_atime_nsec,
RETURN(-EINVAL);
inode_lock(inode);
- rc = ll_setattr_raw(file_dentry(file), &ia, false);
+ rc = ll_setattr_raw(file_dentry(file), &ia, OP_XVALID_CTIME_SET,
+ false);
inode_unlock(inode);
RETURN(rc);
sizeof(fsxattr)))
RETURN(-EFAULT);
- fsxattr.fsx_xflags = ll_inode_to_ext_flags(inode->i_flags);
+ fsxattr.fsx_xflags = ll_inode_flags_to_xflags(inode->i_flags);
+ if (ll_file_test_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT))
+ fsxattr.fsx_xflags |= FS_XFLAG_PROJINHERIT;
fsxattr.fsx_projid = ll_i2info(inode)->lli_projid;
if (copy_to_user((struct fsxattr __user *)arg,
&fsxattr, sizeof(fsxattr)))
int rc = 0;
struct fsxattr fsxattr;
struct cl_object *obj;
+ struct iattr *attr;
+ int flags;
/* only root could change project ID */
if (!cfs_capable(CFS_CAP_SYS_ADMIN))
if (copy_from_user(&fsxattr,
(const struct fsxattr __user *)arg,
sizeof(fsxattr)))
- GOTO(out_fsxattr1, rc = -EFAULT);
+ GOTO(out_fsxattr, rc = -EFAULT);
- op_data->op_attr_flags = fsxattr.fsx_xflags;
+ flags = ll_xflags_to_inode_flags(fsxattr.fsx_xflags);
+ op_data->op_attr_flags = ll_inode_to_ext_flags(flags);
+ if (fsxattr.fsx_xflags & FS_XFLAG_PROJINHERIT)
+ op_data->op_attr_flags |= LUSTRE_PROJINHERIT_FL;
op_data->op_projid = fsxattr.fsx_projid;
- op_data->op_attr.ia_valid |= (MDS_ATTR_PROJID | ATTR_ATTR_FLAG);
+ op_data->op_xvalid |= OP_XVALID_PROJID | OP_XVALID_FLAGS;
rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data, NULL,
0, &req);
ptlrpc_req_finished(req);
-
+ if (rc)
+ GOTO(out_fsxattr, rc);
+ ll_update_inode_flags(inode, op_data->op_attr_flags);
obj = ll_i2info(inode)->lli_clob;
- if (obj) {
- struct iattr *attr;
+ if (obj == NULL)
+ GOTO(out_fsxattr, rc);
- inode->i_flags = ll_ext_to_inode_flags(fsxattr.fsx_xflags);
- OBD_ALLOC_PTR(attr);
- if (attr == NULL)
- GOTO(out_fsxattr1, rc = -ENOMEM);
- attr->ia_valid = ATTR_ATTR_FLAG;
- rc = cl_setattr_ost(obj, attr, fsxattr.fsx_xflags);
+ OBD_ALLOC_PTR(attr);
+ if (attr == NULL)
+ GOTO(out_fsxattr, rc = -ENOMEM);
- OBD_FREE_PTR(attr);
- }
-out_fsxattr1:
+ rc = cl_setattr_ost(obj, attr, OP_XVALID_FLAGS,
+ fsxattr.fsx_xflags);
+ OBD_FREE_PTR(attr);
+out_fsxattr:
ll_finish_md_op_data(op_data);
RETURN(rc);
}
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), cmd=%x\n",
PFID(ll_inode2fid(inode)), inode, cmd);
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
- /* asm-ppc{,64} declares TCGETS, et. al. as type 't' not 'T' */
- if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
- RETURN(-ENOTTY);
+ /* asm-ppc{,64} declares TCGETS, et. al. as type 't' not 'T' */
+ if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
+ RETURN(-ENOTTY);
- switch(cmd) {
- case LL_IOC_GETFLAGS:
- /* Get the current value of the file flags */
+ switch (cmd) {
+ case LL_IOC_GETFLAGS:
+ /* Get the current value of the file flags */
return put_user(fd->fd_flags, (int __user *)arg);
case LL_IOC_SETFLAGS:
case LL_IOC_CLRFLAGS:
case LL_IOC_LOV_GETSTRIPE:
case LL_IOC_LOV_GETSTRIPE_NEW:
RETURN(ll_file_getstripe(inode, (void __user *)arg, 0));
- case FSFILT_IOC_GETFLAGS:
- case FSFILT_IOC_SETFLAGS:
- RETURN(ll_iocontrol(inode, file, cmd, arg));
- case FSFILT_IOC_GETVERSION_OLD:
- case FSFILT_IOC_GETVERSION:
+ case FS_IOC_GETFLAGS:
+ case FS_IOC_SETFLAGS:
+ RETURN(ll_iocontrol(inode, file, cmd, arg));
+ case FSFILT_IOC_GETVERSION:
+ case FS_IOC_GETVERSION:
RETURN(put_user(inode->i_generation, (int __user *)arg));
+ /* We need to special case any other ioctls we want to handle,
+ * to send them to the MDS/OST as appropriate and to properly
+ * network encode the arg field. */
+ case FS_IOC_SETVERSION:
+ RETURN(-ENOTSUPP);
+
case LL_IOC_GROUP_LOCK:
RETURN(ll_get_grouplock(inode, file, arg));
case LL_IOC_GROUP_UNLOCK:
case IOC_OBD_STATFS:
RETURN(ll_obd_statfs(inode, (void __user *)arg));
- /* We need to special case any other ioctls we want to handle,
- * to send them to the MDS/OST as appropriate and to properly
- * network encode the arg field.
- case FSFILT_IOC_SETVERSION_OLD:
- case FSFILT_IOC_SETVERSION:
- */
case LL_IOC_FLUSHCTX:
RETURN(ll_flush_ctx(inode));
case LL_IOC_PATH2FID: {
RETURN(rc);
}
-int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
- const char *name, int namelen)
+int ll_migrate(struct inode *parent, struct file *file, struct lmv_user_md *lum,
+ const char *name)
{
- struct dentry *dchild = NULL;
- struct inode *child_inode = NULL;
- struct md_op_data *op_data;
+ struct dentry *dchild = NULL;
+ struct inode *child_inode = NULL;
+ struct md_op_data *op_data;
struct ptlrpc_request *request = NULL;
struct obd_client_handle *och = NULL;
- struct qstr qstr;
- struct mdt_body *body;
- int rc;
- __u64 data_version = 0;
+ struct qstr qstr;
+ struct mdt_body *body;
+ __u64 data_version = 0;
+ size_t namelen = strlen(name);
+ int lumlen = lmv_user_md_size(lum->lum_stripe_count, lum->lum_magic);
+ int rc;
ENTRY;
- CDEBUG(D_VFSTRACE, "migrate %s under "DFID" to MDT%04x\n",
- name, PFID(ll_inode2fid(parent)), mdtidx);
+ CDEBUG(D_VFSTRACE, "migrate "DFID"/%s to MDT%04x stripe count %d\n",
+ PFID(ll_inode2fid(parent)), name,
+ lum->lum_stripe_offset, lum->lum_stripe_count);
- op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen,
- 0, LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
+ if (lum->lum_magic != cpu_to_le32(LMV_USER_MAGIC) &&
+ lum->lum_magic != cpu_to_le32(LMV_USER_MAGIC_SPECIFIC))
+ lustre_swab_lmv_user_md(lum);
/* Get child FID first */
qstr.hash = ll_full_name_hash(file_dentry(file), name, namelen);
qstr.name = name;
qstr.len = namelen;
dchild = d_lookup(file_dentry(file), &qstr);
- if (dchild != NULL) {
- if (dchild->d_inode != NULL)
+ if (dchild) {
+ if (dchild->d_inode)
child_inode = igrab(dchild->d_inode);
dput(dchild);
}
- if (child_inode == NULL) {
- rc = ll_get_fid_by_name(parent, name, namelen,
- &op_data->op_fid3, &child_inode);
- if (rc != 0)
- GOTO(out_free, rc);
+ if (!child_inode) {
+ rc = ll_get_fid_by_name(parent, name, namelen, NULL,
+ &child_inode);
+ if (rc)
+ RETURN(rc);
}
- if (child_inode == NULL)
- GOTO(out_free, rc = -EINVAL);
+ if (!child_inode)
+ RETURN(-ENOENT);
+
+ if (!(exp_connect_flags2(ll_i2sbi(parent)->ll_md_exp) &
+ OBD_CONNECT2_DIR_MIGRATE)) {
+ if (le32_to_cpu(lum->lum_stripe_count) > 1 ||
+ ll_i2info(child_inode)->lli_lsm_md) {
+ CERROR("%s: MDT doesn't support stripe directory "
+ "migration!\n",
+ ll_get_fsname(parent->i_sb, NULL, 0));
+ GOTO(out_iput, rc = -EOPNOTSUPP);
+ }
+ }
/*
* lfs migrate command needs to be blocked on the client
if (child_inode == parent->i_sb->s_root->d_inode)
GOTO(out_iput, rc = -EINVAL);
+ op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen,
+ child_inode->i_mode, LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ GOTO(out_iput, rc = PTR_ERR(op_data));
+
inode_lock(child_inode);
op_data->op_fid3 = *ll_inode2fid(child_inode);
if (!fid_is_sane(&op_data->op_fid3)) {
GOTO(out_unlock, rc = -EINVAL);
}
- rc = ll_get_mdt_idx_by_fid(ll_i2sbi(parent), &op_data->op_fid3);
- if (rc < 0)
- GOTO(out_unlock, rc);
+ op_data->op_cli_flags |= CLI_MIGRATE | CLI_SET_MEA;
+ op_data->op_data = lum;
+ op_data->op_data_size = lumlen;
- if (rc == mdtidx) {
- CDEBUG(D_INFO, "%s: "DFID" is already on MDT%04x\n", name,
- PFID(&op_data->op_fid3), mdtidx);
- GOTO(out_unlock, rc = 0);
- }
again:
if (S_ISREG(child_inode->i_mode)) {
och = ll_lease_open(child_inode, NULL, FMODE_WRITE, 0);
if (rc != 0)
GOTO(out_close, rc);
- op_data->op_handle = och->och_fh;
- op_data->op_data = och->och_mod;
+ op_data->op_open_handle = och->och_open_handle;
op_data->op_data_version = data_version;
op_data->op_lease_handle = och->och_lease_handle;
- op_data->op_bias |= MDS_RENAME_MIGRATE;
+ op_data->op_bias |= MDS_CLOSE_MIGRATE;
+
+ spin_lock(&och->och_mod->mod_open_req->rq_lock);
+ och->och_mod->mod_open_req->rq_replay = 0;
+ spin_unlock(&och->och_mod->mod_open_req->rq_lock);
}
- op_data->op_mds = mdtidx;
- op_data->op_cli_flags = CLI_MIGRATE;
- rc = md_rename(ll_i2sbi(parent)->ll_md_exp, op_data, name,
- namelen, name, namelen, &request);
+ rc = md_rename(ll_i2sbi(parent)->ll_md_exp, op_data, name, namelen,
+ name, namelen, &request);
if (rc == 0) {
LASSERT(request != NULL);
ll_update_times(request, parent);
/* If the server does release layout lock, then we cleanup
* the client och here, otherwise release it in out_close: */
- if (och != NULL &&
- body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED) {
+ if (och && body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED) {
obd_mod_put(och->och_mod);
md_clear_open_replay_data(ll_i2sbi(parent)->ll_md_exp,
och);
- och->och_fh.cookie = DEAD_HANDLE_MAGIC;
+ och->och_open_handle.cookie = DEAD_HANDLE_MAGIC;
OBD_FREE_PTR(och);
och = NULL;
}
goto again;
out_close:
- if (och != NULL) /* close the file */
+ if (och)
ll_lease_close(och, child_inode, NULL);
- if (rc == 0)
+ if (!rc)
clear_nlink(child_inode);
out_unlock:
inode_unlock(child_inode);
+ ll_finish_md_op_data(op_data);
out_iput:
iput(child_inode);
-out_free:
- ll_finish_md_op_data(op_data);
RETURN(rc);
}
#ifdef CONFIG_FS_POSIX_ACL
+/* Install (or remove, when @acl is NULL) a POSIX ACL on @inode.
+ * The ACL is now sent to the MDS as an xattr via md_setxattr() instead
+ * of the old __vfs_setxattr() path; the local ACL cache is updated on
+ * success and invalidated on failure.
+ */
int ll_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ptlrpc_request *req = NULL;
const char *name = NULL;
char *value = NULL;
- size_t size = 0;
+ size_t value_size = 0;
int rc = 0;
ENTRY;
switch (type) {
case ACL_TYPE_ACCESS:
- if (acl) {
- rc = posix_acl_update_mode(inode, &inode->i_mode, &acl);
- if (rc)
- GOTO(out, rc);
- }
name = XATTR_NAME_POSIX_ACL_ACCESS;
+ if (acl)
+ /* may also clear @acl when it is equivalent to the
+ * mode bits, turning this into a removal below */
+ rc = posix_acl_update_mode(inode, &inode->i_mode, &acl);
break;
+
case ACL_TYPE_DEFAULT:
name = XATTR_NAME_POSIX_ACL_DEFAULT;
+ /* default ACLs only make sense on directories */
+ if (!S_ISDIR(inode->i_mode))
+ rc = acl ? -EACCES : 0;
break;
+
default:
- GOTO(out, rc = -EINVAL);
+ rc = -EINVAL;
+ break;
}
+ /* NOTE(review): bare return after ENTRY skips the RETURN()/EXIT
+ * trace pairing used elsewhere in this function — confirm intended */
+ if (rc)
+ return rc;
if (acl) {
- size = posix_acl_xattr_size(acl->a_count);
- value = kmalloc(size, GFP_NOFS);
+ value_size = posix_acl_xattr_size(acl->a_count);
+ value = kmalloc(value_size, GFP_NOFS);
if (value == NULL)
GOTO(out, rc = -ENOMEM);
- rc = posix_acl_to_xattr(&init_user_ns, acl, value, size);
+ rc = posix_acl_to_xattr(&init_user_ns, acl, value, value_size);
if (rc < 0)
- GOTO(out_free, rc);
+ GOTO(out_value, rc);
}
- /* dentry is only used for *.lov attributes so it's safe to be NULL */
- rc = __vfs_setxattr(NULL, inode, name, value, size, XATTR_CREATE);
-out_free:
+ /* OBD_MD_FLXATTRRM asks the MDS to remove the xattr when no
+ * ACL value is supplied */
+ rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode),
+ value ? OBD_MD_FLXATTR : OBD_MD_FLXATTRRM,
+ name, value, value_size, 0, 0, &req);
+
+ ptlrpc_req_finished(req);
+out_value:
kfree(value);
out:
- if (!rc)
- set_cached_acl(inode, type, acl);
- else
+ if (rc)
forget_cached_acl(inode, type);
+ else
+ set_cached_acl(inode, type, acl);
RETURN(rc);
}
#endif /* CONFIG_FS_POSIX_ACL */
rc = ll_get_default_mdsize(sbi, &lmmsize);
if (rc == 0)
rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode),
- OBD_MD_FLXATTR, XATTR_NAME_LOV, NULL, 0,
- lmmsize, 0, &req);
+ OBD_MD_FLXATTR, XATTR_NAME_LOV, lmmsize, &req);
if (rc < 0)
RETURN(rc);