*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* lustre/llite/file.c
*
#include <linux/user_namespace.h>
#include <linux/uidgid.h>
#include <linux/falloc.h>
+#include <linux/ktime.h>
#include <uapi/linux/lustre/lustre_ioctl.h>
#include <uapi/linux/llcrypt.h>
op_data->op_xvalid |= OP_XVALID_CTIME_SET;
op_data->op_attr_blocks = inode->i_blocks;
op_data->op_attr_flags = ll_inode_to_ext_flags(inode->i_flags);
- if (ll_file_test_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT))
+ if (test_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags))
op_data->op_attr_flags |= LUSTRE_PROJINHERIT_FL;
op_data->op_open_handle = och->och_open_handle;
if (och->och_flags & FMODE_WRITE &&
- ll_file_test_and_clear_flag(ll_i2info(inode), LLIF_DATA_MODIFIED))
+ test_and_clear_bit(LLIF_DATA_MODIFIED, &ll_i2info(inode)->lli_flags))
/* For HSM: if inode data has been modified, pack it so that
* MDT can set data dirty flag in the archive. */
op_data->op_bias |= MDS_DATA_MODIFIED;
if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED))
ll_put_grouplock(inode, file, fd->fd_grouplock.lg_gid);
+ mutex_lock(&lli->lli_och_mutex);
if (fd->fd_lease_och != NULL) {
bool lease_broken;
+ struct obd_client_handle *lease_och;
+
+ lease_och = fd->fd_lease_och;
+ fd->fd_lease_och = NULL;
+ mutex_unlock(&lli->lli_och_mutex);
/* Usually the lease is not released when the
* application crashes, so we need to release it here. */
- rc = ll_lease_close(fd->fd_lease_och, inode, &lease_broken);
- CDEBUG(rc ? D_ERROR : D_INODE, "Clean up lease "DFID" %d/%d\n",
- PFID(&lli->lli_fid), rc, lease_broken);
+ rc = ll_lease_close(lease_och, inode, &lease_broken);
- fd->fd_lease_och = NULL;
+ mutex_lock(&lli->lli_och_mutex);
+
+ CDEBUG_LIMIT(rc ? D_ERROR : D_INODE,
+ "Clean up lease "DFID" %d/%d\n",
+ PFID(&lli->lli_fid), rc, lease_broken);
}
if (fd->fd_och != NULL) {
- rc = ll_close_inode_openhandle(inode, fd->fd_och, 0, NULL);
+ struct obd_client_handle *och;
+
+ och = fd->fd_och;
fd->fd_och = NULL;
+ mutex_unlock(&lli->lli_och_mutex);
+
+ rc = ll_close_inode_openhandle(inode, och, 0, NULL);
GOTO(out, rc);
}
/* Let's see if we have good enough OPEN lock on the file and if
we can skip talking to MDS */
- mutex_lock(&lli->lli_och_mutex);
if (fd->fd_omode & FMODE_WRITE) {
lockmode = LCK_CW;
LASSERT(lli->lli_open_fd_write_count);
if (S_ISDIR(inode->i_mode) && lli->lli_opendir_key == fd)
ll_deauthorize_statahead(inode, fd);
- if (inode->i_sb->s_root == file_dentry(file)) {
+ if (is_root_inode(inode)) {
file->private_data = NULL;
ll_file_data_put(fd);
GOTO(out, rc = 0);
lli->lli_async_rc = 0;
}
+ lli->lli_close_fd_time = ktime_get();
+
rc = ll_md_close(inode, file);
if (CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_DUMP_LOG, cfs_fail_val))
libcfs_debug_dumplog();
out:
- if (!rc && inode->i_sb->s_root != file_dentry(file))
+ if (!rc && !is_root_inode(inode))
ll_stats_ops_tally(sbi, LPROC_LL_RELEASE,
ktime_us_delta(ktime_get(), kstart));
RETURN(rc);
if (!llcrypt_has_encryption_key(inode))
CDEBUG(D_SEC, "no enc key for "DFID"\n",
PFID(ll_inode2fid(inode)));
- /* decrypt only if page is not empty */
- else if (memcmp(page_address(page),
- page_address(ZERO_PAGE(0)),
- PAGE_SIZE) != 0)
- rc = llcrypt_decrypt_pagecache_blocks(page,
- PAGE_SIZE,
- 0);
+ else {
+ unsigned int offs = 0;
+
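+			/* walk the page one LUSTRE_ENCRYPTION_UNIT_SIZE chunk
+			 * at a time, stopping at the first all-zero unit or
+			 * on a decryption error
+			 */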
+ while (offs < PAGE_SIZE) {
+ /* decrypt only if page is not empty */
+ if (memcmp(page_address(page) + offs,
+ page_address(ZERO_PAGE(0)),
+ LUSTRE_ENCRYPTION_UNIT_SIZE) == 0)
+ break;
+
+ rc = llcrypt_decrypt_pagecache_blocks(page,
+ LUSTRE_ENCRYPTION_UNIT_SIZE,
+ offs);
+ if (rc)
+ break;
+
+ offs += LUSTRE_ENCRYPTION_UNIT_SIZE;
+ }
+ }
}
unlock_page(page);
return rc;
}
-void ll_dom_finish_open(struct inode *inode, struct ptlrpc_request *req,
- struct lookup_intent *it)
+void ll_dom_finish_open(struct inode *inode, struct ptlrpc_request *req)
{
+ struct lu_env *env;
+ struct cl_io *io;
struct ll_inode_info *lli = ll_i2info(inode);
struct cl_object *obj = lli->lli_clob;
struct address_space *mapping = inode->i_mapping;
char *data;
unsigned long index, start;
struct niobuf_local lnb;
+ __u16 refcheck;
+ int rc;
ENTRY;
RETURN_EXIT;
}
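+	/* set up a trivial CIT_MISC cl_io so the pages filled from the
+	 * open reply can be attached to the cl page cache below
+	 */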
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN_EXIT;
+ io = vvp_env_thread_io(env);
+ io->ci_obj = obj;
+ io->ci_ignore_layout = 1;
+ rc = cl_io_init(env, io, CIT_MISC, obj);
+ if (rc)
+ GOTO(out_io, rc);
+
CDEBUG(D_INFO, "Get data along with open at %llu len %i, size %llu\n",
rnb->rnb_offset, rnb->rnb_len, body->mbo_dom_size);
LASSERT((lnb.lnb_file_offset & ~PAGE_MASK) == 0);
lnb.lnb_page_offset = 0;
do {
+ struct cl_page *page;
+
lnb.lnb_data = data + (index << PAGE_SHIFT);
lnb.lnb_len = rnb->rnb_len - (index << PAGE_SHIFT);
if (lnb.lnb_len > PAGE_SIZE)
PTR_ERR(vmpage));
break;
}
+ lock_page(vmpage);
+ if (vmpage->mapping == NULL) {
+ unlock_page(vmpage);
+ put_page(vmpage);
+ /* page was truncated */
+ break;
+ }
+ /* attach VM page to CL page cache */
+ page = cl_page_find(env, obj, vmpage->index, vmpage,
+ CPT_CACHEABLE);
+ if (IS_ERR(page)) {
+ ClearPageUptodate(vmpage);
+ unlock_page(vmpage);
+ put_page(vmpage);
+ break;
+ }
+ cl_page_export(env, page, 1);
+ cl_page_put(env, page);
+ unlock_page(vmpage);
put_page(vmpage);
index++;
} while (rnb->rnb_len > (index << PAGE_SHIFT));
+
+out_io:
+ cl_io_fini(env, io);
+ cl_env_put(env, &refcheck);
+
EXIT;
}
}
op_data = ll_prep_md_op_data(NULL, parent->d_inode, de->d_inode,
- name, len, 0, LUSTRE_OPC_ANY, NULL);
+ name, len, 0, LUSTRE_OPC_OPEN, NULL);
if (IS_ERR(op_data)) {
kfree(name);
RETURN(PTR_ERR(op_data));
op_data->op_data = lmm;
op_data->op_data_size = lmmsize;
+ OBD_FAIL_TIMEOUT(OBD_FAIL_LLITE_OPEN_DELAY, cfs_fail_val);
+
rc = md_intent_lock(sbi->ll_md_exp, op_data, itp, &req,
&ll_md_blocking_ast, 0);
kfree(name);
GOTO(out, rc);
}
- rc = ll_prep_inode(&de->d_inode, req, NULL, itp);
+ rc = ll_prep_inode(&de->d_inode, &req->rq_pill, NULL, itp);
if (!rc && itp->it_lock_mode) {
- struct lustre_handle handle = {.cookie = itp->it_lock_handle};
- struct ldlm_lock *lock;
- bool has_dom_bit = false;
+ __u64 bits = 0;
/* If we got a lock back and it has a LOOKUP bit set,
* make sure the dentry is marked as valid so we can find it.
* We don't need to care about actual hashing since other bits
* of kernel will deal with that later.
*/
- lock = ldlm_handle2lock(&handle);
- if (lock) {
- has_dom_bit = ldlm_has_dom(lock);
- if (lock->l_policy_data.l_inodebits.bits &
- MDS_INODELOCK_LOOKUP)
- d_lustre_revalidate(de);
-
- LDLM_LOCK_PUT(lock);
+ ll_set_lock_data(sbi->ll_md_exp, de->d_inode, itp, &bits);
+ if (bits & MDS_INODELOCK_LOOKUP) {
+ d_lustre_revalidate(de);
+ ll_update_dir_depth(parent->d_inode, de->d_inode);
}
- ll_set_lock_data(sbi->ll_md_exp, de->d_inode, itp, NULL);
- if (has_dom_bit)
- ll_dom_finish_open(de->d_inode, req, itp);
+
+ /* if DoM bit returned along with LAYOUT bit then there
+ * can be read-on-open data returned.
+ */
+ if (bits & MDS_INODELOCK_DOM && bits & MDS_INODELOCK_LAYOUT)
+ ll_dom_finish_open(de->d_inode, req);
}
out:
ptlrpc_req_finished(req);
ll_intent_drop_lock(itp);
- /* We did open by fid, but by the time we got to the server,
- * the object disappeared. If this is a create, we cannot really
- * tell the userspace that the file it was trying to create
- * does not exist. Instead let's return -ESTALE, and the VFS will
- * retry the create with LOOKUP_REVAL that we are going to catch
- * in ll_revalidate_dentry() and use lookup then.
+ /* We did open by fid, but by the time we got to the server, the object
+ * disappeared. This is possible if the object was unlinked, but it's
+ * also possible if the object was unlinked by a rename. In the case
+ * of an object renamed over our existing one, we can't fail this open.
+ * O_CREAT also goes through this path if we had an existing dentry,
+ * and it's obviously wrong to return ENOENT for O_CREAT.
+ *
+ * Instead let's return -ESTALE, and the VFS will retry the open with
+ * LOOKUP_REVAL, which we catch in ll_revalidate_dentry and fail to
+ * revalidate, causing a lookup. This causes extra lookups in the case
+ * where we had a dentry in cache but the file is being unlinked and we
+ * lose the race with unlink, but this should be very rare.
*/
- if (rc == -ENOENT && itp->it_op & IT_CREAT)
+ if (rc == -ENOENT)
rc = -ESTALE;
RETURN(rc);
file->private_data = fd;
ll_readahead_init(inode, &fd->fd_ras);
fd->fd_omode = it->it_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
-
- /* ll_cl_context initialize */
- rwlock_init(&fd->fd_lock);
- INIT_LIST_HEAD(&fd->fd_lccs);
+ /* turn off the kernel's read-ahead */
+ file->f_ra.ra_pages = 0;
RETURN(0);
}
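+/* Record open/close statistics for this inode: tally the time since the last
+ * close, and count consecutive opens, resetting the count once the previous
+ * close is further back than ll_oc_max_ms.
+ */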
+void ll_track_file_opens(struct inode *inode)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+
+ /* do not skew results with delays from never-opened inodes */
+ if (ktime_to_ns(lli->lli_close_fd_time))
+ ll_stats_ops_tally(sbi, LPROC_LL_INODE_OPCLTM,
+ ktime_us_delta(ktime_get(), lli->lli_close_fd_time));
+
+ if (ktime_after(ktime_get(),
+ ktime_add_ms(lli->lli_close_fd_time,
+ sbi->ll_oc_max_ms))) {
+ lli->lli_open_fd_count = 1;
+ lli->lli_close_fd_time = ns_to_ktime(0);
+ } else {
+ lli->lli_open_fd_count++;
+ }
+
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_INODE_OCOUNT,
+ lli->lli_open_fd_count);
+}
+
/* Open a file, and (for the very first open) create objects on the OSTs at
* this time. If opened with O_LOV_DELAY_CREATE, then we don't do the object
* creation or open until ll_lov_setstripe() ioctl is called.
if (S_ISDIR(inode->i_mode))
ll_authorize_statahead(inode, fd);
- if (inode->i_sb->s_root == file_dentry(file)) {
+ ll_track_file_opens(inode);
+ if (is_root_inode(inode)) {
file->private_data = fd;
RETURN(0);
}
LASSERT(*och_usecount == 0);
if (!it->it_disposition) {
struct dentry *dentry = file_dentry(file);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ll_dentry_data *ldd;
/* We cannot just request lock handle now, new ELC code
* handle to be returned from LOOKUP|OPEN request,
* for example if the target entry was a symlink.
*
- * Only fetch MDS_OPEN_LOCK if this is in NFS path,
- * marked by a bit set in ll_iget_for_nfs. Clear the
- * bit so that it's not confusing later callers.
+		 * In the NFS path we know there's pathological behavior,
+		 * so we always enable open lock caching when coming
+		 * from there. It's detected by setting a flag in
+		 * ll_iget_for_nfs.
*
- * NB; when ldd is NULL, it must have come via normal
- * lookup path only, since ll_iget_for_nfs always calls
- * ll_d_init().
+		 * After reaching a certain number of opens on this inode
+		 * we always ask for an open lock on it, to handle
+		 * bad userspace actors that open and close files
+		 * in a loop for absolutely no good reason.
*/
+
ldd = ll_d2d(dentry);
- if (ldd && ldd->lld_nfs_dentry) {
+ if (filename_is_volatile(dentry->d_name.name,
+ dentry->d_name.len,
+ NULL)) {
+			/* There really is nothing here, but this
+			 * makes it more readable, I think.
+			 * We do not want an open lock for volatile
+			 * files under any circumstances.
+ */
+ } else if (ldd && ldd->lld_nfs_dentry) {
+ /* NFS path. This also happens to catch
+ * open by fh files I guess
+ */
+ it->it_flags |= MDS_OPEN_LOCK;
+ /* clear the flag for future lookups */
ldd->lld_nfs_dentry = 0;
- if (!filename_is_volatile(dentry->d_name.name,
- dentry->d_name.len,
- NULL))
+ } else if (sbi->ll_oc_thrsh_count > 0) {
+ /* Take MDS_OPEN_LOCK with many opens */
+ if (lli->lli_open_fd_count >=
+ sbi->ll_oc_thrsh_count)
+ it->it_flags |= MDS_OPEN_LOCK;
+
+ /* If this is open after we just closed */
+ else if (ktime_before(ktime_get(),
+ ktime_add_ms(lli->lli_close_fd_time,
+ sbi->ll_oc_thrsh_ms)))
it->it_flags |= MDS_OPEN_LOCK;
}
mutex_unlock(&lli->lli_och_mutex);
- /* lockless for direct IO so that it can do IO in parallel */
- if (file->f_flags & O_DIRECT)
- fd->fd_flags |= LL_FILE_LOCKLESS_IO;
fd = NULL;
/* Must do this outside lli_och_mutex lock to prevent deadlock where
/* already get lease, handle lease lock */
ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
- if (it.it_lock_mode == 0 ||
- it.it_lock_bits != MDS_INODELOCK_OPEN) {
+ if (!it.it_lock_mode ||
+ !(it.it_lock_bits & MDS_INODELOCK_OPEN)) {
/* open lock must return for lease */
CERROR(DFID "lease granted but no open lock, %d/%llu.\n",
PFID(ll_inode2fid(inode)), it.it_lock_mode,
 * POSIX. Solving this problem would require sending an RPC to the MDT for
 * each read, which would hurt performance.
*/
- if (ll_file_test_and_clear_flag(lli, LLIF_UPDATE_ATIME) ||
+ if (test_and_clear_bit(LLIF_UPDATE_ATIME, &lli->lli_flags) ||
inode->i_atime.tv_sec < lli->lli_atime)
inode->i_atime.tv_sec = lli->lli_atime;
IS_SYNC(inode));
#ifdef HAVE_GENERIC_WRITE_SYNC_2ARGS
io->u.ci_wr.wr_sync |= !!(args &&
- args->via_io_subtype == IO_NORMAL &&
- args->u.normal.via_iocb->ki_flags & IOCB_DSYNC);
+ (args->u.normal.via_iocb->ki_flags &
+ IOCB_DSYNC));
#endif
}
struct vvp_io *vio = vvp_env_io(env);
struct inode *inode = file_inode(file);
struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ll_file_data *fd = file->private_data;
struct range_lock range;
+ bool range_locked = false;
struct cl_io *io;
ssize_t result = 0;
int rc = 0;
- unsigned int retried = 0, ignore_lockless = 0;
+ int rc2 = 0;
+ unsigned int retried = 0, dio_lock = 0;
bool is_aio = false;
+ bool is_parallel_dio = false;
+ struct cl_dio_aio *ci_aio = NULL;
+ size_t per_bytes;
+ bool partial_io = false;
+ size_t max_io_pages, max_cached_pages;
ENTRY;
file_dentry(file)->d_name.name,
iot == CIT_READ ? "read" : "write", *ppos, count);
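+	/* cap the pages handled in one pass at a quarter of the client's LRU
+	 * cache so large buffered IO cannot overwhelm max_cached_mb
+	 */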
+ max_io_pages = PTLRPC_MAX_BRW_PAGES * OBD_MAX_RIF_DEFAULT;
+ max_cached_pages = sbi->ll_cache->ccc_lru_max;
+ if (max_io_pages > (max_cached_pages >> 2))
+ max_io_pages = max_cached_pages >> 2;
+
+ io = vvp_env_thread_io(env);
+ if (file->f_flags & O_DIRECT) {
+ if (!is_sync_kiocb(args->u.normal.via_iocb))
+ is_aio = true;
+
+ /* the kernel does not support AIO on pipes, and parallel DIO
+ * uses part of the AIO path, so we must not do parallel dio
+ * to pipes
+ */
+ is_parallel_dio = !iov_iter_is_pipe(args->u.normal.via_iter) &&
+ !is_aio;
+
+ if (!ll_sbi_has_parallel_dio(sbi))
+ is_parallel_dio = false;
+
+ ci_aio = cl_aio_alloc(args->u.normal.via_iocb,
+ ll_i2info(inode)->lli_clob);
+ if (!ci_aio)
+ GOTO(out, rc = -ENOMEM);
+ }
+
restart:
+ /**
+	 * The IO block size needs to be aware of the cached page limit;
+	 * otherwise, if max_cached_mb is small but a large block IO is
+	 * issued, the IO could never finish and would block the whole client.
+ */
+ if (file->f_flags & O_DIRECT)
+ per_bytes = count;
+ else
+ per_bytes = min(max_io_pages << PAGE_SHIFT, count);
+ partial_io = per_bytes < count;
io = vvp_env_thread_io(env);
ll_io_init(io, file, iot, args);
- io->ci_ignore_lockless = ignore_lockless;
+ io->ci_aio = ci_aio;
+ io->ci_dio_lock = dio_lock;
io->ci_ndelay_tried = retried;
+ io->ci_parallel_dio = is_parallel_dio;
- if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) {
- bool range_locked = false;
-
+ if (cl_io_rw_init(env, io, iot, *ppos, per_bytes) == 0) {
if (file->f_flags & O_APPEND)
range_lock_init(&range, 0, LUSTRE_EOF);
else
- range_lock_init(&range, *ppos, *ppos + count - 1);
+ range_lock_init(&range, *ppos, *ppos + per_bytes - 1);
vio->vui_fd = file->private_data;
- vio->vui_io_subtype = args->via_io_subtype;
-
- switch (vio->vui_io_subtype) {
- case IO_NORMAL:
- vio->vui_iter = args->u.normal.via_iter;
- vio->vui_iocb = args->u.normal.via_iocb;
- if (file->f_flags & O_DIRECT) {
- if (!is_sync_kiocb(vio->vui_iocb))
- is_aio = true;
- io->ci_aio = cl_aio_alloc(vio->vui_iocb);
- if (!io->ci_aio)
- GOTO(out, rc = -ENOMEM);
- }
- /* Direct IO reads must also take range lock,
- * or multiple reads will try to work on the same pages
- * See LU-6227 for details. */
- if (((iot == CIT_WRITE) ||
- (iot == CIT_READ && (file->f_flags & O_DIRECT))) &&
- !(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- CDEBUG(D_VFSTRACE, "Range lock "RL_FMT"\n",
- RL_PARA(&range));
- rc = range_lock(&lli->lli_write_tree, &range);
- if (rc < 0)
- GOTO(out, rc);
+ vio->vui_iter = args->u.normal.via_iter;
+ vio->vui_iocb = args->u.normal.via_iocb;
+ /* Direct IO reads must also take range lock,
+		 * or multiple reads will try to work on the same pages.
+ * See LU-6227 for details.
+ */
+ if (((iot == CIT_WRITE) ||
+ (iot == CIT_READ && (file->f_flags & O_DIRECT))) &&
+ !(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+ CDEBUG(D_VFSTRACE, "Range lock "RL_FMT"\n",
+ RL_PARA(&range));
+ rc = range_lock(&lli->lli_write_tree, &range);
+ if (rc < 0)
+ GOTO(out, rc);
- range_locked = true;
- }
- break;
- case IO_SPLICE:
- vio->u.splice.vui_pipe = args->u.splice.via_pipe;
- vio->u.splice.vui_flags = args->u.splice.via_flags;
- break;
- default:
- CERROR("unknown IO subtype %u\n", vio->vui_io_subtype);
- LBUG();
+ range_locked = true;
}
- ll_cl_add(file, env, io, LCC_RW);
+ ll_cl_add(inode, env, io, LCC_RW);
rc = cl_io_loop(env, io);
- ll_cl_remove(file, env);
+ ll_cl_remove(inode, env);
- if (range_locked) {
+ if (range_locked && !is_parallel_dio) {
CDEBUG(D_VFSTRACE, "Range unlock "RL_FMT"\n",
RL_PARA(&range));
range_unlock(&lli->lli_write_tree, &range);
+ range_locked = false;
}
} else {
/* cl_io_rw_init() handled IO */
rc = io->ci_result;
}
+	/* NB: parallel DIO may be disabled during I/O submission;
+ * if that occurs, async RPCs are resolved before we get here, and this
+ * wait call completes immediately.
+ */
+ if (is_parallel_dio) {
+ struct cl_sync_io *anchor = &io->ci_aio->cda_sync;
+
+ /* for dio, EIOCBQUEUED is an implementation detail,
+ * and we don't return it to userspace
+ */
+ if (rc == -EIOCBQUEUED)
+ rc = 0;
+
+ rc2 = cl_sync_io_wait_recycle(env, anchor, 0, 0);
+ if (rc2 < 0)
+ rc = rc2;
+
+ if (range_locked) {
+ range_unlock(&lli->lli_write_tree, &range);
+ range_locked = false;
+ }
+ }
+
/*
	 * In order to move forward AIO, ci_nob was increased,
	 * but that doesn't mean the IO has finished, it just
	 * means the IO has been submitted; we always return
	 * EIOCBQUEUED to the caller, so we can only return the
	 * number of bytes in the non-AIO case.
*/
- if (io->ci_nob > 0 && !is_aio) {
- result += io->ci_nob;
- count -= io->ci_nob;
- *ppos = io->u.ci_wr.wr.crw_pos; /* for splice */
+ if (io->ci_nob > 0) {
+ if (!is_aio) {
+ if (rc2 == 0) {
+ result += io->ci_nob;
+ *ppos = io->u.ci_wr.wr.crw_pos; /* for splice */
+ } else if (rc2) {
+ result = 0;
+ }
+ }
+ count -= io->ci_nob;
/* prepare IO restart */
- if (count > 0 && args->via_io_subtype == IO_NORMAL)
+ if (count > 0)
args->u.normal.via_iter = vio->vui_iter;
- }
-out:
- if (io->ci_aio) {
- /**
- * Drop one extra reference so that end_io() could be
- * called for this IO context, we could call it after
- * we make sure all AIO requests have been proceed.
- */
- cl_sync_io_note(env, &io->ci_aio->cda_sync,
- rc == -EIOCBQUEUED ? 0 : rc);
- if (!is_aio) {
- cl_aio_free(io->ci_aio);
- io->ci_aio = NULL;
+
+ if (partial_io) {
+ /**
+			 * Reexpand the iov count because it was zeroed
+			 * after the IO finished.
+ */
+ iov_iter_reexpand(vio->vui_iter, count);
+ if (per_bytes == io->ci_nob)
+ io->ci_need_restart = 1;
}
}
+out:
cl_io_fini(env, io);
CDEBUG(D_VFSTRACE,
*ppos, count, result, rc);
/* preserve the tried count for FLR */
retried = io->ci_ndelay_tried;
- ignore_lockless = io->ci_ignore_lockless;
+ dio_lock = io->ci_dio_lock;
goto restart;
}
+ if (io->ci_aio) {
+ /*
+ * VFS will call aio_complete() if no -EIOCBQUEUED
+ * is returned for AIO, so we can not call aio_complete()
+ * in our end_io().
+ */
+ if (rc != -EIOCBQUEUED)
+ io->ci_aio->cda_no_aio_complete = 1;
+ /**
+		 * Drop one extra reference so that end_io() can be
+		 * called for this IO context; we can call it after
+		 * we make sure all AIO requests have been processed.
+ */
+ cl_sync_io_note(env, &io->ci_aio->cda_sync,
+ rc == -EIOCBQUEUED ? 0 : rc);
+ if (!is_aio) {
+ cl_aio_free(env, io->ci_aio);
+ io->ci_aio = NULL;
+ }
+ }
+
if (iot == CIT_READ) {
if (result > 0)
ll_stats_ops_tally(ll_i2sbi(inode),
if (IS_ERR(env))
return PTR_ERR(env);
- args = ll_env_args(env, IO_NORMAL);
+ args = ll_env_args(env);
args->u.normal.via_iter = to;
args->u.normal.via_iocb = iocb;
if (result > 0) {
ll_heat_add(inode, CIT_WRITE, result);
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_WRITE_BYTES,
- result);
- ll_file_set_flag(ll_i2info(inode), LLIF_DATA_MODIFIED);
+ set_bit(LLIF_DATA_MODIFIED, &ll_i2info(inode)->lli_flags);
}
CDEBUG(D_VFSTRACE, "result: %zu, original count %zu\n", result, count);
if (IS_ERR(env))
return PTR_ERR(env);
- args = ll_env_args(env, IO_NORMAL);
+ args = ll_env_args(env);
args->u.normal.via_iter = from;
args->u.normal.via_iocb = iocb;
}
#endif /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
-/*
- * Send file content (through pagecache) somewhere with helper
- */
-static ssize_t ll_file_splice_read(struct file *in_file, loff_t *ppos,
- struct pipe_inode_info *pipe, size_t count,
- unsigned int flags)
-{
- struct lu_env *env;
- struct vvp_io_args *args;
- ssize_t result;
- __u16 refcheck;
- bool cached;
-
- ENTRY;
-
- result = pcc_file_splice_read(in_file, ppos, pipe,
- count, flags, &cached);
- if (cached)
- RETURN(result);
-
- ll_ras_enter(in_file, *ppos, count);
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- RETURN(PTR_ERR(env));
-
- args = ll_env_args(env, IO_SPLICE);
- args->u.splice.via_pipe = pipe;
- args->u.splice.via_flags = flags;
-
- result = ll_file_io_generic(env, args, in_file, CIT_READ, ppos, count);
- cl_env_put(env, &refcheck);
-
- if (result > 0)
- ll_rw_stats_tally(ll_i2sbi(file_inode(in_file)), current->pid,
- in_file->private_data, *ppos, result,
- READ);
- RETURN(result);
-}
-
int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
__u64 flags, struct lov_user_md *lum, int lum_size)
{
struct lov_mds_md **lmmp, int *lmm_size,
struct ptlrpc_request **request)
{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct mdt_body *body;
- struct lov_mds_md *lmm = NULL;
- struct ptlrpc_request *req = NULL;
- struct md_op_data *op_data;
- int rc, lmmsize;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct mdt_body *body;
+ struct lov_mds_md *lmm = NULL;
+ struct ptlrpc_request *req = NULL;
+ struct md_op_data *op_data;
+ int rc, lmmsize;
+
+ ENTRY;
rc = ll_get_default_mdsize(sbi, &lmmsize);
if (rc)
RETURN(rc);
- op_data = ll_prep_md_op_data(NULL, inode, NULL, filename,
- strlen(filename), lmmsize,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, filename,
+ strlen(filename), lmmsize,
+ LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ RETURN(PTR_ERR(op_data));
- op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
- rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
- ll_finish_md_op_data(op_data);
- if (rc < 0) {
- CDEBUG(D_INFO, "md_getattr_name failed "
- "on %s: rc %d\n", filename, rc);
- GOTO(out, rc);
- }
+ op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
+ rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
+ ll_finish_md_op_data(op_data);
+ if (rc < 0) {
+ CDEBUG(D_INFO, "md_getattr_name failed on %s: rc %d\n",
+ filename, rc);
+ GOTO(out, rc);
+ }
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- LASSERT(body != NULL); /* checked by mdc_getattr_name */
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ LASSERT(body != NULL); /* checked by mdc_getattr_name */
lmmsize = body->mbo_eadatasize;
if (!(body->mbo_valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
- lmmsize == 0) {
- GOTO(out, rc = -ENODATA);
- }
+ lmmsize == 0)
+ GOTO(out, rc = -ENODATA);
- lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize);
- LASSERT(lmm != NULL);
+ lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize);
+ LASSERT(lmm != NULL);
if (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V1) &&
lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3) &&
/*
* This is coming from the MDS, so is probably in
- * little endian. We convert it to host endian before
+ * little endian. We convert it to host endian before
* passing it to userspace.
*/
- if ((lmm->lmm_magic & __swab32(LOV_MAGIC_MAGIC)) ==
- __swab32(LOV_MAGIC_MAGIC)) {
+ if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) {
int stripe_count = 0;
if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1) ||
if (le32_to_cpu(lmm->lmm_pattern) &
LOV_PATTERN_F_RELEASED)
stripe_count = 0;
- }
-
- lustre_swab_lov_user_md((struct lov_user_md *)lmm, 0);
+ lustre_swab_lov_user_md((struct lov_user_md *)lmm, 0);
- /* if function called for directory - we should
- * avoid swab not existent lsm objects */
- if (lmm->lmm_magic == LOV_MAGIC_V1 && S_ISREG(body->mbo_mode))
- lustre_swab_lov_user_md_objects(
+			/* if the function is called for a directory,
+			 * avoid swabbing non-existent lsm objects
+ */
+ if (lmm->lmm_magic == LOV_MAGIC_V1 &&
+ S_ISREG(body->mbo_mode))
+ lustre_swab_lov_user_md_objects(
((struct lov_user_md_v1 *)lmm)->lmm_objects,
stripe_count);
- else if (lmm->lmm_magic == LOV_MAGIC_V3 &&
- S_ISREG(body->mbo_mode))
- lustre_swab_lov_user_md_objects(
+ else if (lmm->lmm_magic == LOV_MAGIC_V3 &&
+ S_ISREG(body->mbo_mode))
+ lustre_swab_lov_user_md_objects(
((struct lov_user_md_v3 *)lmm)->lmm_objects,
stripe_count);
+ } else if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_COMP_V1)) {
+ lustre_swab_lov_comp_md_v1(
+ (struct lov_comp_md_v1 *)lmm);
+ }
}
+ if (lmm->lmm_magic == LOV_MAGIC_COMP_V1) {
+ struct lov_comp_md_v1 *comp_v1 = NULL;
+ struct lov_comp_md_entry_v1 *ent;
+ struct lov_user_md_v1 *v1;
+ __u32 off;
+ int i = 0;
+
+ comp_v1 = (struct lov_comp_md_v1 *)lmm;
+ /* Dump the striping information */
+ for (; i < comp_v1->lcm_entry_count; i++) {
+ ent = &comp_v1->lcm_entries[i];
+ off = ent->lcme_offset;
+ v1 = (struct lov_user_md_v1 *)((char *)lmm + off);
+ CDEBUG(D_INFO,
+ "comp[%d]: stripe_count=%u, stripe_size=%u\n",
+ i, v1->lmm_stripe_count, v1->lmm_stripe_size);
+ }
+
+ /**
+ * Return valid stripe_count and stripe_size instead of 0 for
+ * DoM files to avoid divide-by-zero for older userspace that
+ * calls this ioctl, e.g. lustre ADIO driver.
+ */
+ if (lmm->lmm_stripe_count == 0)
+ lmm->lmm_stripe_count = 1;
+ if (lmm->lmm_stripe_size == 0) {
+ /* Since the first component of the file data is placed
+ * on the MDT for faster access, the stripe_size of the
+			 * second one is the one seen by applications which are
+			 * doing large IOs.
+ */
+ if (lmm->lmm_pattern == LOV_PATTERN_MDT)
+ i = comp_v1->lcm_entry_count > 1 ? 1 : 0;
+ else
+ i = comp_v1->lcm_entry_count > 1 ?
+ comp_v1->lcm_entry_count - 1 : 0;
+ ent = &comp_v1->lcm_entries[i];
+ off = ent->lcme_offset;
+ v1 = (struct lov_user_md_v1 *)((char *)lmm + off);
+ lmm->lmm_stripe_size = v1->lmm_stripe_size;
+ }
+ }
out:
*lmmp = lmm;
*lmm_size = lmmsize;
*request = req;
- return rc;
+ RETURN(rc);
}
static int ll_lov_setea(struct inode *inode, struct file *file,
int rc;
ENTRY;
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (!capable(CAP_SYS_ADMIN))
RETURN(-EPERM);
OBD_ALLOC_LARGE(lump, lum_size);
GOTO(out, rc = PTR_ERR(env));
rc = cl_object_layout_get(env, obj, &cl);
- if (!rc && cl.cl_is_composite)
+ if (rc >= 0 && cl.cl_is_composite)
rc = ll_layout_write_intent(inode, LAYOUT_INTENT_WRITE,
&ext);
cl_env_put(env, &refcheck);
- if (rc)
+ if (rc < 0)
GOTO(out, rc);
}
*/
int ll_release_openhandle(struct dentry *dentry, struct lookup_intent *it)
{
- struct inode *inode = dentry->d_inode;
- struct obd_client_handle *och;
- int rc;
- ENTRY;
+ struct inode *inode = dentry->d_inode;
+ struct obd_client_handle *och;
+ int rc;
+ ENTRY;
- LASSERT(inode);
+ LASSERT(inode);
- /* Root ? Do nothing. */
- if (dentry->d_inode->i_sb->s_root == dentry)
- RETURN(0);
+ /* Root ? Do nothing. */
+ if (is_root_inode(inode))
+ RETURN(0);
- /* No open handle to close? Move away */
- if (!it_disposition(it, DISP_OPEN_OPEN))
- RETURN(0);
+ /* No open handle to close? Move away */
+ if (!it_disposition(it, DISP_OPEN_OPEN))
+ RETURN(0);
- LASSERT(it_open_error(DISP_OPEN_OPEN, it) == 0);
+ LASSERT(it_open_error(DISP_OPEN_OPEN, it) == 0);
- OBD_ALLOC(och, sizeof(*och));
- if (!och)
- GOTO(out, rc = -ENOMEM);
+ OBD_ALLOC(och, sizeof(*och));
+ if (!och)
+ GOTO(out, rc = -ENOMEM);
rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
if (rc)
ENTRY;
- if (!cfs_capable(CFS_CAP_DAC_READ_SEARCH) &&
- !(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
+ if (!capable(CAP_DAC_READ_SEARCH) &&
+ !test_bit(LL_SBI_USER_FID2PATH, ll_i2sbi(inode)->ll_flags))
RETURN(-EPERM);
/* Only need to get the buflen */
/* Non-root users are forbidden to set or clear flags which are
* NOT defined in HSM_USER_MASK. */
if (((hss->hss_setmask | hss->hss_clearmask) & ~HSM_USER_MASK) &&
- !cfs_capable(CFS_CAP_SYS_ADMIN))
+ !capable(CAP_SYS_ADMIN))
RETURN(-EPERM);
if (!exp_connect_archive_id_array(exp)) {
descr->cld_mode = cl_mode;
/* CEF_MUST is used because we do not want to convert a
* lockahead request to a lockless lock */
- descr->cld_enq_flags = CEF_MUST | CEF_LOCK_NO_EXPAND |
- CEF_NONBLOCK;
+ descr->cld_enq_flags = CEF_MUST | CEF_LOCK_NO_EXPAND;
if (ladvise->lla_peradvice_flags & LF_ASYNC)
descr->cld_enq_flags |= CEF_SPECULATIVE;
RETURN(-EFAULT);
fsxattr.fsx_xflags = ll_inode_flags_to_xflags(inode->i_flags);
- if (ll_file_test_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT))
+ if (test_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags))
fsxattr.fsx_xflags |= FS_XFLAG_PROJINHERIT;
fsxattr.fsx_projid = ll_i2info(inode)->lli_projid;
if (copy_to_user((struct fsxattr __user *)arg,
RETURN(0);
}
-int ll_ioctl_check_project(struct inode *inode, struct fsxattr *fa)
+int ll_ioctl_check_project(struct inode *inode, __u32 xflags,
+ __u32 projid)
{
/*
* Project Quota ID state is only allowed to change from within the init
* namespace. Enforce that restriction only if we are trying to change
* the quota ID state. Everything else is allowed in user namespaces.
*/
- if (current_user_ns() == &init_user_ns)
+ if (current_user_ns() == &init_user_ns) {
+ /*
+		 * Caller is allowed to change the project ID. If it is being
+ * changed, make sure that the new value is valid.
+ */
+ if (ll_i2info(inode)->lli_projid != projid &&
+ !projid_valid(make_kprojid(&init_user_ns, projid)))
+ return -EINVAL;
+
return 0;
+ }
- if (ll_i2info(inode)->lli_projid != fa->fsx_projid)
+ if (ll_i2info(inode)->lli_projid != projid)
return -EINVAL;
- if (ll_file_test_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT)) {
- if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT))
+ if (test_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags)) {
+ if (!(xflags & FS_XFLAG_PROJINHERIT))
return -EINVAL;
} else {
- if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
+ if (xflags & FS_XFLAG_PROJINHERIT)
return -EINVAL;
}
return 0;
}
-int ll_ioctl_fssetxattr(struct inode *inode, unsigned int cmd,
- unsigned long arg)
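+/* Apply a project ID and project xflags to an inode: validate the request,
+ * send a setattr RPC to the MDT, and update the OST objects only when xflags
+ * other than the project inherit flag are set.
+ */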
+static int ll_set_project(struct inode *inode, __u32 xflags, __u32 projid)
{
-
struct md_op_data *op_data;
struct ptlrpc_request *req = NULL;
- int rc = 0;
- struct fsxattr fsxattr;
struct cl_object *obj;
- struct iattr *attr;
- int flags;
-
- if (copy_from_user(&fsxattr,
- (const struct fsxattr __user *)arg,
- sizeof(fsxattr)))
- RETURN(-EFAULT);
+ unsigned int inode_flags;
+ int rc = 0;
- rc = ll_ioctl_check_project(inode, &fsxattr);
+ rc = ll_ioctl_check_project(inode, xflags, projid);
if (rc)
RETURN(rc);
if (IS_ERR(op_data))
RETURN(PTR_ERR(op_data));
- flags = ll_xflags_to_inode_flags(fsxattr.fsx_xflags);
- op_data->op_attr_flags = ll_inode_to_ext_flags(flags);
- if (fsxattr.fsx_xflags & FS_XFLAG_PROJINHERIT)
+ inode_flags = ll_xflags_to_inode_flags(xflags);
+ op_data->op_attr_flags = ll_inode_to_ext_flags(inode_flags);
+ if (xflags & FS_XFLAG_PROJINHERIT)
op_data->op_attr_flags |= LUSTRE_PROJINHERIT_FL;
- op_data->op_projid = fsxattr.fsx_projid;
+ op_data->op_projid = projid;
op_data->op_xvalid |= OP_XVALID_PROJID | OP_XVALID_FLAGS;
- rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data, NULL,
- 0, &req);
+ rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data, NULL, 0, &req);
ptlrpc_req_finished(req);
if (rc)
GOTO(out_fsxattr, rc);
ll_update_inode_flags(inode, op_data->op_attr_flags);
- obj = ll_i2info(inode)->lli_clob;
- if (obj == NULL)
- GOTO(out_fsxattr, rc);
- /* Avoiding OST RPC if this is only project ioctl */
- if (fsxattr.fsx_xflags == 0 ||
- fsxattr.fsx_xflags == FS_XFLAG_PROJINHERIT)
+	/* Avoid an OST RPC if this ioctl is only setting the project inherit flag */
+ if (xflags == 0 || xflags == FS_XFLAG_PROJINHERIT)
GOTO(out_fsxattr, rc);
- OBD_ALLOC_PTR(attr);
- if (attr == NULL)
- GOTO(out_fsxattr, rc = -ENOMEM);
+ obj = ll_i2info(inode)->lli_clob;
+ if (obj) {
+ struct iattr attr = { 0 };
+
+ rc = cl_setattr_ost(obj, &attr, OP_XVALID_FLAGS, xflags);
+ }
- rc = cl_setattr_ost(obj, attr, OP_XVALID_FLAGS,
- fsxattr.fsx_xflags);
- OBD_FREE_PTR(attr);
out_fsxattr:
ll_finish_md_op_data(op_data);
RETURN(rc);
}
+int ll_ioctl_fssetxattr(struct inode *inode, unsigned int cmd,
+ unsigned long arg)
+{
+ struct fsxattr fsxattr;
+
+ ENTRY;
+
+ if (copy_from_user(&fsxattr,
+ (const struct fsxattr __user *)arg,
+ sizeof(fsxattr)))
+ RETURN(-EFAULT);
+
+ RETURN(ll_set_project(inode, fsxattr.fsx_xflags,
+ fsxattr.fsx_projid));
+}
+
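+/* Handle the LL_IOC_PROJECT ioctl: copy a struct lu_project from userspace,
+ * optionally resolve a named child of this directory, then get or set the
+ * project ID and xflags on the target inode.
+ */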
+int ll_ioctl_project(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct lu_project lu_project;
+ struct dentry *dentry = file_dentry(file);
+ struct inode *inode = file_inode(file);
+ struct dentry *child_dentry = NULL;
+ int rc = 0, name_len;
+
+ if (copy_from_user(&lu_project,
+ (const struct lu_project __user *)arg,
+ sizeof(lu_project)))
+ RETURN(-EFAULT);
+
+ /* apply child dentry if name is valid */
+ name_len = strnlen(lu_project.project_name, NAME_MAX);
+ if (name_len > 0 && name_len <= NAME_MAX) {
+ inode_lock(inode);
+ child_dentry = lookup_one_len(lu_project.project_name,
+ dentry, name_len);
+ inode_unlock(inode);
+ if (IS_ERR(child_dentry)) {
+ rc = PTR_ERR(child_dentry);
+ goto out;
+ }
+ inode = child_dentry->d_inode;
+ if (!inode) {
+ rc = -ENOENT;
+ goto out;
+ }
+ } else if (name_len > NAME_MAX) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ switch (lu_project.project_type) {
+ case LU_PROJECT_SET:
+ rc = ll_set_project(inode, lu_project.project_xflags,
+ lu_project.project_id);
+ break;
+ case LU_PROJECT_GET:
+ lu_project.project_xflags =
+ ll_inode_flags_to_xflags(inode->i_flags);
+ if (test_bit(LLIF_PROJECT_INHERIT,
+ &ll_i2info(inode)->lli_flags))
+ lu_project.project_xflags |= FS_XFLAG_PROJINHERIT;
+ lu_project.project_id = ll_i2info(inode)->lli_projid;
+ if (copy_to_user((struct lu_project __user *)arg,
+ &lu_project, sizeof(lu_project))) {
+ rc = -EFAULT;
+ goto out;
+ }
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+out:
+ if (!IS_ERR_OR_NULL(child_dentry))
+ dput(child_dentry);
+ RETURN(rc);
+}
+
static long ll_file_unlock_lease(struct file *file, struct ll_ioc_lease *ioc,
unsigned long arg)
{
if (!layout_file)
GOTO(out_lease_close, rc = -EBADF);
+	/* if layout_file == file, it means the mirror is to be destroyed */
sp.sp_inode = file_inode(layout_file);
sp.sp_mirror_id = (__u16)mirror_id;
data = &sp;
if (ioc->lil_count != 1)
RETURN(-EINVAL);
+ if (IS_ENCRYPTED(inode))
+ RETURN(-EOPNOTSUPP);
+
arg += sizeof(*ioc);
if (copy_from_user(¶m.pa_archive_id, (void __user *)arg,
sizeof(__u32)))
case LL_LEASE_LAYOUT_SPLIT:
if (layout_file)
fput(layout_file);
+
+ ll_layout_refresh(inode, &fd->fd_layout_version);
break;
case LL_LEASE_PCC_ATTACH:
if (!rc)
RETURN(0);
}
+ case OBD_IOC_GETNAME_OLD:
+ /* fall through */
case OBD_IOC_GETDTNAME:
+ /* fall through */
case OBD_IOC_GETMDNAME:
RETURN(ll_get_obd_name(inode, cmd, arg));
case LL_IOC_HSM_STATE_GET: {
RETURN(rc);
}
case LL_IOC_HSM_ACTION: {
- struct md_op_data *op_data;
- struct hsm_current_action *hca;
- int rc;
+ struct md_op_data *op_data;
+ struct hsm_current_action *hca;
+ const char *action;
+ int rc;
OBD_ALLOC_PTR(hca);
if (hca == NULL)
rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
op_data, NULL);
+ if (rc < 0)
+ GOTO(skip_copy, rc);
+
+		/* The hsm_current_action retrieved from the server could
+		 * contain corrupt information. If the data is incorrect,
+		 * collect debug information. We still send the data to
+		 * userland to handle, even if it is incorrect.
+ */
+ action = hsm_user_action2name(hca->hca_action);
+ if (strcmp(action, "UNKNOWN") == 0 ||
+ hca->hca_state > HPS_DONE) {
+ CDEBUG(D_HSM,
+ "HSM current state %s action %s, offset = %llu, length %llu\n",
+ hsm_progress_state2name(hca->hca_state), action,
+ hca->hca_location.offset, hca->hca_location.length);
+ }
if (copy_to_user((char __user *)arg, hca, sizeof(*hca)))
rc = -EFAULT;
-
+skip_copy:
ll_finish_md_op_data(op_data);
OBD_FREE_PTR(hca);
RETURN(rc);
RETURN(ll_ioctl_fsgetxattr(inode, cmd, arg));
case FS_IOC_FSSETXATTR:
RETURN(ll_ioctl_fssetxattr(inode, cmd, arg));
+ case LL_IOC_PROJECT:
+ RETURN(ll_ioctl_project(file, cmd, arg));
case BLKSSZGET:
RETURN(put_user(PAGE_SIZE, (int __user *)arg));
case LL_IOC_HEAT_GET: {
return -EOPNOTSUPP;
return llcrypt_ioctl_get_key_status(file, (void __user *)arg);
#endif
+
+ case LL_IOC_UNLOCK_FOREIGN: {
+ struct dentry *dentry = file_dentry(file);
+
+ /* if not a foreign symlink do nothing */
+ if (ll_foreign_is_removable(dentry, true)) {
+ CDEBUG(D_INFO,
+ "prevent unlink of non-foreign file ("DFID")\n",
+ PFID(ll_inode2fid(inode)));
+ RETURN(-EOPNOTSUPP);
+ }
+ RETURN(0);
+ }
+
default:
RETURN(obd_iocontrol(cmd, ll_i2dtexp(inode), 0, NULL,
(void __user *)arg));
}
}
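+/* Implement SEEK_HOLE/SEEK_DATA through the cl_io layer: issue a CIT_LSEEK
+ * request and return its result, retrying while the IO asks to be restarted.
+ */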
+loff_t ll_lseek(struct file *file, loff_t offset, int whence)
+{
+ struct inode *inode = file_inode(file);
+ struct lu_env *env;
+ struct cl_io *io;
+ struct cl_lseek_io *lsio;
+ __u16 refcheck;
+ int rc;
+ loff_t retval;
+
+ ENTRY;
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
+
+ io = vvp_env_thread_io(env);
+ io->ci_obj = ll_i2info(inode)->lli_clob;
+ ll_io_set_mirror(io, file);
+
+ lsio = &io->u.ci_lseek;
+ lsio->ls_start = offset;
+ lsio->ls_whence = whence;
+ lsio->ls_result = -ENXIO;
+
+ do {
+ rc = cl_io_init(env, io, CIT_LSEEK, io->ci_obj);
+ if (!rc) {
+ struct vvp_io *vio = vvp_env_io(env);
+
+ vio->vui_fd = file->private_data;
+ rc = cl_io_loop(env, io);
+ } else {
+ rc = io->ci_result;
+ }
+ retval = rc ? : lsio->ls_result;
+ cl_io_fini(env, io);
+ } while (unlikely(io->ci_need_restart));
+
+ cl_env_put(env, &refcheck);
+
+ RETURN(retval);
+}
+
static loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
{
struct inode *inode = file_inode(file);
- loff_t retval, eof = 0;
+ loff_t retval = offset, eof = 0;
ktime_t kstart = ktime_get();
ENTRY;
- retval = offset + ((origin == SEEK_END) ? i_size_read(inode) :
- (origin == SEEK_CUR) ? file->f_pos : 0);
+
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), to=%llu=%#llx(%d)\n",
PFID(ll_inode2fid(inode)), inode, retval, retval,
origin);
- if (origin == SEEK_END || origin == SEEK_HOLE || origin == SEEK_DATA) {
+ if (origin == SEEK_END) {
retval = ll_glimpse_size(inode);
if (retval != 0)
RETURN(retval);
eof = i_size_read(inode);
}
- retval = generic_file_llseek_size(file, offset, origin,
- ll_file_maxbytes(inode), eof);
+ if (origin == SEEK_HOLE || origin == SEEK_DATA) {
+ if (offset < 0)
+ return -ENXIO;
+
+ /* flush local cache first if any */
+ cl_sync_file_range(inode, offset, OBD_OBJECT_EOF,
+ CL_FSYNC_LOCAL, 0);
+
+ retval = ll_lseek(file, offset, origin);
+ if (retval < 0)
+ return retval;
+ retval = vfs_setpos(file, retval, ll_file_maxbytes(inode));
+ } else {
+ retval = generic_file_llseek_size(file, offset, origin,
+ ll_file_maxbytes(inode), eof);
+ }
if (retval >= 0)
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LLSEEK,
ktime_us_delta(ktime_get(), kstart));
/* fsync's caller has already called _fdata{sync,write}, we want
* that IO to finish before calling the osc and mdc sync methods */
rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
- inode_lock(inode);
/* catch async errors that were recorded back when async writeback
* failed for pages in this mapping. */
fd->fd_write_failed = false;
}
- inode_unlock(inode);
-
if (!rc)
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC,
ktime_us_delta(ktime_get(), kstart));
int namelen, struct lu_fid *fid,
struct inode **inode)
{
- struct md_op_data *op_data = NULL;
- struct mdt_body *body;
- struct ptlrpc_request *req;
- int rc;
+ struct md_op_data *op_data = NULL;
+ struct mdt_body *body;
+ struct ptlrpc_request *req;
+ int rc;
ENTRY;
op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen, 0,
*fid = body->mbo_fid1;
if (inode != NULL)
- rc = ll_prep_inode(inode, req, parent->i_sb, NULL);
+ rc = ll_prep_inode(inode, &req->rq_pill, parent->i_sb, NULL);
out_req:
ptlrpc_req_finished(req);
RETURN(rc);
* by checking the migrate FID against the FID of the
* filesystem root.
*/
- if (child_inode == parent->i_sb->s_root->d_inode)
+ if (is_root_inode(child_inode))
GOTO(out_iput, rc = -EINVAL);
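+	/* an encrypted file can only be migrated if its encryption key is
+	 * available and it uses a policy with filename encryption
+	 */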
+ if (IS_ENCRYPTED(child_inode)) {
+ rc = llcrypt_get_encryption_info(child_inode);
+ if (rc)
+ GOTO(out_iput, rc);
+ if (!llcrypt_has_encryption_key(child_inode)) {
+ CDEBUG(D_SEC, "no enc key for "DFID"\n",
+ PFID(ll_inode2fid(child_inode)));
+ GOTO(out_iput, rc = -ENOKEY);
+ }
+ if (unlikely(!llcrypt_policy_has_filename_enc(child_inode))) {
+ CDEBUG(D_SEC,
+ "cannot migrate old format encrypted "DFID", please move to new enc dir first\n",
+ PFID(ll_inode2fid(child_inode)));
+ GOTO(out_iput, rc = -EUCLEAN);
+ }
+ }
+
op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen,
- child_inode->i_mode, LUSTRE_OPC_ANY, NULL);
+ child_inode->i_mode, LUSTRE_OPC_MIGR,
+ NULL);
if (IS_ERR(op_data))
GOTO(out_iput, rc = PTR_ERR(op_data));
spin_unlock(&och->och_mod->mod_open_req->rq_lock);
}
- rc = md_rename(ll_i2sbi(parent)->ll_md_exp, op_data, name, namelen,
- name, namelen, &request);
+ rc = md_rename(ll_i2sbi(parent)->ll_md_exp, op_data,
+ op_data->op_name, op_data->op_namelen,
+ op_data->op_name, op_data->op_namelen, &request);
if (rc == 0) {
LASSERT(request != NULL);
ll_update_times(request, parent);
};
struct ptlrpc_request *req = NULL;
struct md_op_data *op_data;
+ const char *name = NULL;
+ size_t namelen = 0;
int rc = 0;
ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p),name=%s\n",
PFID(ll_inode2fid(inode)), inode, dentry->d_name.name);
- if (exp_connect_flags2(exp) & OBD_CONNECT2_GETATTR_PFID)
+ if (exp_connect_flags2(exp) & OBD_CONNECT2_GETATTR_PFID) {
parent = dentry->d_parent->d_inode;
- else
+ name = dentry->d_name.name;
+ namelen = dentry->d_name.len;
+ } else {
parent = inode;
+ }
- /* Call getattr by fid, so do not provide name at all. */
- op_data = ll_prep_md_op_data(NULL, parent, inode, NULL, 0, 0,
+ op_data = ll_prep_md_op_data(NULL, parent, inode, name, namelen, 0,
LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
RETURN(PTR_ERR(op_data));
+ /* Call getattr by fid */
+ if (exp_connect_flags2(exp) & OBD_CONNECT2_GETATTR_PFID)
+ op_data->op_flags = MF_GETATTR_BY_FID;
rc = md_intent_lock(exp, op_data, &oit, &req, &ll_md_blocking_ast, 0);
ll_finish_md_op_data(op_data);
if (rc < 0) {
* do_lookup() -> ll_revalidate_it(). We cannot use d_drop
* here to preserve get_cwd functionality on 2.6.
* Bug 10503 */
- if (!dentry->d_inode->i_nlink) {
- spin_lock(&inode->i_lock);
- d_lustre_invalidate(dentry, 0);
- spin_unlock(&inode->i_lock);
- }
+ if (!dentry->d_inode->i_nlink)
+ d_lustre_invalidate(dentry);
ll_lookup_finish_locks(&oit, dentry);
out:
RETURN(0);
down_read(&lli->lli_lsm_sem);
- rc = md_merge_attr(ll_i2mdexp(inode), &lli->lli_fid, lli->lli_lsm_md,
+ rc = md_merge_attr(ll_i2mdexp(inode), ll_i2info(inode)->lli_lsm_md,
&attr, ll_md_blocking_ast);
up_read(&lli->lli_lsm_sem);
if (rc != 0)
RETURN(rc);
+ spin_lock(&inode->i_lock);
set_nlink(inode, attr.cat_nlink);
+ spin_unlock(&inode->i_lock);
+
inode->i_blocks = attr.cat_blocks;
i_size_write(inode, attr.cat_size);
}
int ll_getattr_dentry(struct dentry *de, struct kstat *stat, u32 request_mask,
- unsigned int flags)
+ unsigned int flags, bool foreign)
{
struct inode *inode = de->d_inode;
struct ll_sb_info *sbi = ll_i2sbi(inode);
if (rc < 0)
RETURN(rc);
- if (S_ISREG(inode->i_mode)) {
+	/* foreign files/dirs are always of zero length, so we don't
+	 * need to validate the size.
+ */
+ if (S_ISREG(inode->i_mode) && !foreign) {
bool cached;
if (!need_glimpse)
* restore the MDT holds the layout lock so the glimpse will
* block up to the end of restore (getattr will block)
*/
- if (!ll_file_test_flag(lli, LLIF_FILE_RESTORING)) {
+ if (!test_bit(LLIF_FILE_RESTORING, &lli->lli_flags)) {
rc = ll_glimpse_size(inode);
if (rc < 0)
RETURN(rc);
}
} else {
/* If the object isn't a regular file then don't validate size. */
- if (ll_dir_striped(inode)) {
+		/* a foreign dir is not a striped dir */
+ if (ll_dir_striped(inode) && !foreign) {
rc = ll_merge_md_attr(inode);
if (rc < 0)
RETURN(rc);
stat->rdev = inode->i_rdev;
}
- stat->mode = inode->i_mode;
+ /* foreign symlink to be exposed as a real symlink */
+ if (!foreign)
+ stat->mode = inode->i_mode;
+ else
+ stat->mode = (inode->i_mode & ~S_IFMT) | S_IFLNK;
+
stat->uid = inode->i_uid;
stat->gid = inode->i_gid;
stat->atime = inode->i_atime;
int ll_getattr(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags)
{
- return ll_getattr_dentry(path->dentry, stat, request_mask, flags);
+ return ll_getattr_dentry(path->dentry, stat, request_mask, flags,
+ false);
}
#else
int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat)
{
return ll_getattr_dentry(de, stat, STATX_BASIC_STATS,
- AT_STATX_SYNC_AS_STAT);
+ AT_STATX_SYNC_AS_STAT, false);
}
#endif
-int cl_falloc(struct inode *inode, int mode, loff_t offset, loff_t len)
+int cl_falloc(struct file *file, struct inode *inode, int mode, loff_t offset,
+ loff_t len)
{
struct lu_env *env;
struct cl_io *io;
__u16 refcheck;
- int rc; loff_t sa_falloc_end;
+ int rc;
loff_t size = i_size_read(inode);
ENTRY;
io = vvp_env_thread_io(env);
io->ci_obj = ll_i2info(inode)->lli_clob;
+ ll_io_set_mirror(io, file);
+
io->ci_verify_layout = 1;
io->u.ci_setattr.sa_parent_fid = lu_object_fid(&io->ci_obj->co_lu);
io->u.ci_setattr.sa_falloc_mode = mode;
io->u.ci_setattr.sa_falloc_offset = offset;
- io->u.ci_setattr.sa_falloc_len = len;
- io->u.ci_setattr.sa_falloc_end = io->u.ci_setattr.sa_falloc_offset +
- io->u.ci_setattr.sa_falloc_len;
+ io->u.ci_setattr.sa_falloc_end = offset + len;
io->u.ci_setattr.sa_subtype = CL_SETATTR_FALLOCATE;
- sa_falloc_end = io->u.ci_setattr.sa_falloc_end;
- if (sa_falloc_end > size) {
+ if (io->u.ci_setattr.sa_falloc_end > size) {
+ loff_t newsize = io->u.ci_setattr.sa_falloc_end;
+
/* Check new size against VFS/VM file size limit and rlimit */
- rc = inode_newsize_ok(inode, sa_falloc_end);
+ rc = inode_newsize_ok(inode, newsize);
if (rc)
goto out;
- if (sa_falloc_end > ll_file_maxbytes(inode)) {
+ if (newsize > ll_file_maxbytes(inode)) {
CDEBUG(D_INODE, "file size too large %llu > %llu\n",
- (unsigned long long)(sa_falloc_end),
+ (unsigned long long)newsize,
ll_file_maxbytes(inode));
rc = -EFBIG;
goto out;
}
- io->u.ci_setattr.sa_attr.lvb_size = sa_falloc_end;
- if (!(mode & FALLOC_FL_KEEP_SIZE))
- io->u.ci_setattr.sa_avalid |= ATTR_SIZE;
- } else {
- io->u.ci_setattr.sa_attr.lvb_size = size;
}
-again:
- if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0)
- rc = cl_io_loop(env, io);
- else
- rc = io->ci_result;
-
- cl_io_fini(env, io);
- if (unlikely(io->ci_need_restart))
- goto again;
+ do {
+ rc = cl_io_init(env, io, CIT_SETATTR, io->ci_obj);
+ if (!rc)
+ rc = cl_io_loop(env, io);
+ else
+ rc = io->ci_result;
+ cl_io_fini(env, io);
+ } while (unlikely(io->ci_need_restart));
out:
cl_env_put(env, &refcheck);
long ll_fallocate(struct file *filp, int mode, loff_t offset, loff_t len)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(filp);
int rc;
+ if (offset < 0 || len <= 0)
+ RETURN(-EINVAL);
/*
- * Only mode == 0 (which is standard prealloc) is supported now.
- * Punch is not supported yet.
+ * Encrypted inodes can't handle collapse range or zero range or insert
+ * range since we would need to re-encrypt blocks with a different IV or
+ * XTS tweak (which are based on the logical block number).
+ * Similar to what ext4 does.
*/
- if (mode & ~FALLOC_FL_KEEP_SIZE)
+ if (IS_ENCRYPTED(inode) &&
+ (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE |
+ FALLOC_FL_ZERO_RANGE)))
+ RETURN(-EOPNOTSUPP);
+
+ /*
+	 * mode == 0 (which is standard prealloc) and PUNCH are supported.
+	 * The rest of the mode options are not supported yet.
+ */
+ if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
RETURN(-EOPNOTSUPP);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FALLOCATE, 1);
- rc = cl_falloc(inode, mode, offset, len);
+ rc = cl_falloc(filp, inode, mode, offset, len);
+ /*
+ * ENOTSUPP (524) is an NFSv3 specific error code erroneously
+	 * used by Lustre in several places. Returning it here would
+	 * confuse applications that explicitly test for EOPNOTSUPP
+ * (95) and fall back to ftruncate().
+ */
+ if (rc == -ENOTSUPP)
+ rc = -EOPNOTSUPP;
RETURN(rc);
}
rc = ll_do_fiemap(inode, fiemap, num_bytes);
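+	/* flag every mapped extent of an encrypted file as encrypted and
+	 * encoded so userspace knows the raw data cannot be interpreted
+	 * directly
+	 */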
+ if (IS_ENCRYPTED(inode)) {
+ int i;
+
+ for (i = 0; i < fiemap->fm_mapped_extents; i++)
+ fiemap->fm_extents[i].fe_flags |=
+ FIEMAP_EXTENT_DATA_ENCRYPTED |
+ FIEMAP_EXTENT_ENCODED;
+ }
+
fieinfo->fi_flags = fiemap->fm_flags;
fieinfo->fi_extents_mapped = fiemap->fm_mapped_extents;
if (extent_count > 0 &&
return rc;
}
-struct posix_acl *ll_get_acl(struct inode *inode, int type)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct posix_acl *acl = NULL;
- ENTRY;
-
- spin_lock(&lli->lli_lock);
- /* VFS' acl_permission_check->check_acl will release the refcount */
- acl = posix_acl_dup(lli->lli_posix_acl);
- spin_unlock(&lli->lli_lock);
-
- RETURN(acl);
-}
-
-#ifdef HAVE_IOP_SET_ACL
-#ifdef CONFIG_LUSTRE_FS_POSIX_ACL
-int ll_set_acl(struct inode *inode, struct posix_acl *acl, int type)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ptlrpc_request *req = NULL;
- const char *name = NULL;
- char *value = NULL;
- size_t value_size = 0;
- int rc = 0;
- ENTRY;
-
- switch (type) {
- case ACL_TYPE_ACCESS:
- name = XATTR_NAME_POSIX_ACL_ACCESS;
- if (acl)
- rc = posix_acl_update_mode(inode, &inode->i_mode, &acl);
- break;
-
- case ACL_TYPE_DEFAULT:
- name = XATTR_NAME_POSIX_ACL_DEFAULT;
- if (!S_ISDIR(inode->i_mode))
- rc = acl ? -EACCES : 0;
- break;
-
- default:
- rc = -EINVAL;
- break;
- }
- if (rc)
- return rc;
-
- if (acl) {
- value_size = posix_acl_xattr_size(acl->a_count);
- value = kmalloc(value_size, GFP_NOFS);
- if (value == NULL)
- GOTO(out, rc = -ENOMEM);
-
- rc = posix_acl_to_xattr(&init_user_ns, acl, value, value_size);
- if (rc < 0)
- GOTO(out_value, rc);
- }
-
- rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode),
- value ? OBD_MD_FLXATTR : OBD_MD_FLXATTRRM,
- name, value, value_size, 0, 0, &req);
-
- ptlrpc_req_finished(req);
-out_value:
- kfree(value);
-out:
- if (rc)
- forget_cached_acl(inode, type);
- else
- set_cached_acl(inode, type, acl);
- RETURN(rc);
-}
-#endif /* CONFIG_LUSTRE_FS_POSIX_ACL */
-#endif /* HAVE_IOP_SET_ACL */
-
int ll_inode_permission(struct inode *inode, int mask)
{
int rc = 0;
struct root_squash_info *squash;
struct cred *cred = NULL;
const struct cred *old_cred = NULL;
- cfs_cap_t cap;
bool squash_id = false;
ktime_t kstart = ktime_get();
* need to do it before permission check.
*/
- if (inode == inode->i_sb->s_root->d_inode) {
+ if (is_root_inode(inode)) {
rc = ll_inode_revalidate(inode->i_sb->s_root, IT_LOOKUP);
if (rc)
RETURN(rc);
squash = &sbi->ll_squash;
if (unlikely(squash->rsi_uid != 0 &&
uid_eq(current_fsuid(), GLOBAL_ROOT_UID) &&
- !(sbi->ll_flags & LL_SBI_NOROOTSQUASH))) {
+ !test_bit(LL_SBI_NOROOTSQUASH, sbi->ll_flags))) {
squash_id = true;
}
if (squash_id) {
cred->fsuid = make_kuid(&init_user_ns, squash->rsi_uid);
cred->fsgid = make_kgid(&init_user_ns, squash->rsi_gid);
- for (cap = 0; cap < sizeof(cfs_cap_t) * 8; cap++) {
- if (BIT(cap) & CFS_CAP_FS_MASK)
- cap_lower(cred->cap_effective, cap);
- }
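+		/* drop the nfsd and filesystem capability sets from the
+		 * squashed credentials
+		 */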
+ cred->cap_effective = cap_drop_nfsd_set(cred->cap_effective);
+ cred->cap_effective = cap_drop_fs_set(cred->cap_effective);
+
old_cred = override_creds(cred);
}
}
/* -o localflock - only provides locally consistent flock locks */
-struct file_operations ll_file_operations = {
+static const struct file_operations ll_file_operations = {
#ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
# ifdef HAVE_SYNC_READ_WRITE
.read = new_sync_read,
.release = ll_file_release,
.mmap = ll_file_mmap,
.llseek = ll_file_seek,
- .splice_read = ll_file_splice_read,
+#ifndef HAVE_DEFAULT_FILE_SPLICE_READ_EXPORT
+ .splice_read = generic_file_splice_read,
+#else
+ .splice_read = pcc_file_splice_read,
+#endif
.fsync = ll_fsync,
.flush = ll_flush,
.fallocate = ll_fallocate,
};
-struct file_operations ll_file_operations_flock = {
+static const struct file_operations ll_file_operations_flock = {
#ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
# ifdef HAVE_SYNC_READ_WRITE
.read = new_sync_read,
.release = ll_file_release,
.mmap = ll_file_mmap,
.llseek = ll_file_seek,
- .splice_read = ll_file_splice_read,
+#ifndef HAVE_DEFAULT_FILE_SPLICE_READ_EXPORT
+ .splice_read = generic_file_splice_read,
+#else
+ .splice_read = pcc_file_splice_read,
+#endif
.fsync = ll_fsync,
.flush = ll_flush,
.flock = ll_file_flock,
};
/* These are for -o noflock - to return ENOSYS on flock calls */
-struct file_operations ll_file_operations_noflock = {
+static const struct file_operations ll_file_operations_noflock = {
#ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
# ifdef HAVE_SYNC_READ_WRITE
.read = new_sync_read,
.release = ll_file_release,
.mmap = ll_file_mmap,
.llseek = ll_file_seek,
- .splice_read = ll_file_splice_read,
+#ifndef HAVE_DEFAULT_FILE_SPLICE_READ_EXPORT
+ .splice_read = generic_file_splice_read,
+#else
+ .splice_read = pcc_file_splice_read,
+#endif
.fsync = ll_fsync,
.flush = ll_flush,
.flock = ll_file_noflock,
.fallocate = ll_fallocate,
};
-struct inode_operations ll_file_inode_operations = {
+const struct inode_operations ll_file_inode_operations = {
.setattr = ll_setattr,
.getattr = ll_getattr,
.permission = ll_inode_permission,
#endif
};
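+/* Pick the file_operations variant matching the flock mount option:
+ * flock, localflock, or the default noflock behaviour.
+ */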
+const struct file_operations *ll_select_file_operations(struct ll_sb_info *sbi)
+{
+ const struct file_operations *fops = &ll_file_operations_noflock;
+
+ if (test_bit(LL_SBI_FLOCK, sbi->ll_flags))
+ fops = &ll_file_operations_flock;
+ else if (test_bit(LL_SBI_LOCALFLOCK, sbi->ll_flags))
+ fops = &ll_file_operations;
+
+ return fops;
+}
+
int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf)
{
struct ll_inode_info *lli = ll_i2info(inode);
out:
cl_env_put(env, &refcheck);
- RETURN(rc);
+ RETURN(rc < 0 ? rc : 0);
}
/* Fetch layout from MDT with getxattr request, if it's not ready yet */
lock = ldlm_handle2lock(lockh);
LASSERT(lock != NULL);
- LASSERT(ldlm_has_layout(lock));
+
+ if (!ldlm_has_layout(lock))
+ GOTO(out, rc = -EAGAIN);
LDLM_DEBUG(lock, "file "DFID"(%p) being reconfigured",
PFID(&lli->lli_fid), inode);
ENTRY;
*gen = ll_layout_version_get(lli);
- if (!(sbi->ll_flags & LL_SBI_LAYOUT_LOCK) || *gen != CL_LAYOUT_GEN_NONE)
+ if (!test_bit(LL_SBI_LAYOUT_LOCK, sbi->ll_flags) ||
+ *gen != CL_LAYOUT_GEN_NONE)
RETURN(0);
/* sanity checks */