#include <linux/file.h>
#include <linux/sched.h>
#include <linux/user_namespace.h>
-#ifdef HAVE_UIDGID_HEADER
-# include <linux/uidgid.h>
-#endif
+#include <linux/uidgid.h>
+#include <linux/falloc.h>
#include <uapi/linux/lustre/lustre_ioctl.h>
+#include <uapi/linux/llcrypt.h>
#include <lustre_swab.h>
#include "cl_object.h"
__u16 sp_mirror_id;
};
+struct pcc_param {
+ __u64 pa_data_version;
+ __u32 pa_archive_id;
+ __u32 pa_layout_gen;
+};
+
static int
ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg);
return NULL;
fd->fd_write_failed = false;
+ pcc_file_init(&fd->fd_pcc_file);
return fd;
}
op_data->op_xvalid |= OP_XVALID_CTIME_SET;
op_data->op_attr_blocks = inode->i_blocks;
op_data->op_attr_flags = ll_inode_to_ext_flags(inode->i_flags);
- if (ll_file_test_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT))
+ if (test_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags))
op_data->op_attr_flags |= LUSTRE_PROJINHERIT_FL;
op_data->op_open_handle = och->och_open_handle;
if (och->och_flags & FMODE_WRITE &&
- ll_file_test_and_clear_flag(ll_i2info(inode), LLIF_DATA_MODIFIED))
+ test_and_clear_bit(LLIF_DATA_MODIFIED, &ll_i2info(inode)->lli_flags))
/* For HSM: if inode data has been modified, pack it so that
* MDT can set data dirty flag in the archive. */
op_data->op_bias |= MDS_DATA_MODIFIED;
op_data->op_attr_blocks += ((struct inode *)data)->i_blocks;
op_data->op_attr.ia_valid |= ATTR_SIZE;
op_data->op_xvalid |= OP_XVALID_BLOCKS;
+ /* fallthrough */
case MDS_CLOSE_LAYOUT_SPLIT:
case MDS_CLOSE_LAYOUT_SWAP: {
struct split_param *sp = data;
break;
}
+ case MDS_PCC_ATTACH: {
+ struct pcc_param *param = data;
+
+ LASSERT(data != NULL);
+ op_data->op_bias |= MDS_HSM_RELEASE | MDS_PCC_ATTACH;
+ op_data->op_archive_id = param->pa_archive_id;
+ op_data->op_data_version = param->pa_data_version;
+ op_data->op_lease_handle = och->och_lease_handle;
+ break;
+ }
+
case MDS_HSM_RELEASE:
LASSERT(data != NULL);
op_data->op_bias |= MDS_HSM_RELEASE;
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
if (!(body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED))
rc = -EBUSY;
+
+ if (bias & MDS_PCC_ATTACH) {
+ struct pcc_param *param = data;
+
+ param->pa_layout_gen = body->mbo_layout_gen;
+ }
}
ll_finish_md_op_data(op_data);
.l_inodebits = { MDS_INODELOCK_OPEN },
};
__u64 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct ll_inode_info *lli = ll_i2info(inode);
struct lustre_handle lockh;
enum ldlm_mode lockmode;
}
mutex_unlock(&lli->lli_och_mutex);
- if (!md_lock_match(ll_i2mdexp(inode), flags, ll_inode2fid(inode),
+ /* LU-4398: do not cache write open lock if the file has exec bit */
+ if ((lockmode == LCK_CW && inode->i_mode & S_IXUGO) ||
+ !md_lock_match(ll_i2mdexp(inode), flags, ll_inode2fid(inode),
LDLM_IBITS, &policy, lockmode, &lockh))
rc = ll_md_real_close(inode, fd->fd_omode);
out:
- LUSTRE_FPRIVATE(file) = NULL;
+ file->private_data = NULL;
ll_file_data_put(fd);
RETURN(rc);
*/
int ll_file_release(struct inode *inode, struct file *file)
{
- struct ll_file_data *fd;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_inode_info *lli = ll_i2info(inode);
- int rc;
- ENTRY;
+ struct ll_file_data *fd;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ll_inode_info *lli = ll_i2info(inode);
+ ktime_t kstart = ktime_get();
+ int rc;
+
+ ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
PFID(ll_inode2fid(inode)), inode);
- if (inode->i_sb->s_root != file_dentry(file))
- ll_stats_ops_tally(sbi, LPROC_LL_RELEASE, 1);
- fd = LUSTRE_FPRIVATE(file);
- LASSERT(fd != NULL);
+ fd = file->private_data;
+ LASSERT(fd != NULL);
/* The last ref on @file, maybe not the the owner pid of statahead,
* because parent and child process can share the same file handle. */
if (S_ISDIR(inode->i_mode) && lli->lli_opendir_key == fd)
ll_deauthorize_statahead(inode, fd);
- if (inode->i_sb->s_root == file_dentry(file)) {
- LUSTRE_FPRIVATE(file) = NULL;
+ if (is_root_inode(inode)) {
+ file->private_data = NULL;
ll_file_data_put(fd);
- RETURN(0);
+ GOTO(out, rc = 0);
}
+ pcc_file_release(inode, file);
+
if (!S_ISDIR(inode->i_mode)) {
if (lli->lli_clob != NULL)
lov_read_and_clear_async_rc(lli->lli_clob);
if (CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_DUMP_LOG, cfs_fail_val))
libcfs_debug_dumplog();
+out:
+ if (!rc && !is_root_inode(inode))
+ ll_stats_ops_tally(sbi, LPROC_LL_RELEASE,
+ ktime_us_delta(ktime_get(), kstart));
RETURN(rc);
}
{
struct niobuf_local *lnb = data;
void *kaddr;
+ int rc = 0;
- kaddr = ll_kmap_atomic(page, KM_USER0);
+ struct inode *inode = page2inode(page);
+
+ kaddr = kmap_atomic(page);
memcpy(kaddr, lnb->lnb_data, lnb->lnb_len);
if (lnb->lnb_len < PAGE_SIZE)
memset(kaddr + lnb->lnb_len, 0,
PAGE_SIZE - lnb->lnb_len);
flush_dcache_page(page);
SetPageUptodate(page);
- ll_kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
+
+ if (inode && IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) {
+ if (!llcrypt_has_encryption_key(inode))
+ CDEBUG(D_SEC, "no enc key for "DFID"\n",
+ PFID(ll_inode2fid(inode)));
+ else {
+ unsigned int offs = 0;
+
+ while (offs < PAGE_SIZE) {
+ /* decrypt only if page is not empty */
+ if (memcmp(page_address(page) + offs,
+ page_address(ZERO_PAGE(0)),
+ LUSTRE_ENCRYPTION_UNIT_SIZE) == 0)
+ break;
+
+ rc = llcrypt_decrypt_pagecache_blocks(page,
+ LUSTRE_ENCRYPTION_UNIT_SIZE,
+ offs);
+ if (rc)
+ break;
+
+ offs += LUSTRE_ENCRYPTION_UNIT_SIZE;
+ }
+ }
+ }
unlock_page(page);
- return 0;
+ return rc;
}
-void ll_dom_finish_open(struct inode *inode, struct ptlrpc_request *req,
- struct lookup_intent *it)
+void ll_dom_finish_open(struct inode *inode, struct ptlrpc_request *req)
{
+ struct lu_env *env;
+ struct cl_io *io;
struct ll_inode_info *lli = ll_i2info(inode);
struct cl_object *obj = lli->lli_clob;
struct address_space *mapping = inode->i_mapping;
struct page *vmpage;
struct niobuf_remote *rnb;
+ struct mdt_body *body;
char *data;
unsigned long index, start;
struct niobuf_local lnb;
+ __u16 refcheck;
+ int rc;
ENTRY;
if (obj == NULL)
RETURN_EXIT;
- if (!req_capsule_has_field(&req->rq_pill, &RMF_NIOBUF_INLINE,
- RCL_SERVER))
+ if (!req_capsule_field_present(&req->rq_pill, &RMF_NIOBUF_INLINE,
+ RCL_SERVER))
RETURN_EXIT;
rnb = req_capsule_server_get(&req->rq_pill, &RMF_NIOBUF_INLINE);
* client PAGE_SIZE to be used on that client, if server's PAGE_SIZE is
* smaller then offset may be not aligned and that data is just ignored.
*/
- if (rnb->rnb_offset % PAGE_SIZE)
+ if (rnb->rnb_offset & ~PAGE_MASK)
RETURN_EXIT;
- /* Server returns whole file or just file tail if it fills in
- * reply buffer, in both cases total size should be inode size.
+ /* Server returns whole file or just file tail if it fills in reply
+ * buffer, in both cases total size should be equal to the file size.
*/
- if (rnb->rnb_offset + rnb->rnb_len < i_size_read(inode)) {
- CERROR("%s: server returns off/len %llu/%u < i_size %llu\n",
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ if (rnb->rnb_offset + rnb->rnb_len != body->mbo_dom_size &&
+ !(inode && IS_ENCRYPTED(inode))) {
+ CERROR("%s: server returns off/len %llu/%u but size %llu\n",
ll_i2sbi(inode)->ll_fsname, rnb->rnb_offset,
- rnb->rnb_len, i_size_read(inode));
+ rnb->rnb_len, body->mbo_dom_size);
RETURN_EXIT;
}
- CDEBUG(D_INFO, "Get data along with open at %llu len %i, i_size %llu\n",
- rnb->rnb_offset, rnb->rnb_len, i_size_read(inode));
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN_EXIT;
+ io = vvp_env_thread_io(env);
+ io->ci_obj = obj;
+ io->ci_ignore_layout = 1;
+ rc = cl_io_init(env, io, CIT_MISC, obj);
+ if (rc)
+ GOTO(out_io, rc);
+
+ CDEBUG(D_INFO, "Get data along with open at %llu len %i, size %llu\n",
+ rnb->rnb_offset, rnb->rnb_len, body->mbo_dom_size);
data = (char *)rnb + sizeof(*rnb);
lnb.lnb_file_offset = rnb->rnb_offset;
- start = lnb.lnb_file_offset / PAGE_SIZE;
+ start = lnb.lnb_file_offset >> PAGE_SHIFT;
index = 0;
- LASSERT(lnb.lnb_file_offset % PAGE_SIZE == 0);
+ LASSERT((lnb.lnb_file_offset & ~PAGE_MASK) == 0);
lnb.lnb_page_offset = 0;
do {
+ struct cl_page *page;
+
lnb.lnb_data = data + (index << PAGE_SHIFT);
lnb.lnb_len = rnb->rnb_len - (index << PAGE_SHIFT);
if (lnb.lnb_len > PAGE_SIZE)
PTR_ERR(vmpage));
break;
}
+ lock_page(vmpage);
+ if (vmpage->mapping == NULL) {
+ unlock_page(vmpage);
+ put_page(vmpage);
+ /* page was truncated */
+ break;
+ }
+ /* attach VM page to CL page cache */
+ page = cl_page_find(env, obj, vmpage->index, vmpage,
+ CPT_CACHEABLE);
+ if (IS_ERR(page)) {
+ ClearPageUptodate(vmpage);
+ unlock_page(vmpage);
+ put_page(vmpage);
+ break;
+ }
+ cl_page_export(env, page, 1);
+ cl_page_put(env, page);
+ unlock_page(vmpage);
put_page(vmpage);
index++;
} while (rnb->rnb_len > (index << PAGE_SHIFT));
+
+out_io:
+ cl_io_fini(env, io);
+ cl_env_put(env, &refcheck);
+
EXIT;
}
rc = ll_prep_inode(&de->d_inode, req, NULL, itp);
if (!rc && itp->it_lock_mode) {
- struct lustre_handle handle = {.cookie = itp->it_lock_handle};
- struct ldlm_lock *lock;
- bool has_dom_bit = false;
+ __u64 bits = 0;
/* If we got a lock back and it has a LOOKUP bit set,
* make sure the dentry is marked as valid so we can find it.
* We don't need to care about actual hashing since other bits
* of kernel will deal with that later.
*/
- lock = ldlm_handle2lock(&handle);
- if (lock) {
- has_dom_bit = ldlm_has_dom(lock);
- if (lock->l_policy_data.l_inodebits.bits &
- MDS_INODELOCK_LOOKUP)
- d_lustre_revalidate(de);
-
- LDLM_LOCK_PUT(lock);
- }
- ll_set_lock_data(sbi->ll_md_exp, de->d_inode, itp, NULL);
- if (has_dom_bit)
- ll_dom_finish_open(de->d_inode, req, itp);
+ ll_set_lock_data(sbi->ll_md_exp, de->d_inode, itp, &bits);
+ if (bits & MDS_INODELOCK_LOOKUP)
+ d_lustre_revalidate(de);
+ /* if DoM bit returned along with LAYOUT bit then there
+ * can be read-on-open data returned.
+ */
+ if (bits & MDS_INODELOCK_DOM && bits & MDS_INODELOCK_LAYOUT)
+ ll_dom_finish_open(de->d_inode, req);
}
out:
struct inode *inode = file_inode(file);
ENTRY;
- LASSERT(!LUSTRE_FPRIVATE(file));
+ LASSERT(!file->private_data);
LASSERT(fd != NULL);
RETURN(rc);
}
- LUSTRE_FPRIVATE(file) = fd;
+ file->private_data = fd;
ll_readahead_init(inode, &fd->fd_ras);
fd->fd_omode = it->it_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
+ /* turn off the kernel's read-ahead */
+ file->f_ra.ra_pages = 0;
/* ll_cl_context initialize */
rwlock_init(&fd->fd_lock);
struct obd_client_handle **och_p = NULL;
__u64 *och_usecount = NULL;
struct ll_file_data *fd;
+ ktime_t kstart = ktime_get();
int rc = 0;
ENTRY;
it = file->private_data; /* XXX: compat macro */
file->private_data = NULL; /* prevent ll_local_open assertion */
+ if (S_ISREG(inode->i_mode)) {
+ rc = llcrypt_file_open(inode, file);
+ if (rc)
+ GOTO(out_nofiledata, rc);
+ }
+
fd = ll_file_data_get();
if (fd == NULL)
GOTO(out_nofiledata, rc = -ENOMEM);
if (S_ISDIR(inode->i_mode))
ll_authorize_statahead(inode, fd);
- if (inode->i_sb->s_root == file_dentry(file)) {
- LUSTRE_FPRIVATE(file) = fd;
- RETURN(0);
- }
+ if (is_root_inode(inode)) {
+ file->private_data = fd;
+ RETURN(0);
+ }
if (!it || !it->it_disposition) {
- /* Convert f_flags into access mode. We cannot use file->f_mode,
- * because everything but O_ACCMODE mask was stripped from
- * there */
- if ((oit.it_flags + 1) & O_ACCMODE)
- oit.it_flags++;
- if (file->f_flags & O_TRUNC)
- oit.it_flags |= FMODE_WRITE;
+ /* Convert f_flags into access mode. We cannot use file->f_mode,
+ * because everything but O_ACCMODE mask was stripped from
+ * there */
+ if ((oit.it_flags + 1) & O_ACCMODE)
+ oit.it_flags++;
+ if (file->f_flags & O_TRUNC)
+ oit.it_flags |= FMODE_WRITE;
/* kernel only call f_op->open in dentry_open. filp_open calls
* dentry_open after call to open_namei that checks permissions.
if (oit.it_flags & (FMODE_WRITE | FMODE_READ))
oit.it_flags |= MDS_OPEN_OWNEROVERRIDE;
- /* We do not want O_EXCL here, presumably we opened the file
- * already? XXX - NFS implications? */
- oit.it_flags &= ~O_EXCL;
+ /* We do not want O_EXCL here, presumably we opened the file
+ * already? XXX - NFS implications? */
+ oit.it_flags &= ~O_EXCL;
- /* bug20584, if "it_flags" contains O_CREAT, the file will be
- * created if necessary, then "IT_CREAT" should be set to keep
- * consistent with it */
- if (oit.it_flags & O_CREAT)
- oit.it_op |= IT_CREAT;
+ /* bug20584, if "it_flags" contains O_CREAT, the file will be
+ * created if necessary, then "IT_CREAT" should be set to keep
+ * consistent with it */
+ if (oit.it_flags & O_CREAT)
+ oit.it_op |= IT_CREAT;
- it = &oit;
- }
+ it = &oit;
+ }
restart:
- /* Let's see if we have file open on MDS already. */
- if (it->it_flags & FMODE_WRITE) {
- och_p = &lli->lli_mds_write_och;
- och_usecount = &lli->lli_open_fd_write_count;
- } else if (it->it_flags & FMODE_EXEC) {
- och_p = &lli->lli_mds_exec_och;
- och_usecount = &lli->lli_open_fd_exec_count;
- } else {
- och_p = &lli->lli_mds_read_och;
- och_usecount = &lli->lli_open_fd_read_count;
- }
+ /* Let's see if we have file open on MDS already. */
+ if (it->it_flags & FMODE_WRITE) {
+ och_p = &lli->lli_mds_write_och;
+ och_usecount = &lli->lli_open_fd_write_count;
+ } else if (it->it_flags & FMODE_EXEC) {
+ och_p = &lli->lli_mds_exec_och;
+ och_usecount = &lli->lli_open_fd_exec_count;
+ } else {
+ och_p = &lli->lli_mds_read_och;
+ och_usecount = &lli->lli_open_fd_read_count;
+ }
mutex_lock(&lli->lli_och_mutex);
- if (*och_p) { /* Open handle is present */
- if (it_disposition(it, DISP_OPEN_OPEN)) {
- /* Well, there's extra open request that we do not need,
- let's close it somehow. This will decref request. */
- rc = it_open_error(DISP_OPEN_OPEN, it);
- if (rc) {
+ if (*och_p) { /* Open handle is present */
+ if (it_disposition(it, DISP_OPEN_OPEN)) {
+ /* Well, there's extra open request that we do not need,
+ * let's close it somehow. This will decref request. */
+ rc = it_open_error(DISP_OPEN_OPEN, it);
+ if (rc) {
mutex_unlock(&lli->lli_och_mutex);
- GOTO(out_openerr, rc);
- }
+ GOTO(out_openerr, rc);
+ }
ll_release_openhandle(file_dentry(file), it);
- }
- (*och_usecount)++;
+ }
+ (*och_usecount)++;
- rc = ll_local_open(file, it, fd, NULL);
- if (rc) {
- (*och_usecount)--;
+ rc = ll_local_open(file, it, fd, NULL);
+ if (rc) {
+ (*och_usecount)--;
mutex_unlock(&lli->lli_och_mutex);
- GOTO(out_openerr, rc);
- }
- } else {
- LASSERT(*och_usecount == 0);
+ GOTO(out_openerr, rc);
+ }
+ } else {
+ LASSERT(*och_usecount == 0);
if (!it->it_disposition) {
- struct ll_dentry_data *ldd = ll_d2d(file->f_path.dentry);
- /* We cannot just request lock handle now, new ELC code
- means that one of other OPEN locks for this file
- could be cancelled, and since blocking ast handler
- would attempt to grab och_mutex as well, that would
- result in a deadlock */
+ struct dentry *dentry = file_dentry(file);
+ struct ll_dentry_data *ldd;
+
+ /* We cannot just request lock handle now, new ELC code
+ * means that one of other OPEN locks for this file
+ * could be cancelled, and since blocking ast handler
+ * would attempt to grab och_mutex as well, that would
+ * result in a deadlock
+ */
mutex_unlock(&lli->lli_och_mutex);
/*
* Normally called under two situations:
* lookup path only, since ll_iget_for_nfs always calls
* ll_d_init().
*/
+ ldd = ll_d2d(dentry);
if (ldd && ldd->lld_nfs_dentry) {
ldd->lld_nfs_dentry = 0;
- it->it_flags |= MDS_OPEN_LOCK;
+ if (!filename_is_volatile(dentry->d_name.name,
+ dentry->d_name.len,
+ NULL))
+ it->it_flags |= MDS_OPEN_LOCK;
}
- /*
+ /*
* Always specify MDS_OPEN_BY_FID because we don't want
* to get file with different fid.
*/
it->it_flags |= MDS_OPEN_BY_FID;
- rc = ll_intent_file_open(file_dentry(file), NULL, 0,
- it);
- if (rc)
- GOTO(out_openerr, rc);
+ rc = ll_intent_file_open(dentry, NULL, 0, it);
+ if (rc)
+ GOTO(out_openerr, rc);
- goto restart;
- }
- OBD_ALLOC(*och_p, sizeof (struct obd_client_handle));
- if (!*och_p)
- GOTO(out_och_free, rc = -ENOMEM);
+ goto restart;
+ }
+ OBD_ALLOC(*och_p, sizeof(struct obd_client_handle));
+ if (!*och_p)
+ GOTO(out_och_free, rc = -ENOMEM);
- (*och_usecount)++;
+ (*och_usecount)++;
- /* md_intent_lock() didn't get a request ref if there was an
- * open error, so don't do cleanup on the request here
- * (bug 3430) */
- /* XXX (green): Should not we bail out on any error here, not
- * just open error? */
+ /* md_intent_lock() didn't get a request ref if there was an
+ * open error, so don't do cleanup on the request here
+ * (bug 3430) */
+ /* XXX (green): Should not we bail out on any error here, not
+ * just open error? */
rc = it_open_error(DISP_OPEN_OPEN, it);
if (rc != 0)
GOTO(out_och_free, rc);
if (rc)
GOTO(out_och_free, rc);
}
+
+ rc = pcc_file_open(inode, file);
+ if (rc)
+ GOTO(out_och_free, rc);
+
mutex_unlock(&lli->lli_och_mutex);
- fd = NULL;
- /* Must do this outside lli_och_mutex lock to prevent deadlock where
- different kind of OPEN lock for this same inode gets cancelled
- by ldlm_cancel_lru */
- if (!S_ISREG(inode->i_mode))
- GOTO(out_och_free, rc);
+ fd = NULL;
+ /* Must do this outside lli_och_mutex lock to prevent deadlock where
+ different kind of OPEN lock for this same inode gets cancelled
+ by ldlm_cancel_lru */
+ if (!S_ISREG(inode->i_mode))
+ GOTO(out_och_free, rc);
cl_lov_delay_create_clear(&file->f_flags);
GOTO(out_och_free, rc);
out_och_free:
- if (rc) {
- if (och_p && *och_p) {
- OBD_FREE(*och_p, sizeof (struct obd_client_handle));
- *och_p = NULL; /* OBD_FREE writes some magic there */
- (*och_usecount)--;
- }
+ if (rc) {
+ if (och_p && *och_p) {
+ OBD_FREE(*och_p, sizeof(struct obd_client_handle));
+ *och_p = NULL; /* OBD_FREE writes some magic there */
+ (*och_usecount)--;
+ }
mutex_unlock(&lli->lli_och_mutex);
out_openerr:
if (lli->lli_opendir_key == fd)
ll_deauthorize_statahead(inode, fd);
+
if (fd != NULL)
ll_file_data_put(fd);
- } else {
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN, 1);
- }
+ } else {
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN,
+ ktime_us_delta(ktime_get(), kstart));
+ }
out_nofiledata:
if (it && it_disposition(it, DISP_ENQ_OPEN_REF)) {
it_clear_disposition(it, DISP_ENQ_OPEN_REF);
}
- return rc;
+ return rc;
}
static int ll_md_blocking_lease_ast(struct ldlm_lock *lock,
struct lustre_handle *old_open_handle)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct obd_client_handle **och_p;
__u64 *och_usecount;
int rc = 0;
static int ll_lease_och_release(struct inode *inode, struct file *file)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct obd_client_handle **och_p;
struct obd_client_handle *old_och = NULL;
__u64 *och_usecount;
GOTO(out_release_it, rc);
LASSERT(it_disposition(&it, DISP_ENQ_OPEN_REF));
- ll_och_fill(sbi->ll_md_exp, &it, och);
+ rc = ll_och_fill(sbi->ll_md_exp, &it, och);
+ if (rc)
+ GOTO(out_release_it, rc);
if (!it_disposition(&it, DISP_OPEN_LEASE)) /* old server? */
GOTO(out_close, rc = -EOPNOTSUPP);
/* already get lease, handle lease lock */
ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
- if (it.it_lock_mode == 0 ||
- it.it_lock_bits != MDS_INODELOCK_OPEN) {
+ if (!it.it_lock_mode ||
+ !(it.it_lock_bits & MDS_INODELOCK_OPEN)) {
/* open lock must return for lease */
CERROR(DFID "lease granted but no open lock, %d/%llu.\n",
PFID(ll_inode2fid(inode)), it.it_lock_mode,
* POSIX. Solving this problem needs to send an RPC to MDT for each
* read, this will hurt performance.
*/
- if (inode->i_atime.tv_sec < lli->lli_atime ||
- lli->lli_update_atime) {
+ if (test_and_clear_bit(LLIF_UPDATE_ATIME, &lli->lli_flags) ||
+ inode->i_atime.tv_sec < lli->lli_atime)
inode->i_atime.tv_sec = lli->lli_atime;
- lli->lli_update_atime = 0;
- }
+
inode->i_mtime.tv_sec = lli->lli_mtime;
inode->i_ctime.tv_sec = lli->lli_ctime;
*/
void ll_io_set_mirror(struct cl_io *io, const struct file *file)
{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
/* clear layout version for generic(non-resync) I/O in case it carries
* stale layout version due to I/O restart */
if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
return true;
- if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
+ if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
return true;
return false;
}
-static void ll_io_init(struct cl_io *io, struct file *file, enum cl_io_type iot)
+void ll_io_init(struct cl_io *io, struct file *file, enum cl_io_type iot,
+ struct vvp_io_args *args)
{
struct inode *inode = file_inode(file);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
io->u.ci_rw.crw_nonblock = file->f_flags & O_NONBLOCK;
io->ci_lock_no_expand = fd->ll_lock_no_expand;
io->u.ci_wr.wr_sync = !!(file->f_flags & O_SYNC ||
file->f_flags & O_DIRECT ||
IS_SYNC(inode));
+#ifdef HAVE_GENERIC_WRITE_SYNC_2ARGS
+ io->u.ci_wr.wr_sync |= !!(args &&
+ (args->u.normal.via_iocb->ki_flags &
+ IOCB_DSYNC));
+#endif
}
+
io->ci_obj = ll_i2info(inode)->lli_clob;
io->ci_lockreq = CILR_MAYBE;
if (ll_file_nolock(file)) {
io->ci_lockreq = CILR_MANDATORY;
}
io->ci_noatime = file_is_noatime(file);
+ io->ci_async_readahead = false;
/* FLR: only use non-delay I/O for read as there is only one
* avaliable mirror for write. */
struct file *file, enum cl_io_type iot,
loff_t *ppos, size_t count)
{
- struct vvp_io *vio = vvp_env_io(env);
- struct inode *inode = file_inode(file);
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct range_lock range;
- struct cl_io *io;
- ssize_t result = 0;
- int rc = 0;
- unsigned retried = 0;
- bool restarted = false;
+ struct vvp_io *vio = vvp_env_io(env);
+ struct inode *inode = file_inode(file);
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ll_file_data *fd = file->private_data;
+ struct range_lock range;
+ struct cl_io *io;
+ ssize_t result = 0;
+ int rc = 0;
+ unsigned int retried = 0, dio_lock = 0;
+ bool is_aio = false;
+ struct cl_dio_aio *ci_aio = NULL;
+ size_t per_bytes;
+ bool partial_io = false;
+ size_t max_io_pages, max_cached_pages;
ENTRY;
file_dentry(file)->d_name.name,
iot == CIT_READ ? "read" : "write", *ppos, count);
+ max_io_pages = PTLRPC_MAX_BRW_PAGES * OBD_MAX_RIF_DEFAULT;
+ max_cached_pages = sbi->ll_cache->ccc_lru_max;
+ if (max_io_pages > (max_cached_pages >> 2))
+ max_io_pages = max_cached_pages >> 2;
+
+ io = vvp_env_thread_io(env);
+ if (file->f_flags & O_DIRECT) {
+ if (!is_sync_kiocb(args->u.normal.via_iocb))
+ is_aio = true;
+ ci_aio = cl_aio_alloc(args->u.normal.via_iocb);
+ if (!ci_aio)
+ GOTO(out, rc = -ENOMEM);
+ }
+
restart:
+ /**
+	 * IO block size needs to be aware of the cached page limit;
+	 * otherwise, if we have a small max_cached_mb but a large block
+	 * IO is issued, the IO could not finish and would block the
+	 * whole client.
+ */
+ if (file->f_flags & O_DIRECT)
+ per_bytes = count;
+ else
+ per_bytes = min(max_io_pages << PAGE_SHIFT, count);
+ partial_io = per_bytes < count;
io = vvp_env_thread_io(env);
- ll_io_init(io, file, iot);
+ ll_io_init(io, file, iot, args);
+ io->ci_aio = ci_aio;
+ io->ci_dio_lock = dio_lock;
io->ci_ndelay_tried = retried;
- if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) {
+ if (cl_io_rw_init(env, io, iot, *ppos, per_bytes) == 0) {
bool range_locked = false;
if (file->f_flags & O_APPEND)
range_lock_init(&range, 0, LUSTRE_EOF);
else
- range_lock_init(&range, *ppos, *ppos + count - 1);
-
- vio->vui_fd = LUSTRE_FPRIVATE(file);
- vio->vui_io_subtype = args->via_io_subtype;
-
- switch (vio->vui_io_subtype) {
- case IO_NORMAL:
- vio->vui_iter = args->u.normal.via_iter;
- vio->vui_iocb = args->u.normal.via_iocb;
- /* Direct IO reads must also take range lock,
- * or multiple reads will try to work on the same pages
- * See LU-6227 for details. */
- if (((iot == CIT_WRITE) ||
- (iot == CIT_READ && (file->f_flags & O_DIRECT))) &&
- !(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- CDEBUG(D_VFSTRACE, "Range lock "RL_FMT"\n",
- RL_PARA(&range));
- rc = range_lock(&lli->lli_write_tree, &range);
- if (rc < 0)
- GOTO(out, rc);
+ range_lock_init(&range, *ppos, *ppos + per_bytes - 1);
+
+ vio->vui_fd = file->private_data;
+ vio->vui_iter = args->u.normal.via_iter;
+ vio->vui_iocb = args->u.normal.via_iocb;
+ /* Direct IO reads must also take range lock,
+ * or multiple reads will try to work on the same pages
+ * See LU-6227 for details.
+ */
+ if (((iot == CIT_WRITE) ||
+ (iot == CIT_READ && (file->f_flags & O_DIRECT))) &&
+ !(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+ CDEBUG(D_VFSTRACE, "Range lock "RL_FMT"\n",
+ RL_PARA(&range));
+ rc = range_lock(&lli->lli_write_tree, &range);
+ if (rc < 0)
+ GOTO(out, rc);
- range_locked = true;
- }
- break;
- case IO_SPLICE:
- vio->u.splice.vui_pipe = args->u.splice.via_pipe;
- vio->u.splice.vui_flags = args->u.splice.via_flags;
- break;
- default:
- CERROR("unknown IO subtype %u\n", vio->vui_io_subtype);
- LBUG();
+ range_locked = true;
}
ll_cl_add(file, env, io, LCC_RW);
rc = io->ci_result;
}
+ /*
+	 * In order to move AIO forward, ci_nob was increased,
+	 * but that doesn't mean the IO has finished; it just
+	 * means the IO has been submitted. We will always return
+	 * EIOCBQUEUED to the caller, so we can only return the
+	 * number of bytes in the non-AIO case.
+ */
if (io->ci_nob > 0) {
- result += io->ci_nob;
- count -= io->ci_nob;
- *ppos = io->u.ci_wr.wr.crw_pos; /* for splice */
+ if (!is_aio) {
+ result += io->ci_nob;
+ *ppos = io->u.ci_wr.wr.crw_pos; /* for splice */
+ }
+ count -= io->ci_nob;
/* prepare IO restart */
- if (count > 0 && args->via_io_subtype == IO_NORMAL)
+ if (count > 0)
args->u.normal.via_iter = vio->vui_iter;
+
+ if (partial_io) {
+ /**
+			 * Re-expand the iov count because it was zero
+			 * after the IO finished.
+ */
+ iov_iter_reexpand(vio->vui_iter, count);
+ if (per_bytes == io->ci_nob)
+ io->ci_need_restart = 1;
+ }
}
out:
cl_io_fini(env, io);
file->f_path.dentry->d_name.name,
iot, rc, result, io->ci_need_restart);
- if ((rc == 0 || rc == -ENODATA) && count > 0 && io->ci_need_restart) {
+ if ((rc == 0 || rc == -ENODATA || rc == -ENOLCK) &&
+ count > 0 && io->ci_need_restart) {
CDEBUG(D_VFSTRACE,
"%s: restart %s from %lld, count: %zu, ret: %zd, rc: %d\n",
file_dentry(file)->d_name.name,
*ppos, count, result, rc);
/* preserve the tried count for FLR */
retried = io->ci_ndelay_tried;
- restarted = true;
+ dio_lock = io->ci_dio_lock;
goto restart;
}
+ if (io->ci_aio) {
+ /*
+ * VFS will call aio_complete() if no -EIOCBQUEUED
+ * is returned for AIO, so we can not call aio_complete()
+ * in our end_io().
+ */
+ if (rc != -EIOCBQUEUED)
+ io->ci_aio->cda_no_aio_complete = 1;
+ /**
+		 * Drop one extra reference so that end_io() can be
+		 * called for this IO context; we call it only after
+		 * we make sure all AIO requests have been processed.
+ */
+ cl_sync_io_note(env, &io->ci_aio->cda_sync,
+ rc == -EIOCBQUEUED ? 0 : rc);
+ if (!is_aio) {
+ cl_aio_free(io->ci_aio);
+ io->ci_aio = NULL;
+ }
+ }
+
if (iot == CIT_READ) {
if (result > 0)
ll_stats_ops_tally(ll_i2sbi(inode),
if (result > 0) {
ll_heat_add(file_inode(iocb->ki_filp), CIT_READ, result);
ll_stats_ops_tally(ll_i2sbi(file_inode(iocb->ki_filp)),
- LPROC_LL_READ_BYTES, result);
+ LPROC_LL_READ_BYTES, result);
}
return result;
{
struct lu_env *env;
struct vvp_io_args *args;
+ struct file *file = iocb->ki_filp;
ssize_t result;
ssize_t rc2;
__u16 refcheck;
+ ktime_t kstart = ktime_get();
+ bool cached;
+
+ if (!iov_iter_count(to))
+ return 0;
- ll_ras_enter(iocb->ki_filp);
+ /**
+ * Currently when PCC read failed, we do not fall back to the
+ * normal read path, just return the error.
+ * The resaon is that: for RW-PCC, the file data may be modified
+ * in the PCC and inconsistent with the data on OSTs (or file
+ * data has been removed from the Lustre file system), at this
+ * time, fallback to the normal read path may read the wrong
+ * data.
+ * TODO: for RO-PCC (readonly PCC), fall back to normal read
+ * path: read data from data copy on OSTs.
+ */
+ result = pcc_file_read_iter(iocb, to, &cached);
+ if (cached)
+ GOTO(out, result);
+
+ ll_ras_enter(file, iocb->ki_pos, iov_iter_count(to));
result = ll_do_fast_read(iocb, to);
if (result < 0 || iov_iter_count(to) == 0)
if (IS_ERR(env))
return PTR_ERR(env);
- args = ll_env_args(env, IO_NORMAL);
+ args = ll_env_args(env);
args->u.normal.via_iter = to;
args->u.normal.via_iocb = iocb;
- rc2 = ll_file_io_generic(env, args, iocb->ki_filp, CIT_READ,
+ rc2 = ll_file_io_generic(env, args, file, CIT_READ,
&iocb->ki_pos, iov_iter_count(to));
if (rc2 > 0)
result += rc2;
cl_env_put(env, &refcheck);
out:
+ if (result > 0) {
+ ll_rw_stats_tally(ll_i2sbi(file_inode(file)), current->pid,
+ file->private_data, iocb->ki_pos, result,
+ READ);
+ ll_stats_ops_tally(ll_i2sbi(file_inode(file)), LPROC_LL_READ,
+ ktime_us_delta(ktime_get(), kstart));
+ }
+
return result;
}
ll_heat_add(inode, CIT_WRITE, result);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_WRITE_BYTES,
result);
- ll_file_set_flag(ll_i2info(inode), LLIF_DATA_MODIFIED);
+ set_bit(LLIF_DATA_MODIFIED, &ll_i2info(inode)->lli_flags);
}
CDEBUG(D_VFSTRACE, "result: %zu, original count %zu\n", result, count);
struct vvp_io_args *args;
struct lu_env *env;
ssize_t rc_tiny = 0, rc_normal;
+ struct file *file = iocb->ki_filp;
__u16 refcheck;
+ bool cached;
+ ktime_t kstart = ktime_get();
+ int result;
ENTRY;
+ if (!iov_iter_count(from))
+ GOTO(out, rc_normal = 0);
+
+ /**
+ * When PCC write failed, we usually do not fall back to the normal
+ * write path, just return the error. But there is a special case when
+ * returned error code is -ENOSPC due to running out of space on PCC HSM
+	 * backend. At this time, it will fall back to the normal I/O path and
+ * retry the I/O. As the file is in HSM released state, it will restore
+ * the file data to OSTs first and redo the write again. And the
+ * restore process will revoke the layout lock and detach the file
+ * from PCC cache automatically.
+ */
+ result = pcc_file_write_iter(iocb, from, &cached);
+ if (cached && result != -ENOSPC && result != -EDQUOT)
+ GOTO(out, rc_normal = result);
+
/* NB: we can't do direct IO for tiny writes because they use the page
* cache, we can't do sync writes because tiny writes can't flush
* pages, and we can't do append writes because we can't guarantee the
* required DLM locks are held to protect file size.
*/
- if (ll_sbi_has_tiny_write(ll_i2sbi(file_inode(iocb->ki_filp))) &&
- !(iocb->ki_filp->f_flags & (O_DIRECT | O_SYNC | O_APPEND)))
+ if (ll_sbi_has_tiny_write(ll_i2sbi(file_inode(file))) &&
+ !(file->f_flags & (O_DIRECT | O_SYNC | O_APPEND)))
rc_tiny = ll_do_tiny_write(iocb, from);
/* In case of error, go on and try normal write - Only stop if tiny
if (IS_ERR(env))
return PTR_ERR(env);
- args = ll_env_args(env, IO_NORMAL);
+ args = ll_env_args(env);
args->u.normal.via_iter = from;
args->u.normal.via_iocb = iocb;
- rc_normal = ll_file_io_generic(env, args, iocb->ki_filp, CIT_WRITE,
- &iocb->ki_pos, iov_iter_count(from));
+ rc_normal = ll_file_io_generic(env, args, file, CIT_WRITE,
+ &iocb->ki_pos, iov_iter_count(from));
/* On success, combine bytes written. */
if (rc_tiny >= 0 && rc_normal > 0)
cl_env_put(env, &refcheck);
out:
+ if (rc_normal > 0) {
+ ll_rw_stats_tally(ll_i2sbi(file_inode(file)), current->pid,
+ file->private_data, iocb->ki_pos,
+ rc_normal, WRITE);
+ ll_stats_ops_tally(ll_i2sbi(file_inode(file)), LPROC_LL_WRITE,
+ ktime_us_delta(ktime_get(), kstart));
+ }
+
RETURN(rc_normal);
}
* XXX: exact copy from kernel code (__generic_file_aio_write_nolock)
*/
static int ll_file_get_iov_count(const struct iovec *iov,
- unsigned long *nr_segs, size_t *count)
+ unsigned long *nr_segs, size_t *count,
+ int access_flags)
{
size_t cnt = 0;
unsigned long seg;
cnt += iv->iov_len;
if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
return -EINVAL;
- if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
+ if (access_ok(access_flags, iv->iov_base, iv->iov_len))
continue;
if (seg == 0)
return -EFAULT;
ssize_t result;
ENTRY;
- result = ll_file_get_iov_count(iov, &nr_segs, &iov_count);
+ result = ll_file_get_iov_count(iov, &nr_segs, &iov_count, VERIFY_READ);
if (result)
RETURN(result);
+ if (!iov_count)
+ RETURN(0);
+
# ifdef HAVE_IOV_ITER_INIT_DIRECTION
iov_iter_init(&to, READ, iov, nr_segs, iov_count);
# else /* !HAVE_IOV_ITER_INIT_DIRECTION */
struct iovec iov = { .iov_base = buf, .iov_len = count };
struct kiocb kiocb;
ssize_t result;
+
ENTRY;
+ if (!count)
+ RETURN(0);
+
init_sync_kiocb(&kiocb, file);
kiocb.ki_pos = *ppos;
#ifdef HAVE_KIOCB_KI_LEFT
ssize_t result;
ENTRY;
- result = ll_file_get_iov_count(iov, &nr_segs, &iov_count);
+ result = ll_file_get_iov_count(iov, &nr_segs, &iov_count, VERIFY_WRITE);
if (result)
RETURN(result);
+ if (!iov_count)
+ RETURN(0);
+
# ifdef HAVE_IOV_ITER_INIT_DIRECTION
iov_iter_init(&from, WRITE, iov, nr_segs, iov_count);
# else /* !HAVE_IOV_ITER_INIT_DIRECTION */
ENTRY;
+ if (!count)
+ RETURN(0);
+
init_sync_kiocb(&kiocb, file);
kiocb.ki_pos = *ppos;
#ifdef HAVE_KIOCB_KI_LEFT
}
#endif /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */
-/*
- * Send file content (through pagecache) somewhere with helper
- */
-static ssize_t ll_file_splice_read(struct file *in_file, loff_t *ppos,
- struct pipe_inode_info *pipe, size_t count,
- unsigned int flags)
-{
- struct lu_env *env;
- struct vvp_io_args *args;
- ssize_t result;
- __u16 refcheck;
- ENTRY;
-
- ll_ras_enter(in_file);
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- RETURN(PTR_ERR(env));
-
- args = ll_env_args(env, IO_SPLICE);
- args->u.splice.via_pipe = pipe;
- args->u.splice.via_flags = flags;
-
- result = ll_file_io_generic(env, args, in_file, CIT_READ, ppos, count);
- cl_env_put(env, &refcheck);
- RETURN(result);
-}
-
int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
__u64 flags, struct lov_user_md *lum, int lum_size)
{
int rc;
ENTRY;
+ if ((__swab32(lum->lmm_magic) & le32_to_cpu(LOV_MAGIC_MASK)) ==
+ le32_to_cpu(LOV_MAGIC_MAGIC)) {
+ /* this code will only exist for big-endian systems */
+ lustre_swab_lov_user_md(lum, 0);
+ }
+
ll_inode_size_lock(inode);
rc = ll_intent_file_open(dentry, lum, lum_size, &oit);
if (rc < 0)
struct lov_mds_md **lmmp, int *lmm_size,
struct ptlrpc_request **request)
{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct mdt_body *body;
- struct lov_mds_md *lmm = NULL;
- struct ptlrpc_request *req = NULL;
- struct md_op_data *op_data;
- int rc, lmmsize;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct mdt_body *body;
+ struct lov_mds_md *lmm = NULL;
+ struct ptlrpc_request *req = NULL;
+ struct md_op_data *op_data;
+ int rc, lmmsize;
+
+ ENTRY;
rc = ll_get_default_mdsize(sbi, &lmmsize);
if (rc)
RETURN(rc);
- op_data = ll_prep_md_op_data(NULL, inode, NULL, filename,
- strlen(filename), lmmsize,
- LUSTRE_OPC_ANY, NULL);
- if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, filename,
+ strlen(filename), lmmsize,
+ LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ RETURN(PTR_ERR(op_data));
- op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
- rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
- ll_finish_md_op_data(op_data);
- if (rc < 0) {
- CDEBUG(D_INFO, "md_getattr_name failed "
- "on %s: rc %d\n", filename, rc);
- GOTO(out, rc);
- }
+ op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
+ rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
+ ll_finish_md_op_data(op_data);
+ if (rc < 0) {
+ CDEBUG(D_INFO, "md_getattr_name failed "
+ "on %s: rc %d\n", filename, rc);
+ GOTO(out, rc);
+ }
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- LASSERT(body != NULL); /* checked by mdc_getattr_name */
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ LASSERT(body != NULL); /* checked by mdc_getattr_name */
lmmsize = body->mbo_eadatasize;
if (!(body->mbo_valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
- lmmsize == 0) {
- GOTO(out, rc = -ENODATA);
- }
+ lmmsize == 0)
+ GOTO(out, rc = -ENODATA);
- lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize);
- LASSERT(lmm != NULL);
+ lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize);
+ LASSERT(lmm != NULL);
if (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V1) &&
lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3) &&
lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_FOREIGN))
GOTO(out, rc = -EPROTO);
- /*
- * This is coming from the MDS, so is probably in
- * little endian. We convert it to host endian before
- * passing it to userspace.
- */
- if (LOV_MAGIC != cpu_to_le32(LOV_MAGIC)) {
- int stripe_count;
+ /*
+ * This is coming from the MDS, so is probably in
+ * little endian. We convert it to host endian before
+ * passing it to userspace.
+ */
+ if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) {
+ int stripe_count = 0;
if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1) ||
lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)) {
if (le32_to_cpu(lmm->lmm_pattern) &
LOV_PATTERN_F_RELEASED)
stripe_count = 0;
- }
+ lustre_swab_lov_user_md((struct lov_user_md *)lmm, 0);
- /* if function called for directory - we should
- * avoid swab not existent lsm objects */
- if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1)) {
- lustre_swab_lov_user_md_v1(
- (struct lov_user_md_v1 *)lmm);
- if (S_ISREG(body->mbo_mode))
+ /* if this function is called for a directory, we should
+ * avoid swabbing non-existent lsm objects
+ */
+ if (lmm->lmm_magic == LOV_MAGIC_V1 &&
+ S_ISREG(body->mbo_mode))
lustre_swab_lov_user_md_objects(
- ((struct lov_user_md_v1 *)lmm)->lmm_objects,
- stripe_count);
- } else if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)) {
- lustre_swab_lov_user_md_v3(
- (struct lov_user_md_v3 *)lmm);
- if (S_ISREG(body->mbo_mode))
+ ((struct lov_user_md_v1 *)lmm)->lmm_objects,
+ stripe_count);
+ else if (lmm->lmm_magic == LOV_MAGIC_V3 &&
+ S_ISREG(body->mbo_mode))
lustre_swab_lov_user_md_objects(
- ((struct lov_user_md_v3 *)lmm)->lmm_objects,
- stripe_count);
- } else if (lmm->lmm_magic ==
- cpu_to_le32(LOV_MAGIC_COMP_V1)) {
+ ((struct lov_user_md_v3 *)lmm)->lmm_objects,
+ stripe_count);
+ } else if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_COMP_V1)) {
lustre_swab_lov_comp_md_v1(
- (struct lov_comp_md_v1 *)lmm);
- } else if (lmm->lmm_magic ==
- cpu_to_le32(LOV_MAGIC_FOREIGN)) {
- struct lov_foreign_md *lfm;
-
- lfm = (struct lov_foreign_md *)lmm;
- __swab32s(&lfm->lfm_magic);
- __swab32s(&lfm->lfm_length);
- __swab32s(&lfm->lfm_type);
- __swab32s(&lfm->lfm_flags);
+ (struct lov_comp_md_v1 *)lmm);
}
}
+ if (lmm->lmm_magic == LOV_MAGIC_COMP_V1) {
+ struct lov_comp_md_v1 *comp_v1 = NULL;
+ struct lov_comp_md_entry_v1 *ent;
+ struct lov_user_md_v1 *v1;
+ __u32 off;
+ int i = 0;
+
+ comp_v1 = (struct lov_comp_md_v1 *)lmm;
+ /* Dump the striping information */
+ for (; i < comp_v1->lcm_entry_count; i++) {
+ ent = &comp_v1->lcm_entries[i];
+ off = ent->lcme_offset;
+ v1 = (struct lov_user_md_v1 *)((char *)lmm + off);
+ CDEBUG(D_INFO,
+ "comp[%d]: stripe_count=%u, stripe_size=%u\n",
+ i, v1->lmm_stripe_count, v1->lmm_stripe_size);
+ }
+
+ /**
+ * Return valid stripe_count and stripe_size instead of 0 for
+ * DoM files to avoid divide-by-zero for older userspace that
+ * calls this ioctl, e.g. lustre ADIO driver.
+ */
+ if (lmm->lmm_stripe_count == 0)
+ lmm->lmm_stripe_count = 1;
+ if (lmm->lmm_stripe_size == 0) {
+ /* Since the first component of the file data is placed
+ * on the MDT for faster access, the stripe_size reported
+ * to applications doing large IOs is taken from the
+ * second (or last) component instead.
+ */
+ if (lmm->lmm_pattern == LOV_PATTERN_MDT)
+ i = comp_v1->lcm_entry_count > 1 ? 1 : 0;
+ else
+ i = comp_v1->lcm_entry_count > 1 ?
+ comp_v1->lcm_entry_count - 1 : 0;
+ ent = &comp_v1->lcm_entries[i];
+ off = ent->lcme_offset;
+ v1 = (struct lov_user_md_v1 *)((char *)lmm + off);
+ lmm->lmm_stripe_size = v1->lmm_stripe_size;
+ }
+ }
out:
*lmmp = lmm;
*lmm_size = lmmsize;
*request = req;
- return rc;
+ RETURN(rc);
}
static int ll_lov_setea(struct inode *inode, struct file *file,
int rc;
ENTRY;
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ if (!capable(CAP_SYS_ADMIN))
RETURN(-EPERM);
OBD_ALLOC_LARGE(lump, lum_size);
GOTO(out, rc);
rc = ll_file_getstripe(inode, arg, lum_size);
+ if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode) &&
+ ll_i2info(inode)->lli_clob) {
+ struct iattr attr = { 0 };
+
+ rc = cl_setattr_ost(ll_i2info(inode)->lli_clob, &attr,
+ OP_XVALID_FLAGS, LUSTRE_ENCRYPT_FL);
+ }
}
cl_lov_delay_create_clear(&file->f_flags);
out:
- OBD_FREE(klum, lum_size);
+ OBD_FREE_LARGE(klum, lum_size);
RETURN(rc);
}
+
static int
ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct cl_object *obj = lli->lli_clob;
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct ll_grouplock grouplock;
int rc;
ENTRY;
RETURN(-EINVAL);
}
- if (ll_file_nolock(file))
- RETURN(-EOPNOTSUPP);
+ if (ll_file_nolock(file))
+ RETURN(-EOPNOTSUPP);
+retry:
+ if (file->f_flags & O_NONBLOCK) {
+ if (!mutex_trylock(&lli->lli_group_mutex))
+ RETURN(-EAGAIN);
+ } else
+ mutex_lock(&lli->lli_group_mutex);
- spin_lock(&lli->lli_lock);
if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
CWARN("group lock already existed with gid %lu\n",
fd->fd_grouplock.lg_gid);
- spin_unlock(&lli->lli_lock);
- RETURN(-EINVAL);
+ GOTO(out, rc = -EINVAL);
+ }
+ if (arg != lli->lli_group_gid && lli->lli_group_users != 0) {
+ if (file->f_flags & O_NONBLOCK)
+ GOTO(out, rc = -EAGAIN);
+ mutex_unlock(&lli->lli_group_mutex);
+ wait_var_event(&lli->lli_group_users, !lli->lli_group_users);
+ GOTO(retry, rc = 0);
}
LASSERT(fd->fd_grouplock.lg_lock == NULL);
- spin_unlock(&lli->lli_lock);
/**
* XXX: group lock needs to protect all OST objects while PFL
env = cl_env_get(&refcheck);
if (IS_ERR(env))
- RETURN(PTR_ERR(env));
+ GOTO(out, rc = PTR_ERR(env));
rc = cl_object_layout_get(env, obj, &cl);
- if (!rc && cl.cl_is_composite)
+ if (rc >= 0 && cl.cl_is_composite)
rc = ll_layout_write_intent(inode, LAYOUT_INTENT_WRITE,
&ext);
cl_env_put(env, &refcheck);
- if (rc)
- RETURN(rc);
+ if (rc < 0)
+ GOTO(out, rc);
}
rc = cl_get_grouplock(ll_i2info(inode)->lli_clob,
arg, (file->f_flags & O_NONBLOCK), &grouplock);
- if (rc)
- RETURN(rc);
- spin_lock(&lli->lli_lock);
- if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
- spin_unlock(&lli->lli_lock);
- CERROR("another thread just won the race\n");
- cl_put_grouplock(&grouplock);
- RETURN(-EINVAL);
- }
+ if (rc)
+ GOTO(out, rc);
fd->fd_flags |= LL_FILE_GROUP_LOCKED;
fd->fd_grouplock = grouplock;
- spin_unlock(&lli->lli_lock);
+ if (lli->lli_group_users == 0)
+ lli->lli_group_gid = grouplock.lg_gid;
+ lli->lli_group_users++;
CDEBUG(D_INFO, "group lock %lu obtained\n", arg);
- RETURN(0);
+out:
+ mutex_unlock(&lli->lli_group_mutex);
+
+ RETURN(rc);
}
static int ll_put_grouplock(struct inode *inode, struct file *file,
unsigned long arg)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct ll_grouplock grouplock;
+ int rc;
ENTRY;
- spin_lock(&lli->lli_lock);
+ mutex_lock(&lli->lli_group_mutex);
if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- spin_unlock(&lli->lli_lock);
- CWARN("no group lock held\n");
- RETURN(-EINVAL);
- }
+ CWARN("no group lock held\n");
+ GOTO(out, rc = -EINVAL);
+ }
LASSERT(fd->fd_grouplock.lg_lock != NULL);
if (fd->fd_grouplock.lg_gid != arg) {
CWARN("group lock %lu doesn't match current id %lu\n",
arg, fd->fd_grouplock.lg_gid);
- spin_unlock(&lli->lli_lock);
- RETURN(-EINVAL);
+ GOTO(out, rc = -EINVAL);
}
grouplock = fd->fd_grouplock;
memset(&fd->fd_grouplock, 0, sizeof(fd->fd_grouplock));
fd->fd_flags &= ~LL_FILE_GROUP_LOCKED;
- spin_unlock(&lli->lli_lock);
cl_put_grouplock(&grouplock);
+
+ lli->lli_group_users--;
+ if (lli->lli_group_users == 0) {
+ lli->lli_group_gid = 0;
+ wake_up_var(&lli->lli_group_users);
+ }
CDEBUG(D_INFO, "group lock %lu released\n", arg);
- RETURN(0);
+ GOTO(out, rc = 0);
+out:
+ mutex_unlock(&lli->lli_group_mutex);
+
+ RETURN(rc);
}
/**
*/
int ll_release_openhandle(struct dentry *dentry, struct lookup_intent *it)
{
- struct inode *inode = dentry->d_inode;
- struct obd_client_handle *och;
- int rc;
- ENTRY;
+ struct inode *inode = dentry->d_inode;
+ struct obd_client_handle *och;
+ int rc;
+ ENTRY;
- LASSERT(inode);
+ LASSERT(inode);
- /* Root ? Do nothing. */
- if (dentry->d_inode->i_sb->s_root == dentry)
- RETURN(0);
+ /* Root ? Do nothing. */
+ if (is_root_inode(inode))
+ RETURN(0);
- /* No open handle to close? Move away */
- if (!it_disposition(it, DISP_OPEN_OPEN))
- RETURN(0);
+ /* No open handle to close? Move away */
+ if (!it_disposition(it, DISP_OPEN_OPEN))
+ RETURN(0);
- LASSERT(it_open_error(DISP_OPEN_OPEN, it) == 0);
+ LASSERT(it_open_error(DISP_OPEN_OPEN, it) == 0);
- OBD_ALLOC(och, sizeof(*och));
- if (!och)
- GOTO(out, rc = -ENOMEM);
+ OBD_ALLOC(och, sizeof(*och));
+ if (!och)
+ GOTO(out, rc = -ENOMEM);
- ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
+ rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
+ if (rc)
+ GOTO(out, rc);
rc = ll_close_inode_openhandle(inode, och, 0, NULL);
out:
ENTRY;
- if (!cfs_capable(CFS_CAP_DAC_READ_SEARCH) &&
+ if (!capable(CAP_DAC_READ_SEARCH) &&
!(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
RETURN(-EPERM);
/* Non-root users are forbidden to set or clear flags which are
* NOT defined in HSM_USER_MASK. */
if (((hss->hss_setmask | hss->hss_clearmask) & ~HSM_USER_MASK) &&
- !cfs_capable(CFS_CAP_SYS_ADMIN))
+ !capable(CAP_SYS_ADMIN))
RETURN(-EPERM);
if (!exp_connect_archive_id_array(exp)) {
ENTRY;
- CDEBUG(D_VFSTRACE, "Lock request: file=%.*s, inode=%p, mode=%s "
- "start=%llu, end=%llu\n", dentry->d_name.len,
- dentry->d_name.name, dentry->d_inode,
+ CDEBUG(D_VFSTRACE,
+ "Lock request: file=%pd, inode=%p, mode=%s start=%llu, end=%llu\n",
+ dentry, dentry->d_inode,
user_lockname[ladvise->lla_lockahead_mode], (__u64) start,
(__u64) end);
if (advice > LU_LADVISE_MAX || advice == LU_LADVISE_INVALID) {
rc = -EINVAL;
- CDEBUG(D_VFSTRACE, "%s: advice with value '%d' not recognized,"
- "last supported advice is %s (value '%d'): rc = %d\n",
+ CDEBUG(D_VFSTRACE,
+ "%s: advice with value '%d' not recognized, last supported advice is %s (value '%d'): rc = %d\n",
sbi->ll_fsname, advice,
ladvise_names[LU_LADVISE_MAX-1], LU_LADVISE_MAX-1, rc);
GOTO(out, rc);
ladvise_names[advice], rc);
GOTO(out, rc);
}
+ /* fallthrough */
case LU_LADVISE_WILLREAD:
case LU_LADVISE_DONTNEED:
default:
static int ll_lock_noexpand(struct file *file, int flags)
{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
fd->ll_lock_no_expand = !(flags & LF_UNSET);
RETURN(-EFAULT);
fsxattr.fsx_xflags = ll_inode_flags_to_xflags(inode->i_flags);
- if (ll_file_test_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT))
+ if (test_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags))
fsxattr.fsx_xflags |= FS_XFLAG_PROJINHERIT;
fsxattr.fsx_projid = ll_i2info(inode)->lli_projid;
if (copy_to_user((struct fsxattr __user *)arg,
if (ll_i2info(inode)->lli_projid != fa->fsx_projid)
return -EINVAL;
- if (ll_file_test_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT)) {
+ if (test_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags)) {
if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT))
return -EINVAL;
} else {
int ll_ioctl_fssetxattr(struct inode *inode, unsigned int cmd,
unsigned long arg)
{
-
struct md_op_data *op_data;
struct ptlrpc_request *req = NULL;
- int rc = 0;
struct fsxattr fsxattr;
struct cl_object *obj;
- struct iattr *attr;
- int flags;
+ unsigned int inode_flags;
+ int rc = 0;
if (copy_from_user(&fsxattr,
(const struct fsxattr __user *)arg,
if (IS_ERR(op_data))
RETURN(PTR_ERR(op_data));
- flags = ll_xflags_to_inode_flags(fsxattr.fsx_xflags);
- op_data->op_attr_flags = ll_inode_to_ext_flags(flags);
+ inode_flags = ll_xflags_to_inode_flags(fsxattr.fsx_xflags);
+ op_data->op_attr_flags = ll_inode_to_ext_flags(inode_flags);
if (fsxattr.fsx_xflags & FS_XFLAG_PROJINHERIT)
op_data->op_attr_flags |= LUSTRE_PROJINHERIT_FL;
op_data->op_projid = fsxattr.fsx_projid;
op_data->op_xvalid |= OP_XVALID_PROJID | OP_XVALID_FLAGS;
- rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data, NULL,
- 0, &req);
+ rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data, NULL, 0, &req);
ptlrpc_req_finished(req);
if (rc)
GOTO(out_fsxattr, rc);
ll_update_inode_flags(inode, op_data->op_attr_flags);
- obj = ll_i2info(inode)->lli_clob;
- if (obj == NULL)
+
+ /* Avoid OST RPC if this is only ioctl setting project inherit flag */
+ if (fsxattr.fsx_xflags == 0 ||
+ fsxattr.fsx_xflags == FS_XFLAG_PROJINHERIT)
GOTO(out_fsxattr, rc);
- OBD_ALLOC_PTR(attr);
- if (attr == NULL)
- GOTO(out_fsxattr, rc = -ENOMEM);
+ obj = ll_i2info(inode)->lli_clob;
+ if (obj) {
+ struct iattr attr = { 0 };
+
+ rc = cl_setattr_ost(obj, &attr, OP_XVALID_FLAGS,
+ fsxattr.fsx_xflags);
+ }
- rc = cl_setattr_ost(obj, attr, OP_XVALID_FLAGS,
- fsxattr.fsx_xflags);
- OBD_FREE_PTR(attr);
out_fsxattr:
ll_finish_md_op_data(op_data);
RETURN(rc);
unsigned long arg)
{
struct inode *inode = file_inode(file);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct ll_inode_info *lli = ll_i2info(inode);
struct obd_client_handle *och = NULL;
struct split_param sp;
- bool lease_broken;
+ struct pcc_param param;
+ bool lease_broken = false;
fmode_t fmode = 0;
enum mds_op_bias bias = 0;
struct file *layout_file = NULL;
void *data = NULL;
size_t data_size = 0;
- long rc;
+ bool attached = false;
+ long rc, rc2 = 0;
+
ENTRY;
mutex_lock(&lli->lli_och_mutex);
mutex_unlock(&lli->lli_och_mutex);
if (och == NULL)
- GOTO(out, rc = -ENOLCK);
+ RETURN(-ENOLCK);
fmode = och->och_flags;
switch (ioc->lil_flags) {
case LL_LEASE_RESYNC_DONE:
if (ioc->lil_count > IOC_IDS_MAX)
- GOTO(out, rc = -EINVAL);
+ GOTO(out_lease_close, rc = -EINVAL);
data_size = offsetof(typeof(*ioc), lil_ids[ioc->lil_count]);
OBD_ALLOC(data, data_size);
if (!data)
- GOTO(out, rc = -ENOMEM);
+ GOTO(out_lease_close, rc = -ENOMEM);
if (copy_from_user(data, (void __user *)arg, data_size))
- GOTO(out, rc = -EFAULT);
+ GOTO(out_lease_close, rc = -EFAULT);
bias = MDS_CLOSE_RESYNC_DONE;
break;
int fd;
if (ioc->lil_count != 1)
- GOTO(out, rc = -EINVAL);
+ GOTO(out_lease_close, rc = -EINVAL);
arg += sizeof(*ioc);
if (copy_from_user(&fd, (void __user *)arg, sizeof(__u32)))
- GOTO(out, rc = -EFAULT);
+ GOTO(out_lease_close, rc = -EFAULT);
layout_file = fget(fd);
if (!layout_file)
- GOTO(out, rc = -EBADF);
+ GOTO(out_lease_close, rc = -EBADF);
if ((file->f_flags & O_ACCMODE) == O_RDONLY ||
(layout_file->f_flags & O_ACCMODE) == O_RDONLY)
- GOTO(out, rc = -EPERM);
+ GOTO(out_lease_close, rc = -EPERM);
data = file_inode(layout_file);
bias = MDS_CLOSE_LAYOUT_MERGE;
int mirror_id;
if (ioc->lil_count != 2)
- GOTO(out, rc = -EINVAL);
+ GOTO(out_lease_close, rc = -EINVAL);
arg += sizeof(*ioc);
if (copy_from_user(&fdv, (void __user *)arg, sizeof(__u32)))
- GOTO(out, rc = -EFAULT);
+ GOTO(out_lease_close, rc = -EFAULT);
arg += sizeof(__u32);
if (copy_from_user(&mirror_id, (void __user *)arg,
sizeof(__u32)))
- GOTO(out, rc = -EFAULT);
+ GOTO(out_lease_close, rc = -EFAULT);
layout_file = fget(fdv);
if (!layout_file)
- GOTO(out, rc = -EBADF);
+ GOTO(out_lease_close, rc = -EBADF);
+ /* if layout_file == file, it means to destroy the mirror */
sp.sp_inode = file_inode(layout_file);
sp.sp_mirror_id = (__u16)mirror_id;
data = &sp;
bias = MDS_CLOSE_LAYOUT_SPLIT;
break;
}
+ case LL_LEASE_PCC_ATTACH:
+ if (ioc->lil_count != 1)
+ RETURN(-EINVAL);
+
+ arg += sizeof(*ioc);
+ if (copy_from_user(¶m.pa_archive_id, (void __user *)arg,
+ sizeof(__u32)))
+ GOTO(out_lease_close, rc2 = -EFAULT);
+
+ rc2 = pcc_readwrite_attach(file, inode, param.pa_archive_id);
+ if (rc2)
+ GOTO(out_lease_close, rc2);
+
+ attached = true;
+ /* Grab latest data version */
+ rc2 = ll_data_version(inode, ¶m.pa_data_version,
+ LL_DV_WR_FLUSH);
+ if (rc2)
+ GOTO(out_lease_close, rc2);
+
+ data = ¶m;
+ bias = MDS_PCC_ATTACH;
+ break;
default:
/* without close intent */
break;
}
+out_lease_close:
rc = ll_lease_close_intent(och, inode, &lease_broken, bias, data);
if (rc < 0)
GOTO(out, rc);
if (layout_file)
fput(layout_file);
break;
+ case LL_LEASE_PCC_ATTACH:
+ if (!rc)
+ rc = rc2;
+ rc = pcc_readwrite_attach_fini(file, inode,
+ param.pa_layout_gen,
+ lease_broken, rc,
+ attached);
+ break;
}
if (!rc)
{
struct inode *inode = file_inode(file);
struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
struct obd_client_handle *och = NULL;
__u64 open_flags = 0;
bool lease_broken;
ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(file);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
int flags, rc;
ENTRY;
RETURN(0);
}
+ case OBD_IOC_GETNAME_OLD:
+ /* fall through */
case OBD_IOC_GETDTNAME:
+ /* fall through */
case OBD_IOC_GETMDNAME:
RETURN(ll_get_obd_name(inode, cmd, arg));
case LL_IOC_HSM_STATE_GET: {
fd->fd_designated_mirror = (__u32)arg;
RETURN(0);
}
- case LL_IOC_FSGETXATTR:
+ case FS_IOC_FSGETXATTR:
RETURN(ll_ioctl_fsgetxattr(inode, cmd, arg));
- case LL_IOC_FSSETXATTR:
+ case FS_IOC_FSSETXATTR:
RETURN(ll_ioctl_fssetxattr(inode, cmd, arg));
case BLKSSZGET:
RETURN(put_user(PAGE_SIZE, (int __user *)arg));
rc = ll_heat_set(inode, flags);
RETURN(rc);
}
+ case LL_IOC_PCC_DETACH: {
+ struct lu_pcc_detach *detach;
+
+ OBD_ALLOC_PTR(detach);
+ if (detach == NULL)
+ RETURN(-ENOMEM);
+
+ if (copy_from_user(detach,
+ (const struct lu_pcc_detach __user *)arg,
+ sizeof(*detach)))
+ GOTO(out_detach_free, rc = -EFAULT);
+
+ if (!S_ISREG(inode->i_mode))
+ GOTO(out_detach_free, rc = -EINVAL);
+
+ if (!inode_owner_or_capable(inode))
+ GOTO(out_detach_free, rc = -EPERM);
+
+ rc = pcc_ioctl_detach(inode, detach->pccd_opt);
+out_detach_free:
+ OBD_FREE_PTR(detach);
+ RETURN(rc);
+ }
+ case LL_IOC_PCC_STATE: {
+ struct lu_pcc_state __user *ustate =
+ (struct lu_pcc_state __user *)arg;
+ struct lu_pcc_state *state;
+
+ OBD_ALLOC_PTR(state);
+ if (state == NULL)
+ RETURN(-ENOMEM);
+
+ if (copy_from_user(state, ustate, sizeof(*state)))
+ GOTO(out_state, rc = -EFAULT);
+
+ rc = pcc_ioctl_state(file, inode, state);
+ if (rc)
+ GOTO(out_state, rc);
+
+ if (copy_to_user(ustate, state, sizeof(*state)))
+ GOTO(out_state, rc = -EFAULT);
+
+out_state:
+ OBD_FREE_PTR(state);
+ RETURN(rc);
+ }
+#ifdef HAVE_LUSTRE_CRYPTO
+ case LL_IOC_SET_ENCRYPTION_POLICY:
+ if (!ll_sbi_has_encrypt(ll_i2sbi(inode)))
+ return -EOPNOTSUPP;
+ return llcrypt_ioctl_set_policy(file, (const void __user *)arg);
+ case LL_IOC_GET_ENCRYPTION_POLICY_EX:
+ if (!ll_sbi_has_encrypt(ll_i2sbi(inode)))
+ return -EOPNOTSUPP;
+ return llcrypt_ioctl_get_policy_ex(file, (void __user *)arg);
+ case LL_IOC_ADD_ENCRYPTION_KEY:
+ if (!ll_sbi_has_encrypt(ll_i2sbi(inode)))
+ return -EOPNOTSUPP;
+ return llcrypt_ioctl_add_key(file, (void __user *)arg);
+ case LL_IOC_REMOVE_ENCRYPTION_KEY:
+ if (!ll_sbi_has_encrypt(ll_i2sbi(inode)))
+ return -EOPNOTSUPP;
+ return llcrypt_ioctl_remove_key(file, (void __user *)arg);
+ case LL_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
+ if (!ll_sbi_has_encrypt(ll_i2sbi(inode)))
+ return -EOPNOTSUPP;
+ return llcrypt_ioctl_remove_key_all_users(file,
+ (void __user *)arg);
+ case LL_IOC_GET_ENCRYPTION_KEY_STATUS:
+ if (!ll_sbi_has_encrypt(ll_i2sbi(inode)))
+ return -EOPNOTSUPP;
+ return llcrypt_ioctl_get_key_status(file, (void __user *)arg);
+#endif
+
+ case LL_IOC_UNLOCK_FOREIGN: {
+ struct dentry *dentry = file_dentry(file);
+
+ /* if not a foreign symlink do nothing */
+ if (ll_foreign_is_removable(dentry, true)) {
+ CDEBUG(D_INFO,
+ "prevent unlink of non-foreign file ("DFID")\n",
+ PFID(ll_inode2fid(inode)));
+ RETURN(-EOPNOTSUPP);
+ }
+ RETURN(0);
+ }
+
default:
RETURN(obd_iocontrol(cmd, ll_i2dtexp(inode), 0, NULL,
(void __user *)arg));
}
}
-#ifndef HAVE_FILE_LLSEEK_SIZE
-static inline loff_t
-llseek_execute(struct file *file, loff_t offset, loff_t maxsize)
+loff_t ll_lseek(struct file *file, loff_t offset, int whence)
{
- if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET))
- return -EINVAL;
- if (offset > maxsize)
- return -EINVAL;
+ struct inode *inode = file_inode(file);
+ struct lu_env *env;
+ struct cl_io *io;
+ struct cl_lseek_io *lsio;
+ __u16 refcheck;
+ int rc;
+ loff_t retval;
- if (offset != file->f_pos) {
- file->f_pos = offset;
- file->f_version = 0;
- }
- return offset;
-}
+ ENTRY;
-static loff_t
-generic_file_llseek_size(struct file *file, loff_t offset, int origin,
- loff_t maxsize, loff_t eof)
-{
- struct inode *inode = file_inode(file);
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
- switch (origin) {
- case SEEK_END:
- offset += eof;
- break;
- case SEEK_CUR:
- /*
- * Here we special-case the lseek(fd, 0, SEEK_CUR)
- * position-querying operation. Avoid rewriting the "same"
- * f_pos value back to the file because a concurrent read(),
- * write() or lseek() might have altered it
- */
- if (offset == 0)
- return file->f_pos;
- /*
- * f_lock protects against read/modify/write race with other
- * SEEK_CURs. Note that parallel writes and reads behave
- * like SEEK_SET.
- */
- inode_lock(inode);
- offset = llseek_execute(file, file->f_pos + offset, maxsize);
- inode_unlock(inode);
- return offset;
- case SEEK_DATA:
- /*
- * In the generic case the entire file is data, so as long as
- * offset isn't at the end of the file then the offset is data.
- */
- if (offset >= eof)
- return -ENXIO;
- break;
- case SEEK_HOLE:
- /*
- * There is a virtual hole at the end of the file, so as long as
- * offset isn't i_size or larger, return i_size.
- */
- if (offset >= eof)
- return -ENXIO;
- offset = eof;
- break;
- }
+ io = vvp_env_thread_io(env);
+ io->ci_obj = ll_i2info(inode)->lli_clob;
+ ll_io_set_mirror(io, file);
+
+ lsio = &io->u.ci_lseek;
+ lsio->ls_start = offset;
+ lsio->ls_whence = whence;
+ lsio->ls_result = -ENXIO;
+
+ do {
+ rc = cl_io_init(env, io, CIT_LSEEK, io->ci_obj);
+ if (!rc) {
+ struct vvp_io *vio = vvp_env_io(env);
+
+ vio->vui_fd = file->private_data;
+ rc = cl_io_loop(env, io);
+ } else {
+ rc = io->ci_result;
+ }
+ retval = rc ? : lsio->ls_result;
+ cl_io_fini(env, io);
+ } while (unlikely(io->ci_need_restart));
+
+ cl_env_put(env, &refcheck);
- return llseek_execute(file, offset, maxsize);
+ RETURN(retval);
}
-#endif
static loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
{
struct inode *inode = file_inode(file);
- loff_t retval, eof = 0;
+ loff_t retval = offset, eof = 0;
+ ktime_t kstart = ktime_get();
ENTRY;
- retval = offset + ((origin == SEEK_END) ? i_size_read(inode) :
- (origin == SEEK_CUR) ? file->f_pos : 0);
+
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), to=%llu=%#llx(%d)\n",
PFID(ll_inode2fid(inode)), inode, retval, retval,
origin);
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LLSEEK, 1);
- if (origin == SEEK_END || origin == SEEK_HOLE || origin == SEEK_DATA) {
+ if (origin == SEEK_END) {
retval = ll_glimpse_size(inode);
if (retval != 0)
RETURN(retval);
eof = i_size_read(inode);
}
- retval = ll_generic_file_llseek_size(file, offset, origin,
- ll_file_maxbytes(inode), eof);
+ if (origin == SEEK_HOLE || origin == SEEK_DATA) {
+ if (offset < 0)
+ return -ENXIO;
+
+ /* flush local cache first if any */
+ cl_sync_file_range(inode, offset, OBD_OBJECT_EOF,
+ CL_FSYNC_LOCAL, 0);
+
+ retval = ll_lseek(file, offset, origin);
+ if (retval < 0)
+ return retval;
+ retval = vfs_setpos(file, retval, ll_file_maxbytes(inode));
+ } else {
+ retval = generic_file_llseek_size(file, offset, origin,
+ ll_file_maxbytes(inode), eof);
+ }
+ if (retval >= 0)
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LLSEEK,
+ ktime_us_delta(ktime_get(), kstart));
RETURN(retval);
}
{
struct inode *inode = file_inode(file);
struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
int rc, err;
LASSERT(!S_ISDIR(inode->i_mode));
* file_dentry() as is done otherwise.
*/
-#ifdef HAVE_FILE_FSYNC_4ARGS
int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
struct dentry *dentry = file_dentry(file);
-#elif defined(HAVE_FILE_FSYNC_2ARGS)
-int ll_fsync(struct file *file, int datasync)
-{
- struct dentry *dentry = file_dentry(file);
- loff_t start = 0;
- loff_t end = LLONG_MAX;
-#else
-int ll_fsync(struct file *file, struct dentry *dentry, int datasync)
-{
- loff_t start = 0;
- loff_t end = LLONG_MAX;
-#endif
struct inode *inode = dentry->d_inode;
struct ll_inode_info *lli = ll_i2info(inode);
struct ptlrpc_request *req;
+ ktime_t kstart = ktime_get();
int rc, err;
+
ENTRY;
- CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
- PFID(ll_inode2fid(inode)), inode);
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC, 1);
+ CDEBUG(D_VFSTRACE,
+ "VFS Op:inode="DFID"(%p), start %lld, end %lld, datasync %d\n",
+ PFID(ll_inode2fid(inode)), inode, start, end, datasync);
-#ifdef HAVE_FILE_FSYNC_4ARGS
- rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
- inode_lock(inode);
-#else
/* fsync's caller has already called _fdata{sync,write}, we want
* that IO to finish before calling the osc and mdc sync methods */
- rc = filemap_fdatawait(inode->i_mapping);
-#endif
+ rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ inode_lock(inode);
/* catch async errors that were recorded back when async writeback
* failed for pages in this mapping. */
ptlrpc_req_finished(req);
if (S_ISREG(inode->i_mode)) {
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_file_data *fd = file->private_data;
+ bool cached;
- err = cl_sync_file_range(inode, start, end, CL_FSYNC_ALL, 0);
+ /* Sync metadata on MDT first, and then sync the cached data
+ * on PCC.
+ */
+ err = pcc_fsync(file, start, end, datasync, &cached);
+ if (!cached)
+ err = cl_sync_file_range(inode, start, end,
+ CL_FSYNC_ALL, 0);
if (rc == 0 && err < 0)
rc = err;
if (rc < 0)
fd->fd_write_failed = false;
}
-#ifdef HAVE_FILE_FSYNC_4ARGS
inode_unlock(inode);
-#endif
+
+ if (!rc)
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC,
+ ktime_us_delta(ktime_get(), kstart));
RETURN(rc);
}
struct lustre_handle lockh = { 0 };
union ldlm_policy_data flock = { { 0 } };
int fl_type = file_lock->fl_type;
+ ktime_t kstart = ktime_get();
__u64 flags = 0;
int rc;
int rc2 = 0;
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID" file_lock=%p\n",
PFID(ll_inode2fid(inode)), file_lock);
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FLOCK, 1);
-
- if (file_lock->fl_flags & FL_FLOCK) {
- LASSERT((cmd == F_SETLKW) || (cmd == F_SETLK));
- /* flocks are whole-file locks */
- flock.l_flock.end = OFFSET_MAX;
- /* For flocks owner is determined by the local file desctiptor*/
- flock.l_flock.owner = (unsigned long)file_lock->fl_file;
- } else if (file_lock->fl_flags & FL_POSIX) {
- flock.l_flock.owner = (unsigned long)file_lock->fl_owner;
- flock.l_flock.start = file_lock->fl_start;
- flock.l_flock.end = file_lock->fl_end;
- } else {
- RETURN(-EINVAL);
- }
- flock.l_flock.pid = file_lock->fl_pid;
+ if (file_lock->fl_flags & FL_FLOCK) {
+ LASSERT((cmd == F_SETLKW) || (cmd == F_SETLK));
+ /* flocks are whole-file locks */
+ flock.l_flock.end = OFFSET_MAX;
+ /* For flocks owner is determined by the local file descriptor */
+ flock.l_flock.owner = (unsigned long)file_lock->fl_file;
+ } else if (file_lock->fl_flags & FL_POSIX) {
+ flock.l_flock.owner = (unsigned long)file_lock->fl_owner;
+ flock.l_flock.start = file_lock->fl_start;
+ flock.l_flock.end = file_lock->fl_end;
+ } else {
+ RETURN(-EINVAL);
+ }
+ flock.l_flock.pid = file_lock->fl_pid;
+#if defined(HAVE_LM_COMPARE_OWNER) || defined(lm_compare_owner)
/* Somewhat ugly workaround for svc lockd.
* lockd installs custom fl_lmops->lm_compare_owner that checks
* for the fl_owner to be the same (which it always is on local node
* pointer space for current->files are not intersecting */
if (file_lock->fl_lmops && file_lock->fl_lmops->lm_compare_owner)
flock.l_flock.owner = (unsigned long)file_lock->fl_pid;
+#endif
switch (fl_type) {
case F_RDLCK:
ll_finish_md_op_data(op_data);
- RETURN(rc);
+ if (!rc)
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FLOCK,
+ ktime_us_delta(ktime_get(), kstart));
+ RETURN(rc);
}
int ll_get_fid_by_name(struct inode *parent, const char *name,
* by checking the migrate FID against the FID of the
* filesystem root.
*/
- if (child_inode == parent->i_sb->s_root->d_inode)
+ if (is_root_inode(child_inode))
GOTO(out_iput, rc = -EINVAL);
+ if (IS_ENCRYPTED(child_inode)) {
+ rc = llcrypt_get_encryption_info(child_inode);
+ if (rc)
+ GOTO(out_iput, rc);
+ if (!llcrypt_has_encryption_key(child_inode)) {
+ CDEBUG(D_SEC, "no enc key for "DFID"\n",
+ PFID(ll_inode2fid(child_inode)));
+ GOTO(out_iput, rc = -ENOKEY);
+ }
+ }
+
op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen,
child_inode->i_mode, LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
static int
ll_file_noflock(struct file *file, int cmd, struct file_lock *file_lock)
{
- ENTRY;
+ struct ll_file_data *fd = file->private_data;
+ ENTRY;
- RETURN(-ENOSYS);
+ /*
+ * In order to avoid flood of warning messages, only print one message
+ * for one file. And the entire message rate on the client is limited
+ * by CDEBUG_LIMIT too.
+ */
+ if (!(fd->fd_flags & LL_FILE_FLOCK_WARNING)) {
+ fd->fd_flags |= LL_FILE_FLOCK_WARNING;
+ CDEBUG_LIMIT(D_TTY | D_CONSOLE,
+ "flock disabled, mount with '-o [local]flock' to enable\r\n");
+ }
+ RETURN(-ENOSYS);
}
/**
ldlm_lockname[mode]);
flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING | LDLM_FL_TEST_LOCK;
- for (i = 0; i <= MDS_INODELOCK_MAXSHIFT && *bits != 0; i++) {
- policy.l_inodebits.bits = *bits & (1 << i);
+ for (i = 0; i < MDS_INODELOCK_NUMBITS && *bits != 0; i++) {
+ policy.l_inodebits.bits = *bits & BIT(i);
if (policy.l_inodebits.bits == 0)
continue;
- if (md_lock_match(ll_i2mdexp(inode), flags, fid, LDLM_IBITS,
- &policy, mode, &lockh)) {
- struct ldlm_lock *lock;
-
- lock = ldlm_handle2lock(&lockh);
- if (lock) {
- *bits &=
- ~(lock->l_policy_data.l_inodebits.bits);
- LDLM_LOCK_PUT(lock);
- } else {
- *bits &= ~policy.l_inodebits.bits;
- }
- }
- }
+ if (md_lock_match(ll_i2mdexp(inode), flags, fid, LDLM_IBITS,
+ &policy, mode, &lockh)) {
+ struct ldlm_lock *lock;
+
+ lock = ldlm_handle2lock(&lockh);
+ if (lock) {
+ *bits &=
+ ~(lock->l_policy_data.l_inodebits.bits);
+ LDLM_LOCK_PUT(lock);
+ } else {
+ *bits &= ~policy.l_inodebits.bits;
+ }
+ }
+ }
RETURN(*bits == 0);
}
static int ll_inode_revalidate(struct dentry *dentry, enum ldlm_intent_flags op)
{
+ struct inode *parent;
struct inode *inode = dentry->d_inode;
struct obd_export *exp = ll_i2mdexp(inode);
struct lookup_intent oit = {
};
struct ptlrpc_request *req = NULL;
struct md_op_data *op_data;
+ const char *name = NULL;
+ size_t namelen = 0;
int rc = 0;
ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p),name=%s\n",
PFID(ll_inode2fid(inode)), inode, dentry->d_name.name);
- /* Call getattr by fid, so do not provide name at all. */
- op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, 0, 0,
+ if (exp_connect_flags2(exp) & OBD_CONNECT2_GETATTR_PFID) {
+ parent = dentry->d_parent->d_inode;
+ name = dentry->d_name.name;
+ namelen = dentry->d_name.len;
+ } else {
+ parent = inode;
+ }
+
+ op_data = ll_prep_md_op_data(NULL, parent, inode, name, namelen, 0,
LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
RETURN(PTR_ERR(op_data));
+ /* Call getattr by fid */
+ if (exp_connect_flags2(exp) & OBD_CONNECT2_GETATTR_PFID)
+ op_data->op_flags = MF_GETATTR_BY_FID;
rc = md_intent_lock(exp, op_data, &oit, &req, &ll_md_blocking_ast, 0);
ll_finish_md_op_data(op_data);
if (rc < 0) {
* do_lookup() -> ll_revalidate_it(). We cannot use d_drop
* here to preserve get_cwd functionality on 2.6.
* Bug 10503 */
- if (!dentry->d_inode->i_nlink) {
- ll_lock_dcache(inode);
- d_lustre_invalidate(dentry, 0);
- ll_unlock_dcache(inode);
- }
+ if (!dentry->d_inode->i_nlink)
+ d_lustre_invalidate(dentry);
ll_lookup_finish_locks(&oit, dentry);
out:
RETURN(0);
}
-static inline dev_t ll_compat_encode_dev(dev_t dev)
-{
- /* The compat_sys_*stat*() syscalls will fail unless the
- * device majors and minors are both less than 256. Note that
- * the value returned here will be passed through
- * old_encode_dev() in cp_compat_stat(). And so we are not
- * trying to return a valid compat (u16) device number, just
- * one that will pass the old_valid_dev() check. */
-
- return MKDEV(MAJOR(dev) & 0xff, MINOR(dev) & 0xff);
-}
-
-#ifdef HAVE_INODEOPS_ENHANCED_GETATTR
-int ll_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int flags)
+int ll_getattr_dentry(struct dentry *de, struct kstat *stat, u32 request_mask,
+ unsigned int flags, bool foreign)
{
- struct dentry *de = path->dentry;
-#else
-int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat)
-{
-#endif
struct inode *inode = de->d_inode;
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ll_inode_info *lli = ll_i2info(inode);
+ struct inode *dir = de->d_parent->d_inode;
+ bool need_glimpse = true;
+ ktime_t kstart = ktime_get();
int rc;
- ll_stats_ops_tally(sbi, LPROC_LL_GETATTR, 1);
+ /* The OST object(s) determine the file size, blocks and mtime. */
+ if (!(request_mask & STATX_SIZE || request_mask & STATX_BLOCKS ||
+ request_mask & STATX_MTIME))
+ need_glimpse = false;
+
+ if (dentry_may_statahead(dir, de))
+ ll_start_statahead(dir, de, need_glimpse &&
+ !(flags & AT_STATX_DONT_SYNC));
+
+ if (flags & AT_STATX_DONT_SYNC)
+ GOTO(fill_attr, rc = 0);
rc = ll_inode_revalidate(de, IT_GETATTR);
if (rc < 0)
RETURN(rc);
- if (S_ISREG(inode->i_mode)) {
+ /* foreign file/dir are always of zero length, so don't
+ * need to validate size.
+ */
+ if (S_ISREG(inode->i_mode) && !foreign) {
+ bool cached;
+
+ if (!need_glimpse)
+ GOTO(fill_attr, rc);
+
+ rc = pcc_inode_getattr(inode, request_mask, flags, &cached);
+ if (cached && rc < 0)
+ RETURN(rc);
+
+ if (cached)
+ GOTO(fill_attr, rc);
+
+ /*
+ * If the returned attr is masked with OBD_MD_FLSIZE &
+ * OBD_MD_FLBLOCKS & OBD_MD_FLMTIME, it means that the file size
+ * or blocks obtained from MDT is strictly correct, and the file
+ * is usually not being modified by clients, and the [a|m|c]time
+ * got from MDT is also strictly correct.
+ * Under this circumstance, it does not need to send glimpse
+ * RPCs to OSTs for file attributes such as the size and blocks.
+ */
+ if (lli->lli_attr_valid & OBD_MD_FLSIZE &&
+ lli->lli_attr_valid & OBD_MD_FLBLOCKS &&
+ lli->lli_attr_valid & OBD_MD_FLMTIME) {
+ inode->i_mtime.tv_sec = lli->lli_mtime;
+ if (lli->lli_attr_valid & OBD_MD_FLATIME)
+ inode->i_atime.tv_sec = lli->lli_atime;
+ if (lli->lli_attr_valid & OBD_MD_FLCTIME)
+ inode->i_ctime.tv_sec = lli->lli_ctime;
+ GOTO(fill_attr, rc);
+ }
+
/* In case of restore, the MDT has the right size and has
* already send it back without granting the layout lock,
* inode is up-to-date so glimpse is useless.
* restore the MDT holds the layout lock so the glimpse will
* block up to the end of restore (getattr will block)
*/
- if (!ll_file_test_flag(lli, LLIF_FILE_RESTORING)) {
+ if (!test_bit(LLIF_FILE_RESTORING, &lli->lli_flags)) {
rc = ll_glimpse_size(inode);
if (rc < 0)
RETURN(rc);
}
} else {
/* If object isn't regular a file then don't validate size. */
- if (ll_dir_striped(inode)) {
+ /* foreign dir is not striped dir */
+ if (ll_dir_striped(inode) && !foreign) {
rc = ll_merge_md_attr(inode);
if (rc < 0)
RETURN(rc);
}
- inode->i_atime.tv_sec = lli->lli_atime;
- inode->i_mtime.tv_sec = lli->lli_mtime;
- inode->i_ctime.tv_sec = lli->lli_ctime;
+ if (lli->lli_attr_valid & OBD_MD_FLATIME)
+ inode->i_atime.tv_sec = lli->lli_atime;
+ if (lli->lli_attr_valid & OBD_MD_FLMTIME)
+ inode->i_mtime.tv_sec = lli->lli_mtime;
+ if (lli->lli_attr_valid & OBD_MD_FLCTIME)
+ inode->i_ctime.tv_sec = lli->lli_ctime;
}
+fill_attr:
OBD_FAIL_TIMEOUT(OBD_FAIL_GETATTR_DELAY, 30);
if (ll_need_32bit_api(sbi)) {
stat->rdev = inode->i_rdev;
}
- stat->mode = inode->i_mode;
+ /* foreign symlink to be exposed as a real symlink */
+ if (!foreign)
+ stat->mode = inode->i_mode;
+ else
+ stat->mode = (inode->i_mode & ~S_IFMT) | S_IFLNK;
+
stat->uid = inode->i_uid;
stat->gid = inode->i_gid;
stat->atime = inode->i_atime;
stat->mtime = inode->i_mtime;
stat->ctime = inode->i_ctime;
- stat->blksize = sbi->ll_stat_blksize ?: 1 << inode->i_blkbits;
+ /* stat->blksize is used to tell about preferred IO size */
+ if (sbi->ll_stat_blksize)
+ stat->blksize = sbi->ll_stat_blksize;
+ else if (S_ISREG(inode->i_mode))
+ stat->blksize = 1 << min(PTLRPC_MAX_BRW_BITS + 1,
+ LL_MAX_BLKSIZE_BITS);
+ else
+ stat->blksize = 1 << inode->i_sb->s_blocksize_bits;
stat->nlink = inode->i_nlink;
stat->size = i_size_read(inode);
stat->blocks = inode->i_blocks;
- return 0;
+#ifdef HAVE_INODEOPS_ENHANCED_GETATTR
+ if (flags & AT_STATX_DONT_SYNC) {
+ if (stat->size == 0 &&
+ lli->lli_attr_valid & OBD_MD_FLLAZYSIZE)
+ stat->size = lli->lli_lazysize;
+ if (stat->blocks == 0 &&
+ lli->lli_attr_valid & OBD_MD_FLLAZYBLOCKS)
+ stat->blocks = lli->lli_lazyblocks;
+ }
+
+ if (lli->lli_attr_valid & OBD_MD_FLBTIME) {
+ stat->result_mask |= STATX_BTIME;
+ stat->btime.tv_sec = lli->lli_btime;
+ }
+
+ stat->attributes_mask = STATX_ATTR_IMMUTABLE | STATX_ATTR_APPEND;
+ stat->attributes |= ll_inode_to_ext_flags(inode->i_flags);
+ stat->result_mask &= request_mask;
+#endif
+
+ ll_stats_ops_tally(sbi, LPROC_LL_GETATTR,
+ ktime_us_delta(ktime_get(), kstart));
+
+ return 0;
+}
+
+#ifdef HAVE_INODEOPS_ENHANCED_GETATTR
+int ll_getattr(const struct path *path, struct kstat *stat,
+ u32 request_mask, unsigned int flags)
+{
+ return ll_getattr_dentry(path->dentry, stat, request_mask, flags,
+ false);
+}
+#else
+int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat)
+{
+ return ll_getattr_dentry(de, stat, STATX_BASIC_STATS,
+ AT_STATX_SYNC_AS_STAT, false);
+}
+#endif
+
+int cl_falloc(struct inode *inode, int mode, loff_t offset, loff_t len)
+{
+ struct lu_env *env;
+ struct cl_io *io;
+ __u16 refcheck;
+ int rc; loff_t sa_falloc_end;
+ loff_t size = i_size_read(inode);
+
+ ENTRY;
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
+
+ io = vvp_env_thread_io(env);
+ io->ci_obj = ll_i2info(inode)->lli_clob;
+ io->ci_verify_layout = 1;
+ io->u.ci_setattr.sa_parent_fid = lu_object_fid(&io->ci_obj->co_lu);
+ io->u.ci_setattr.sa_falloc_mode = mode;
+ io->u.ci_setattr.sa_falloc_offset = offset;
+ io->u.ci_setattr.sa_falloc_len = len;
+ io->u.ci_setattr.sa_falloc_end = io->u.ci_setattr.sa_falloc_offset +
+ io->u.ci_setattr.sa_falloc_len;
+ io->u.ci_setattr.sa_subtype = CL_SETATTR_FALLOCATE;
+ sa_falloc_end = io->u.ci_setattr.sa_falloc_end;
+ if (sa_falloc_end > size) {
+ /* Check new size against VFS/VM file size limit and rlimit */
+ rc = inode_newsize_ok(inode, sa_falloc_end);
+ if (rc)
+ goto out;
+ if (sa_falloc_end > ll_file_maxbytes(inode)) {
+ CDEBUG(D_INODE, "file size too large %llu > %llu\n",
+ (unsigned long long)(sa_falloc_end),
+ ll_file_maxbytes(inode));
+ rc = -EFBIG;
+ goto out;
+ }
+ io->u.ci_setattr.sa_attr.lvb_size = sa_falloc_end;
+ if (!(mode & FALLOC_FL_KEEP_SIZE))
+ io->u.ci_setattr.sa_avalid |= ATTR_SIZE;
+ } else {
+ io->u.ci_setattr.sa_attr.lvb_size = size;
+ }
+
+again:
+ if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0)
+ rc = cl_io_loop(env, io);
+ else
+ rc = io->ci_result;
+
+ cl_io_fini(env, io);
+ if (unlikely(io->ci_need_restart))
+ goto again;
+
+out:
+ cl_env_put(env, &refcheck);
+ RETURN(rc);
+}
+
+long ll_fallocate(struct file *filp, int mode, loff_t offset, loff_t len)
+{
+ struct inode *inode = filp->f_path.dentry->d_inode;
+ int rc;
+
+ /*
+ * Encrypted inodes can't handle collapse range or zero range or insert
+ * range since we would need to re-encrypt blocks with a different IV or
+ * XTS tweak (which are based on the logical block number).
+ * Similar to what ext4 does.
+ */
+ if (IS_ENCRYPTED(inode) &&
+ (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE |
+ FALLOC_FL_ZERO_RANGE)))
+ RETURN(-EOPNOTSUPP);
+
+ /*
+ * Only mode == 0 (which is standard prealloc) is supported now.
+ * Punch is not supported yet.
+ */
+ if (mode & ~FALLOC_FL_KEEP_SIZE)
+ RETURN(-EOPNOTSUPP);
+
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FALLOCATE, 1);
+
+ rc = cl_falloc(inode, mode, offset, len);
+ /*
+ * ENOTSUPP (524) is an NFSv3 specific error code erroneously
+ * used by Lustre in several places. Returning it here would
+ * confuse applications that explicitly test for EOPNOTSUPP
+ * (95) and fall back to ftruncate().
+ */
+ if (rc == -ENOTSUPP)
+ rc = -EOPNOTSUPP;
+
+ RETURN(rc);
}
static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
rc = ll_do_fiemap(inode, fiemap, num_bytes);
+ if (IS_ENCRYPTED(inode)) {
+ int i;
+
+ for (i = 0; i < fiemap->fm_mapped_extents; i++)
+ fiemap->fm_extents[i].fe_flags |=
+ FIEMAP_EXTENT_DATA_ENCRYPTED |
+ FIEMAP_EXTENT_ENCODED;
+ }
+
fieinfo->fi_flags = fiemap->fm_flags;
fieinfo->fi_extents_mapped = fiemap->fm_mapped_extents;
if (extent_count > 0 &&
return rc;
}
-struct posix_acl *ll_get_acl(struct inode *inode, int type)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct posix_acl *acl = NULL;
- ENTRY;
-
- spin_lock(&lli->lli_lock);
- /* VFS' acl_permission_check->check_acl will release the refcount */
- acl = posix_acl_dup(lli->lli_posix_acl);
- spin_unlock(&lli->lli_lock);
-
- RETURN(acl);
-}
-
-#ifdef HAVE_IOP_SET_ACL
-#ifdef CONFIG_FS_POSIX_ACL
-int ll_set_acl(struct inode *inode, struct posix_acl *acl, int type)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ptlrpc_request *req = NULL;
- const char *name = NULL;
- char *value = NULL;
- size_t value_size = 0;
- int rc = 0;
- ENTRY;
-
- switch (type) {
- case ACL_TYPE_ACCESS:
- name = XATTR_NAME_POSIX_ACL_ACCESS;
- if (acl)
- rc = posix_acl_update_mode(inode, &inode->i_mode, &acl);
- break;
-
- case ACL_TYPE_DEFAULT:
- name = XATTR_NAME_POSIX_ACL_DEFAULT;
- if (!S_ISDIR(inode->i_mode))
- rc = acl ? -EACCES : 0;
- break;
-
- default:
- rc = -EINVAL;
- break;
- }
- if (rc)
- return rc;
-
- if (acl) {
- value_size = posix_acl_xattr_size(acl->a_count);
- value = kmalloc(value_size, GFP_NOFS);
- if (value == NULL)
- GOTO(out, rc = -ENOMEM);
-
- rc = posix_acl_to_xattr(&init_user_ns, acl, value, value_size);
- if (rc < 0)
- GOTO(out_value, rc);
- }
-
- rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode),
- value ? OBD_MD_FLXATTR : OBD_MD_FLXATTRRM,
- name, value, value_size, 0, 0, &req);
-
- ptlrpc_req_finished(req);
-out_value:
- kfree(value);
-out:
- if (rc)
- forget_cached_acl(inode, type);
- else
- set_cached_acl(inode, type, acl);
- RETURN(rc);
-}
-#endif /* CONFIG_FS_POSIX_ACL */
-#endif /* HAVE_IOP_SET_ACL */
-
-#ifndef HAVE_GENERIC_PERMISSION_2ARGS
-static int
-# ifdef HAVE_GENERIC_PERMISSION_4ARGS
-ll_check_acl(struct inode *inode, int mask, unsigned int flags)
-# else
-ll_check_acl(struct inode *inode, int mask)
-# endif
-{
-# ifdef CONFIG_FS_POSIX_ACL
- struct posix_acl *acl;
- int rc;
- ENTRY;
-
-# ifdef HAVE_GENERIC_PERMISSION_4ARGS
- if (flags & IPERM_FLAG_RCU)
- return -ECHILD;
-# endif
- acl = ll_get_acl(inode, ACL_TYPE_ACCESS);
-
- if (!acl)
- RETURN(-EAGAIN);
-
- rc = posix_acl_permission(inode, acl, mask);
- posix_acl_release(acl);
-
- RETURN(rc);
-# else /* !CONFIG_FS_POSIX_ACL */
- return -EAGAIN;
-# endif /* CONFIG_FS_POSIX_ACL */
-}
-#endif /* HAVE_GENERIC_PERMISSION_2ARGS */
-
-#ifdef HAVE_GENERIC_PERMISSION_4ARGS
-int ll_inode_permission(struct inode *inode, int mask, unsigned int flags)
-#else
-# ifdef HAVE_INODE_PERMISION_2ARGS
int ll_inode_permission(struct inode *inode, int mask)
-# else
-int ll_inode_permission(struct inode *inode, int mask, struct nameidata *nd)
-# endif
-#endif
{
int rc = 0;
struct ll_sb_info *sbi;
struct root_squash_info *squash;
struct cred *cred = NULL;
const struct cred *old_cred = NULL;
- cfs_cap_t cap;
bool squash_id = false;
+ ktime_t kstart = ktime_get();
+
ENTRY;
-#ifdef MAY_NOT_BLOCK
if (mask & MAY_NOT_BLOCK)
return -ECHILD;
-#elif defined(HAVE_GENERIC_PERMISSION_4ARGS)
- if (flags & IPERM_FLAG_RCU)
- return -ECHILD;
-#endif
- /* as root inode are NOT getting validated in lookup operation,
- * need to do it before permission check. */
+ /*
+ * as root inode are NOT getting validated in lookup operation,
+ * need to do it before permission check.
+ */
- if (inode == inode->i_sb->s_root->d_inode) {
+ if (is_root_inode(inode)) {
rc = ll_inode_revalidate(inode->i_sb->s_root, IT_LOOKUP);
- if (rc)
- RETURN(rc);
- }
+ if (rc)
+ RETURN(rc);
+ }
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), inode mode %x mask %o\n",
PFID(ll_inode2fid(inode)), inode, inode->i_mode, mask);
cred->fsuid = make_kuid(&init_user_ns, squash->rsi_uid);
cred->fsgid = make_kgid(&init_user_ns, squash->rsi_gid);
- for (cap = 0; cap < sizeof(cfs_cap_t) * 8; cap++) {
- if ((1 << cap) & CFS_CAP_FS_MASK)
- cap_lower(cred->cap_effective, cap);
- }
+ cred->cap_effective = cap_drop_nfsd_set(cred->cap_effective);
+ cred->cap_effective = cap_drop_fs_set(cred->cap_effective);
+
old_cred = override_creds(cred);
}
- ll_stats_ops_tally(sbi, LPROC_LL_INODE_PERM, 1);
- rc = ll_generic_permission(inode, mask, flags, ll_check_acl);
+ rc = generic_permission(inode, mask);
/* restore current process's credentials and FS capability */
if (squash_id) {
revert_creds(old_cred);
put_cred(cred);
}
+ if (!rc)
+ ll_stats_ops_tally(sbi, LPROC_LL_INODE_PERM,
+ ktime_us_delta(ktime_get(), kstart));
+
RETURN(rc);
}
/* -o localflock - only provides locally consistent flock locks */
-struct file_operations ll_file_operations = {
+static const struct file_operations ll_file_operations = {
#ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
# ifdef HAVE_SYNC_READ_WRITE
.read = new_sync_read,
.release = ll_file_release,
.mmap = ll_file_mmap,
.llseek = ll_file_seek,
- .splice_read = ll_file_splice_read,
+#ifndef HAVE_DEFAULT_FILE_SPLICE_READ_EXPORT
+ .splice_read = generic_file_splice_read,
+#else
+ .splice_read = pcc_file_splice_read,
+#endif
.fsync = ll_fsync,
- .flush = ll_flush
+ .flush = ll_flush,
+ .fallocate = ll_fallocate,
};
-struct file_operations ll_file_operations_flock = {
+static const struct file_operations ll_file_operations_flock = {
#ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
# ifdef HAVE_SYNC_READ_WRITE
.read = new_sync_read,
.release = ll_file_release,
.mmap = ll_file_mmap,
.llseek = ll_file_seek,
- .splice_read = ll_file_splice_read,
+#ifndef HAVE_DEFAULT_FILE_SPLICE_READ_EXPORT
+ .splice_read = generic_file_splice_read,
+#else
+ .splice_read = pcc_file_splice_read,
+#endif
.fsync = ll_fsync,
.flush = ll_flush,
.flock = ll_file_flock,
- .lock = ll_file_flock
+ .lock = ll_file_flock,
+ .fallocate = ll_fallocate,
};
/* These are for -o noflock - to return ENOSYS on flock calls */
-struct file_operations ll_file_operations_noflock = {
+static const struct file_operations ll_file_operations_noflock = {
#ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER
# ifdef HAVE_SYNC_READ_WRITE
.read = new_sync_read,
.release = ll_file_release,
.mmap = ll_file_mmap,
.llseek = ll_file_seek,
- .splice_read = ll_file_splice_read,
+#ifndef HAVE_DEFAULT_FILE_SPLICE_READ_EXPORT
+ .splice_read = generic_file_splice_read,
+#else
+ .splice_read = pcc_file_splice_read,
+#endif
.fsync = ll_fsync,
.flush = ll_flush,
.flock = ll_file_noflock,
- .lock = ll_file_noflock
+ .lock = ll_file_noflock,
+ .fallocate = ll_fallocate,
};
-struct inode_operations ll_file_inode_operations = {
+const struct inode_operations ll_file_inode_operations = {
.setattr = ll_setattr,
.getattr = ll_getattr,
.permission = ll_inode_permission,
#endif
.listxattr = ll_listxattr,
.fiemap = ll_fiemap,
-#ifdef HAVE_IOP_GET_ACL
.get_acl = ll_get_acl,
-#endif
#ifdef HAVE_IOP_SET_ACL
.set_acl = ll_set_acl,
#endif
};
+const struct file_operations *ll_select_file_operations(struct ll_sb_info *sbi)
+{
+ const struct file_operations *fops = &ll_file_operations_noflock;
+
+ if (sbi->ll_flags & LL_SBI_FLOCK)
+ fops = &ll_file_operations_flock;
+ else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
+ fops = &ll_file_operations;
+
+ return fops;
+}
+
int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf)
{
struct ll_inode_info *lli = ll_i2info(inode);
out:
cl_env_put(env, &refcheck);
- RETURN(rc);
+ RETURN(rc < 0 ? rc : 0);
}
/* Fetch layout from MDT with getxattr request, if it's not ready yet */
/* mostly layout lock is caching on the local side, so try to
* match it before grabbing layout lock mutex. */
mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0,
- LCK_CR | LCK_CW | LCK_PR | LCK_PW);
+ LCK_CR | LCK_CW | LCK_PR |
+ LCK_PW | LCK_EX);
if (mode != 0) { /* hit cached lock */
rc = ll_layout_lock_set(&lockh, mode, inode);
if (rc == -EAGAIN)