X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fllite%2Ffile.c;h=38d23fc8b1c5829e6b63188d872ab2c36e4d9a92;hp=97e871f3fc3a075d251b0bb2c2931be752b5c431;hb=e8d76d1090e912ee5d916284ca5c8ba9195ddd9b;hpb=a9af7100ce72ece9c7a37c4d2c28b54fcf68b562

diff --git a/lustre/llite/file.c b/lustre/llite/file.c
index 97e871f..38d23fc 100644
--- a/lustre/llite/file.c
+++ b/lustre/llite/file.c
@@ -27,7 +27,6 @@
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
  *
  * lustre/llite/file.c
  *
@@ -42,11 +41,12 @@
 #include
 #include
 #include
-#ifdef HAVE_UIDGID_HEADER
-# include
-#endif
+#include
+#include
+#include
 #include
+#include
 #include
 #include "cl_object.h"
@@ -112,12 +112,12 @@ static void ll_prepare_close(struct inode *inode, struct md_op_data *op_data,
         op_data->op_xvalid |= OP_XVALID_CTIME_SET;
         op_data->op_attr_blocks = inode->i_blocks;
         op_data->op_attr_flags = ll_inode_to_ext_flags(inode->i_flags);
-        if (ll_file_test_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT))
+        if (test_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags))
                 op_data->op_attr_flags |= LUSTRE_PROJINHERIT_FL;
         op_data->op_open_handle = och->och_open_handle;
         if (och->och_flags & FMODE_WRITE &&
-            ll_file_test_and_clear_flag(ll_i2info(inode), LLIF_DATA_MODIFIED))
+            test_and_clear_bit(LLIF_DATA_MODIFIED, &ll_i2info(inode)->lli_flags))
                 /* For HSM: if inode data has been modified, pack it so that
                  * MDT can set data dirty flag in the archive. */
                 op_data->op_bias |= MDS_DATA_MODIFIED;
@@ -163,6 +163,7 @@ static int ll_close_inode_openhandle(struct inode *inode,
                 op_data->op_attr_blocks += ((struct inode *)data)->i_blocks;
                 op_data->op_attr.ia_valid |= ATTR_SIZE;
                 op_data->op_xvalid |= OP_XVALID_BLOCKS;
+                /* fallthrough */
         case MDS_CLOSE_LAYOUT_SPLIT:
         case MDS_CLOSE_LAYOUT_SWAP: {
                 struct split_param *sp = data;
@@ -306,7 +307,7 @@ static int ll_md_close(struct inode *inode, struct file *file)
                 .l_inodebits = { MDS_INODELOCK_OPEN },
         };
         __u64 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
-        struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+        struct ll_file_data *fd = file->private_data;
         struct ll_inode_info *lli = ll_i2info(inode);
         struct lustre_handle lockh;
         enum ldlm_mode lockmode;
@@ -323,8 +324,9 @@ static int ll_md_close(struct inode *inode, struct file *file)
                 /* Usually the lease is not released when the
                  * application crashed, we need to release here. */
                 rc = ll_lease_close(fd->fd_lease_och, inode, &lease_broken);
-                CDEBUG(rc ? D_ERROR : D_INODE, "Clean up lease "DFID" %d/%d\n",
-                        PFID(&lli->lli_fid), rc, lease_broken);
+                CDEBUG_LIMIT(rc ?
D_ERROR : D_INODE, + "Clean up lease "DFID" %d/%d\n", + PFID(&lli->lli_fid), rc, lease_broken); fd->fd_lease_och = NULL; } @@ -353,12 +355,14 @@ static int ll_md_close(struct inode *inode, struct file *file) } mutex_unlock(&lli->lli_och_mutex); - if (!md_lock_match(ll_i2mdexp(inode), flags, ll_inode2fid(inode), + /* LU-4398: do not cache write open lock if the file has exec bit */ + if ((lockmode == LCK_CW && inode->i_mode & S_IXUGO) || + !md_lock_match(ll_i2mdexp(inode), flags, ll_inode2fid(inode), LDLM_IBITS, &policy, lockmode, &lockh)) rc = ll_md_real_close(inode, fd->fd_omode); out: - LUSTRE_FPRIVATE(file) = NULL; + file->private_data = NULL; ll_file_data_put(fd); RETURN(rc); @@ -371,29 +375,29 @@ out: */ int ll_file_release(struct inode *inode, struct file *file) { - struct ll_file_data *fd; - struct ll_sb_info *sbi = ll_i2sbi(inode); - struct ll_inode_info *lli = ll_i2info(inode); - int rc; - ENTRY; + struct ll_file_data *fd; + struct ll_sb_info *sbi = ll_i2sbi(inode); + struct ll_inode_info *lli = ll_i2info(inode); + ktime_t kstart = ktime_get(); + int rc; + + ENTRY; CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n", PFID(ll_inode2fid(inode)), inode); - if (inode->i_sb->s_root != file_dentry(file)) - ll_stats_ops_tally(sbi, LPROC_LL_RELEASE, 1); - fd = LUSTRE_FPRIVATE(file); - LASSERT(fd != NULL); + fd = file->private_data; + LASSERT(fd != NULL); /* The last ref on @file, maybe not the the owner pid of statahead, * because parent and child process can share the same file handle. */ if (S_ISDIR(inode->i_mode) && lli->lli_opendir_key == fd) ll_deauthorize_statahead(inode, fd); - if (inode->i_sb->s_root == file_dentry(file)) { - LUSTRE_FPRIVATE(file) = NULL; + if (is_root_inode(inode)) { + file->private_data = NULL; ll_file_data_put(fd); - RETURN(0); + GOTO(out, rc = 0); } pcc_file_release(inode, file); @@ -404,11 +408,17 @@ int ll_file_release(struct inode *inode, struct file *file) lli->lli_async_rc = 0; } + lli->lli_close_fd_time = ktime_get(); + rc = ll_md_close(inode, file); if (CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_DUMP_LOG, cfs_fail_val)) libcfs_debug_dumplog(); +out: + if (!rc && !is_root_inode(inode)) + ll_stats_ops_tally(sbi, LPROC_LL_RELEASE, + ktime_us_delta(ktime_get(), kstart)); RETURN(rc); } @@ -416,23 +426,52 @@ static inline int ll_dom_readpage(void *data, struct page *page) { struct niobuf_local *lnb = data; void *kaddr; + int rc = 0; + + struct inode *inode = page2inode(page); - kaddr = ll_kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page); memcpy(kaddr, lnb->lnb_data, lnb->lnb_len); if (lnb->lnb_len < PAGE_SIZE) memset(kaddr + lnb->lnb_len, 0, PAGE_SIZE - lnb->lnb_len); flush_dcache_page(page); SetPageUptodate(page); - ll_kunmap_atomic(kaddr, KM_USER0); + kunmap_atomic(kaddr); + + if (inode && IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) { + if (!llcrypt_has_encryption_key(inode)) + CDEBUG(D_SEC, "no enc key for "DFID"\n", + PFID(ll_inode2fid(inode))); + else { + unsigned int offs = 0; + + while (offs < PAGE_SIZE) { + /* decrypt only if page is not empty */ + if (memcmp(page_address(page) + offs, + page_address(ZERO_PAGE(0)), + LUSTRE_ENCRYPTION_UNIT_SIZE) == 0) + break; + + rc = llcrypt_decrypt_pagecache_blocks(page, + LUSTRE_ENCRYPTION_UNIT_SIZE, + offs); + if (rc) + break; + + offs += LUSTRE_ENCRYPTION_UNIT_SIZE; + } + } + } unlock_page(page); - return 0; + return rc; } -void ll_dom_finish_open(struct inode *inode, struct ptlrpc_request *req, - struct lookup_intent *it) +void ll_dom_finish_open(struct inode *inode, struct ptlrpc_request *req) { + struct lu_env 
*env; + struct cl_io *io; struct ll_inode_info *lli = ll_i2info(inode); struct cl_object *obj = lli->lli_clob; struct address_space *mapping = inode->i_mapping; @@ -442,14 +481,16 @@ void ll_dom_finish_open(struct inode *inode, struct ptlrpc_request *req, char *data; unsigned long index, start; struct niobuf_local lnb; + __u16 refcheck; + int rc; ENTRY; if (obj == NULL) RETURN_EXIT; - if (!req_capsule_has_field(&req->rq_pill, &RMF_NIOBUF_INLINE, - RCL_SERVER)) + if (!req_capsule_field_present(&req->rq_pill, &RMF_NIOBUF_INLINE, + RCL_SERVER)) RETURN_EXIT; rnb = req_capsule_server_get(&req->rq_pill, &RMF_NIOBUF_INLINE); @@ -461,31 +502,44 @@ void ll_dom_finish_open(struct inode *inode, struct ptlrpc_request *req, * client PAGE_SIZE to be used on that client, if server's PAGE_SIZE is * smaller then offset may be not aligned and that data is just ignored. */ - if (rnb->rnb_offset % PAGE_SIZE) + if (rnb->rnb_offset & ~PAGE_MASK) RETURN_EXIT; /* Server returns whole file or just file tail if it fills in reply * buffer, in both cases total size should be equal to the file size. */ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - if (rnb->rnb_offset + rnb->rnb_len != body->mbo_dom_size) { + if (rnb->rnb_offset + rnb->rnb_len != body->mbo_dom_size && + !(inode && IS_ENCRYPTED(inode))) { CERROR("%s: server returns off/len %llu/%u but size %llu\n", ll_i2sbi(inode)->ll_fsname, rnb->rnb_offset, rnb->rnb_len, body->mbo_dom_size); RETURN_EXIT; } + env = cl_env_get(&refcheck); + if (IS_ERR(env)) + RETURN_EXIT; + io = vvp_env_thread_io(env); + io->ci_obj = obj; + io->ci_ignore_layout = 1; + rc = cl_io_init(env, io, CIT_MISC, obj); + if (rc) + GOTO(out_io, rc); + CDEBUG(D_INFO, "Get data along with open at %llu len %i, size %llu\n", rnb->rnb_offset, rnb->rnb_len, body->mbo_dom_size); data = (char *)rnb + sizeof(*rnb); lnb.lnb_file_offset = rnb->rnb_offset; - start = lnb.lnb_file_offset / PAGE_SIZE; + start = lnb.lnb_file_offset >> PAGE_SHIFT; index = 0; - LASSERT(lnb.lnb_file_offset % PAGE_SIZE == 0); + LASSERT((lnb.lnb_file_offset & ~PAGE_MASK) == 0); lnb.lnb_page_offset = 0; do { + struct cl_page *page; + lnb.lnb_data = data + (index << PAGE_SHIFT); lnb.lnb_len = rnb->rnb_len - (index << PAGE_SHIFT); if (lnb.lnb_len > PAGE_SIZE) @@ -501,9 +555,33 @@ void ll_dom_finish_open(struct inode *inode, struct ptlrpc_request *req, PTR_ERR(vmpage)); break; } + lock_page(vmpage); + if (vmpage->mapping == NULL) { + unlock_page(vmpage); + put_page(vmpage); + /* page was truncated */ + break; + } + /* attach VM page to CL page cache */ + page = cl_page_find(env, obj, vmpage->index, vmpage, + CPT_CACHEABLE); + if (IS_ERR(page)) { + ClearPageUptodate(vmpage); + unlock_page(vmpage); + put_page(vmpage); + break; + } + cl_page_export(env, page, 1); + cl_page_put(env, page); + unlock_page(vmpage); put_page(vmpage); index++; } while (rnb->rnb_len > (index << PAGE_SHIFT)); + +out_io: + cl_io_fini(env, io); + cl_env_put(env, &refcheck); + EXIT; } @@ -558,6 +636,8 @@ retry: op_data->op_data = lmm; op_data->op_data_size = lmmsize; + OBD_FAIL_TIMEOUT(OBD_FAIL_LLITE_OPEN_DELAY, cfs_fail_val); + rc = md_intent_lock(sbi->ll_md_exp, op_data, itp, &req, &ll_md_blocking_ast, 0); kfree(name); @@ -582,44 +662,47 @@ retry: GOTO(out, rc); } - rc = ll_prep_inode(&de->d_inode, req, NULL, itp); + rc = ll_prep_inode(&de->d_inode, &req->rq_pill, NULL, itp); if (!rc && itp->it_lock_mode) { - struct lustre_handle handle = {.cookie = itp->it_lock_handle}; - struct ldlm_lock *lock; - bool has_dom_bit = false; + __u64 bits = 0; /* If we 
got a lock back and it has a LOOKUP bit set, * make sure the dentry is marked as valid so we can find it. * We don't need to care about actual hashing since other bits * of kernel will deal with that later. */ - lock = ldlm_handle2lock(&handle); - if (lock) { - has_dom_bit = ldlm_has_dom(lock); - if (lock->l_policy_data.l_inodebits.bits & - MDS_INODELOCK_LOOKUP) - d_lustre_revalidate(de); - - LDLM_LOCK_PUT(lock); + ll_set_lock_data(sbi->ll_md_exp, de->d_inode, itp, &bits); + if (bits & MDS_INODELOCK_LOOKUP) { + d_lustre_revalidate(de); + ll_update_dir_depth(parent->d_inode, de->d_inode); } - ll_set_lock_data(sbi->ll_md_exp, de->d_inode, itp, NULL); - if (has_dom_bit) - ll_dom_finish_open(de->d_inode, req, itp); + + /* if DoM bit returned along with LAYOUT bit then there + * can be read-on-open data returned. + */ + if (bits & MDS_INODELOCK_DOM && bits & MDS_INODELOCK_LAYOUT) + ll_dom_finish_open(de->d_inode, req); } out: ptlrpc_req_finished(req); ll_intent_drop_lock(itp); - /* We did open by fid, but by the time we got to the server, - * the object disappeared. If this is a create, we cannot really - * tell the userspace that the file it was trying to create - * does not exist. Instead let's return -ESTALE, and the VFS will - * retry the create with LOOKUP_REVAL that we are going to catch - * in ll_revalidate_dentry() and use lookup then. + /* We did open by fid, but by the time we got to the server, the object + * disappeared. This is possible if the object was unlinked, but it's + * also possible if the object was unlinked by a rename. In the case + * of an object renamed over our existing one, we can't fail this open. + * O_CREAT also goes through this path if we had an existing dentry, + * and it's obviously wrong to return ENOENT for O_CREAT. + * + * Instead let's return -ESTALE, and the VFS will retry the open with + * LOOKUP_REVAL, which we catch in ll_revalidate_dentry and fail to + * revalidate, causing a lookup. This causes extra lookups in the case + * where we had a dentry in cache but the file is being unlinked and we + * lose the race with unlink, but this should be very rare. 
*/ - if (rc == -ENOENT && itp->it_op & IT_CREAT) + if (rc == -ENOENT) rc = -ESTALE; RETURN(rc); @@ -646,7 +729,7 @@ static int ll_local_open(struct file *file, struct lookup_intent *it, struct inode *inode = file_inode(file); ENTRY; - LASSERT(!LUSTRE_FPRIVATE(file)); + LASSERT(!file->private_data); LASSERT(fd != NULL); @@ -658,9 +741,11 @@ static int ll_local_open(struct file *file, struct lookup_intent *it, RETURN(rc); } - LUSTRE_FPRIVATE(file) = fd; + file->private_data = fd; ll_readahead_init(inode, &fd->fd_ras); fd->fd_omode = it->it_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC); + /* turn off the kernel's read-ahead */ + file->f_ra.ra_pages = 0; /* ll_cl_context initialize */ rwlock_init(&fd->fd_lock); @@ -669,6 +754,29 @@ static int ll_local_open(struct file *file, struct lookup_intent *it, RETURN(0); } +void ll_track_file_opens(struct inode *inode) +{ + struct ll_inode_info *lli = ll_i2info(inode); + struct ll_sb_info *sbi = ll_i2sbi(inode); + + /* do not skew results with delays from never-opened inodes */ + if (ktime_to_ns(lli->lli_close_fd_time)) + ll_stats_ops_tally(sbi, LPROC_LL_INODE_OPCLTM, + ktime_us_delta(ktime_get(), lli->lli_close_fd_time)); + + if (ktime_after(ktime_get(), + ktime_add_ms(lli->lli_close_fd_time, + sbi->ll_oc_max_ms))) { + lli->lli_open_fd_count = 1; + lli->lli_close_fd_time = ns_to_ktime(0); + } else { + lli->lli_open_fd_count++; + } + + ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_INODE_OCOUNT, + lli->lli_open_fd_count); +} + /* Open a file, and (for the very first open) create objects on the OSTs at * this time. If opened with O_LOV_DELAY_CREATE, then we don't do the object * creation or open until ll_lov_setstripe() ioctl is called. @@ -690,6 +798,7 @@ int ll_file_open(struct inode *inode, struct file *file) struct obd_client_handle **och_p = NULL; __u64 *och_usecount = NULL; struct ll_file_data *fd; + ktime_t kstart = ktime_get(); int rc = 0; ENTRY; @@ -699,6 +808,12 @@ int ll_file_open(struct inode *inode, struct file *file) it = file->private_data; /* XXX: compat macro */ file->private_data = NULL; /* prevent ll_local_open assertion */ + if (S_ISREG(inode->i_mode)) { + rc = llcrypt_file_open(inode, file); + if (rc) + GOTO(out_nofiledata, rc); + } + fd = ll_file_data_get(); if (fd == NULL) GOTO(out_nofiledata, rc = -ENOMEM); @@ -707,19 +822,20 @@ int ll_file_open(struct inode *inode, struct file *file) if (S_ISDIR(inode->i_mode)) ll_authorize_statahead(inode, fd); - if (inode->i_sb->s_root == file_dentry(file)) { - LUSTRE_FPRIVATE(file) = fd; - RETURN(0); - } + ll_track_file_opens(inode); + if (is_root_inode(inode)) { + file->private_data = fd; + RETURN(0); + } if (!it || !it->it_disposition) { - /* Convert f_flags into access mode. We cannot use file->f_mode, - * because everything but O_ACCMODE mask was stripped from - * there */ - if ((oit.it_flags + 1) & O_ACCMODE) - oit.it_flags++; - if (file->f_flags & O_TRUNC) - oit.it_flags |= FMODE_WRITE; + /* Convert f_flags into access mode. We cannot use file->f_mode, + * because everything but O_ACCMODE mask was stripped from + * there */ + if ((oit.it_flags + 1) & O_ACCMODE) + oit.it_flags++; + if (file->f_flags & O_TRUNC) + oit.it_flags |= FMODE_WRITE; /* kernel only call f_op->open in dentry_open. filp_open calls * dentry_open after call to open_namei that checks permissions. 
@@ -729,62 +845,66 @@ int ll_file_open(struct inode *inode, struct file *file) if (oit.it_flags & (FMODE_WRITE | FMODE_READ)) oit.it_flags |= MDS_OPEN_OWNEROVERRIDE; - /* We do not want O_EXCL here, presumably we opened the file - * already? XXX - NFS implications? */ - oit.it_flags &= ~O_EXCL; + /* We do not want O_EXCL here, presumably we opened the file + * already? XXX - NFS implications? */ + oit.it_flags &= ~O_EXCL; - /* bug20584, if "it_flags" contains O_CREAT, the file will be - * created if necessary, then "IT_CREAT" should be set to keep - * consistent with it */ - if (oit.it_flags & O_CREAT) - oit.it_op |= IT_CREAT; + /* bug20584, if "it_flags" contains O_CREAT, the file will be + * created if necessary, then "IT_CREAT" should be set to keep + * consistent with it */ + if (oit.it_flags & O_CREAT) + oit.it_op |= IT_CREAT; - it = &oit; - } + it = &oit; + } restart: - /* Let's see if we have file open on MDS already. */ - if (it->it_flags & FMODE_WRITE) { - och_p = &lli->lli_mds_write_och; - och_usecount = &lli->lli_open_fd_write_count; - } else if (it->it_flags & FMODE_EXEC) { - och_p = &lli->lli_mds_exec_och; - och_usecount = &lli->lli_open_fd_exec_count; - } else { - och_p = &lli->lli_mds_read_och; - och_usecount = &lli->lli_open_fd_read_count; - } + /* Let's see if we have file open on MDS already. */ + if (it->it_flags & FMODE_WRITE) { + och_p = &lli->lli_mds_write_och; + och_usecount = &lli->lli_open_fd_write_count; + } else if (it->it_flags & FMODE_EXEC) { + och_p = &lli->lli_mds_exec_och; + och_usecount = &lli->lli_open_fd_exec_count; + } else { + och_p = &lli->lli_mds_read_och; + och_usecount = &lli->lli_open_fd_read_count; + } mutex_lock(&lli->lli_och_mutex); - if (*och_p) { /* Open handle is present */ - if (it_disposition(it, DISP_OPEN_OPEN)) { - /* Well, there's extra open request that we do not need, - let's close it somehow. This will decref request. */ - rc = it_open_error(DISP_OPEN_OPEN, it); - if (rc) { + if (*och_p) { /* Open handle is present */ + if (it_disposition(it, DISP_OPEN_OPEN)) { + /* Well, there's extra open request that we do not need, + * let's close it somehow. This will decref request. 
*/ + rc = it_open_error(DISP_OPEN_OPEN, it); + if (rc) { mutex_unlock(&lli->lli_och_mutex); - GOTO(out_openerr, rc); - } + GOTO(out_openerr, rc); + } ll_release_openhandle(file_dentry(file), it); - } - (*och_usecount)++; + } + (*och_usecount)++; - rc = ll_local_open(file, it, fd, NULL); - if (rc) { - (*och_usecount)--; + rc = ll_local_open(file, it, fd, NULL); + if (rc) { + (*och_usecount)--; mutex_unlock(&lli->lli_och_mutex); - GOTO(out_openerr, rc); - } - } else { - LASSERT(*och_usecount == 0); + GOTO(out_openerr, rc); + } + } else { + LASSERT(*och_usecount == 0); if (!it->it_disposition) { - struct ll_dentry_data *ldd = ll_d2d(file->f_path.dentry); - /* We cannot just request lock handle now, new ELC code - means that one of other OPEN locks for this file - could be cancelled, and since blocking ast handler - would attempt to grab och_mutex as well, that would - result in a deadlock */ + struct dentry *dentry = file_dentry(file); + struct ll_sb_info *sbi = ll_i2sbi(inode); + struct ll_dentry_data *ldd; + + /* We cannot just request lock handle now, new ELC code + * means that one of other OPEN locks for this file + * could be cancelled, and since blocking ast handler + * would attempt to grab och_mutex as well, that would + * result in a deadlock + */ mutex_unlock(&lli->lli_och_mutex); /* * Normally called under two situations: @@ -793,42 +913,68 @@ restart: * handle to be returned from LOOKUP|OPEN request, * for example if the target entry was a symlink. * - * Only fetch MDS_OPEN_LOCK if this is in NFS path, - * marked by a bit set in ll_iget_for_nfs. Clear the - * bit so that it's not confusing later callers. + * In NFS path we know there's pathologic behavior + * so we always enable open lock caching when coming + * from there. It's detected by setting a flag in + * ll_iget_for_nfs. * - * NB; when ldd is NULL, it must have come via normal - * lookup path only, since ll_iget_for_nfs always calls - * ll_d_init(). + * After reaching number of opens of this inode + * we always ask for an open lock on it to handle + * bad userspace actors that open and close files + * in a loop for absolutely no good reason */ - if (ldd && ldd->lld_nfs_dentry) { - ldd->lld_nfs_dentry = 0; + + ldd = ll_d2d(dentry); + if (filename_is_volatile(dentry->d_name.name, + dentry->d_name.len, + NULL)) { + /* There really is nothing here, but this + * make this more readable I think. + * We do not want openlock for volatile + * files under any circumstances + */ + } else if (ldd && ldd->lld_nfs_dentry) { + /* NFS path. This also happens to catch + * open by fh files I guess + */ it->it_flags |= MDS_OPEN_LOCK; + /* clear the flag for future lookups */ + ldd->lld_nfs_dentry = 0; + } else if (sbi->ll_oc_thrsh_count > 0) { + /* Take MDS_OPEN_LOCK with many opens */ + if (lli->lli_open_fd_count >= + sbi->ll_oc_thrsh_count) + it->it_flags |= MDS_OPEN_LOCK; + + /* If this is open after we just closed */ + else if (ktime_before(ktime_get(), + ktime_add_ms(lli->lli_close_fd_time, + sbi->ll_oc_thrsh_ms))) + it->it_flags |= MDS_OPEN_LOCK; } - /* + /* * Always specify MDS_OPEN_BY_FID because we don't want * to get file with different fid. 
*/ it->it_flags |= MDS_OPEN_BY_FID; - rc = ll_intent_file_open(file_dentry(file), NULL, 0, - it); - if (rc) - GOTO(out_openerr, rc); + rc = ll_intent_file_open(dentry, NULL, 0, it); + if (rc) + GOTO(out_openerr, rc); - goto restart; - } - OBD_ALLOC(*och_p, sizeof (struct obd_client_handle)); - if (!*och_p) - GOTO(out_och_free, rc = -ENOMEM); + goto restart; + } + OBD_ALLOC(*och_p, sizeof(struct obd_client_handle)); + if (!*och_p) + GOTO(out_och_free, rc = -ENOMEM); - (*och_usecount)++; + (*och_usecount)++; - /* md_intent_lock() didn't get a request ref if there was an - * open error, so don't do cleanup on the request here - * (bug 3430) */ - /* XXX (green): Should not we bail out on any error here, not - * just open error? */ + /* md_intent_lock() didn't get a request ref if there was an + * open error, so don't do cleanup on the request here + * (bug 3430) */ + /* XXX (green): Should not we bail out on any error here, not + * just open error? */ rc = it_open_error(DISP_OPEN_OPEN, it); if (rc != 0) GOTO(out_och_free, rc); @@ -847,24 +993,24 @@ restart: GOTO(out_och_free, rc); mutex_unlock(&lli->lli_och_mutex); - fd = NULL; - /* Must do this outside lli_och_mutex lock to prevent deadlock where - different kind of OPEN lock for this same inode gets cancelled - by ldlm_cancel_lru */ - if (!S_ISREG(inode->i_mode)) - GOTO(out_och_free, rc); + fd = NULL; + /* Must do this outside lli_och_mutex lock to prevent deadlock where + different kind of OPEN lock for this same inode gets cancelled + by ldlm_cancel_lru */ + if (!S_ISREG(inode->i_mode)) + GOTO(out_och_free, rc); cl_lov_delay_create_clear(&file->f_flags); GOTO(out_och_free, rc); out_och_free: - if (rc) { - if (och_p && *och_p) { - OBD_FREE(*och_p, sizeof (struct obd_client_handle)); - *och_p = NULL; /* OBD_FREE writes some magic there */ - (*och_usecount)--; - } + if (rc) { + if (och_p && *och_p) { + OBD_FREE(*och_p, sizeof(struct obd_client_handle)); + *och_p = NULL; /* OBD_FREE writes some magic there */ + (*och_usecount)--; + } mutex_unlock(&lli->lli_och_mutex); out_openerr: @@ -873,9 +1019,10 @@ out_openerr: if (fd != NULL) ll_file_data_put(fd); - } else { - ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN, 1); - } + } else { + ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN, + ktime_us_delta(ktime_get(), kstart)); + } out_nofiledata: if (it && it_disposition(it, DISP_ENQ_OPEN_REF)) { @@ -883,7 +1030,7 @@ out_nofiledata: it_clear_disposition(it, DISP_ENQ_OPEN_REF); } - return rc; + return rc; } static int ll_md_blocking_lease_ast(struct ldlm_lock *lock, @@ -918,7 +1065,7 @@ static int ll_lease_och_acquire(struct inode *inode, struct file *file, struct lustre_handle *old_open_handle) { struct ll_inode_info *lli = ll_i2info(inode); - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); + struct ll_file_data *fd = file->private_data; struct obd_client_handle **och_p; __u64 *och_usecount; int rc = 0; @@ -962,7 +1109,7 @@ out_unlock: static int ll_lease_och_release(struct inode *inode, struct file *file) { struct ll_inode_info *lli = ll_i2info(inode); - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); + struct ll_file_data *fd = file->private_data; struct obd_client_handle **och_p; struct obd_client_handle *old_och = NULL; __u64 *och_usecount; @@ -1063,15 +1210,17 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode, GOTO(out_release_it, rc); LASSERT(it_disposition(&it, DISP_ENQ_OPEN_REF)); - ll_och_fill(sbi->ll_md_exp, &it, och); + rc = ll_och_fill(sbi->ll_md_exp, &it, och); + if (rc) + GOTO(out_release_it, rc); if 
(!it_disposition(&it, DISP_OPEN_LEASE)) /* old server? */ GOTO(out_close, rc = -EOPNOTSUPP); /* already get lease, handle lease lock */ ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL); - if (it.it_lock_mode == 0 || - it.it_lock_bits != MDS_INODELOCK_OPEN) { + if (!it.it_lock_mode || + !(it.it_lock_bits & MDS_INODELOCK_OPEN)) { /* open lock must return for lease */ CERROR(DFID "lease granted but no open lock, %d/%llu.\n", PFID(ll_inode2fid(inode)), it.it_lock_mode, @@ -1278,11 +1427,10 @@ int ll_merge_attr(const struct lu_env *env, struct inode *inode) * POSIX. Solving this problem needs to send an RPC to MDT for each * read, this will hurt performance. */ - if (inode->i_atime.tv_sec < lli->lli_atime || - lli->lli_update_atime) { + if (test_and_clear_bit(LLIF_UPDATE_ATIME, &lli->lli_flags) || + inode->i_atime.tv_sec < lli->lli_atime) inode->i_atime.tv_sec = lli->lli_atime; - lli->lli_update_atime = 0; - } + inode->i_mtime.tv_sec = lli->lli_mtime; inode->i_ctime.tv_sec = lli->lli_ctime; @@ -1333,7 +1481,7 @@ out_size_unlock: */ void ll_io_set_mirror(struct cl_io *io, const struct file *file) { - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); + struct ll_file_data *fd = file->private_data; /* clear layout version for generic(non-resync) I/O in case it carries * stale layout version due to I/O restart */ @@ -1382,7 +1530,7 @@ void ll_io_init(struct cl_io *io, struct file *file, enum cl_io_type iot, struct vvp_io_args *args) { struct inode *inode = file_inode(file); - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); + struct ll_file_data *fd = file->private_data; io->u.ci_rw.crw_nonblock = file->f_flags & O_NONBLOCK; io->ci_lock_no_expand = fd->ll_lock_no_expand; @@ -1394,8 +1542,8 @@ void ll_io_init(struct cl_io *io, struct file *file, enum cl_io_type iot, IS_SYNC(inode)); #ifdef HAVE_GENERIC_WRITE_SYNC_2ARGS io->u.ci_wr.wr_sync |= !!(args && - args->via_io_subtype == IO_NORMAL && - args->u.normal.via_iocb->ki_flags & IOCB_DSYNC); + (args->u.normal.via_iocb->ki_flags & + IOCB_DSYNC)); #endif } @@ -1453,16 +1601,24 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args, struct file *file, enum cl_io_type iot, loff_t *ppos, size_t count) { - struct vvp_io *vio = vvp_env_io(env); - struct inode *inode = file_inode(file); - struct ll_inode_info *lli = ll_i2info(inode); - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); - struct range_lock range; - struct cl_io *io; - ssize_t result = 0; - int rc = 0; - unsigned retried = 0; - bool restarted = false; + struct vvp_io *vio = vvp_env_io(env); + struct inode *inode = file_inode(file); + struct ll_inode_info *lli = ll_i2info(inode); + struct ll_sb_info *sbi = ll_i2sbi(inode); + struct ll_file_data *fd = file->private_data; + struct range_lock range; + bool range_locked = false; + struct cl_io *io; + ssize_t result = 0; + int rc = 0; + int rc2 = 0; + unsigned int retried = 0, dio_lock = 0; + bool is_aio = false; + bool is_parallel_dio = false; + struct cl_dio_aio *ci_aio = NULL; + size_t per_bytes; + bool partial_io = false; + size_t max_io_pages, max_cached_pages; ENTRY; @@ -1470,72 +1626,144 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args, file_dentry(file)->d_name.name, iot == CIT_READ ? 
"read" : "write", *ppos, count); + max_io_pages = PTLRPC_MAX_BRW_PAGES * OBD_MAX_RIF_DEFAULT; + max_cached_pages = sbi->ll_cache->ccc_lru_max; + if (max_io_pages > (max_cached_pages >> 2)) + max_io_pages = max_cached_pages >> 2; + + io = vvp_env_thread_io(env); + if (file->f_flags & O_DIRECT) { + if (!is_sync_kiocb(args->u.normal.via_iocb)) + is_aio = true; + + /* the kernel does not support AIO on pipes, and parallel DIO + * uses part of the AIO path, so we must not do parallel dio + * to pipes + */ + is_parallel_dio = !iov_iter_is_pipe(args->u.normal.via_iter) && + !is_aio; + + if (!ll_sbi_has_parallel_dio(sbi)) + is_parallel_dio = false; + + ci_aio = cl_aio_alloc(args->u.normal.via_iocb, + ll_i2info(inode)->lli_clob); + if (!ci_aio) + GOTO(out, rc = -ENOMEM); + } + restart: + /** + * IO block size need be aware of cached page limit, otherwise + * if we have small max_cached_mb but large block IO issued, io + * could not be finished and blocked whole client. + */ + if (file->f_flags & O_DIRECT) + per_bytes = count; + else + per_bytes = min(max_io_pages << PAGE_SHIFT, count); + partial_io = per_bytes < count; io = vvp_env_thread_io(env); ll_io_init(io, file, iot, args); + io->ci_aio = ci_aio; + io->ci_dio_lock = dio_lock; io->ci_ndelay_tried = retried; + io->ci_parallel_dio = is_parallel_dio; - if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) { - bool range_locked = false; - + if (cl_io_rw_init(env, io, iot, *ppos, per_bytes) == 0) { if (file->f_flags & O_APPEND) range_lock_init(&range, 0, LUSTRE_EOF); else - range_lock_init(&range, *ppos, *ppos + count - 1); - - vio->vui_fd = LUSTRE_FPRIVATE(file); - vio->vui_io_subtype = args->via_io_subtype; - - switch (vio->vui_io_subtype) { - case IO_NORMAL: - vio->vui_iter = args->u.normal.via_iter; - vio->vui_iocb = args->u.normal.via_iocb; - /* Direct IO reads must also take range lock, - * or multiple reads will try to work on the same pages - * See LU-6227 for details. */ - if (((iot == CIT_WRITE) || - (iot == CIT_READ && (file->f_flags & O_DIRECT))) && - !(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) { - CDEBUG(D_VFSTRACE, "Range lock "RL_FMT"\n", - RL_PARA(&range)); - rc = range_lock(&lli->lli_write_tree, &range); - if (rc < 0) - GOTO(out, rc); + range_lock_init(&range, *ppos, *ppos + per_bytes - 1); + + vio->vui_fd = file->private_data; + vio->vui_iter = args->u.normal.via_iter; + vio->vui_iocb = args->u.normal.via_iocb; + /* Direct IO reads must also take range lock, + * or multiple reads will try to work on the same pages + * See LU-6227 for details. 
+ */ + if (((iot == CIT_WRITE) || + (iot == CIT_READ && (file->f_flags & O_DIRECT))) && + !(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) { + CDEBUG(D_VFSTRACE, "Range lock "RL_FMT"\n", + RL_PARA(&range)); + rc = range_lock(&lli->lli_write_tree, &range); + if (rc < 0) + GOTO(out, rc); - range_locked = true; - } - break; - case IO_SPLICE: - vio->u.splice.vui_pipe = args->u.splice.via_pipe; - vio->u.splice.vui_flags = args->u.splice.via_flags; - break; - default: - CERROR("unknown IO subtype %u\n", vio->vui_io_subtype); - LBUG(); + range_locked = true; } ll_cl_add(file, env, io, LCC_RW); rc = cl_io_loop(env, io); ll_cl_remove(file, env); - if (range_locked) { + if (range_locked && !is_parallel_dio) { CDEBUG(D_VFSTRACE, "Range unlock "RL_FMT"\n", RL_PARA(&range)); range_unlock(&lli->lli_write_tree, &range); + range_locked = false; } } else { /* cl_io_rw_init() handled IO */ rc = io->ci_result; } + /* N/B: parallel DIO may be disabled during i/o submission; + * if that occurs, async RPCs are resolved before we get here, and this + * wait call completes immediately. + */ + if (is_parallel_dio) { + struct cl_sync_io *anchor = &io->ci_aio->cda_sync; + + /* for dio, EIOCBQUEUED is an implementation detail, + * and we don't return it to userspace + */ + if (rc == -EIOCBQUEUED) + rc = 0; + + rc2 = cl_sync_io_wait_recycle(env, anchor, 0, 0); + if (rc2 < 0) + rc = rc2; + + if (range_locked) { + range_unlock(&lli->lli_write_tree, &range); + range_locked = false; + } + } + + /* + * In order to move forward AIO, ci_nob was increased, + * but that doesn't mean io have been finished, it just + * means io have been submited, we will always return + * EIOCBQUEUED to the caller, So we could only return + * number of bytes in non-AIO case. + */ if (io->ci_nob > 0) { - result += io->ci_nob; - count -= io->ci_nob; - *ppos = io->u.ci_wr.wr.crw_pos; /* for splice */ + if (!is_aio) { + if (rc2 == 0) { + result += io->ci_nob; + *ppos = io->u.ci_wr.wr.crw_pos; /* for splice */ + } else if (rc2) { + result = 0; + } + } + count -= io->ci_nob; /* prepare IO restart */ - if (count > 0 && args->via_io_subtype == IO_NORMAL) + if (count > 0) args->u.normal.via_iter = vio->vui_iter; + + if (partial_io) { + /** + * Reexpand iov count because it was zero + * after IO finish. + */ + iov_iter_reexpand(vio->vui_iter, count); + if (per_bytes == io->ci_nob) + io->ci_need_restart = 1; + } } out: cl_io_fini(env, io); @@ -1545,7 +1773,8 @@ out: file->f_path.dentry->d_name.name, iot, rc, result, io->ci_need_restart); - if ((rc == 0 || rc == -ENODATA) && count > 0 && io->ci_need_restart) { + if ((rc == 0 || rc == -ENODATA || rc == -ENOLCK) && + count > 0 && io->ci_need_restart) { CDEBUG(D_VFSTRACE, "%s: restart %s from %lld, count: %zu, ret: %zd, rc: %d\n", file_dentry(file)->d_name.name, @@ -1553,10 +1782,31 @@ out: *ppos, count, result, rc); /* preserve the tried count for FLR */ retried = io->ci_ndelay_tried; - restarted = true; + dio_lock = io->ci_dio_lock; goto restart; } + if (io->ci_aio) { + /* + * VFS will call aio_complete() if no -EIOCBQUEUED + * is returned for AIO, so we can not call aio_complete() + * in our end_io(). + */ + if (rc != -EIOCBQUEUED) + io->ci_aio->cda_no_aio_complete = 1; + /** + * Drop one extra reference so that end_io() could be + * called for this IO context, we could call it after + * we make sure all AIO requests have been proceed. + */ + cl_sync_io_note(env, &io->ci_aio->cda_sync, + rc == -EIOCBQUEUED ? 
0 : rc); + if (!is_aio) { + cl_aio_free(env, io->ci_aio); + io->ci_aio = NULL; + } + } + if (iot == CIT_READ) { if (result > 0) ll_stats_ops_tally(ll_i2sbi(inode), @@ -1643,7 +1893,7 @@ ll_do_fast_read(struct kiocb *iocb, struct iov_iter *iter) if (result > 0) { ll_heat_add(file_inode(iocb->ki_filp), CIT_READ, result); ll_stats_ops_tally(ll_i2sbi(file_inode(iocb->ki_filp)), - LPROC_LL_READ_BYTES, result); + LPROC_LL_READ_BYTES, result); } return result; @@ -1656,9 +1906,11 @@ static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct lu_env *env; struct vvp_io_args *args; + struct file *file = iocb->ki_filp; ssize_t result; ssize_t rc2; __u16 refcheck; + ktime_t kstart = ktime_get(); bool cached; if (!iov_iter_count(to)) @@ -1677,9 +1929,9 @@ static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to) */ result = pcc_file_read_iter(iocb, to, &cached); if (cached) - return result; + GOTO(out, result); - ll_ras_enter(iocb->ki_filp); + ll_ras_enter(file, iocb->ki_pos, iov_iter_count(to)); result = ll_do_fast_read(iocb, to); if (result < 0 || iov_iter_count(to) == 0) @@ -1689,11 +1941,11 @@ static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to) if (IS_ERR(env)) return PTR_ERR(env); - args = ll_env_args(env, IO_NORMAL); + args = ll_env_args(env); args->u.normal.via_iter = to; args->u.normal.via_iocb = iocb; - rc2 = ll_file_io_generic(env, args, iocb->ki_filp, CIT_READ, + rc2 = ll_file_io_generic(env, args, file, CIT_READ, &iocb->ki_pos, iov_iter_count(to)); if (rc2 > 0) result += rc2; @@ -1702,6 +1954,14 @@ static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to) cl_env_put(env, &refcheck); out: + if (result > 0) { + ll_rw_stats_tally(ll_i2sbi(file_inode(file)), current->pid, + file->private_data, iocb->ki_pos, result, + READ); + ll_stats_ops_tally(ll_i2sbi(file_inode(file)), LPROC_LL_READ, + ktime_us_delta(ktime_get(), kstart)); + } + return result; } @@ -1756,7 +2016,7 @@ static ssize_t ll_do_tiny_write(struct kiocb *iocb, struct iov_iter *iter) ll_heat_add(inode, CIT_WRITE, result); ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_WRITE_BYTES, result); - ll_file_set_flag(ll_i2info(inode), LLIF_DATA_MODIFIED); + set_bit(LLIF_DATA_MODIFIED, &ll_i2info(inode)->lli_flags); } CDEBUG(D_VFSTRACE, "result: %zu, original count %zu\n", result, count); @@ -1772,8 +2032,10 @@ static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from) struct vvp_io_args *args; struct lu_env *env; ssize_t rc_tiny = 0, rc_normal; + struct file *file = iocb->ki_filp; __u16 refcheck; bool cached; + ktime_t kstart = ktime_get(); int result; ENTRY; @@ -1793,15 +2055,15 @@ static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from) */ result = pcc_file_write_iter(iocb, from, &cached); if (cached && result != -ENOSPC && result != -EDQUOT) - return result; + GOTO(out, rc_normal = result); /* NB: we can't do direct IO for tiny writes because they use the page * cache, we can't do sync writes because tiny writes can't flush * pages, and we can't do append writes because we can't guarantee the * required DLM locks are held to protect file size. 
*/ - if (ll_sbi_has_tiny_write(ll_i2sbi(file_inode(iocb->ki_filp))) && - !(iocb->ki_filp->f_flags & (O_DIRECT | O_SYNC | O_APPEND))) + if (ll_sbi_has_tiny_write(ll_i2sbi(file_inode(file))) && + !(file->f_flags & (O_DIRECT | O_SYNC | O_APPEND))) rc_tiny = ll_do_tiny_write(iocb, from); /* In case of error, go on and try normal write - Only stop if tiny @@ -1814,12 +2076,12 @@ static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from) if (IS_ERR(env)) return PTR_ERR(env); - args = ll_env_args(env, IO_NORMAL); + args = ll_env_args(env); args->u.normal.via_iter = from; args->u.normal.via_iocb = iocb; - rc_normal = ll_file_io_generic(env, args, iocb->ki_filp, CIT_WRITE, - &iocb->ki_pos, iov_iter_count(from)); + rc_normal = ll_file_io_generic(env, args, file, CIT_WRITE, + &iocb->ki_pos, iov_iter_count(from)); /* On success, combine bytes written. */ if (rc_tiny >= 0 && rc_normal > 0) @@ -1832,6 +2094,14 @@ static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from) cl_env_put(env, &refcheck); out: + if (rc_normal > 0) { + ll_rw_stats_tally(ll_i2sbi(file_inode(file)), current->pid, + file->private_data, iocb->ki_pos, + rc_normal, WRITE); + ll_stats_ops_tally(ll_i2sbi(file_inode(file)), LPROC_LL_WRITE, + ktime_us_delta(ktime_get(), kstart)); + } + RETURN(rc_normal); } @@ -1840,7 +2110,8 @@ out: * XXX: exact copy from kernel code (__generic_file_aio_write_nolock) */ static int ll_file_get_iov_count(const struct iovec *iov, - unsigned long *nr_segs, size_t *count) + unsigned long *nr_segs, size_t *count, + int access_flags) { size_t cnt = 0; unsigned long seg; @@ -1855,7 +2126,7 @@ static int ll_file_get_iov_count(const struct iovec *iov, cnt += iv->iov_len; if (unlikely((ssize_t)(cnt|iv->iov_len) < 0)) return -EINVAL; - if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len)) + if (access_ok(access_flags, iv->iov_base, iv->iov_len)) continue; if (seg == 0) return -EFAULT; @@ -1875,7 +2146,7 @@ static ssize_t ll_file_aio_read(struct kiocb *iocb, const struct iovec *iov, ssize_t result; ENTRY; - result = ll_file_get_iov_count(iov, &nr_segs, &iov_count); + result = ll_file_get_iov_count(iov, &nr_segs, &iov_count, VERIFY_READ); if (result) RETURN(result); @@ -1931,7 +2202,7 @@ static ssize_t ll_file_aio_write(struct kiocb *iocb, const struct iovec *iov, ssize_t result; ENTRY; - result = ll_file_get_iov_count(iov, &nr_segs, &iov_count); + result = ll_file_get_iov_count(iov, &nr_segs, &iov_count, VERIFY_WRITE); if (result) RETURN(result); @@ -1977,41 +2248,6 @@ static ssize_t ll_file_write(struct file *file, const char __user *buf, } #endif /* !HAVE_FILE_OPERATIONS_READ_WRITE_ITER */ -/* - * Send file content (through pagecache) somewhere with helper - */ -static ssize_t ll_file_splice_read(struct file *in_file, loff_t *ppos, - struct pipe_inode_info *pipe, size_t count, - unsigned int flags) -{ - struct lu_env *env; - struct vvp_io_args *args; - ssize_t result; - __u16 refcheck; - bool cached; - - ENTRY; - - result = pcc_file_splice_read(in_file, ppos, pipe, - count, flags, &cached); - if (cached) - RETURN(result); - - ll_ras_enter(in_file); - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - RETURN(PTR_ERR(env)); - - args = ll_env_args(env, IO_SPLICE); - args->u.splice.via_pipe = pipe; - args->u.splice.via_flags = flags; - - result = ll_file_io_generic(env, args, in_file, CIT_READ, ppos, count); - cl_env_put(env, &refcheck); - RETURN(result); -} - int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry, __u64 flags, struct lov_user_md *lum, int 
lum_size) { @@ -2025,7 +2261,7 @@ int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry, if ((__swab32(lum->lmm_magic) & le32_to_cpu(LOV_MAGIC_MASK)) == le32_to_cpu(LOV_MAGIC_MAGIC)) { /* this code will only exist for big-endian systems */ - lustre_swab_lov_user_md(lum); + lustre_swab_lov_user_md(lum, 0); } ll_inode_size_lock(inode); @@ -2046,44 +2282,45 @@ int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename, struct lov_mds_md **lmmp, int *lmm_size, struct ptlrpc_request **request) { - struct ll_sb_info *sbi = ll_i2sbi(inode); - struct mdt_body *body; - struct lov_mds_md *lmm = NULL; - struct ptlrpc_request *req = NULL; - struct md_op_data *op_data; - int rc, lmmsize; + struct ll_sb_info *sbi = ll_i2sbi(inode); + struct mdt_body *body; + struct lov_mds_md *lmm = NULL; + struct ptlrpc_request *req = NULL; + struct md_op_data *op_data; + int rc, lmmsize; + + ENTRY; rc = ll_get_default_mdsize(sbi, &lmmsize); if (rc) RETURN(rc); - op_data = ll_prep_md_op_data(NULL, inode, NULL, filename, - strlen(filename), lmmsize, - LUSTRE_OPC_ANY, NULL); - if (IS_ERR(op_data)) - RETURN(PTR_ERR(op_data)); + op_data = ll_prep_md_op_data(NULL, inode, NULL, filename, + strlen(filename), lmmsize, + LUSTRE_OPC_ANY, NULL); + if (IS_ERR(op_data)) + RETURN(PTR_ERR(op_data)); - op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA; - rc = md_getattr_name(sbi->ll_md_exp, op_data, &req); - ll_finish_md_op_data(op_data); - if (rc < 0) { - CDEBUG(D_INFO, "md_getattr_name failed " - "on %s: rc %d\n", filename, rc); - GOTO(out, rc); - } + op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA; + rc = md_getattr_name(sbi->ll_md_exp, op_data, &req); + ll_finish_md_op_data(op_data); + if (rc < 0) { + CDEBUG(D_INFO, "md_getattr_name failed " + "on %s: rc %d\n", filename, rc); + GOTO(out, rc); + } - body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - LASSERT(body != NULL); /* checked by mdc_getattr_name */ + body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); + LASSERT(body != NULL); /* checked by mdc_getattr_name */ lmmsize = body->mbo_eadatasize; if (!(body->mbo_valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) || - lmmsize == 0) { - GOTO(out, rc = -ENODATA); - } + lmmsize == 0) + GOTO(out, rc = -ENODATA); - lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize); - LASSERT(lmm != NULL); + lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize); + LASSERT(lmm != NULL); if (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V1) && lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3) && @@ -2093,11 +2330,10 @@ int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename, /* * This is coming from the MDS, so is probably in - * little endian. We convert it to host endian before + * little endian. We convert it to host endian before * passing it to userspace. 
*/ - if ((lmm->lmm_magic & __swab32(LOV_MAGIC_MAGIC)) == - __swab32(LOV_MAGIC_MAGIC)) { + if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) { int stripe_count = 0; if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1) || @@ -2106,28 +2342,74 @@ int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename, if (le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_RELEASED) stripe_count = 0; - } - - lustre_swab_lov_user_md((struct lov_user_md *)lmm); + lustre_swab_lov_user_md((struct lov_user_md *)lmm, 0); - /* if function called for directory - we should - * avoid swab not existent lsm objects */ - if (lmm->lmm_magic == LOV_MAGIC_V1 && S_ISREG(body->mbo_mode)) - lustre_swab_lov_user_md_objects( + /* if function called for directory - we should + * avoid swab not existent lsm objects + */ + if (lmm->lmm_magic == LOV_MAGIC_V1 && + S_ISREG(body->mbo_mode)) + lustre_swab_lov_user_md_objects( ((struct lov_user_md_v1 *)lmm)->lmm_objects, stripe_count); - else if (lmm->lmm_magic == LOV_MAGIC_V3 && - S_ISREG(body->mbo_mode)) - lustre_swab_lov_user_md_objects( + else if (lmm->lmm_magic == LOV_MAGIC_V3 && + S_ISREG(body->mbo_mode)) + lustre_swab_lov_user_md_objects( ((struct lov_user_md_v3 *)lmm)->lmm_objects, stripe_count); + } else if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_COMP_V1)) { + lustre_swab_lov_comp_md_v1( + (struct lov_comp_md_v1 *)lmm); + } } + if (lmm->lmm_magic == LOV_MAGIC_COMP_V1) { + struct lov_comp_md_v1 *comp_v1 = NULL; + struct lov_comp_md_entry_v1 *ent; + struct lov_user_md_v1 *v1; + __u32 off; + int i = 0; + + comp_v1 = (struct lov_comp_md_v1 *)lmm; + /* Dump the striping information */ + for (; i < comp_v1->lcm_entry_count; i++) { + ent = &comp_v1->lcm_entries[i]; + off = ent->lcme_offset; + v1 = (struct lov_user_md_v1 *)((char *)lmm + off); + CDEBUG(D_INFO, + "comp[%d]: stripe_count=%u, stripe_size=%u\n", + i, v1->lmm_stripe_count, v1->lmm_stripe_size); + } + + /** + * Return valid stripe_count and stripe_size instead of 0 for + * DoM files to avoid divide-by-zero for older userspace that + * calls this ioctl, e.g. lustre ADIO driver. + */ + if (lmm->lmm_stripe_count == 0) + lmm->lmm_stripe_count = 1; + if (lmm->lmm_stripe_size == 0) { + /* Since the first component of the file data is placed + * on the MDT for faster access, the stripe_size of the + * second one is always that applications which are + * doing large IOs. + */ + if (lmm->lmm_pattern == LOV_PATTERN_MDT) + i = comp_v1->lcm_entry_count > 1 ? 1 : 0; + else + i = comp_v1->lcm_entry_count > 1 ? 
+ comp_v1->lcm_entry_count - 1 : 0; + ent = &comp_v1->lcm_entries[i]; + off = ent->lcme_offset; + v1 = (struct lov_user_md_v1 *)((char *)lmm + off); + lmm->lmm_stripe_size = v1->lmm_stripe_size; + } + } out: *lmmp = lmm; *lmm_size = lmmsize; *request = req; - return rc; + RETURN(rc); } static int ll_lov_setea(struct inode *inode, struct file *file, @@ -2140,7 +2422,7 @@ static int ll_lov_setea(struct inode *inode, struct file *file, int rc; ENTRY; - if (!cfs_capable(CFS_CAP_SYS_ADMIN)) + if (!capable(CAP_SYS_ADMIN)) RETURN(-EPERM); OBD_ALLOC_LARGE(lump, lum_size); @@ -2203,6 +2485,13 @@ static int ll_lov_setstripe(struct inode *inode, struct file *file, GOTO(out, rc); rc = ll_file_getstripe(inode, arg, lum_size); + if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode) && + ll_i2info(inode)->lli_clob) { + struct iattr attr = { 0 }; + + rc = cl_setattr_ost(ll_i2info(inode)->lli_clob, &attr, + OP_XVALID_FLAGS, LUSTRE_ENCRYPT_FL); + } } cl_lov_delay_create_clear(&file->f_flags); @@ -2211,12 +2500,13 @@ out: RETURN(rc); } + static int ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg) { struct ll_inode_info *lli = ll_i2info(inode); struct cl_object *obj = lli->lli_clob; - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); + struct ll_file_data *fd = file->private_data; struct ll_grouplock grouplock; int rc; ENTRY; @@ -2226,18 +2516,28 @@ ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg) RETURN(-EINVAL); } - if (ll_file_nolock(file)) - RETURN(-EOPNOTSUPP); + if (ll_file_nolock(file)) + RETURN(-EOPNOTSUPP); +retry: + if (file->f_flags & O_NONBLOCK) { + if (!mutex_trylock(&lli->lli_group_mutex)) + RETURN(-EAGAIN); + } else + mutex_lock(&lli->lli_group_mutex); - spin_lock(&lli->lli_lock); if (fd->fd_flags & LL_FILE_GROUP_LOCKED) { CWARN("group lock already existed with gid %lu\n", fd->fd_grouplock.lg_gid); - spin_unlock(&lli->lli_lock); - RETURN(-EINVAL); + GOTO(out, rc = -EINVAL); + } + if (arg != lli->lli_group_gid && lli->lli_group_users != 0) { + if (file->f_flags & O_NONBLOCK) + GOTO(out, rc = -EAGAIN); + mutex_unlock(&lli->lli_group_mutex); + wait_var_event(&lli->lli_group_users, !lli->lli_group_users); + GOTO(retry, rc = 0); } LASSERT(fd->fd_grouplock.lg_lock == NULL); - spin_unlock(&lli->lli_lock); /** * XXX: group lock needs to protect all OST objects while PFL @@ -2257,71 +2557,77 @@ ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg) env = cl_env_get(&refcheck); if (IS_ERR(env)) - RETURN(PTR_ERR(env)); + GOTO(out, rc = PTR_ERR(env)); rc = cl_object_layout_get(env, obj, &cl); - if (!rc && cl.cl_is_composite) + if (rc >= 0 && cl.cl_is_composite) rc = ll_layout_write_intent(inode, LAYOUT_INTENT_WRITE, &ext); cl_env_put(env, &refcheck); - if (rc) - RETURN(rc); + if (rc < 0) + GOTO(out, rc); } rc = cl_get_grouplock(ll_i2info(inode)->lli_clob, arg, (file->f_flags & O_NONBLOCK), &grouplock); - if (rc) - RETURN(rc); - spin_lock(&lli->lli_lock); - if (fd->fd_flags & LL_FILE_GROUP_LOCKED) { - spin_unlock(&lli->lli_lock); - CERROR("another thread just won the race\n"); - cl_put_grouplock(&grouplock); - RETURN(-EINVAL); - } + if (rc) + GOTO(out, rc); fd->fd_flags |= LL_FILE_GROUP_LOCKED; fd->fd_grouplock = grouplock; - spin_unlock(&lli->lli_lock); + if (lli->lli_group_users == 0) + lli->lli_group_gid = grouplock.lg_gid; + lli->lli_group_users++; CDEBUG(D_INFO, "group lock %lu obtained\n", arg); - RETURN(0); +out: + mutex_unlock(&lli->lli_group_mutex); + + RETURN(rc); } static int ll_put_grouplock(struct inode *inode, struct file 
*file, unsigned long arg) { struct ll_inode_info *lli = ll_i2info(inode); - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); + struct ll_file_data *fd = file->private_data; struct ll_grouplock grouplock; + int rc; ENTRY; - spin_lock(&lli->lli_lock); + mutex_lock(&lli->lli_group_mutex); if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) { - spin_unlock(&lli->lli_lock); - CWARN("no group lock held\n"); - RETURN(-EINVAL); - } + CWARN("no group lock held\n"); + GOTO(out, rc = -EINVAL); + } LASSERT(fd->fd_grouplock.lg_lock != NULL); if (fd->fd_grouplock.lg_gid != arg) { CWARN("group lock %lu doesn't match current id %lu\n", arg, fd->fd_grouplock.lg_gid); - spin_unlock(&lli->lli_lock); - RETURN(-EINVAL); + GOTO(out, rc = -EINVAL); } grouplock = fd->fd_grouplock; memset(&fd->fd_grouplock, 0, sizeof(fd->fd_grouplock)); fd->fd_flags &= ~LL_FILE_GROUP_LOCKED; - spin_unlock(&lli->lli_lock); cl_put_grouplock(&grouplock); + + lli->lli_group_users--; + if (lli->lli_group_users == 0) { + lli->lli_group_gid = 0; + wake_up_var(&lli->lli_group_users); + } CDEBUG(D_INFO, "group lock %lu released\n", arg); - RETURN(0); + GOTO(out, rc = 0); +out: + mutex_unlock(&lli->lli_group_mutex); + + RETURN(rc); } /** @@ -2335,28 +2641,30 @@ static int ll_put_grouplock(struct inode *inode, struct file *file, */ int ll_release_openhandle(struct dentry *dentry, struct lookup_intent *it) { - struct inode *inode = dentry->d_inode; - struct obd_client_handle *och; - int rc; - ENTRY; + struct inode *inode = dentry->d_inode; + struct obd_client_handle *och; + int rc; + ENTRY; - LASSERT(inode); + LASSERT(inode); - /* Root ? Do nothing. */ - if (dentry->d_inode->i_sb->s_root == dentry) - RETURN(0); + /* Root ? Do nothing. */ + if (is_root_inode(inode)) + RETURN(0); - /* No open handle to close? Move away */ - if (!it_disposition(it, DISP_OPEN_OPEN)) - RETURN(0); + /* No open handle to close? Move away */ + if (!it_disposition(it, DISP_OPEN_OPEN)) + RETURN(0); - LASSERT(it_open_error(DISP_OPEN_OPEN, it) == 0); + LASSERT(it_open_error(DISP_OPEN_OPEN, it) == 0); - OBD_ALLOC(och, sizeof(*och)); - if (!och) - GOTO(out, rc = -ENOMEM); + OBD_ALLOC(och, sizeof(*och)); + if (!och) + GOTO(out, rc = -ENOMEM); - ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och); + rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och); + if (rc) + GOTO(out, rc); rc = ll_close_inode_openhandle(inode, och, 0, NULL); out: @@ -2436,7 +2744,7 @@ int ll_fid2path(struct inode *inode, void __user *arg) ENTRY; - if (!cfs_capable(CFS_CAP_DAC_READ_SEARCH) && + if (!capable(CAP_DAC_READ_SEARCH) && !(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH)) RETURN(-EPERM); @@ -2724,7 +3032,7 @@ int ll_hsm_state_set(struct inode *inode, struct hsm_state_set *hss) /* Non-root users are forbidden to set or clear flags which are * NOT defined in HSM_USER_MASK. 
*/ if (((hss->hss_setmask | hss->hss_clearmask) & ~HSM_USER_MASK) && - !cfs_capable(CFS_CAP_SYS_ADMIN)) + !capable(CAP_SYS_ADMIN)) RETURN(-EPERM); if (!exp_connect_archive_id_array(exp)) { @@ -2895,9 +3203,9 @@ int ll_file_lock_ahead(struct file *file, struct llapi_lu_ladvise *ladvise) ENTRY; - CDEBUG(D_VFSTRACE, "Lock request: file=%.*s, inode=%p, mode=%s " - "start=%llu, end=%llu\n", dentry->d_name.len, - dentry->d_name.name, dentry->d_inode, + CDEBUG(D_VFSTRACE, + "Lock request: file=%pd, inode=%p, mode=%s start=%llu, end=%llu\n", + dentry, dentry->d_inode, user_lockname[ladvise->lla_lockahead_mode], (__u64) start, (__u64) end); @@ -2928,8 +3236,7 @@ int ll_file_lock_ahead(struct file *file, struct llapi_lu_ladvise *ladvise) descr->cld_mode = cl_mode; /* CEF_MUST is used because we do not want to convert a * lockahead request to a lockless lock */ - descr->cld_enq_flags = CEF_MUST | CEF_LOCK_NO_EXPAND | - CEF_NONBLOCK; + descr->cld_enq_flags = CEF_MUST | CEF_LOCK_NO_EXPAND; if (ladvise->lla_peradvice_flags & LF_ASYNC) descr->cld_enq_flags |= CEF_SPECULATIVE; @@ -2973,8 +3280,8 @@ static int ll_ladvise_sanity(struct inode *inode, if (advice > LU_LADVISE_MAX || advice == LU_LADVISE_INVALID) { rc = -EINVAL; - CDEBUG(D_VFSTRACE, "%s: advice with value '%d' not recognized," - "last supported advice is %s (value '%d'): rc = %d\n", + CDEBUG(D_VFSTRACE, + "%s: advice with value '%d' not recognized, last supported advice is %s (value '%d'): rc = %d\n", sbi->ll_fsname, advice, ladvise_names[LU_LADVISE_MAX-1], LU_LADVISE_MAX-1, rc); GOTO(out, rc); @@ -3002,6 +3309,7 @@ static int ll_ladvise_sanity(struct inode *inode, ladvise_names[advice], rc); GOTO(out, rc); } + /* fallthrough */ case LU_LADVISE_WILLREAD: case LU_LADVISE_DONTNEED: default: @@ -3081,7 +3389,7 @@ static int ll_ladvise(struct inode *inode, struct file *file, __u64 flags, static int ll_lock_noexpand(struct file *file, int flags) { - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); + struct ll_file_data *fd = file->private_data; fd->ll_lock_no_expand = !(flags & LF_UNSET); @@ -3099,7 +3407,7 @@ int ll_ioctl_fsgetxattr(struct inode *inode, unsigned int cmd, RETURN(-EFAULT); fsxattr.fsx_xflags = ll_inode_flags_to_xflags(inode->i_flags); - if (ll_file_test_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT)) + if (test_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags)) fsxattr.fsx_xflags |= FS_XFLAG_PROJINHERIT; fsxattr.fsx_projid = ll_i2info(inode)->lli_projid; if (copy_to_user((struct fsxattr __user *)arg, @@ -3109,48 +3417,49 @@ int ll_ioctl_fsgetxattr(struct inode *inode, unsigned int cmd, RETURN(0); } -int ll_ioctl_check_project(struct inode *inode, struct fsxattr *fa) +int ll_ioctl_check_project(struct inode *inode, __u32 xflags, + __u32 projid) { /* * Project Quota ID state is only allowed to change from within the init * namespace. Enforce that restriction only if we are trying to change * the quota ID state. Everything else is allowed in user namespaces. */ - if (current_user_ns() == &init_user_ns) + if (current_user_ns() == &init_user_ns) { + /* + * Caller is allowed to change the project ID. if it is being + * changed, make sure that the new value is valid. 
+ */ + if (ll_i2info(inode)->lli_projid != projid && + !projid_valid(make_kprojid(&init_user_ns, projid))) + return -EINVAL; + return 0; + } - if (ll_i2info(inode)->lli_projid != fa->fsx_projid) + if (ll_i2info(inode)->lli_projid != projid) return -EINVAL; - if (ll_file_test_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT)) { - if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT)) + if (test_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags)) { + if (!(xflags & FS_XFLAG_PROJINHERIT)) return -EINVAL; } else { - if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT) + if (xflags & FS_XFLAG_PROJINHERIT) return -EINVAL; } return 0; } -int ll_ioctl_fssetxattr(struct inode *inode, unsigned int cmd, - unsigned long arg) +static int ll_set_project(struct inode *inode, __u32 xflags, __u32 projid) { - struct md_op_data *op_data; struct ptlrpc_request *req = NULL; - int rc = 0; - struct fsxattr fsxattr; struct cl_object *obj; - struct iattr *attr; - int flags; - - if (copy_from_user(&fsxattr, - (const struct fsxattr __user *)arg, - sizeof(fsxattr))) - RETURN(-EFAULT); + unsigned int inode_flags; + int rc = 0; - rc = ll_ioctl_check_project(inode, &fsxattr); + rc = ll_ioctl_check_project(inode, xflags, projid); if (rc) RETURN(rc); @@ -3159,39 +3468,118 @@ int ll_ioctl_fssetxattr(struct inode *inode, unsigned int cmd, if (IS_ERR(op_data)) RETURN(PTR_ERR(op_data)); - flags = ll_xflags_to_inode_flags(fsxattr.fsx_xflags); - op_data->op_attr_flags = ll_inode_to_ext_flags(flags); - if (fsxattr.fsx_xflags & FS_XFLAG_PROJINHERIT) + inode_flags = ll_xflags_to_inode_flags(xflags); + op_data->op_attr_flags = ll_inode_to_ext_flags(inode_flags); + if (xflags & FS_XFLAG_PROJINHERIT) op_data->op_attr_flags |= LUSTRE_PROJINHERIT_FL; - op_data->op_projid = fsxattr.fsx_projid; + op_data->op_projid = projid; op_data->op_xvalid |= OP_XVALID_PROJID | OP_XVALID_FLAGS; - rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data, NULL, - 0, &req); + rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data, NULL, 0, &req); ptlrpc_req_finished(req); if (rc) GOTO(out_fsxattr, rc); ll_update_inode_flags(inode, op_data->op_attr_flags); - obj = ll_i2info(inode)->lli_clob; - if (obj == NULL) + + /* Avoid OST RPC if this is only ioctl setting project inherit flag */ + if (xflags == 0 || xflags == FS_XFLAG_PROJINHERIT) GOTO(out_fsxattr, rc); - OBD_ALLOC_PTR(attr); - if (attr == NULL) - GOTO(out_fsxattr, rc = -ENOMEM); + obj = ll_i2info(inode)->lli_clob; + if (obj) { + struct iattr attr = { 0 }; + + rc = cl_setattr_ost(obj, &attr, OP_XVALID_FLAGS, xflags); + } - rc = cl_setattr_ost(obj, attr, OP_XVALID_FLAGS, - fsxattr.fsx_xflags); - OBD_FREE_PTR(attr); out_fsxattr: ll_finish_md_op_data(op_data); RETURN(rc); } +int ll_ioctl_fssetxattr(struct inode *inode, unsigned int cmd, + unsigned long arg) +{ + struct fsxattr fsxattr; + + ENTRY; + + if (copy_from_user(&fsxattr, + (const struct fsxattr __user *)arg, + sizeof(fsxattr))) + RETURN(-EFAULT); + + RETURN(ll_set_project(inode, fsxattr.fsx_xflags, + fsxattr.fsx_projid)); +} + +int ll_ioctl_project(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct lu_project lu_project; + struct dentry *dentry = file_dentry(file); + struct inode *inode = file_inode(file); + struct dentry *child_dentry = NULL; + int rc = 0, name_len; + + if (copy_from_user(&lu_project, + (const struct lu_project __user *)arg, + sizeof(lu_project))) + RETURN(-EFAULT); + + /* apply child dentry if name is valid */ + name_len = strnlen(lu_project.project_name, NAME_MAX); + if (name_len > 0 && name_len <= NAME_MAX) { + inode_lock(inode); + 
child_dentry = lookup_one_len(lu_project.project_name, + dentry, name_len); + inode_unlock(inode); + if (IS_ERR(child_dentry)) { + rc = PTR_ERR(child_dentry); + goto out; + } + inode = child_dentry->d_inode; + if (!inode) { + rc = -ENOENT; + goto out; + } + } else if (name_len > NAME_MAX) { + rc = -EINVAL; + goto out; + } + + switch (lu_project.project_type) { + case LU_PROJECT_SET: + rc = ll_set_project(inode, lu_project.project_xflags, + lu_project.project_id); + break; + case LU_PROJECT_GET: + lu_project.project_xflags = + ll_inode_flags_to_xflags(inode->i_flags); + if (test_bit(LLIF_PROJECT_INHERIT, + &ll_i2info(inode)->lli_flags)) + lu_project.project_xflags |= FS_XFLAG_PROJINHERIT; + lu_project.project_id = ll_i2info(inode)->lli_projid; + if (copy_to_user((struct lu_project __user *)arg, + &lu_project, sizeof(lu_project))) { + rc = -EFAULT; + goto out; + } + break; + default: + rc = -EINVAL; + break; + } +out: + if (!IS_ERR_OR_NULL(child_dentry)) + dput(child_dentry); + RETURN(rc); +} + static long ll_file_unlock_lease(struct file *file, struct ll_ioc_lease *ioc, unsigned long arg) { struct inode *inode = file_inode(file); - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); + struct ll_file_data *fd = file->private_data; struct ll_inode_info *lli = ll_i2info(inode); struct obd_client_handle *och = NULL; struct split_param sp; @@ -3276,6 +3664,7 @@ static long ll_file_unlock_lease(struct file *file, struct ll_ioc_lease *ioc, if (!layout_file) GOTO(out_lease_close, rc = -EBADF); + /* if layout_file == file, it means to destroy the mirror */ sp.sp_inode = file_inode(layout_file); sp.sp_mirror_id = (__u16)mirror_id; data = &sp; @@ -3333,6 +3722,8 @@ out: case LL_LEASE_LAYOUT_SPLIT: if (layout_file) fput(layout_file); + + ll_layout_refresh(inode, &fd->fd_layout_version); break; case LL_LEASE_PCC_ATTACH: if (!rc) @@ -3354,7 +3745,7 @@ static long ll_file_set_lease(struct file *file, struct ll_ioc_lease *ioc, { struct inode *inode = file_inode(file); struct ll_inode_info *lli = ll_i2info(inode); - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); + struct ll_file_data *fd = file->private_data; struct obd_client_handle *och = NULL; __u64 open_flags = 0; bool lease_broken; @@ -3455,7 +3846,7 @@ static long ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(file); - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); + struct ll_file_data *fd = file->private_data; int flags, rc; ENTRY; @@ -3605,7 +3996,10 @@ out: RETURN(0); } + case OBD_IOC_GETNAME_OLD: + /* fall through */ case OBD_IOC_GETDTNAME: + /* fall through */ case OBD_IOC_GETMDNAME: RETURN(ll_get_obd_name(inode, cmd, arg)); case LL_IOC_HSM_STATE_GET: { @@ -3828,10 +4222,12 @@ out_ladvise: fd->fd_designated_mirror = (__u32)arg; RETURN(0); } - case LL_IOC_FSGETXATTR: + case FS_IOC_FSGETXATTR: RETURN(ll_ioctl_fsgetxattr(inode, cmd, arg)); - case LL_IOC_FSSETXATTR: + case FS_IOC_FSSETXATTR: RETURN(ll_ioctl_fssetxattr(inode, cmd, arg)); + case LL_IOC_PROJECT: + RETURN(ll_ioctl_project(file, cmd, arg)); case BLKSSZGET: RETURN(put_user(PAGE_SIZE, (int __user *)arg)); case LL_IOC_HEAT_GET: { @@ -3911,101 +4307,135 @@ out_state: OBD_FREE_PTR(state); RETURN(rc); } +#ifdef HAVE_LUSTRE_CRYPTO + case LL_IOC_SET_ENCRYPTION_POLICY: + if (!ll_sbi_has_encrypt(ll_i2sbi(inode))) + return -EOPNOTSUPP; + return llcrypt_ioctl_set_policy(file, (const void __user *)arg); + case LL_IOC_GET_ENCRYPTION_POLICY_EX: + if (!ll_sbi_has_encrypt(ll_i2sbi(inode))) + return -EOPNOTSUPP; + return 
llcrypt_ioctl_get_policy_ex(file, (void __user *)arg); + case LL_IOC_ADD_ENCRYPTION_KEY: + if (!ll_sbi_has_encrypt(ll_i2sbi(inode))) + return -EOPNOTSUPP; + return llcrypt_ioctl_add_key(file, (void __user *)arg); + case LL_IOC_REMOVE_ENCRYPTION_KEY: + if (!ll_sbi_has_encrypt(ll_i2sbi(inode))) + return -EOPNOTSUPP; + return llcrypt_ioctl_remove_key(file, (void __user *)arg); + case LL_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS: + if (!ll_sbi_has_encrypt(ll_i2sbi(inode))) + return -EOPNOTSUPP; + return llcrypt_ioctl_remove_key_all_users(file, + (void __user *)arg); + case LL_IOC_GET_ENCRYPTION_KEY_STATUS: + if (!ll_sbi_has_encrypt(ll_i2sbi(inode))) + return -EOPNOTSUPP; + return llcrypt_ioctl_get_key_status(file, (void __user *)arg); +#endif + + case LL_IOC_UNLOCK_FOREIGN: { + struct dentry *dentry = file_dentry(file); + + /* if not a foreign symlink do nothing */ + if (ll_foreign_is_removable(dentry, true)) { + CDEBUG(D_INFO, + "prevent unlink of non-foreign file ("DFID")\n", + PFID(ll_inode2fid(inode))); + RETURN(-EOPNOTSUPP); + } + RETURN(0); + } + default: RETURN(obd_iocontrol(cmd, ll_i2dtexp(inode), 0, NULL, (void __user *)arg)); } } -#ifndef HAVE_FILE_LLSEEK_SIZE -static inline loff_t -llseek_execute(struct file *file, loff_t offset, loff_t maxsize) +loff_t ll_lseek(struct file *file, loff_t offset, int whence) { - if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) - return -EINVAL; - if (offset > maxsize) - return -EINVAL; + struct inode *inode = file_inode(file); + struct lu_env *env; + struct cl_io *io; + struct cl_lseek_io *lsio; + __u16 refcheck; + int rc; + loff_t retval; - if (offset != file->f_pos) { - file->f_pos = offset; - file->f_version = 0; - } - return offset; -} + ENTRY; -static loff_t -generic_file_llseek_size(struct file *file, loff_t offset, int origin, - loff_t maxsize, loff_t eof) -{ - struct inode *inode = file_inode(file); + env = cl_env_get(&refcheck); + if (IS_ERR(env)) + RETURN(PTR_ERR(env)); - switch (origin) { - case SEEK_END: - offset += eof; - break; - case SEEK_CUR: - /* - * Here we special-case the lseek(fd, 0, SEEK_CUR) - * position-querying operation. Avoid rewriting the "same" - * f_pos value back to the file because a concurrent read(), - * write() or lseek() might have altered it - */ - if (offset == 0) - return file->f_pos; - /* - * f_lock protects against read/modify/write race with other - * SEEK_CURs. Note that parallel writes and reads behave - * like SEEK_SET. - */ - inode_lock(inode); - offset = llseek_execute(file, file->f_pos + offset, maxsize); - inode_unlock(inode); - return offset; - case SEEK_DATA: - /* - * In the generic case the entire file is data, so as long as - * offset isn't at the end of the file then the offset is data. - */ - if (offset >= eof) - return -ENXIO; - break; - case SEEK_HOLE: - /* - * There is a virtual hole at the end of the file, so as long as - * offset isn't i_size or larger, return i_size. - */ - if (offset >= eof) - return -ENXIO; - offset = eof; - break; - } + io = vvp_env_thread_io(env); + io->ci_obj = ll_i2info(inode)->lli_clob; + ll_io_set_mirror(io, file); + + lsio = &io->u.ci_lseek; + lsio->ls_start = offset; + lsio->ls_whence = whence; + lsio->ls_result = -ENXIO; + + do { + rc = cl_io_init(env, io, CIT_LSEEK, io->ci_obj); + if (!rc) { + struct vvp_io *vio = vvp_env_io(env); + + vio->vui_fd = file->private_data; + rc = cl_io_loop(env, io); + } else { + rc = io->ci_result; + } + retval = rc ? 
: lsio->ls_result; + cl_io_fini(env, io); + } while (unlikely(io->ci_need_restart)); + + cl_env_put(env, &refcheck); - return llseek_execute(file, offset, maxsize); + RETURN(retval); } -#endif static loff_t ll_file_seek(struct file *file, loff_t offset, int origin) { struct inode *inode = file_inode(file); - loff_t retval, eof = 0; + loff_t retval = offset, eof = 0; + ktime_t kstart = ktime_get(); ENTRY; - retval = offset + ((origin == SEEK_END) ? i_size_read(inode) : - (origin == SEEK_CUR) ? file->f_pos : 0); + CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), to=%llu=%#llx(%d)\n", PFID(ll_inode2fid(inode)), inode, retval, retval, origin); - ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LLSEEK, 1); - if (origin == SEEK_END || origin == SEEK_HOLE || origin == SEEK_DATA) { + if (origin == SEEK_END) { retval = ll_glimpse_size(inode); if (retval != 0) RETURN(retval); eof = i_size_read(inode); } - retval = ll_generic_file_llseek_size(file, offset, origin, - ll_file_maxbytes(inode), eof); + if (origin == SEEK_HOLE || origin == SEEK_DATA) { + if (offset < 0) + return -ENXIO; + + /* flush local cache first if any */ + cl_sync_file_range(inode, offset, OBD_OBJECT_EOF, + CL_FSYNC_LOCAL, 0); + + retval = ll_lseek(file, offset, origin); + if (retval < 0) + return retval; + retval = vfs_setpos(file, retval, ll_file_maxbytes(inode)); + } else { + retval = generic_file_llseek_size(file, offset, origin, + ll_file_maxbytes(inode), eof); + } + if (retval >= 0) + ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LLSEEK, + ktime_us_delta(ktime_get(), kstart)); RETURN(retval); } @@ -4013,7 +4443,7 @@ static int ll_flush(struct file *file, fl_owner_t id) { struct inode *inode = file_inode(file); struct ll_inode_info *lli = ll_i2info(inode); - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); + struct ll_file_data *fd = file->private_data; int rc, err; LASSERT(!S_ISDIR(inode->i_mode)); @@ -4095,20 +4525,18 @@ int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync) struct inode *inode = dentry->d_inode; struct ll_inode_info *lli = ll_i2info(inode); struct ptlrpc_request *req; + ktime_t kstart = ktime_get(); int rc, err; ENTRY; - CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), start %lld, end %lld," - "datasync %d\n", + CDEBUG(D_VFSTRACE, + "VFS Op:inode="DFID"(%p), start %lld, end %lld, datasync %d\n", PFID(ll_inode2fid(inode)), inode, start, end, datasync); - ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC, 1); - /* fsync's caller has already called _fdata{sync,write}, we want * that IO to finish before calling the osc and mdc sync methods */ rc = filemap_write_and_wait_range(inode->i_mapping, start, end); - inode_lock(inode); /* catch async errors that were recorded back when async writeback * failed for pages in this mapping. 
*/ @@ -4131,7 +4559,7 @@ int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync) ptlrpc_req_finished(req); if (S_ISREG(inode->i_mode)) { - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); + struct ll_file_data *fd = file->private_data; bool cached; /* Sync metadata on MDT first, and then sync the cached data @@ -4149,7 +4577,9 @@ int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync) fd->fd_write_failed = false; } - inode_unlock(inode); + if (!rc) + ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC, + ktime_us_delta(ktime_get(), kstart)); RETURN(rc); } @@ -4167,6 +4597,7 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock) struct lustre_handle lockh = { 0 }; union ldlm_policy_data flock = { { 0 } }; int fl_type = file_lock->fl_type; + ktime_t kstart = ktime_get(); __u64 flags = 0; int rc; int rc2 = 0; @@ -4175,23 +4606,22 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock) CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID" file_lock=%p\n", PFID(ll_inode2fid(inode)), file_lock); - ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FLOCK, 1); - - if (file_lock->fl_flags & FL_FLOCK) { - LASSERT((cmd == F_SETLKW) || (cmd == F_SETLK)); - /* flocks are whole-file locks */ - flock.l_flock.end = OFFSET_MAX; - /* For flocks owner is determined by the local file desctiptor*/ - flock.l_flock.owner = (unsigned long)file_lock->fl_file; - } else if (file_lock->fl_flags & FL_POSIX) { - flock.l_flock.owner = (unsigned long)file_lock->fl_owner; - flock.l_flock.start = file_lock->fl_start; - flock.l_flock.end = file_lock->fl_end; - } else { - RETURN(-EINVAL); - } - flock.l_flock.pid = file_lock->fl_pid; + if (file_lock->fl_flags & FL_FLOCK) { + LASSERT((cmd == F_SETLKW) || (cmd == F_SETLK)); + /* flocks are whole-file locks */ + flock.l_flock.end = OFFSET_MAX; + /* For flocks owner is determined by the local file desctiptor*/ + flock.l_flock.owner = (unsigned long)file_lock->fl_file; + } else if (file_lock->fl_flags & FL_POSIX) { + flock.l_flock.owner = (unsigned long)file_lock->fl_owner; + flock.l_flock.start = file_lock->fl_start; + flock.l_flock.end = file_lock->fl_end; + } else { + RETURN(-EINVAL); + } + flock.l_flock.pid = file_lock->fl_pid; +#if defined(HAVE_LM_COMPARE_OWNER) || defined(lm_compare_owner) /* Somewhat ugly workaround for svc lockd. 
* lockd installs custom fl_lmops->lm_compare_owner that checks * for the fl_owner to be the same (which it always is on local node @@ -4201,6 +4631,7 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock) * pointer space for current->files are not intersecting */ if (file_lock->fl_lmops && file_lock->fl_lmops->lm_compare_owner) flock.l_flock.owner = (unsigned long)file_lock->fl_pid; +#endif switch (fl_type) { case F_RDLCK: @@ -4293,7 +4724,10 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock) ll_finish_md_op_data(op_data); - RETURN(rc); + if (!rc) + ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FLOCK, + ktime_us_delta(ktime_get(), kstart)); + RETURN(rc); } int ll_get_fid_by_name(struct inode *parent, const char *name, @@ -4324,7 +4758,7 @@ int ll_get_fid_by_name(struct inode *parent, const char *name, *fid = body->mbo_fid1; if (inode != NULL) - rc = ll_prep_inode(inode, req, parent->i_sb, NULL); + rc = ll_prep_inode(inode, &req->rq_pill, parent->i_sb, NULL); out_req: ptlrpc_req_finished(req); RETURN(rc); @@ -4390,9 +4824,26 @@ int ll_migrate(struct inode *parent, struct file *file, struct lmv_user_md *lum, * by checking the migrate FID against the FID of the * filesystem root. */ - if (child_inode == parent->i_sb->s_root->d_inode) + if (is_root_inode(child_inode)) GOTO(out_iput, rc = -EINVAL); + if (IS_ENCRYPTED(child_inode)) { + rc = llcrypt_get_encryption_info(child_inode); + if (rc) + GOTO(out_iput, rc); + if (!llcrypt_has_encryption_key(child_inode)) { + CDEBUG(D_SEC, "no enc key for "DFID"\n", + PFID(ll_inode2fid(child_inode))); + GOTO(out_iput, rc = -ENOKEY); + } + if (unlikely(!llcrypt_policy_has_filename_enc(child_inode))) { + CDEBUG(D_SEC, + "cannot migrate old format encrypted "DFID", please move to new enc dir first\n", + PFID(ll_inode2fid(child_inode))); + GOTO(out_iput, rc = -EUCLEAN); + } + } + op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen, child_inode->i_mode, LUSTRE_OPC_ANY, NULL); if (IS_ERR(op_data)) @@ -4483,7 +4934,7 @@ out_iput: static int ll_file_noflock(struct file *file, int cmd, struct file_lock *file_lock) { - struct ll_file_data *fd = LUSTRE_FPRIVATE(file); + struct ll_file_data *fd = file->private_data; ENTRY; /* @@ -4529,24 +4980,24 @@ int ll_have_md_lock(struct inode *inode, __u64 *bits, enum ldlm_mode l_req_mode) flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING | LDLM_FL_TEST_LOCK; for (i = 0; i < MDS_INODELOCK_NUMBITS && *bits != 0; i++) { - policy.l_inodebits.bits = *bits & (1 << i); + policy.l_inodebits.bits = *bits & BIT(i); if (policy.l_inodebits.bits == 0) continue; - if (md_lock_match(ll_i2mdexp(inode), flags, fid, LDLM_IBITS, - &policy, mode, &lockh)) { - struct ldlm_lock *lock; - - lock = ldlm_handle2lock(&lockh); - if (lock) { - *bits &= - ~(lock->l_policy_data.l_inodebits.bits); - LDLM_LOCK_PUT(lock); - } else { - *bits &= ~policy.l_inodebits.bits; - } - } - } + if (md_lock_match(ll_i2mdexp(inode), flags, fid, LDLM_IBITS, + &policy, mode, &lockh)) { + struct ldlm_lock *lock; + + lock = ldlm_handle2lock(&lockh); + if (lock) { + *bits &= + ~(lock->l_policy_data.l_inodebits.bits); + LDLM_LOCK_PUT(lock); + } else { + *bits &= ~policy.l_inodebits.bits; + } + } + } RETURN(*bits == 0); } @@ -4596,6 +5047,7 @@ static int ll_inode_revalidate_fini(struct inode *inode, int rc) static int ll_inode_revalidate(struct dentry *dentry, enum ldlm_intent_flags op) { + struct inode *parent; struct inode *inode = dentry->d_inode; struct obd_export *exp = ll_i2mdexp(inode); struct lookup_intent oit = { @@ 
-4603,18 +5055,30 @@ static int ll_inode_revalidate(struct dentry *dentry, enum ldlm_intent_flags op) }; struct ptlrpc_request *req = NULL; struct md_op_data *op_data; + const char *name = NULL; + size_t namelen = 0; int rc = 0; ENTRY; CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p),name=%s\n", PFID(ll_inode2fid(inode)), inode, dentry->d_name.name); - /* Call getattr by fid, so do not provide name at all. */ - op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, 0, 0, + if (exp_connect_flags2(exp) & OBD_CONNECT2_GETATTR_PFID) { + parent = dentry->d_parent->d_inode; + name = dentry->d_name.name; + namelen = dentry->d_name.len; + } else { + parent = inode; + } + + op_data = ll_prep_md_op_data(NULL, parent, inode, name, namelen, 0, LUSTRE_OPC_ANY, NULL); if (IS_ERR(op_data)) RETURN(PTR_ERR(op_data)); + /* Call getattr by fid */ + if (exp_connect_flags2(exp) & OBD_CONNECT2_GETATTR_PFID) + op_data->op_flags = MF_GETATTR_BY_FID; rc = md_intent_lock(exp, op_data, &oit, &req, &ll_md_blocking_ast, 0); ll_finish_md_op_data(op_data); if (rc < 0) { @@ -4632,11 +5096,8 @@ static int ll_inode_revalidate(struct dentry *dentry, enum ldlm_intent_flags op) * do_lookup() -> ll_revalidate_it(). We cannot use d_drop * here to preserve get_cwd functionality on 2.6. * Bug 10503 */ - if (!dentry->d_inode->i_nlink) { - ll_lock_dcache(inode); - d_lustre_invalidate(dentry, 0); - ll_unlock_dcache(inode); - } + if (!dentry->d_inode->i_nlink) + d_lustre_invalidate(dentry); ll_lookup_finish_locks(&oit, dentry); out: @@ -4674,26 +5135,69 @@ static int ll_merge_md_attr(struct inode *inode) RETURN(0); } -int ll_getattr_dentry(struct dentry *de, struct kstat *stat) +int ll_getattr_dentry(struct dentry *de, struct kstat *stat, u32 request_mask, + unsigned int flags, bool foreign) { struct inode *inode = de->d_inode; struct ll_sb_info *sbi = ll_i2sbi(inode); struct ll_inode_info *lli = ll_i2info(inode); + struct inode *dir = de->d_parent->d_inode; + bool need_glimpse = true; + ktime_t kstart = ktime_get(); int rc; - ll_stats_ops_tally(sbi, LPROC_LL_GETATTR, 1); + /* The OST object(s) determine the file size, blocks and mtime. */ + if (!(request_mask & STATX_SIZE || request_mask & STATX_BLOCKS || + request_mask & STATX_MTIME)) + need_glimpse = false; + + if (dentry_may_statahead(dir, de)) + ll_start_statahead(dir, de, need_glimpse && + !(flags & AT_STATX_DONT_SYNC)); + + if (flags & AT_STATX_DONT_SYNC) + GOTO(fill_attr, rc = 0); rc = ll_inode_revalidate(de, IT_GETATTR); if (rc < 0) RETURN(rc); - if (S_ISREG(inode->i_mode)) { + /* foreign file/dir are always of zero length, so don't + * need to validate size. + */ + if (S_ISREG(inode->i_mode) && !foreign) { bool cached; - rc = pcc_inode_getattr(inode, &cached); + if (!need_glimpse) + GOTO(fill_attr, rc); + + rc = pcc_inode_getattr(inode, request_mask, flags, &cached); if (cached && rc < 0) RETURN(rc); + if (cached) + GOTO(fill_attr, rc); + + /* + * If the returned attr is masked with OBD_MD_FLSIZE & + * OBD_MD_FLBLOCKS & OBD_MD_FLMTIME, it means that the file size + * or blocks obtained from MDT is strictly correct, and the file + * is usually not being modified by clients, and the [a|m|c]time + * got from MDT is also strictly correct. + * Under this circumstance, it does not need to send glimpse + * RPCs to OSTs for file attributes such as the size and blocks. 
+ */ + if (lli->lli_attr_valid & OBD_MD_FLSIZE && + lli->lli_attr_valid & OBD_MD_FLBLOCKS && + lli->lli_attr_valid & OBD_MD_FLMTIME) { + inode->i_mtime.tv_sec = lli->lli_mtime; + if (lli->lli_attr_valid & OBD_MD_FLATIME) + inode->i_atime.tv_sec = lli->lli_atime; + if (lli->lli_attr_valid & OBD_MD_FLCTIME) + inode->i_ctime.tv_sec = lli->lli_ctime; + GOTO(fill_attr, rc); + } + /* In case of restore, the MDT has the right size and has * already send it back without granting the layout lock, * inode is up-to-date so glimpse is useless. @@ -4701,24 +5205,29 @@ int ll_getattr_dentry(struct dentry *de, struct kstat *stat) * restore the MDT holds the layout lock so the glimpse will * block up to the end of restore (getattr will block) */ - if (!cached && !ll_file_test_flag(lli, LLIF_FILE_RESTORING)) { + if (!test_bit(LLIF_FILE_RESTORING, &lli->lli_flags)) { rc = ll_glimpse_size(inode); if (rc < 0) RETURN(rc); } } else { /* If object isn't regular a file then don't validate size. */ - if (ll_dir_striped(inode)) { + /* foreign dir is not striped dir */ + if (ll_dir_striped(inode) && !foreign) { rc = ll_merge_md_attr(inode); if (rc < 0) RETURN(rc); } - inode->i_atime.tv_sec = lli->lli_atime; - inode->i_mtime.tv_sec = lli->lli_mtime; - inode->i_ctime.tv_sec = lli->lli_ctime; + if (lli->lli_attr_valid & OBD_MD_FLATIME) + inode->i_atime.tv_sec = lli->lli_atime; + if (lli->lli_attr_valid & OBD_MD_FLMTIME) + inode->i_mtime.tv_sec = lli->lli_mtime; + if (lli->lli_attr_valid & OBD_MD_FLCTIME) + inode->i_ctime.tv_sec = lli->lli_ctime; } +fill_attr: OBD_FAIL_TIMEOUT(OBD_FAIL_GETATTR_DELAY, 30); if (ll_need_32bit_api(sbi)) { @@ -4731,31 +5240,161 @@ int ll_getattr_dentry(struct dentry *de, struct kstat *stat) stat->rdev = inode->i_rdev; } - stat->mode = inode->i_mode; + /* foreign symlink to be exposed as a real symlink */ + if (!foreign) + stat->mode = inode->i_mode; + else + stat->mode = (inode->i_mode & ~S_IFMT) | S_IFLNK; + stat->uid = inode->i_uid; stat->gid = inode->i_gid; stat->atime = inode->i_atime; stat->mtime = inode->i_mtime; stat->ctime = inode->i_ctime; - stat->blksize = sbi->ll_stat_blksize ?: 1 << inode->i_blkbits; + /* stat->blksize is used to tell about preferred IO size */ + if (sbi->ll_stat_blksize) + stat->blksize = sbi->ll_stat_blksize; + else if (S_ISREG(inode->i_mode)) + stat->blksize = 1 << min(PTLRPC_MAX_BRW_BITS + 1, + LL_MAX_BLKSIZE_BITS); + else + stat->blksize = 1 << inode->i_sb->s_blocksize_bits; stat->nlink = inode->i_nlink; stat->size = i_size_read(inode); stat->blocks = inode->i_blocks; - return 0; +#ifdef HAVE_INODEOPS_ENHANCED_GETATTR + if (flags & AT_STATX_DONT_SYNC) { + if (stat->size == 0 && + lli->lli_attr_valid & OBD_MD_FLLAZYSIZE) + stat->size = lli->lli_lazysize; + if (stat->blocks == 0 && + lli->lli_attr_valid & OBD_MD_FLLAZYBLOCKS) + stat->blocks = lli->lli_lazyblocks; + } + + if (lli->lli_attr_valid & OBD_MD_FLBTIME) { + stat->result_mask |= STATX_BTIME; + stat->btime.tv_sec = lli->lli_btime; + } + + stat->attributes_mask = STATX_ATTR_IMMUTABLE | STATX_ATTR_APPEND; + stat->attributes |= ll_inode_to_ext_flags(inode->i_flags); + stat->result_mask &= request_mask; +#endif + + ll_stats_ops_tally(sbi, LPROC_LL_GETATTR, + ktime_us_delta(ktime_get(), kstart)); + + return 0; } #ifdef HAVE_INODEOPS_ENHANCED_GETATTR int ll_getattr(const struct path *path, struct kstat *stat, u32 request_mask, unsigned int flags) { - struct dentry *de = path->dentry; + return ll_getattr_dentry(path->dentry, stat, request_mask, flags, + false); +} #else int ll_getattr(struct vfsmount *mnt, 
struct dentry *de, struct kstat *stat) { + return ll_getattr_dentry(de, stat, STATX_BASIC_STATS, + AT_STATX_SYNC_AS_STAT, false); +} #endif - return ll_getattr_dentry(de, stat); + +int cl_falloc(struct inode *inode, int mode, loff_t offset, loff_t len) +{ + struct lu_env *env; + struct cl_io *io; + __u16 refcheck; + int rc; + loff_t size = i_size_read(inode); + + ENTRY; + + env = cl_env_get(&refcheck); + if (IS_ERR(env)) + RETURN(PTR_ERR(env)); + + io = vvp_env_thread_io(env); + io->ci_obj = ll_i2info(inode)->lli_clob; + io->ci_verify_layout = 1; + io->u.ci_setattr.sa_parent_fid = lu_object_fid(&io->ci_obj->co_lu); + io->u.ci_setattr.sa_falloc_mode = mode; + io->u.ci_setattr.sa_falloc_offset = offset; + io->u.ci_setattr.sa_falloc_end = offset + len; + io->u.ci_setattr.sa_subtype = CL_SETATTR_FALLOCATE; + if (io->u.ci_setattr.sa_falloc_end > size) { + loff_t newsize = io->u.ci_setattr.sa_falloc_end; + + /* Check new size against VFS/VM file size limit and rlimit */ + rc = inode_newsize_ok(inode, newsize); + if (rc) + goto out; + if (newsize > ll_file_maxbytes(inode)) { + CDEBUG(D_INODE, "file size too large %llu > %llu\n", + (unsigned long long)newsize, + ll_file_maxbytes(inode)); + rc = -EFBIG; + goto out; + } + } + + do { + rc = cl_io_init(env, io, CIT_SETATTR, io->ci_obj); + if (!rc) + rc = cl_io_loop(env, io); + else + rc = io->ci_result; + cl_io_fini(env, io); + } while (unlikely(io->ci_need_restart)); + +out: + cl_env_put(env, &refcheck); + RETURN(rc); +} + +long ll_fallocate(struct file *filp, int mode, loff_t offset, loff_t len) +{ + struct inode *inode = filp->f_path.dentry->d_inode; + int rc; + + if (offset < 0 || len <= 0) + RETURN(-EINVAL); + /* + * Encrypted inodes can't handle collapse range or zero range or insert + * range since we would need to re-encrypt blocks with a different IV or + * XTS tweak (which are based on the logical block number). + * Similar to what ext4 does. + */ + if (IS_ENCRYPTED(inode) && + (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE | + FALLOC_FL_ZERO_RANGE))) + RETURN(-EOPNOTSUPP); + + /* + * mode == 0 (which is standard prealloc) and PUNCH is supported + * Rest of mode options are not supported yet. + */ + if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) + RETURN(-EOPNOTSUPP); + + ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FALLOCATE, 1); + + rc = cl_falloc(inode, mode, offset, len); + /* + * ENOTSUPP (524) is an NFSv3 specific error code erroneously + * used by Lustre in several places. Retuning it here would + * confuse applications that explicity test for EOPNOTSUPP + * (95) and fall back to ftruncate(). 
+ */ + if (rc == -ENOTSUPP) + rc = -EOPNOTSUPP; + + RETURN(rc); } static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, @@ -4784,6 +5423,15 @@ static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, rc = ll_do_fiemap(inode, fiemap, num_bytes); + if (IS_ENCRYPTED(inode)) { + int i; + + for (i = 0; i < fiemap->fm_mapped_extents; i++) + fiemap->fm_extents[i].fe_flags |= + FIEMAP_EXTENT_DATA_ENCRYPTED | + FIEMAP_EXTENT_ENCODED; + } + fieinfo->fi_flags = fiemap->fm_flags; fieinfo->fi_extents_mapped = fiemap->fm_mapped_extents; if (extent_count > 0 && @@ -4796,147 +5444,31 @@ out: return rc; } -struct posix_acl *ll_get_acl(struct inode *inode, int type) -{ - struct ll_inode_info *lli = ll_i2info(inode); - struct posix_acl *acl = NULL; - ENTRY; - - spin_lock(&lli->lli_lock); - /* VFS' acl_permission_check->check_acl will release the refcount */ - acl = posix_acl_dup(lli->lli_posix_acl); - spin_unlock(&lli->lli_lock); - - RETURN(acl); -} - -#ifdef HAVE_IOP_SET_ACL -#ifdef CONFIG_FS_POSIX_ACL -int ll_set_acl(struct inode *inode, struct posix_acl *acl, int type) -{ - struct ll_sb_info *sbi = ll_i2sbi(inode); - struct ptlrpc_request *req = NULL; - const char *name = NULL; - char *value = NULL; - size_t value_size = 0; - int rc = 0; - ENTRY; - - switch (type) { - case ACL_TYPE_ACCESS: - name = XATTR_NAME_POSIX_ACL_ACCESS; - if (acl) - rc = posix_acl_update_mode(inode, &inode->i_mode, &acl); - break; - - case ACL_TYPE_DEFAULT: - name = XATTR_NAME_POSIX_ACL_DEFAULT; - if (!S_ISDIR(inode->i_mode)) - rc = acl ? -EACCES : 0; - break; - - default: - rc = -EINVAL; - break; - } - if (rc) - return rc; - - if (acl) { - value_size = posix_acl_xattr_size(acl->a_count); - value = kmalloc(value_size, GFP_NOFS); - if (value == NULL) - GOTO(out, rc = -ENOMEM); - - rc = posix_acl_to_xattr(&init_user_ns, acl, value, value_size); - if (rc < 0) - GOTO(out_value, rc); - } - - rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode), - value ? 
OBD_MD_FLXATTR : OBD_MD_FLXATTRRM, - name, value, value_size, 0, 0, &req); - - ptlrpc_req_finished(req); -out_value: - kfree(value); -out: - if (rc) - forget_cached_acl(inode, type); - else - set_cached_acl(inode, type, acl); - RETURN(rc); -} -#endif /* CONFIG_FS_POSIX_ACL */ -#endif /* HAVE_IOP_SET_ACL */ - -#ifndef HAVE_GENERIC_PERMISSION_2ARGS -static int -# ifdef HAVE_GENERIC_PERMISSION_4ARGS -ll_check_acl(struct inode *inode, int mask, unsigned int flags) -# else -ll_check_acl(struct inode *inode, int mask) -# endif -{ -# ifdef CONFIG_FS_POSIX_ACL - struct posix_acl *acl; - int rc; - ENTRY; - -# ifdef HAVE_GENERIC_PERMISSION_4ARGS - if (flags & IPERM_FLAG_RCU) - return -ECHILD; -# endif - acl = ll_get_acl(inode, ACL_TYPE_ACCESS); - - if (!acl) - RETURN(-EAGAIN); - - rc = posix_acl_permission(inode, acl, mask); - posix_acl_release(acl); - - RETURN(rc); -# else /* !CONFIG_FS_POSIX_ACL */ - return -EAGAIN; -# endif /* CONFIG_FS_POSIX_ACL */ -} -#endif /* HAVE_GENERIC_PERMISSION_2ARGS */ - -#ifdef HAVE_GENERIC_PERMISSION_4ARGS -int ll_inode_permission(struct inode *inode, int mask, unsigned int flags) -#else -# ifdef HAVE_INODE_PERMISION_2ARGS int ll_inode_permission(struct inode *inode, int mask) -# else -int ll_inode_permission(struct inode *inode, int mask, struct nameidata *nd) -# endif -#endif { int rc = 0; struct ll_sb_info *sbi; struct root_squash_info *squash; struct cred *cred = NULL; const struct cred *old_cred = NULL; - cfs_cap_t cap; bool squash_id = false; + ktime_t kstart = ktime_get(); + ENTRY; -#ifdef MAY_NOT_BLOCK if (mask & MAY_NOT_BLOCK) return -ECHILD; -#elif defined(HAVE_GENERIC_PERMISSION_4ARGS) - if (flags & IPERM_FLAG_RCU) - return -ECHILD; -#endif - /* as root inode are NOT getting validated in lookup operation, - * need to do it before permission check. */ + /* + * as root inode are NOT getting validated in lookup operation, + * need to do it before permission check. 
+ */ - if (inode == inode->i_sb->s_root->d_inode) { + if (is_root_inode(inode)) { rc = ll_inode_revalidate(inode->i_sb->s_root, IT_LOOKUP); - if (rc) - RETURN(rc); - } + if (rc) + RETURN(rc); + } CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), inode mode %x mask %o\n", PFID(ll_inode2fid(inode)), inode, inode->i_mode, mask); @@ -4962,26 +5494,28 @@ int ll_inode_permission(struct inode *inode, int mask, struct nameidata *nd) cred->fsuid = make_kuid(&init_user_ns, squash->rsi_uid); cred->fsgid = make_kgid(&init_user_ns, squash->rsi_gid); - for (cap = 0; cap < sizeof(cfs_cap_t) * 8; cap++) { - if ((1 << cap) & CFS_CAP_FS_MASK) - cap_lower(cred->cap_effective, cap); - } + cred->cap_effective = cap_drop_nfsd_set(cred->cap_effective); + cred->cap_effective = cap_drop_fs_set(cred->cap_effective); + old_cred = override_creds(cred); } - ll_stats_ops_tally(sbi, LPROC_LL_INODE_PERM, 1); - rc = ll_generic_permission(inode, mask, flags, ll_check_acl); + rc = generic_permission(inode, mask); /* restore current process's credentials and FS capability */ if (squash_id) { revert_creds(old_cred); put_cred(cred); } + if (!rc) + ll_stats_ops_tally(sbi, LPROC_LL_INODE_PERM, + ktime_us_delta(ktime_get(), kstart)); + RETURN(rc); } /* -o localflock - only provides locally consistent flock locks */ -struct file_operations ll_file_operations = { +static const struct file_operations ll_file_operations = { #ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER # ifdef HAVE_SYNC_READ_WRITE .read = new_sync_read, @@ -5000,12 +5534,17 @@ struct file_operations ll_file_operations = { .release = ll_file_release, .mmap = ll_file_mmap, .llseek = ll_file_seek, - .splice_read = ll_file_splice_read, +#ifndef HAVE_DEFAULT_FILE_SPLICE_READ_EXPORT + .splice_read = generic_file_splice_read, +#else + .splice_read = pcc_file_splice_read, +#endif .fsync = ll_fsync, - .flush = ll_flush + .flush = ll_flush, + .fallocate = ll_fallocate, }; -struct file_operations ll_file_operations_flock = { +static const struct file_operations ll_file_operations_flock = { #ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER # ifdef HAVE_SYNC_READ_WRITE .read = new_sync_read, @@ -5024,15 +5563,20 @@ struct file_operations ll_file_operations_flock = { .release = ll_file_release, .mmap = ll_file_mmap, .llseek = ll_file_seek, - .splice_read = ll_file_splice_read, +#ifndef HAVE_DEFAULT_FILE_SPLICE_READ_EXPORT + .splice_read = generic_file_splice_read, +#else + .splice_read = pcc_file_splice_read, +#endif .fsync = ll_fsync, .flush = ll_flush, .flock = ll_file_flock, - .lock = ll_file_flock + .lock = ll_file_flock, + .fallocate = ll_fallocate, }; /* These are for -o noflock - to return ENOSYS on flock calls */ -struct file_operations ll_file_operations_noflock = { +static const struct file_operations ll_file_operations_noflock = { #ifdef HAVE_FILE_OPERATIONS_READ_WRITE_ITER # ifdef HAVE_SYNC_READ_WRITE .read = new_sync_read, @@ -5051,14 +5595,19 @@ struct file_operations ll_file_operations_noflock = { .release = ll_file_release, .mmap = ll_file_mmap, .llseek = ll_file_seek, - .splice_read = ll_file_splice_read, +#ifndef HAVE_DEFAULT_FILE_SPLICE_READ_EXPORT + .splice_read = generic_file_splice_read, +#else + .splice_read = pcc_file_splice_read, +#endif .fsync = ll_fsync, .flush = ll_flush, .flock = ll_file_noflock, - .lock = ll_file_noflock + .lock = ll_file_noflock, + .fallocate = ll_fallocate, }; -struct inode_operations ll_file_inode_operations = { +const struct inode_operations ll_file_inode_operations = { .setattr = ll_setattr, .getattr = ll_getattr, .permission = 
ll_inode_permission, @@ -5069,14 +5618,24 @@ struct inode_operations ll_file_inode_operations = { #endif .listxattr = ll_listxattr, .fiemap = ll_fiemap, -#ifdef HAVE_IOP_GET_ACL .get_acl = ll_get_acl, -#endif #ifdef HAVE_IOP_SET_ACL .set_acl = ll_set_acl, #endif }; +const struct file_operations *ll_select_file_operations(struct ll_sb_info *sbi) +{ + const struct file_operations *fops = &ll_file_operations_noflock; + + if (sbi->ll_flags & LL_SBI_FLOCK) + fops = &ll_file_operations_flock; + else if (sbi->ll_flags & LL_SBI_LOCALFLOCK) + fops = &ll_file_operations; + + return fops; +} + int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf) { struct ll_inode_info *lli = ll_i2info(inode); @@ -5126,7 +5685,7 @@ int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf) out: cl_env_put(env, &refcheck); - RETURN(rc); + RETURN(rc < 0 ? rc : 0); } /* Fetch layout from MDT with getxattr request, if it's not ready yet */ @@ -5219,7 +5778,9 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode, lock = ldlm_handle2lock(lockh); LASSERT(lock != NULL); - LASSERT(ldlm_has_layout(lock)); + + if (!ldlm_has_layout(lock)) + GOTO(out, rc = -EAGAIN); LDLM_DEBUG(lock, "file "DFID"(%p) being reconfigured", PFID(&lli->lli_fid), inode);
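
The project-quota plumbing added above (ll_ioctl_fssetxattr()/ll_set_project()) is driven from userspace through the generic FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR interface, the same struct fsxattr ioctls used by ext4 and XFS. A minimal caller-side sketch, not part of this patch, with error reporting trimmed (the helper name is illustrative):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>	/* struct fsxattr, FS_IOC_FS[GS]ETXATTR, FS_XFLAG_PROJINHERIT */

	/* Assign a project ID to a directory and mark it inheritable; on Lustre
	 * the client forwards this to the MDT via md_setattr() in ll_set_project(). */
	static int set_project(const char *path, unsigned int projid)
	{
		struct fsxattr fa;
		int rc, fd = open(path, O_RDONLY | O_DIRECTORY);

		if (fd < 0)
			return -1;
		rc = ioctl(fd, FS_IOC_FSGETXATTR, &fa);
		if (rc == 0) {
			fa.fsx_projid = projid;
			fa.fsx_xflags |= FS_XFLAG_PROJINHERIT;
			rc = ioctl(fd, FS_IOC_FSSETXATTR, &fa);
		}
		close(fd);
		return rc;
	}

Similarly, the new CIT_LSEEK path reached from ll_file_seek()/ll_lseek() is what backs plain lseek(2) with SEEK_DATA/SEEK_HOLE; the client flushes its local cache (CL_FSYNC_LOCAL) first so the answer reflects recent buffered writes. A small probe loop, again illustrative only:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <unistd.h>

	/* Print the data extents of a sparse file by alternating SEEK_DATA
	 * and SEEK_HOLE until lseek() fails (errno == ENXIO past the last data). */
	static void dump_extents(int fd)
	{
		off_t pos = 0, hole;

		for (;;) {
			pos = lseek(fd, pos, SEEK_DATA);
			if (pos < 0)
				break;
			hole = lseek(fd, pos, SEEK_HOLE);
			if (hole < 0)
				break;
			printf("data: [%lld, %lld)\n", (long long)pos, (long long)hole);
			pos = hole;
		}
	}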