*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* lustre/llite/llite_lib.c
*
#define DEBUG_SUBSYSTEM S_LLITE
+#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/statfs.h>
#include <linux/user_namespace.h>
#include <linux/delay.h>
#include <linux/uidgid.h>
-#include <linux/security.h>
+#include <linux/fs_struct.h>
+#ifndef HAVE_CPUS_READ_LOCK
+#include <libcfs/linux/linux-cpu.h>
+#endif
#include <uapi/linux/lustre/lustre_ioctl.h>
#ifdef HAVE_UAPI_LINUX_MOUNT_H
#include <uapi/linux/mount.h>
#define log2(n) ffz(~(n))
#endif
+/**
+ * If only one core is visible to Lustre, this returns 0 and async
+ * readahead is disabled. Otherwise, to avoid massive oversubscription,
+ * use 1/2 of the active cores as the default maximum number of async
+ * readahead requests.
+ */
+static inline unsigned int ll_get_ra_async_max_active(void)
+{
+ return cfs_cpt_weight(cfs_cpt_tab, CFS_CPT_ANY) >> 1;
+}
+
static struct ll_sb_info *ll_init_sbi(void)
{
struct ll_sb_info *sbi = NULL;
pages = si.totalram - si.totalhigh;
lru_page_max = pages / 2;
- sbi->ll_ra_info.ra_async_max_active = 0;
+ sbi->ll_ra_info.ra_async_max_active = ll_get_ra_async_max_active();
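+ /* the readahead workqueue is bound to the Lustre CPU partition;
+ * note cfs_cpt_bind_workqueue() returns an ERR_PTR on failure,
+ * not NULL, hence the IS_ERR() check below
+ */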
sbi->ll_ra_info.ll_readahead_wq =
- alloc_workqueue("ll-readahead-wq", WQ_UNBOUND,
- sbi->ll_ra_info.ra_async_max_active);
- if (!sbi->ll_ra_info.ll_readahead_wq)
- GOTO(out_pcc, rc = -ENOMEM);
+ cfs_cpt_bind_workqueue("ll-readahead-wq", cfs_cpt_tab,
+ 0, CFS_CPT_ANY,
+ sbi->ll_ra_info.ra_async_max_active);
+ if (IS_ERR(sbi->ll_ra_info.ll_readahead_wq))
+ GOTO(out_pcc, rc = PTR_ERR(sbi->ll_ra_info.ll_readahead_wq));
/* initialize ll_cache data */
sbi->ll_cache = cl_cache_init(lru_page_max);
if (sbi->ll_cache == NULL)
GOTO(out_destroy_ra, rc = -ENOMEM);
- sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
- SBI_DEFAULT_READAHEAD_MAX);
+ /* initialize foreign symlink prefix path */
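+ /* sizeof("/mnt/") includes the trailing NUL, so the allocation,
+ * the copy and the recorded size all account for it
+ */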
+ OBD_ALLOC(sbi->ll_foreign_symlink_prefix, sizeof("/mnt/"));
+ if (sbi->ll_foreign_symlink_prefix == NULL)
+ GOTO(out_destroy_ra, rc = -ENOMEM);
+ memcpy(sbi->ll_foreign_symlink_prefix, "/mnt/", sizeof("/mnt/"));
+ sbi->ll_foreign_symlink_prefix_size = sizeof("/mnt/");
+
+ /* initialize foreign symlink upcall path, none by default */
+ OBD_ALLOC(sbi->ll_foreign_symlink_upcall, sizeof("none"));
+ if (sbi->ll_foreign_symlink_upcall == NULL)
+ GOTO(out_destroy_ra, rc = -ENOMEM);
+ memcpy(sbi->ll_foreign_symlink_upcall, "none", sizeof("none"));
+ sbi->ll_foreign_symlink_upcall_items = NULL;
+ sbi->ll_foreign_symlink_upcall_nb_items = 0;
+ init_rwsem(&sbi->ll_foreign_symlink_sem);
+ /* foreign symlink support (LL_SBI_FOREIGN_SYMLINK in ll_flags)
+ * is not enabled by default
+ */
+
+ sbi->ll_ra_info.ra_max_pages =
+ min(pages / 32, SBI_DEFAULT_READ_AHEAD_MAX);
+ sbi->ll_ra_info.ra_max_pages_per_file =
+ min(sbi->ll_ra_info.ra_max_pages / 4,
+ SBI_DEFAULT_READ_AHEAD_PER_FILE_MAX);
sbi->ll_ra_info.ra_async_pages_per_file_threshold =
sbi->ll_ra_info.ra_max_pages_per_file;
- sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
+ sbi->ll_ra_info.ra_range_pages = SBI_DEFAULT_RA_RANGE_PAGES;
sbi->ll_ra_info.ra_max_read_ahead_whole_pages = -1;
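+ /* number of async readahead requests currently in flight */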
+ atomic_set(&sbi->ll_ra_info.ra_async_inflight, 0);
sbi->ll_flags |= LL_SBI_VERBOSE;
#ifdef ENABLE_CHECKSUM
sbi->ll_flags |= LL_SBI_AGL_ENABLED;
sbi->ll_flags |= LL_SBI_FAST_READ;
sbi->ll_flags |= LL_SBI_TINY_WRITE;
+ ll_sbi_set_encrypt(sbi, true);
/* root squash */
sbi->ll_squash.rsi_uid = 0;
/* Per-filesystem file heat */
sbi->ll_heat_decay_weight = SBI_DEFAULT_HEAT_DECAY_WEIGHT;
sbi->ll_heat_period_second = SBI_DEFAULT_HEAT_PERIOD_SECOND;
+
+ /* Per-fs open heat level before requesting open lock */
+ sbi->ll_oc_thrsh_count = SBI_DEFAULT_OPENCACHE_THRESHOLD_COUNT;
+ sbi->ll_oc_max_ms = SBI_DEFAULT_OPENCACHE_THRESHOLD_MAX_MS;
+ sbi->ll_oc_thrsh_ms = SBI_DEFAULT_OPENCACHE_THRESHOLD_MS;
RETURN(sbi);
out_destroy_ra:
+ if (sbi->ll_foreign_symlink_prefix)
+ OBD_FREE(sbi->ll_foreign_symlink_prefix, sizeof("/mnt/"));
+ if (sbi->ll_cache) {
+ cl_cache_decref(sbi->ll_cache);
+ sbi->ll_cache = NULL;
+ }
destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
out_pcc:
pcc_super_fini(&sbi->ll_pcc_super);
cl_cache_decref(sbi->ll_cache);
sbi->ll_cache = NULL;
}
+ if (sbi->ll_foreign_symlink_prefix) {
+ OBD_FREE(sbi->ll_foreign_symlink_prefix,
+ sbi->ll_foreign_symlink_prefix_size);
+ sbi->ll_foreign_symlink_prefix = NULL;
+ }
+ if (sbi->ll_foreign_symlink_upcall) {
+ OBD_FREE(sbi->ll_foreign_symlink_upcall,
+ strlen(sbi->ll_foreign_symlink_upcall) + 1);
+ sbi->ll_foreign_symlink_upcall = NULL;
+ }
+ if (sbi->ll_foreign_symlink_upcall_items) {
+ int i;
+ int nb_items = sbi->ll_foreign_symlink_upcall_nb_items;
+ struct ll_foreign_symlink_upcall_item *items =
+ sbi->ll_foreign_symlink_upcall_items;
+
+ for (i = 0; i < nb_items; i++)
+ if (items[i].type == STRING_TYPE)
+ OBD_FREE(items[i].string, items[i].size);
+
+ OBD_FREE_LARGE(items, nb_items *
+ sizeof(struct ll_foreign_symlink_upcall_item));
+ sbi->ll_foreign_symlink_upcall_items = NULL;
+ }
pcc_super_fini(&sbi->ll_pcc_super);
OBD_FREE(sbi, sizeof(*sbi));
}
OBD_CONNECT2_INC_XID |
OBD_CONNECT2_LSOM |
OBD_CONNECT2_ASYNC_DISCARD |
- OBD_CONNECT2_PCC;
+ OBD_CONNECT2_PCC |
+ OBD_CONNECT2_CRUSH | OBD_CONNECT2_LSEEK |
+ OBD_CONNECT2_GETATTR_PFID |
+ OBD_CONNECT2_DOM_LVB |
+ OBD_CONNECT2_REP_MBITS;
#ifdef HAVE_LRU_RESIZE_SUPPORT
if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
#endif
-#ifdef CONFIG_LUSTRE_FS_POSIX_ACL
- data->ocd_connect_flags |= OBD_CONNECT_ACL | OBD_CONNECT_UMASK |
- OBD_CONNECT_LARGE_ACL;
-#endif
+ data->ocd_connect_flags |= OBD_CONNECT_ACL_FLAGS;
data->ocd_cksum_types = obd_cksum_types_supported_client();
*/
sb->s_flags |= SB_NOSEC;
#endif
-
- if (sbi->ll_flags & LL_SBI_FLOCK)
- sbi->ll_fop = &ll_file_operations_flock;
- else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
- sbi->ll_fop = &ll_file_operations;
- else
- sbi->ll_fop = &ll_file_operations_noflock;
+ sbi->ll_fop = ll_select_file_operations(sbi);
/* always ping even if server suppress_pings */
if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
obd_connect_set_secctx(data);
+ if (ll_sbi_has_encrypt(sbi))
+ obd_connect_set_enc(data);
#if defined(CONFIG_SECURITY)
data->ocd_connect_flags2 |= OBD_CONNECT2_SELINUX_POLICY;
if (obd_connect_has_secctx(data))
sbi->ll_flags |= LL_SBI_FILE_SECCTX;
+ if (ll_sbi_has_encrypt(sbi) && !obd_connect_has_enc(data)) {
+ if (ll_sbi_has_test_dummy_encryption(sbi))
+ LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
+ sbi->ll_fsname,
+ sbi->ll_md_exp->exp_obd->obd_name);
+ ll_sbi_set_encrypt(sbi, false);
+ }
+
if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
LCONSOLE_INFO("%s: disabling xattr cache due to "
OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK |
OBD_CONNECT_BULK_MBITS | OBD_CONNECT_SHORTIO |
OBD_CONNECT_FLAGS2 | OBD_CONNECT_GRANT_SHRINK;
-
-/* The client currently advertises support for OBD_CONNECT_LOCKAHEAD_OLD so it
- * can interoperate with an older version of lockahead which was released prior
- * to landing in master. This support will be dropped when 2.13 development
- * starts. At the point, we should not just drop the connect flag (below), we
- * should also remove the support in the code.
- *
- * Removing it means a few things:
- * 1. Remove this section here
- * 2. Remove CEF_NONBLOCK in ll_file_lockahead()
- * 3. Remove function exp_connect_lockahead_old
- * 4. Remove LDLM_FL_LOCKAHEAD_OLD_RESERVED in lustre_dlm_flags.h
- * */
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 12, 50, 0)
- data->ocd_connect_flags |= OBD_CONNECT_LOCKAHEAD_OLD;
-#endif
-
data->ocd_connect_flags2 = OBD_CONNECT2_LOCKAHEAD |
- OBD_CONNECT2_INC_XID;
+ OBD_CONNECT2_INC_XID | OBD_CONNECT2_LSEEK |
+ OBD_CONNECT2_REP_MBITS;
if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_GRANT_PARAM))
data->ocd_connect_flags |= OBD_CONNECT_GRANT_PARAM;
if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
+ if (ll_sbi_has_encrypt(sbi))
+ obd_connect_set_enc(data);
+
CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d "
"ocd_grant: %d\n", data->ocd_connect_flags,
data->ocd_version, data->ocd_grant);
GOTO(out_md, err);
}
+ if (ll_sbi_has_encrypt(sbi) &&
+ !obd_connect_has_enc(&sbi->ll_dt_obd->u.lov.lov_ocd)) {
+ if (ll_sbi_has_test_dummy_encryption(sbi))
+ LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
+ sbi->ll_fsname, dt);
+ ll_sbi_set_encrypt(sbi, false);
+ } else if (ll_sbi_has_test_dummy_encryption(sbi)) {
+ LCONSOLE_WARN("Test dummy encryption mode enabled\n");
+ }
+
sbi->ll_dt_exp->exp_connect_data = *data;
/* Don't change value if it was specified in the config log */
if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages == -1) {
sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
- max_t(unsigned long, SBI_DEFAULT_READAHEAD_WHOLE_MAX,
+ max_t(unsigned long, SBI_DEFAULT_READ_AHEAD_WHOLE_MAX,
(data->ocd_brw_size >> PAGE_SHIFT));
if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages >
sbi->ll_ra_info.ra_max_pages_per_file)
#if THREAD_SIZE >= 8192 /*b=17630*/
sb->s_export_op = &lustre_export_operations;
#endif
+#ifdef HAVE_LUSTRE_CRYPTO
+ llcrypt_set_ops(sb, &lustre_cryptops);
+#endif
/* make root inode
* XXX: move this to after cbd setup? */
ptlrpc_req_finished(request);
if (IS_ERR(root)) {
-#ifdef CONFIG_LUSTRE_FS_POSIX_ACL
- if (lmd.posix_acl) {
- posix_acl_release(lmd.posix_acl);
- lmd.posix_acl = NULL;
- }
-#endif
+ lmd_clear_acl(&lmd);
err = IS_ERR(root) ? PTR_ERR(root) : -EBADF;
root = NULL;
CERROR("%s: bad ll_iget() for root: rc = %d\n",
RETURN(err);
out_root:
- if (root)
- iput(root);
+ iput(root);
out_lock_cn_cb:
obd_fid_fini(sbi->ll_dt_exp->exp_obd);
out_dt:
*flags |= tmp;
goto next;
}
+ tmp = ll_set_opt("test_dummy_encryption", s1,
+ LL_SBI_TEST_DUMMY_ENCRYPTION);
+ if (tmp) {
+#ifdef HAVE_LUSTRE_CRYPTO
+ *flags |= tmp;
+#else
+ LCONSOLE_WARN("Test dummy encryption mount option ignored: encryption not supported\n");
+#endif
+ goto next;
+ }
+ tmp = ll_set_opt("noencrypt", s1, LL_SBI_ENCRYPT);
+ if (tmp) {
+#ifdef HAVE_LUSTRE_CRYPTO
+ *flags &= ~tmp;
+#else
+ LCONSOLE_WARN("noencrypt mount option ignored: encryption not supported\n");
+#endif
+ goto next;
+ }
+ tmp = ll_set_opt("foreign_symlink", s1, LL_SBI_FOREIGN_SYMLINK);
+ if (tmp) {
+ int prefix_pos = sizeof("foreign_symlink=") - 1;
+ int equal_pos = sizeof("foreign_symlink=") - 2;
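+ /* prefix_pos indexes the first byte after '=',
+ * equal_pos the '=' itself
+ */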
+
+ /* non-default prefix provided? */
+ if (strlen(s1) >= sizeof("foreign_symlink=") &&
+ *(s1 + equal_pos) == '=') {
+ char *old = sbi->ll_foreign_symlink_prefix;
+ size_t old_len =
+ sbi->ll_foreign_symlink_prefix_size;
+
+ /* path must be absolute */
+ if (*(s1 + prefix_pos) != '/') {
+ LCONSOLE_ERROR_MSG(0x152,
+ "foreign prefix '%s' must be an absolute path\n",
+ s1 + prefix_pos);
+ RETURN(-EINVAL);
+ }
+ /* last option? */
+ s2 = strchrnul(s1 + prefix_pos, ',');
+
+ if (sbi->ll_foreign_symlink_prefix) {
+ sbi->ll_foreign_symlink_prefix = NULL;
+ sbi->ll_foreign_symlink_prefix_size = 0;
+ }
+ /* alloc for path length and '\0' */
+ OBD_ALLOC(sbi->ll_foreign_symlink_prefix,
+ s2 - (s1 + prefix_pos) + 1);
+ if (!sbi->ll_foreign_symlink_prefix) {
+ /* restore previous */
+ sbi->ll_foreign_symlink_prefix = old;
+ sbi->ll_foreign_symlink_prefix_size =
+ old_len;
+ RETURN(-ENOMEM);
+ }
+ if (old)
+ OBD_FREE(old, old_len);
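+ /* OBD_ALLOC() zero-fills, so the copied prefix is
+ * implicitly NUL-terminated
+ */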
+ strncpy(sbi->ll_foreign_symlink_prefix,
+ s1 + prefix_pos,
+ s2 - (s1 + prefix_pos));
+ sbi->ll_foreign_symlink_prefix_size =
+ s2 - (s1 + prefix_pos) + 1;
+ } else {
+ LCONSOLE_ERROR_MSG(0x152,
+ "invalid %s option\n", s1);
+ }
+ /* enable foreign symlink support */
+ *flags |= tmp;
+ goto next;
+ }
LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
s1);
RETURN(-EINVAL);
init_rwsem(&lli->lli_lsm_sem);
} else {
mutex_init(&lli->lli_size_mutex);
+ mutex_init(&lli->lli_setattr_mutex);
lli->lli_symlink_name = NULL;
ll_trunc_sem_init(&lli->lli_trunc_sem);
range_lock_tree_init(&lli->lli_write_tree);
CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
profilenm, cfg_instance, sb);
+ OBD_RACE(OBD_FAIL_LLITE_RACE_MOUNT);
+
OBD_ALLOC_PTR(cfg);
if (cfg == NULL)
GOTO(out_free_cfg, err = -ENOMEM);
/* UUID handling */
generate_random_uuid(uuid.b);
- snprintf(sbi->ll_sb_uuid.uuid, UUID_SIZE, "%pU", uuid.b);
+ snprintf(sbi->ll_sb_uuid.uuid, sizeof(sbi->ll_sb_uuid), "%pU", uuid.b);
CDEBUG(D_CONFIG, "llite sb uuid: %s\n", sbi->ll_sb_uuid.uuid);
cl_env_cache_purge(~0);
- module_put(THIS_MODULE);
-
EXIT;
} /* client_put_super */
}
up_write(&lli->lli_lsm_sem);
}
- } else if (lli->lli_default_lsm_md) {
- /* update default lsm if it changes */
+ return;
+ }
+
+ if (lli->lli_default_lsm_md) {
+ /* do nothing if the default lsm is unchanged */
down_read(&lli->lli_lsm_sem);
if (lli->lli_default_lsm_md &&
- !lsm_md_eq(lli->lli_default_lsm_md, md->default_lmv)) {
- up_read(&lli->lli_lsm_sem);
- down_write(&lli->lli_lsm_sem);
- if (lli->lli_default_lsm_md)
- lmv_free_memmd(lli->lli_default_lsm_md);
- lli->lli_default_lsm_md = md->default_lmv;
- lsm_md_dump(D_INODE, md->default_lmv);
- md->default_lmv = NULL;
- up_write(&lli->lli_lsm_sem);
- } else {
+ lsm_md_eq(lli->lli_default_lsm_md, md->default_lmv)) {
up_read(&lli->lli_lsm_sem);
+ return;
}
- } else {
- /* init default lsm */
- down_write(&lli->lli_lsm_sem);
- lli->lli_default_lsm_md = md->default_lmv;
- lsm_md_dump(D_INODE, md->default_lmv);
- md->default_lmv = NULL;
- up_write(&lli->lli_lsm_sem);
+ up_read(&lli->lli_lsm_sem);
}
+
+ down_write(&lli->lli_lsm_sem);
+ if (lli->lli_default_lsm_md)
+ lmv_free_memmd(lli->lli_default_lsm_md);
+ lli->lli_default_lsm_md = md->default_lmv;
+ lsm_md_dump(D_INODE, md->default_lmv);
+ md->default_lmv = NULL;
+ up_write(&lli->lli_lsm_sem);
}
static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
ll_xattr_cache_destroy(inode);
-#ifdef CONFIG_LUSTRE_FS_POSIX_ACL
forget_all_cached_acls(inode);
- if (lli->lli_posix_acl) {
- posix_acl_release(lli->lli_posix_acl);
- lli->lli_posix_acl = NULL;
- }
-#endif
+ lli_clear_acl(lli);
lli->lli_inode_magic = LLI_INODE_DEAD;
if (S_ISDIR(inode->i_mode))
*/
cl_inode_fini(inode);
+ llcrypt_put_encryption_info(inode);
+
EXIT;
}
RETURN(rc);
}
+/**
+ * Zero a portion of a page belonging to @inode.
+ * This implies, if necessary:
+ * - taking cl_lock on range corresponding to concerned page
+ * - grabbing vm page
+ * - associating cl_page
+ * - proceeding to clio read
+ * - zeroing range in page
+ * - proceeding to cl_page flush
+ * - releasing cl_lock
+ *
+ * \param[in] inode inode
+ * \param[in] index page index
+ * \param[in] offset offset in page to start zeroing from
+ * \param[in] len number of bytes to zero
+ *
+ * \retval 0 on success
+ * \retval negative errno on failure
+ */
+int ll_io_zero_page(struct inode *inode, pgoff_t index, pgoff_t offset,
+ unsigned len)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct cl_object *clob = lli->lli_clob;
+ __u16 refcheck;
+ struct lu_env *env = NULL;
+ struct cl_io *io = NULL;
+ struct cl_page *clpage = NULL;
+ struct page *vmpage = NULL;
+ loff_t from = (loff_t)index << PAGE_SHIFT; /* cast avoids 32-bit overflow */
+ struct cl_lock *lock = NULL;
+ struct cl_lock_descr *descr = NULL;
+ struct cl_2queue *queue = NULL;
+ struct cl_sync_io *anchor = NULL;
+ bool holdinglock = false;
+ bool lockedbymyself = true;
+ int rc;
+
+ ENTRY;
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
+
+ io = vvp_env_thread_io(env);
+ io->ci_obj = clob;
+ rc = cl_io_rw_init(env, io, CIT_WRITE, from, PAGE_SIZE);
+ if (rc)
+ GOTO(putenv, rc);
+
+ lock = vvp_env_lock(env);
+ descr = &lock->cll_descr;
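+ /* describe a CLM_WRITE lock covering exactly this one page,
+ * converting byte offsets to page indices with cl_index()
+ */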
+ descr->cld_obj = io->ci_obj;
+ descr->cld_start = cl_index(io->ci_obj, from);
+ descr->cld_end = cl_index(io->ci_obj, from + PAGE_SIZE - 1);
+ descr->cld_mode = CLM_WRITE;
+ descr->cld_enq_flags = CEF_MUST | CEF_NONBLOCK;
+
+ /* request lock for page */
+ rc = cl_lock_request(env, io, lock);
+ /* -ECANCELED indicates a matching lock with a different extent
+ * was already present, and -EEXIST indicates a matching lock
+ * on exactly the same extent was already present.
+ * In both cases it means we are covered.
+ */
+ if (rc == -ECANCELED || rc == -EEXIST)
+ rc = 0;
+ else if (rc < 0)
+ GOTO(iofini, rc);
+ else
+ holdinglock = true;
+
+ /* grab page */
+ vmpage = grab_cache_page_nowait(inode->i_mapping, index);
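+ /* a NULL return means the page could not be created or locked
+ * without blocking
+ */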
+ if (vmpage == NULL)
+ GOTO(rellock, rc = -EOPNOTSUPP);
+
+ if (!PageDirty(vmpage)) {
+ /* associate cl_page */
+ clpage = cl_page_find(env, clob, vmpage->index,
+ vmpage, CPT_CACHEABLE);
+ if (IS_ERR(clpage))
+ GOTO(pagefini, rc = PTR_ERR(clpage));
+
+ cl_page_assume(env, io, clpage);
+ }
+
+ if (!PageUptodate(vmpage) && !PageDirty(vmpage) &&
+ !PageWriteback(vmpage)) {
+ /* read page */
+ /* set PagePrivate2 to detect special case of empty page
+ * in osc_brw_fini_request()
+ */
+ SetPagePrivate2(vmpage);
+ rc = ll_io_read_page(env, io, clpage, NULL);
+ if (!PagePrivate2(vmpage))
+ /* PagePrivate2 was cleared in osc_brw_fini_request()
+ * meaning we read an empty page. In this case, in order
+ * to avoid allocating unnecessary block in truncated
+ * file, we must not zero and write as below. Subsequent
+ * server-side truncate will handle things correctly.
+ */
+ GOTO(clpfini, rc = 0);
+ ClearPagePrivate2(vmpage);
+ if (rc)
+ GOTO(clpfini, rc);
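+ /* the read path may have unlocked the page; re-take the lock
+ * so zeroing happens on a locked page, and remember whether we
+ * hold it for the cleanup path below
+ */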
+ lockedbymyself = trylock_page(vmpage);
+ cl_page_assume(env, io, clpage);
+ }
+
+ /* zero range in page */
+ zero_user(vmpage, offset, len);
+
+ if (holdinglock && clpage) {
+ /* explicitly write newly modified page */
+ queue = &io->ci_queue;
+ cl_2queue_init(queue);
+ anchor = &vvp_env_info(env)->vti_anchor;
+ cl_sync_io_init(anchor, 1);
+ clpage->cp_sync_io = anchor;
+ cl_2queue_add(queue, clpage);
+ rc = cl_io_submit_rw(env, io, CRT_WRITE, queue);
+ if (rc)
+ GOTO(queuefini1, rc);
+ rc = cl_sync_io_wait(env, anchor, 0);
+ if (rc)
+ GOTO(queuefini2, rc);
+ cl_page_assume(env, io, clpage);
+
+queuefini2:
+ cl_2queue_discard(env, io, queue);
+queuefini1:
+ cl_2queue_disown(env, io, queue);
+ cl_2queue_fini(env, queue);
+ }
+
+clpfini:
+ if (clpage)
+ cl_page_put(env, clpage);
+pagefini:
+ if (lockedbymyself) {
+ unlock_page(vmpage);
+ put_page(vmpage);
+ }
+rellock:
+ if (holdinglock)
+ cl_lock_release(env, lock);
+iofini:
+ cl_io_fini(env, io);
+putenv:
+ if (env)
+ cl_env_put(env, &refcheck);
+
+ RETURN(rc);
+}
+
/* If this inode has objects allocated to it (lsm != NULL), then the OST
* object(s) determine the file size and mtime. Otherwise, the MDS will
* keep these values until such a time that objects are allocated for it.
/* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
if (attr->ia_valid & TIMES_SET_FLAGS) {
if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
- !cfs_capable(CFS_CAP_FOWNER))
+ !capable(CAP_FOWNER))
RETURN(-EPERM);
}
*/
xvalid |= OP_XVALID_OWNEROVERRIDE;
op_data->op_bias |= MDS_DATA_MODIFIED;
- ll_file_clear_flag(lli, LLIF_DATA_MODIFIED);
+ clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
}
if (attr->ia_valid & ATTR_FILE) {
GOTO(out, rc);
}
} else {
+ unsigned int flags = 0;
+
/* For truncate and utimes sending attributes to OSTs,
* setting mtime/atime to the past will be performed
* under PW [0:EOF] extent lock (new_size:EOF for
* it is necessary due to possible time
* de-synchronization between MDT inode and OST objects
*/
- rc = cl_setattr_ost(lli->lli_clob, attr, xvalid, 0);
+ if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode) &&
+ attr->ia_valid & ATTR_SIZE) {
+ xvalid |= OP_XVALID_FLAGS;
+ flags = LUSTRE_ENCRYPT_FL;
+ /* Call to ll_io_zero_page is not necessary if
+ * truncating on PAGE_SIZE boundary, because
+ * whole pages will be wiped.
+ * In case of Direct IO, all we need is to set
+ * new size.
+ */
+ if (attr->ia_size & ~PAGE_MASK &&
+ !(attr->ia_valid & ATTR_FILE &&
+ attr->ia_file->f_flags & O_DIRECT)) {
+ pgoff_t offset =
+ attr->ia_size & (PAGE_SIZE - 1);
+
+ rc = ll_io_zero_page(inode,
+ attr->ia_size >> PAGE_SHIFT,
+ offset, PAGE_SIZE - offset);
+ if (rc)
+ GOTO(out, rc);
+ }
+ }
+ rc = cl_setattr_ost(lli->lli_clob, attr, xvalid, flags);
}
}
* LLIF_DATA_MODIFIED is not set(see vvp_io_setattr_fini()).
* This way we can save an RPC for common open + trunc
* operation. */
- if (ll_file_test_and_clear_flag(lli, LLIF_DATA_MODIFIED)) {
+ if (test_and_clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags)) {
struct hsm_state_set hss = {
.hss_valid = HSS_SETMASK,
.hss_setmask = HS_DIRTY,
{
int mode = de->d_inode->i_mode;
enum op_xvalid xvalid = 0;
+ int rc;
+
+ rc = llcrypt_prepare_setattr(de, attr);
+ if (rc)
+ return rc;
if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
(ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n",
osfs->os_bavail, osfs->os_blocks, osfs->os_ffree, osfs->os_files);
- if (osfs->os_state & OS_STATE_SUM)
+ if (osfs->os_state & OS_STATFS_SUM)
GOTO(out, rc);
rc = obd_statfs(NULL, sbi->ll_dt_exp, &obd_osfs, max_age, flags);
RETURN(rc);
}
+static int ll_statfs_project(struct inode *inode, struct kstatfs *sfs)
+{
+ struct if_quotactl qctl = {
+ .qc_cmd = LUSTRE_Q_GETQUOTA,
+ .qc_type = PRJQUOTA,
+ .qc_valid = QC_GENERAL,
+ };
+ u64 limit, curblock;
+ int ret;
+
+ qctl.qc_id = ll_i2info(inode)->lli_projid;
+ ret = quotactl_ioctl(ll_i2sbi(inode), &qctl);
+ if (ret) {
+ /* ignore errors if the project ID has no quota limit or the
+ * quota feature is unsupported
+ */
+ if (ret == -ESRCH || ret == -EOPNOTSUPP)
+ ret = 0;
+ return ret;
+ }
+
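+ /* quota block limits are in KiB (hence the 1024 factor); convert
+ * to statfs block-size units before clamping
+ */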
+ limit = ((qctl.qc_dqblk.dqb_bsoftlimit ?
+ qctl.qc_dqblk.dqb_bsoftlimit :
+ qctl.qc_dqblk.dqb_bhardlimit) * 1024) / sfs->f_bsize;
+ if (limit && sfs->f_blocks > limit) {
+ curblock = (qctl.qc_dqblk.dqb_curspace +
+ sfs->f_bsize - 1) / sfs->f_bsize;
+ sfs->f_blocks = limit;
+ sfs->f_bfree = sfs->f_bavail =
+ (sfs->f_blocks > curblock) ?
+ (sfs->f_blocks - curblock) : 0;
+ }
+
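+ /* apply the same clamping to the inode counts */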
+ limit = qctl.qc_dqblk.dqb_isoftlimit ?
+ qctl.qc_dqblk.dqb_isoftlimit :
+ qctl.qc_dqblk.dqb_ihardlimit;
+ if (limit && sfs->f_files > limit) {
+ sfs->f_files = limit;
+ sfs->f_ffree = (sfs->f_files >
+ qctl.qc_dqblk.dqb_curinodes) ?
+ (sfs->f_files - qctl.qc_dqblk.dqb_curinodes) : 0;
+ }
+
+ return 0;
+}
+
int ll_statfs(struct dentry *de, struct kstatfs *sfs)
{
struct super_block *sb = de->d_sb;
sfs->f_bavail = osfs.os_bavail;
sfs->f_fsid.val[0] = (__u32)fsid;
sfs->f_fsid.val[1] = (__u32)(fsid >> 32);
+ if (ll_i2info(de->d_inode)->lli_projid)
+ return ll_statfs_project(de->d_inode, sfs);
ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STATFS,
ktime_us_delta(ktime_get(), kstart));
mutex_unlock(&lli->lli_size_mutex);
}
-void ll_update_inode_flags(struct inode *inode, int ext_flags)
+void ll_update_inode_flags(struct inode *inode, unsigned int ext_flags)
{
+ /* do not clear encryption flag */
+ ext_flags |= ll_inode_to_ext_flags(inode->i_flags) & LUSTRE_ENCRYPT_FL;
inode->i_flags = ll_ext_to_inode_flags(ext_flags);
if (ext_flags & LUSTRE_PROJINHERIT_FL)
- ll_file_set_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT);
+ set_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags);
else
- ll_file_clear_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT);
+ clear_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags);
}
int ll_update_inode(struct inode *inode, struct lustre_md *md)
return rc;
}
-#ifdef CONFIG_LUSTRE_FS_POSIX_ACL
- if (body->mbo_valid & OBD_MD_FLACL) {
- spin_lock(&lli->lli_lock);
- if (lli->lli_posix_acl)
- posix_acl_release(lli->lli_posix_acl);
- lli->lli_posix_acl = md->posix_acl;
- spin_unlock(&lli->lli_lock);
- }
-#endif
+ if (body->mbo_valid & OBD_MD_FLACL)
+ lli_replace_acl(lli, md);
+
inode->i_ino = cl_fid_build_ino(&body->mbo_fid1,
sbi->ll_flags & LL_SBI_32BIT_API);
inode->i_generation = cl_fid_build_gen(&body->mbo_fid1);
lli->lli_ctime = body->mbo_ctime;
}
+ if (body->mbo_valid & OBD_MD_FLBTIME)
+ lli->lli_btime = body->mbo_btime;
+
/* Clear i_flags to remove S_NOSEC before permissions are updated */
if (body->mbo_valid & OBD_MD_FLFLAGS)
ll_update_inode_flags(inode, body->mbo_flags);
(body->mbo_mode & S_IFMT);
LASSERT(inode->i_mode != 0);
- if (S_ISREG(inode->i_mode))
- inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1,
- LL_MAX_BLKSIZE_BITS);
- else
- inode->i_blkbits = inode->i_sb->s_blocksize_bits;
-
if (body->mbo_valid & OBD_MD_FLUID)
inode->i_uid = make_kuid(&init_user_ns, body->mbo_uid);
if (body->mbo_valid & OBD_MD_FLGID)
LASSERT(fid_seq(&lli->lli_fid) != 0);
+ lli->lli_attr_valid = body->mbo_valid;
if (body->mbo_valid & OBD_MD_FLSIZE) {
i_size_write(inode, body->mbo_size);
if (body->mbo_valid & OBD_MD_FLBLOCKS)
inode->i_blocks = body->mbo_blocks;
+ } else {
+ if (body->mbo_valid & OBD_MD_FLLAZYSIZE)
+ lli->lli_lazysize = body->mbo_size;
+ if (body->mbo_valid & OBD_MD_FLLAZYBLOCKS)
+ lli->lli_lazyblocks = body->mbo_blocks;
}
if (body->mbo_valid & OBD_MD_TSTATE) {
* glimpsing updated attrs
*/
if (body->mbo_t_state & MS_RESTORE)
- ll_file_set_flag(lli, LLIF_FILE_RESTORING);
+ set_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
else
- ll_file_clear_flag(lli, LLIF_FILE_RESTORING);
+ clear_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
}
return 0;
}
+void ll_truncate_inode_pages_final(struct inode *inode)
+{
+ struct address_space *mapping = &inode->i_data;
+ unsigned long nrpages;
+ unsigned long flags;
+
+ truncate_inode_pages_final(mapping);
+
+ /* Workaround for LU-118: Note nrpages may not be totally updated when
+ * truncate_inode_pages() returns, as there can be a page in the process
+ * of deletion (inside __delete_from_page_cache()) in the specified
+ * range. Thus mapping->nrpages can be non-zero when this function
+ * returns even after truncation of the whole mapping. Only re-check
+ * under the i_pages lock if nrpages isn't already zero.
+ */
+ nrpages = mapping->nrpages;
+ if (nrpages) {
+ ll_xa_lock_irqsave(&mapping->i_pages, flags);
+ nrpages = mapping->nrpages;
+ ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
+ } /* Workaround end */
+
+ LASSERTF(nrpages == 0, "%s: inode="DFID"(%p) nrpages=%lu, "
+ "see https://jira.whamcloud.com/browse/LU-118\n",
+ ll_i2sbi(inode)->ll_fsname,
+ PFID(ll_inode2fid(inode)), inode, nrpages);
+}
+
int ll_read_inode2(struct inode *inode, void *opaque)
{
struct lustre_md *md = opaque;
void ll_delete_inode(struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct address_space *mapping = &inode->i_data;
- unsigned long nrpages;
- unsigned long flags;
-
ENTRY;
if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL) {
cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, inode->i_nlink ?
CL_FSYNC_LOCAL : CL_FSYNC_DISCARD, 1);
}
- truncate_inode_pages_final(mapping);
-
- /* Workaround for LU-118: Note nrpages may not be totally updated when
- * truncate_inode_pages() returns, as there can be a page in the process
- * of deletion (inside __delete_from_page_cache()) in the specified
- * range. Thus mapping->nrpages can be non-zero when this function
- * returns even after truncation of the whole mapping. Only do this if
- * npages isn't already zero.
- */
- nrpages = mapping->nrpages;
- if (nrpages) {
- xa_lock_irqsave(&mapping->i_pages, flags);
- nrpages = mapping->nrpages;
- xa_unlock_irqrestore(&mapping->i_pages, flags);
- } /* Workaround end */
-
- LASSERTF(nrpages == 0, "%s: inode="DFID"(%p) nrpages=%lu, "
- "see https://jira.whamcloud.com/browse/LU-118\n",
- ll_i2sbi(inode)->ll_fsname,
- PFID(ll_inode2fid(inode)), inode, nrpages);
+ ll_truncate_inode_pages_final(inode);
ll_clear_inode(inode);
clear_inode(inode);
sbi->ll_flags & LL_SBI_32BIT_API),
&md);
if (IS_ERR(*inode)) {
-#ifdef CONFIG_LUSTRE_FS_POSIX_ACL
- if (md.posix_acl) {
- posix_acl_release(md.posix_acl);
- md.posix_acl = NULL;
- }
-#endif
+ lmd_clear_acl(&md);
rc = IS_ERR(*inode) ? PTR_ERR(*inode) : -ENOMEM;
*inode = NULL;
CERROR("new_inode -fatal: rc %d\n", rc);
if (default_lmv_deleted)
ll_update_default_lsm_md(*inode, &md);
+ /* we may want to apply some policy for foreign file/dir */
+ if (ll_sbi_has_foreign_symlink(sbi)) {
+ rc = ll_manage_foreign(*inode, &md);
+ if (rc < 0)
+ GOTO(out, rc);
+ }
+
GOTO(out, rc = 0);
out:
int ll_obd_statfs(struct inode *inode, void __user *arg)
{
- struct ll_sb_info *sbi = NULL;
- struct obd_export *exp;
- char *buf = NULL;
- struct obd_ioctl_data *data = NULL;
- __u32 type;
- int len = 0, rc;
-
- if (!inode || !(sbi = ll_i2sbi(inode)))
- GOTO(out_statfs, rc = -EINVAL);
-
- rc = obd_ioctl_getdata(&buf, &len, arg);
- if (rc)
- GOTO(out_statfs, rc);
-
- data = (void*)buf;
- if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
- !data->ioc_pbuf1 || !data->ioc_pbuf2)
- GOTO(out_statfs, rc = -EINVAL);
-
- if (data->ioc_inllen1 != sizeof(__u32) ||
- data->ioc_inllen2 != sizeof(__u32) ||
- data->ioc_plen1 != sizeof(struct obd_statfs) ||
- data->ioc_plen2 != sizeof(struct obd_uuid))
- GOTO(out_statfs, rc = -EINVAL);
-
- memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
+ struct ll_sb_info *sbi = NULL;
+ struct obd_export *exp;
+ struct obd_ioctl_data *data = NULL;
+ __u32 type;
+ int len = 0, rc;
+
+ if (inode)
+ sbi = ll_i2sbi(inode);
+ if (!sbi)
+ GOTO(out_statfs, rc = -EINVAL);
+
+ rc = obd_ioctl_getdata(&data, &len, arg);
+ if (rc)
+ GOTO(out_statfs, rc);
+
+ if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
+ !data->ioc_pbuf1 || !data->ioc_pbuf2)
+ GOTO(out_statfs, rc = -EINVAL);
+
+ if (data->ioc_inllen1 != sizeof(__u32) ||
+ data->ioc_inllen2 != sizeof(__u32) ||
+ data->ioc_plen1 != sizeof(struct obd_statfs) ||
+ data->ioc_plen2 != sizeof(struct obd_uuid))
+ GOTO(out_statfs, rc = -EINVAL);
+
+ memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
if (type & LL_STATFS_LMV)
- exp = sbi->ll_md_exp;
+ exp = sbi->ll_md_exp;
else if (type & LL_STATFS_LOV)
- exp = sbi->ll_dt_exp;
- else
- GOTO(out_statfs, rc = -ENODEV);
+ exp = sbi->ll_dt_exp;
+ else
+ GOTO(out_statfs, rc = -ENODEV);
- rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, NULL);
- if (rc)
- GOTO(out_statfs, rc);
+ rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, data, NULL);
+ if (rc)
+ GOTO(out_statfs, rc);
out_statfs:
- OBD_FREE_LARGE(buf, len);
+ OBD_FREE_LARGE(data, len);
return rc;
}
void ll_unlock_md_op_lsm(struct md_op_data *op_data)
{
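+ /* these semaphores are taken with down_read_non_owner() in
+ * ll_prep_md_op_data() and may be released from a different
+ * thread, hence the _non_owner variants
+ */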
if (op_data->op_mea2_sem) {
- up_read(op_data->op_mea2_sem);
+ up_read_non_owner(op_data->op_mea2_sem);
op_data->op_mea2_sem = NULL;
}
if (op_data->op_mea1_sem) {
- up_read(op_data->op_mea1_sem);
+ up_read_non_owner(op_data->op_mea1_sem);
op_data->op_mea1_sem = NULL;
}
}
if (namelen > ll_i2sbi(i1)->ll_namelen)
return ERR_PTR(-ENAMETOOLONG);
- if (!lu_name_is_valid_2(name, namelen))
+ /* "/" is not valid name, but it's allowed */
+ if (!lu_name_is_valid_2(name, namelen) &&
+ strncmp("/", name, namelen) != 0)
return ERR_PTR(-EINVAL);
}
op_data->op_code = opc;
if (S_ISDIR(i1->i_mode)) {
- down_read(&ll_i2info(i1)->lli_lsm_sem);
+ down_read_non_owner(&ll_i2info(i1)->lli_lsm_sem);
op_data->op_mea1_sem = &ll_i2info(i1)->lli_lsm_sem;
op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
op_data->op_default_mea1 = ll_i2info(i1)->lli_default_lsm_md;
op_data->op_fid2 = *ll_inode2fid(i2);
if (S_ISDIR(i2->i_mode)) {
if (i2 != i1) {
- down_read(&ll_i2info(i2)->lli_lsm_sem);
+ /* i2 is typically a child of i1, and MUST be
+ * further from the root to avoid deadlocks.
+ */
+ down_read_non_owner(&ll_i2info(i2)->lli_lsm_sem);
op_data->op_mea2_sem =
&ll_i2info(i2)->lli_lsm_sem;
}
void ll_finish_md_op_data(struct md_op_data *op_data)
{
ll_unlock_md_op_lsm(op_data);
- security_release_secctx(op_data->op_file_secctx,
- op_data->op_file_secctx_size);
- OBD_FREE_PTR(op_data);
+ ll_security_release_secctx(op_data->op_file_secctx,
+ op_data->op_file_secctx_size);
+ llcrypt_free_ctx(op_data->op_file_encctx, op_data->op_file_encctx_size);
+ OBD_FREE_PTR(op_data);
}
int ll_show_options(struct seq_file *seq, struct dentry *dentry)
if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
seq_puts(seq, ",always_ping");
+ if (ll_sbi_has_test_dummy_encryption(sbi))
+ seq_puts(seq, ",test_dummy_encryption");
+
+ if (ll_sbi_has_encrypt(sbi))
+ seq_puts(seq, ",encrypt");
+ else
+ seq_puts(seq, ",noencrypt");
+
+ if (sbi->ll_flags & LL_SBI_FOREIGN_SYMLINK) {
+ seq_puts(seq, ",foreign_symlink=");
+ seq_puts(seq, sbi->ll_foreign_symlink_prefix);
+ }
+
RETURN(0);
}
struct obd_device *obd;
ENTRY;
- if (cmd == OBD_IOC_GETDTNAME)
+ if (cmd == OBD_IOC_GETNAME_OLD || cmd == OBD_IOC_GETDTNAME)
obd = class_exp2obd(sbi->ll_dt_exp);
else if (cmd == OBD_IOC_GETMDNAME)
obd = class_exp2obd(sbi->ll_md_exp);
matched = false;
i = 0;
while (LNetGetId(i++, &id) != -ENOENT) {
- if (LNET_NETTYP(LNET_NIDNET(id.nid)) == LOLND)
+ if (id.nid == LNET_NID_LO_0)
continue;
if (cfs_match_nid(id.nid, &squash->rsi_nosquash_nids)) {
matched = true;
ENTRY;
- if (!cfs_capable(CFS_CAP_DAC_READ_SEARCH) &&
+ if (!capable(CAP_DAC_READ_SEARCH) &&
!(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
RETURN(-EPERM);