X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fllite%2Fllite_lib.c;h=d0597f288ca5c557ad9fb81f4ff0c269ff87154c;hb=09c558d16f0a80f436522edde89367c088fe2055;hp=7f696c873a92763c1ebfec0171a395d6c0c1ee0d;hpb=58d744e3eaab358ef346e51ff4aa17e9f08efbb3;p=fs%2Flustre-release.git diff --git a/lustre/llite/llite_lib.c b/lustre/llite/llite_lib.c index 7f696c8..d0597f2 100644 --- a/lustre/llite/llite_lib.c +++ b/lustre/llite/llite_lib.c @@ -27,7 +27,6 @@ */ /* * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. * * lustre/llite/llite_lib.c * @@ -36,6 +35,7 @@ #define DEBUG_SUBSYSTEM S_LLITE +#include #include #include #include @@ -45,12 +45,18 @@ #include #include #include -#ifdef HAVE_UIDGID_HEADER -# include -#endif -#include +#include +#include +#include +#ifndef HAVE_CPUS_READ_LOCK +#include +#endif #include +#ifdef HAVE_UAPI_LINUX_MOUNT_H +#include +#endif + #include #include #include @@ -67,40 +73,89 @@ struct kmem_cache *ll_file_data_slab; #define log2(n) ffz(~(n)) #endif +/** + * If there is only one number of core visible to Lustre, + * async readahead will be disabled, to avoid massive over + * subscription, we use 1/2 of active cores as default max + * async readahead requests. + */ +static inline unsigned int ll_get_ra_async_max_active(void) +{ + return cfs_cpt_weight(cfs_cpt_tab, CFS_CPT_ANY) >> 1; +} + static struct ll_sb_info *ll_init_sbi(void) { struct ll_sb_info *sbi = NULL; unsigned long pages; unsigned long lru_page_max; struct sysinfo si; + int rc; int i; + ENTRY; OBD_ALLOC_PTR(sbi); if (sbi == NULL) - RETURN(NULL); + RETURN(ERR_PTR(-ENOMEM)); + + rc = pcc_super_init(&sbi->ll_pcc_super); + if (rc < 0) + GOTO(out_sbi, rc); spin_lock_init(&sbi->ll_lock); mutex_init(&sbi->ll_lco.lco_lock); spin_lock_init(&sbi->ll_pp_extent_lock); spin_lock_init(&sbi->ll_process_lock); sbi->ll_rw_stats_on = 0; + sbi->ll_statfs_max_age = OBD_STATFS_CACHE_SECONDS; si_meminfo(&si); pages = si.totalram - si.totalhigh; lru_page_max = pages / 2; + sbi->ll_ra_info.ra_async_max_active = ll_get_ra_async_max_active(); + sbi->ll_ra_info.ll_readahead_wq = + cfs_cpt_bind_workqueue("ll-readahead-wq", cfs_cpt_tab, + 0, CFS_CPT_ANY, + sbi->ll_ra_info.ra_async_max_active); + if (IS_ERR(sbi->ll_ra_info.ll_readahead_wq)) + GOTO(out_pcc, rc = PTR_ERR(sbi->ll_ra_info.ll_readahead_wq)); + /* initialize ll_cache data */ sbi->ll_cache = cl_cache_init(lru_page_max); - if (sbi->ll_cache == NULL) { - OBD_FREE(sbi, sizeof(*sbi)); - RETURN(NULL); - } + if (sbi->ll_cache == NULL) + GOTO(out_destroy_ra, rc = -ENOMEM); + + /* initialize foreign symlink prefix path */ + OBD_ALLOC(sbi->ll_foreign_symlink_prefix, sizeof("/mnt/")); + if (sbi->ll_foreign_symlink_prefix == NULL) + GOTO(out_destroy_ra, rc = -ENOMEM); + memcpy(sbi->ll_foreign_symlink_prefix, "/mnt/", sizeof("/mnt/")); + sbi->ll_foreign_symlink_prefix_size = sizeof("/mnt/"); + + /* initialize foreign symlink upcall path, none by default */ + OBD_ALLOC(sbi->ll_foreign_symlink_upcall, sizeof("none")); + if (sbi->ll_foreign_symlink_upcall == NULL) + GOTO(out_destroy_ra, rc = -ENOMEM); + memcpy(sbi->ll_foreign_symlink_upcall, "none", sizeof("none")); + sbi->ll_foreign_symlink_upcall_items = NULL; + sbi->ll_foreign_symlink_upcall_nb_items = 0; + init_rwsem(&sbi->ll_foreign_symlink_sem); + /* foreign symlink support (LL_SBI_FOREIGN_SYMLINK in ll_flags) + * not enabled by default + */ - sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32, - SBI_DEFAULT_READAHEAD_MAX); - sbi->ll_ra_info.ra_max_pages = 
sbi->ll_ra_info.ra_max_pages_per_file; + sbi->ll_ra_info.ra_max_pages = + min(pages / 32, SBI_DEFAULT_READ_AHEAD_MAX); + sbi->ll_ra_info.ra_max_pages_per_file = + min(sbi->ll_ra_info.ra_max_pages / 4, + SBI_DEFAULT_READ_AHEAD_PER_FILE_MAX); + sbi->ll_ra_info.ra_async_pages_per_file_threshold = + sbi->ll_ra_info.ra_max_pages_per_file; + sbi->ll_ra_info.ra_range_pages = SBI_DEFAULT_RA_RANGE_PAGES; sbi->ll_ra_info.ra_max_read_ahead_whole_pages = -1; + atomic_set(&sbi->ll_ra_info.ra_async_inflight, 0); sbi->ll_flags |= LL_SBI_VERBOSE; #ifdef ENABLE_CHECKSUM @@ -132,18 +187,37 @@ static struct ll_sb_info *ll_init_sbi(void) sbi->ll_flags |= LL_SBI_AGL_ENABLED; sbi->ll_flags |= LL_SBI_FAST_READ; sbi->ll_flags |= LL_SBI_TINY_WRITE; + sbi->ll_flags |= LL_SBI_PARALLEL_DIO; + ll_sbi_set_encrypt(sbi, true); /* root squash */ sbi->ll_squash.rsi_uid = 0; sbi->ll_squash.rsi_gid = 0; INIT_LIST_HEAD(&sbi->ll_squash.rsi_nosquash_nids); - init_rwsem(&sbi->ll_squash.rsi_sem); - pcc_super_init(&sbi->ll_pcc_super); + spin_lock_init(&sbi->ll_squash.rsi_lock); /* Per-filesystem file heat */ sbi->ll_heat_decay_weight = SBI_DEFAULT_HEAT_DECAY_WEIGHT; sbi->ll_heat_period_second = SBI_DEFAULT_HEAT_PERIOD_SECOND; + + /* Per-fs open heat level before requesting open lock */ + sbi->ll_oc_thrsh_count = SBI_DEFAULT_OPENCACHE_THRESHOLD_COUNT; + sbi->ll_oc_max_ms = SBI_DEFAULT_OPENCACHE_THRESHOLD_MAX_MS; + sbi->ll_oc_thrsh_ms = SBI_DEFAULT_OPENCACHE_THRESHOLD_MS; RETURN(sbi); +out_destroy_ra: + if (sbi->ll_foreign_symlink_prefix) + OBD_FREE(sbi->ll_foreign_symlink_prefix, sizeof("/mnt/")); + if (sbi->ll_cache) { + cl_cache_decref(sbi->ll_cache); + sbi->ll_cache = NULL; + } + destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq); +out_pcc: + pcc_super_fini(&sbi->ll_pcc_super); +out_sbi: + OBD_FREE_PTR(sbi); + RETURN(ERR_PTR(rc)); } static void ll_free_sbi(struct super_block *sb) @@ -154,18 +228,45 @@ static void ll_free_sbi(struct super_block *sb) if (sbi != NULL) { if (!list_empty(&sbi->ll_squash.rsi_nosquash_nids)) cfs_free_nidlist(&sbi->ll_squash.rsi_nosquash_nids); + if (sbi->ll_ra_info.ll_readahead_wq) + destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq); if (sbi->ll_cache != NULL) { cl_cache_decref(sbi->ll_cache); sbi->ll_cache = NULL; } + if (sbi->ll_foreign_symlink_prefix) { + OBD_FREE(sbi->ll_foreign_symlink_prefix, + sbi->ll_foreign_symlink_prefix_size); + sbi->ll_foreign_symlink_prefix = NULL; + } + if (sbi->ll_foreign_symlink_upcall) { + OBD_FREE(sbi->ll_foreign_symlink_upcall, + strlen(sbi->ll_foreign_symlink_upcall) + + 1); + sbi->ll_foreign_symlink_upcall = NULL; + } + if (sbi->ll_foreign_symlink_upcall_items) { + int i; + int nb_items = sbi->ll_foreign_symlink_upcall_nb_items; + struct ll_foreign_symlink_upcall_item *items = + sbi->ll_foreign_symlink_upcall_items; + + for (i = 0 ; i < nb_items; i++) + if (items[i].type == STRING_TYPE) + OBD_FREE(items[i].string, + items[i].size); + + OBD_FREE_LARGE(items, nb_items * + sizeof(struct ll_foreign_symlink_upcall_item)); + sbi->ll_foreign_symlink_upcall_items = NULL; + } pcc_super_fini(&sbi->ll_pcc_super); OBD_FREE(sbi, sizeof(*sbi)); } EXIT; } -static int client_common_fill_super(struct super_block *sb, char *md, char *dt, - struct vfsmount *mnt) +static int client_common_fill_super(struct super_block *sb, char *md, char *dt) { struct inode *root = NULL; struct ll_sb_info *sbi = ll_s2sbi(sb); @@ -229,53 +330,52 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, OBD_CONNECT2_FLR | OBD_CONNECT2_LOCK_CONVERT | 
OBD_CONNECT2_ARCHIVE_ID_ARRAY | + OBD_CONNECT2_INC_XID | OBD_CONNECT2_LSOM | OBD_CONNECT2_ASYNC_DISCARD | - OBD_CONNECT2_PCC; + OBD_CONNECT2_PCC | + OBD_CONNECT2_CRUSH | OBD_CONNECT2_LSEEK | + OBD_CONNECT2_GETATTR_PFID | + OBD_CONNECT2_DOM_LVB | + OBD_CONNECT2_REP_MBITS | + OBD_CONNECT2_ATOMIC_OPEN_LOCK; #ifdef HAVE_LRU_RESIZE_SUPPORT if (sbi->ll_flags & LL_SBI_LRU_RESIZE) data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE; #endif -#ifdef CONFIG_FS_POSIX_ACL - data->ocd_connect_flags |= OBD_CONNECT_ACL | OBD_CONNECT_UMASK | - OBD_CONNECT_LARGE_ACL; -#endif + data->ocd_connect_flags |= OBD_CONNECT_ACL_FLAGS; data->ocd_cksum_types = obd_cksum_types_supported_client(); if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT)) /* flag mdc connection as lightweight, only used for test * purpose, use with care */ - data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT; + data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT; - data->ocd_ibits_known = MDS_INODELOCK_FULL; - data->ocd_version = LUSTRE_VERSION_CODE; + data->ocd_ibits_known = MDS_INODELOCK_FULL; + data->ocd_version = LUSTRE_VERSION_CODE; - if (sb->s_flags & MS_RDONLY) - data->ocd_connect_flags |= OBD_CONNECT_RDONLY; - if (sbi->ll_flags & LL_SBI_USER_XATTR) - data->ocd_connect_flags |= OBD_CONNECT_XATTR; + if (sb->s_flags & SB_RDONLY) + data->ocd_connect_flags |= OBD_CONNECT_RDONLY; + if (sbi->ll_flags & LL_SBI_USER_XATTR) + data->ocd_connect_flags |= OBD_CONNECT_XATTR; -#ifdef MS_NOSEC +#ifdef SB_NOSEC /* Setting this indicates we correctly support S_NOSEC (See kernel * commit 9e1f1de02c2275d7172e18dc4e7c2065777611bf) */ - sb->s_flags |= MS_NOSEC; + sb->s_flags |= SB_NOSEC; #endif - - if (sbi->ll_flags & LL_SBI_FLOCK) - sbi->ll_fop = &ll_file_operations_flock; - else if (sbi->ll_flags & LL_SBI_LOCALFLOCK) - sbi->ll_fop = &ll_file_operations; - else - sbi->ll_fop = &ll_file_operations_noflock; + sbi->ll_fop = ll_select_file_operations(sbi); /* always ping even if server suppress_pings */ if (sbi->ll_flags & LL_SBI_ALWAYS_PING) data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS; obd_connect_set_secctx(data); + if (ll_sbi_has_encrypt(sbi)) + obd_connect_set_enc(data); #if defined(CONFIG_SECURITY) data->ocd_connect_flags2 |= OBD_CONNECT2_SELINUX_POLICY; @@ -285,16 +385,16 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, err = obd_connect(NULL, &sbi->ll_md_exp, sbi->ll_md_obd, &sbi->ll_sb_uuid, data, sbi->ll_cache); - if (err == -EBUSY) { - LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing " - "recovery, of which this client is not a " - "part. Please wait for recovery to complete," - " abort, or time out.\n", md); - GOTO(out, err); - } else if (err) { - CERROR("cannot connect to %s: rc = %d\n", md, err); - GOTO(out, err); - } + if (err == -EBUSY) { + LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing " + "recovery, of which this client is not a " + "part. 
Please wait for recovery to complete," + " abort, or time out.\n", md); + GOTO(out, err); + } else if (err) { + CERROR("cannot connect to %s: rc = %d\n", md, err); + GOTO(out, err); + } sbi->ll_md_exp->exp_connect_data = *data; @@ -310,7 +410,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, * can make sure the client can be mounted as long as MDT0 is * avaible */ err = obd_statfs(NULL, sbi->ll_md_exp, osfs, - ktime_get_seconds() - OBD_STATFS_CACHE_SECONDS, + ktime_get_seconds() - sbi->ll_statfs_max_age, OBD_STATFS_FOR_MDT0); if (err) GOTO(out_md_fid, err); @@ -356,28 +456,28 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, sbi->ll_namelen = osfs->os_namelen; sbi->ll_mnt.mnt = current->fs->root.mnt; - if ((sbi->ll_flags & LL_SBI_USER_XATTR) && - !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) { - LCONSOLE_INFO("Disabling user_xattr feature because " - "it is not supported on the server\n"); - sbi->ll_flags &= ~LL_SBI_USER_XATTR; - } + if ((sbi->ll_flags & LL_SBI_USER_XATTR) && + !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) { + LCONSOLE_INFO("Disabling user_xattr feature because " + "it is not supported on the server\n"); + sbi->ll_flags &= ~LL_SBI_USER_XATTR; + } - if (data->ocd_connect_flags & OBD_CONNECT_ACL) { -#ifdef MS_POSIXACL - sb->s_flags |= MS_POSIXACL; + if (data->ocd_connect_flags & OBD_CONNECT_ACL) { +#ifdef SB_POSIXACL + sb->s_flags |= SB_POSIXACL; #endif - sbi->ll_flags |= LL_SBI_ACL; - } else { - LCONSOLE_INFO("client wants to enable acl, but mdt not!\n"); -#ifdef MS_POSIXACL - sb->s_flags &= ~MS_POSIXACL; + sbi->ll_flags |= LL_SBI_ACL; + } else { + LCONSOLE_INFO("client wants to enable acl, but mdt not!\n"); +#ifdef SB_POSIXACL + sb->s_flags &= ~SB_POSIXACL; #endif - sbi->ll_flags &= ~LL_SBI_ACL; - } + sbi->ll_flags &= ~LL_SBI_ACL; + } - if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH) - sbi->ll_flags |= LL_SBI_64BIT_HASH; + if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH) + sbi->ll_flags |= LL_SBI_64BIT_HASH; if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) sbi->ll_flags |= LL_SBI_LAYOUT_LOCK; @@ -385,6 +485,14 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, if (obd_connect_has_secctx(data)) sbi->ll_flags |= LL_SBI_FILE_SECCTX; + if (ll_sbi_has_encrypt(sbi) && !obd_connect_has_enc(data)) { + if (ll_sbi_has_test_dummy_encryption(sbi)) + LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n", + sbi->ll_fsname, + sbi->ll_md_exp->exp_obd->obd_name); + ll_sbi_set_encrypt(sbi, false); + } + if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) { if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) { LCONSOLE_INFO("%s: disabling xattr cache due to " @@ -421,24 +529,9 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK | OBD_CONNECT_BULK_MBITS | OBD_CONNECT_SHORTIO | OBD_CONNECT_FLAGS2 | OBD_CONNECT_GRANT_SHRINK; - -/* The client currently advertises support for OBD_CONNECT_LOCKAHEAD_OLD so it - * can interoperate with an older version of lockahead which was released prior - * to landing in master. This support will be dropped when 2.13 development - * starts. At the point, we should not just drop the connect flag (below), we - * should also remove the support in the code. - * - * Removing it means a few things: - * 1. Remove this section here - * 2. Remove CEF_NONBLOCK in ll_file_lockahead() - * 3. Remove function exp_connect_lockahead_old - * 4. 
Remove LDLM_FL_LOCKAHEAD_OLD_RESERVED in lustre_dlm_flags.h - * */ -#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 12, 50, 0) - data->ocd_connect_flags |= OBD_CONNECT_LOCKAHEAD_OLD; -#endif - - data->ocd_connect_flags2 = OBD_CONNECT2_LOCKAHEAD; + data->ocd_connect_flags2 = OBD_CONNECT2_LOCKAHEAD | + OBD_CONNECT2_INC_XID | OBD_CONNECT2_LSEEK | + OBD_CONNECT2_REP_MBITS; if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_GRANT_PARAM)) data->ocd_connect_flags |= OBD_CONNECT_GRANT_PARAM; @@ -462,6 +555,9 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, if (sbi->ll_flags & LL_SBI_ALWAYS_PING) data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS; + if (ll_sbi_has_encrypt(sbi)) + obd_connect_set_enc(data); + CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d " "ocd_grant: %d\n", data->ocd_connect_flags, data->ocd_version, data->ocd_grant); @@ -485,12 +581,22 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, GOTO(out_md, err); } + if (ll_sbi_has_encrypt(sbi) && + !obd_connect_has_enc(&sbi->ll_dt_obd->u.lov.lov_ocd)) { + if (ll_sbi_has_test_dummy_encryption(sbi)) + LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n", + sbi->ll_fsname, dt); + ll_sbi_set_encrypt(sbi, false); + } else if (ll_sbi_has_test_dummy_encryption(sbi)) { + LCONSOLE_WARN("Test dummy encryption mode enabled\n"); + } + sbi->ll_dt_exp->exp_connect_data = *data; /* Don't change value if it was specified in the config log */ if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages == -1) { sbi->ll_ra_info.ra_max_read_ahead_whole_pages = - max_t(unsigned long, SBI_DEFAULT_READAHEAD_WHOLE_MAX, + max_t(unsigned long, SBI_DEFAULT_READ_AHEAD_WHOLE_MAX, (data->ocd_brw_size >> PAGE_SHIFT)); if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages > sbi->ll_ra_info.ra_max_pages_per_file) @@ -528,12 +634,13 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid)); sb->s_op = &lustre_super_operations; -#ifdef HAVE_XATTR_HANDLER_FLAGS sb->s_xattr = ll_xattr_handlers; -#endif #if THREAD_SIZE >= 8192 /*b=17630*/ sb->s_export_op = &lustre_export_operations; #endif +#ifdef HAVE_LUSTRE_CRYPTO + llcrypt_set_ops(sb, &lustre_cryptops); +#endif /* make root inode * XXX: move this to after cbd setup? */ @@ -558,8 +665,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, GOTO(out_lock_cn_cb, err); } - err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp, - sbi->ll_md_exp, &lmd); + err = md_get_lustre_md(sbi->ll_md_exp, &request->rq_pill, + sbi->ll_dt_exp, sbi->ll_md_exp, &lmd); if (err) { CERROR("failed to understand root inode md: rc = %d\n", err); ptlrpc_req_finished(request); @@ -574,15 +681,11 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, ptlrpc_req_finished(request); if (IS_ERR(root)) { -#ifdef CONFIG_FS_POSIX_ACL - if (lmd.posix_acl) { - posix_acl_release(lmd.posix_acl); - lmd.posix_acl = NULL; - } -#endif + lmd_clear_acl(&lmd); err = IS_ERR(root) ? 
PTR_ERR(root) : -EBADF; root = NULL; - CERROR("lustre_lite: bad iget4 for root\n"); + CERROR("%s: bad ll_iget() for root: rc = %d\n", + sbi->ll_fsname, err); GOTO(out_root, err); } @@ -606,9 +709,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, sbi->ll_fsname, err); GOTO(out_root, err); } -#ifdef HAVE_DCACHE_LOCK - sb->s_root->d_op = &ll_d_ops; -#endif sbi->ll_sdev_orig = sb->s_dev; @@ -650,8 +750,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, RETURN(err); out_root: - if (root) - iput(root); + iput(root); out_lock_cn_cb: obd_fid_fini(sbi->ll_dt_exp->exp_obd); out_dt: @@ -773,8 +872,8 @@ void ll_kill_super(struct super_block *sb) struct ll_sb_info *sbi; ENTRY; - /* not init sb ?*/ - if (!(sb->s_flags & MS_ACTIVE)) + /* not init sb ?*/ + if (!(sb->s_flags & SB_ACTIVE)) return; sbi = ll_s2sbi(sb); @@ -783,13 +882,11 @@ void ll_kill_super(struct super_block *sb) * put_super not affected real removing devices */ if (sbi) { sb->s_dev = sbi->ll_sdev_orig; - sbi->ll_umounting = 1; /* wait running statahead threads to quit */ - while (atomic_read(&sbi->ll_sa_running) > 0) { - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC >> 3)); - } + while (atomic_read(&sbi->ll_sa_running) > 0) + schedule_timeout_uninterruptible( + cfs_time_seconds(1) >> 3); } EXIT; @@ -816,38 +913,38 @@ static int ll_options(char *options, struct ll_sb_info *sbi) CDEBUG(D_CONFIG, "Parsing opts %s\n", options); - while (*s1) { - CDEBUG(D_SUPER, "next opt=%s\n", s1); - tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK); - if (tmp) { - *flags |= tmp; - goto next; - } - tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK); - if (tmp) { - *flags |= tmp; - goto next; - } - tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK); - if (tmp) { - *flags |= tmp; - goto next; - } - tmp = ll_set_opt("noflock", s1, LL_SBI_FLOCK|LL_SBI_LOCALFLOCK); - if (tmp) { - *flags &= ~tmp; - goto next; - } - tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR); - if (tmp) { - *flags |= tmp; - goto next; - } - tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR); - if (tmp) { - *flags &= ~tmp; - goto next; - } + while (*s1) { + CDEBUG(D_SUPER, "next opt=%s\n", s1); + tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK); + if (tmp) { + *flags |= tmp; + goto next; + } + tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK); + if (tmp) { + *flags = (*flags & ~LL_SBI_LOCALFLOCK) | tmp; + goto next; + } + tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK); + if (tmp) { + *flags = (*flags & ~LL_SBI_FLOCK) | tmp; + goto next; + } + tmp = ll_set_opt("noflock", s1, LL_SBI_FLOCK|LL_SBI_LOCALFLOCK); + if (tmp) { + *flags &= ~tmp; + goto next; + } + tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR); + if (tmp) { + *flags |= tmp; + goto next; + } + tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR); + if (tmp) { + *flags &= ~tmp; + goto next; + } tmp = ll_set_opt("context", s1, 1); if (tmp) goto next; @@ -923,6 +1020,77 @@ static int ll_options(char *options, struct ll_sb_info *sbi) *flags |= tmp; goto next; } + tmp = ll_set_opt("test_dummy_encryption", s1, + LL_SBI_TEST_DUMMY_ENCRYPTION); + if (tmp) { +#ifdef HAVE_LUSTRE_CRYPTO + *flags |= tmp; +#else + LCONSOLE_WARN("Test dummy encryption mount option ignored: encryption not supported\n"); +#endif + goto next; + } + tmp = ll_set_opt("noencrypt", s1, LL_SBI_ENCRYPT); + if (tmp) { +#ifdef HAVE_LUSTRE_CRYPTO + *flags &= ~tmp; +#else + LCONSOLE_WARN("noencrypt mount option ignored: encryption not supported\n"); +#endif + 
goto next; + } + tmp = ll_set_opt("foreign_symlink", s1, LL_SBI_FOREIGN_SYMLINK); + if (tmp) { + int prefix_pos = sizeof("foreign_symlink=") - 1; + int equal_pos = sizeof("foreign_symlink=") - 2; + + /* non-default prefix provided ? */ + if (strlen(s1) >= sizeof("foreign_symlink=") && + *(s1 + equal_pos) == '=') { + char *old = sbi->ll_foreign_symlink_prefix; + size_t old_len = + sbi->ll_foreign_symlink_prefix_size; + + /* path must be absolute */ + if (*(s1 + sizeof("foreign_symlink=") + - 1) != '/') { + LCONSOLE_ERROR_MSG(0x152, + "foreign prefix '%s' must be an absolute path\n", + s1 + prefix_pos); + RETURN(-EINVAL); + } + /* last option ? */ + s2 = strchrnul(s1 + prefix_pos, ','); + + if (sbi->ll_foreign_symlink_prefix) { + sbi->ll_foreign_symlink_prefix = NULL; + sbi->ll_foreign_symlink_prefix_size = 0; + } + /* alloc for path length and '\0' */ + OBD_ALLOC(sbi->ll_foreign_symlink_prefix, + s2 - (s1 + prefix_pos) + 1); + if (!sbi->ll_foreign_symlink_prefix) { + /* restore previous */ + sbi->ll_foreign_symlink_prefix = old; + sbi->ll_foreign_symlink_prefix_size = + old_len; + RETURN(-ENOMEM); + } + if (old) + OBD_FREE(old, old_len); + strncpy(sbi->ll_foreign_symlink_prefix, + s1 + prefix_pos, + s2 - (s1 + prefix_pos)); + sbi->ll_foreign_symlink_prefix_size = + s2 - (s1 + prefix_pos) + 1; + } else { + LCONSOLE_ERROR_MSG(0x152, + "invalid %s option\n", s1); + } + /* enable foreign symlink support */ + *flags |= tmp; + goto next; + } LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n", s1); RETURN(-EINVAL); @@ -962,18 +1130,17 @@ void ll_lli_init(struct ll_inode_info *lli) LASSERT(lli->lli_vfs_inode.i_mode != 0); if (S_ISDIR(lli->lli_vfs_inode.i_mode)) { - mutex_init(&lli->lli_readdir_mutex); lli->lli_opendir_key = NULL; lli->lli_sai = NULL; spin_lock_init(&lli->lli_sa_lock); lli->lli_opendir_pid = 0; lli->lli_sa_enabled = 0; - lli->lli_def_stripe_offset = -1; init_rwsem(&lli->lli_lsm_sem); } else { mutex_init(&lli->lli_size_mutex); + mutex_init(&lli->lli_setattr_mutex); lli->lli_symlink_name = NULL; - init_rwsem(&lli->lli_trunc_sem); + ll_trunc_sem_init(&lli->lli_trunc_sem); range_lock_tree_init(&lli->lli_write_tree); init_rwsem(&lli->lli_glimpse_sem); lli->lli_glimpse_time = ktime_set(0, 0); @@ -986,6 +1153,11 @@ void ll_lli_init(struct ll_inode_info *lli) mutex_init(&lli->lli_pcc_lock); lli->lli_pcc_state = PCC_STATE_FL_NONE; lli->lli_pcc_inode = NULL; + lli->lli_pcc_dsflags = PCC_DATASET_INVALID; + lli->lli_pcc_generation = 0; + mutex_init(&lli->lli_group_mutex); + lli->lli_group_users = 0; + lli->lli_group_gid = 0; } mutex_init(&lli->lli_layout_mutex); memset(lli->lli_jobid, 0, sizeof(lli->lli_jobid)); @@ -1027,7 +1199,7 @@ static int super_setup_bdi_name(struct super_block *sb, char *fmt, ...) 
} #endif /* !HAVE_SUPER_SETUP_BDI_NAME */ -int ll_fill_super(struct super_block *sb, struct vfsmount *mnt) +int ll_fill_super(struct super_block *sb) { struct lustre_profile *lprof = NULL; struct lustre_sb_info *lsi = s2lsi(sb); @@ -1036,7 +1208,7 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt) char *profilenm = get_profile_name(sb); struct config_llog_instance *cfg; /* %p for void* in printf needs 16+2 characters: 0xffffffffffffffff */ - const int instlen = 16 + 2; + const int instlen = LUSTRE_MAXINSTANCE + 2; unsigned long cfg_instance = ll_get_cfg_instance(sb); char name[MAX_STRING_SIZE]; int md_len = 0; @@ -1051,7 +1223,7 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt) CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n", profilenm, cfg_instance, sb); - try_module_get(THIS_MODULE); + OBD_RACE(OBD_FAIL_LLITE_RACE_MOUNT); OBD_ALLOC_PTR(cfg); if (cfg == NULL) @@ -1059,20 +1231,19 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt) /* client additional sb info */ lsi->lsi_llsbi = sbi = ll_init_sbi(); - if (!sbi) - GOTO(out_free_cfg, err = -ENOMEM); + if (IS_ERR(sbi)) + GOTO(out_free_cfg, err = PTR_ERR(sbi)); err = ll_options(lsi->lsi_lmd->lmd_opts, sbi); if (err) GOTO(out_free_cfg, err); -#ifndef HAVE_DCACHE_LOCK /* kernel >= 2.6.38 store dentry operations in sb->s_d_op. */ sb->s_d_op = &ll_d_ops; -#endif + /* UUID handling */ generate_random_uuid(uuid.b); - snprintf(sbi->ll_sb_uuid.uuid, UUID_SIZE, "%pU", uuid.b); + snprintf(sbi->ll_sb_uuid.uuid, sizeof(sbi->ll_sb_uuid), "%pU", uuid.b); CDEBUG(D_CONFIG, "llite sb uuid: %s\n", sbi->ll_sb_uuid.uuid); @@ -1149,7 +1320,7 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt) snprintf(md, md_len - 1, "%s-%016lx", lprof->lp_md, cfg_instance); /* connections, registrations, sb setup */ - err = client_common_fill_super(sb, md, dt, mnt); + err = client_common_fill_super(sb, md, dt); if (err < 0) GOTO(out_free_md, err); @@ -1190,7 +1361,7 @@ void ll_put_super(struct super_block *sb) int next, force = 1, rc = 0; ENTRY; - if (!sbi) + if (IS_ERR(sbi)) GOTO(out_no_sbi, 0); /* Should replace instance_id with something better for ASLR */ @@ -1203,49 +1374,48 @@ void ll_put_super(struct super_block *sb) params_cfg.cfg_instance = cfg_instance; lustre_end_log(sb, PARAMS_FILENAME, ¶ms_cfg); - if (sbi->ll_md_exp) { - obd = class_exp2obd(sbi->ll_md_exp); - if (obd) - force = obd->obd_force; - } + if (sbi->ll_md_exp) { + obd = class_exp2obd(sbi->ll_md_exp); + if (obd) + force = obd->obd_force; + } /* Wait for unstable pages to be committed to stable storage */ if (force == 0) { - struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL); - rc = l_wait_event(sbi->ll_cache->ccc_unstable_waitq, - atomic_long_read(&sbi->ll_cache->ccc_unstable_nr) == 0, - &lwi); + rc = l_wait_event_abortable( + sbi->ll_cache->ccc_unstable_waitq, + atomic_long_read(&sbi->ll_cache->ccc_unstable_nr) == 0); } ccc_count = atomic_long_read(&sbi->ll_cache->ccc_unstable_nr); - if (force == 0 && rc != -EINTR) + if (force == 0 && rc != -ERESTARTSYS) LASSERTF(ccc_count == 0, "count: %li\n", ccc_count); - /* We need to set force before the lov_disconnect in - lustre_common_put_super, since l_d cleans up osc's as well. */ - if (force) { - next = 0; - while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, - &next)) != NULL) { - obd->obd_force = force; - } - } + /* We need to set force before the lov_disconnect in + * lustre_common_put_super, since l_d cleans up osc's as well. 
+ */ + if (force) { + next = 0; + while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, + &next)) != NULL) { + obd->obd_force = force; + } + } if (sbi->ll_client_common_fill_super_succeeded) { /* Only if client_common_fill_super succeeded */ client_common_put_super(sb); } - next = 0; - while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)) !=NULL) { - class_manual_cleanup(obd); - } + next = 0; + while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next))) + class_manual_cleanup(obd); - if (sbi->ll_flags & LL_SBI_VERBOSE) - LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : ""); + if (sbi->ll_flags & LL_SBI_VERBOSE) + LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : ""); - if (profilenm) - class_del_profile(profilenm); + if (profilenm) + class_del_profile(profilenm); #ifndef HAVE_SUPER_SETUP_BDI_NAME if (lsi->lsi_flags & LSI_BDI_INITIALIZED) { @@ -1254,15 +1424,13 @@ void ll_put_super(struct super_block *sb) } #endif - ll_free_sbi(sb); - lsi->lsi_llsbi = NULL; + ll_free_sbi(sb); + lsi->lsi_llsbi = NULL; out_no_sbi: lustre_common_put_super(sb); cl_env_cache_purge(~0); - module_put(THIS_MODULE); - EXIT; } /* client_put_super */ @@ -1297,22 +1465,30 @@ void ll_dir_clear_lsm_md(struct inode *inode) LASSERT(S_ISDIR(inode->i_mode)); - if (lli->lli_lsm_md != NULL) { + if (lli->lli_lsm_md) { lmv_free_memmd(lli->lli_lsm_md); lli->lli_lsm_md = NULL; } + + if (lli->lli_default_lsm_md) { + lmv_free_memmd(lli->lli_default_lsm_md); + lli->lli_default_lsm_md = NULL; + } } static struct inode *ll_iget_anon_dir(struct super_block *sb, const struct lu_fid *fid, struct lustre_md *md) { - struct ll_sb_info *sbi = ll_s2sbi(sb); - struct mdt_body *body = md->body; - struct inode *inode; - ino_t ino; + struct ll_sb_info *sbi = ll_s2sbi(sb); + struct ll_inode_info *lli; + struct mdt_body *body = md->body; + struct inode *inode; + ino_t ino; + ENTRY; + LASSERT(md->lmv); ino = cl_fid_build_ino(fid, sbi->ll_flags & LL_SBI_32BIT_API); inode = iget_locked(sb, ino); if (inode == NULL) { @@ -1321,10 +1497,8 @@ static struct inode *ll_iget_anon_dir(struct super_block *sb, RETURN(ERR_PTR(-ENOENT)); } + lli = ll_i2info(inode); if (inode->i_state & I_NEW) { - struct ll_inode_info *lli = ll_i2info(inode); - struct lmv_stripe_md *lsm = md->lmv; - inode->i_mode = (inode->i_mode & ~S_IFMT) | (body->mbo_mode & S_IFMT); LASSERTF(S_ISDIR(inode->i_mode), "Not slave inode "DFID"\n", @@ -1345,12 +1519,17 @@ static struct inode *ll_iget_anon_dir(struct super_block *sb, lli->lli_fid = *fid; ll_lli_init(lli); - LASSERT(lsm != NULL); /* master object FID */ lli->lli_pfid = body->mbo_fid1; CDEBUG(D_INODE, "lli %p slave "DFID" master "DFID"\n", lli, PFID(fid), PFID(&lli->lli_pfid)); unlock_new_inode(inode); + } else { + /* in directory restripe/auto-split, a directory will be + * transformed to a stripe if it's plain, set its pfid here, + * otherwise ll_lock_cancel_bits() can't find the master inode. + */ + lli->lli_pfid = body->mbo_fid1; } RETURN(inode); @@ -1369,6 +1548,9 @@ static int ll_init_lsm_md(struct inode *inode, struct lustre_md *md) ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid)); lsm_md_dump(D_INODE, lsm); + if (!lmv_dir_striped(lsm)) + goto out; + /* XXX sigh, this lsm_root initialization should be in * LMV layer, but it needs ll_iget right now, so we * put this here right now. 
*/ @@ -1396,16 +1578,54 @@ static int ll_init_lsm_md(struct inode *inode, struct lustre_md *md) return rc; } } - +out: lli->lli_lsm_md = lsm; return 0; } +static void ll_update_default_lsm_md(struct inode *inode, struct lustre_md *md) +{ + struct ll_inode_info *lli = ll_i2info(inode); + + if (!md->default_lmv) { + /* clear default lsm */ + if (lli->lli_default_lsm_md) { + down_write(&lli->lli_lsm_sem); + if (lli->lli_default_lsm_md) { + lmv_free_memmd(lli->lli_default_lsm_md); + lli->lli_default_lsm_md = NULL; + } + up_write(&lli->lli_lsm_sem); + } + return; + } + + if (lli->lli_default_lsm_md) { + /* do nonthing if default lsm isn't changed */ + down_read(&lli->lli_lsm_sem); + if (lli->lli_default_lsm_md && + lsm_md_eq(lli->lli_default_lsm_md, md->default_lmv)) { + up_read(&lli->lli_lsm_sem); + return; + } + up_read(&lli->lli_lsm_sem); + } + + down_write(&lli->lli_lsm_sem); + if (lli->lli_default_lsm_md) + lmv_free_memmd(lli->lli_default_lsm_md); + lli->lli_default_lsm_md = md->default_lmv; + lsm_md_dump(D_INODE, md->default_lmv); + md->default_lmv = NULL; + up_write(&lli->lli_lsm_sem); +} + static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md) { struct ll_inode_info *lli = ll_i2info(inode); struct lmv_stripe_md *lsm = md->lmv; + struct cl_attr *attr; int rc = 0; ENTRY; @@ -1414,6 +1634,16 @@ static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md) CDEBUG(D_INODE, "update lsm %p of "DFID"\n", lli->lli_lsm_md, PFID(ll_inode2fid(inode))); + /* update default LMV */ + if (md->default_lmv) + ll_update_default_lsm_md(inode, md); + + /* after dir migration/restripe, a stripe may be turned into a + * directory, in this case, zero out its lli_pfid. + */ + if (unlikely(fid_is_norm(&lli->lli_pfid))) + fid_zero(&lli->lli_pfid); + /* * no striped information from request, lustre_md from req does not * include stripeEA, see ll_md_setattr() @@ -1425,76 +1655,63 @@ static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md) * normally dir layout doesn't change, only take read lock to check * that to avoid blocking other MD operations. */ - if (lli->lli_lsm_md) - down_read(&lli->lli_lsm_sem); - else - down_write(&lli->lli_lsm_sem); + down_read(&lli->lli_lsm_sem); - /* - * if dir layout mismatch, check whether version is increased, which - * means layout is changed, this happens in dir migration and lfsck. + /* some current lookup initialized lsm, and unchanged */ + if (lli->lli_lsm_md && lsm_md_eq(lli->lli_lsm_md, lsm)) + GOTO(unlock, rc = 0); + + /* if dir layout doesn't match, check whether version is increased, + * which means layout is changed, this happens in dir split/merge and + * lfsck. * * foreign LMV should not change. 
*/ - if (lli->lli_lsm_md && - lli->lli_lsm_md->lsm_md_magic != LMV_MAGIC_FOREIGN && - !lsm_md_eq(lli->lli_lsm_md, lsm)) { - if (lsm->lsm_md_layout_version <= - lli->lli_lsm_md->lsm_md_layout_version) { - CERROR("%s: "DFID" dir layout mismatch:\n", - ll_i2sbi(inode)->ll_fsname, - PFID(&lli->lli_fid)); - lsm_md_dump(D_ERROR, lli->lli_lsm_md); - lsm_md_dump(D_ERROR, lsm); - GOTO(unlock, rc = -EINVAL); - } + if (lli->lli_lsm_md && lmv_dir_striped(lli->lli_lsm_md) && + lsm->lsm_md_layout_version <= + lli->lli_lsm_md->lsm_md_layout_version) { + CERROR("%s: "DFID" dir layout mismatch:\n", + ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid)); + lsm_md_dump(D_ERROR, lli->lli_lsm_md); + lsm_md_dump(D_ERROR, lsm); + GOTO(unlock, rc = -EINVAL); + } - /* layout changed, switch to write lock */ - up_read(&lli->lli_lsm_sem); - down_write(&lli->lli_lsm_sem); - ll_dir_clear_lsm_md(inode); + up_read(&lli->lli_lsm_sem); + down_write(&lli->lli_lsm_sem); + /* clear existing lsm */ + if (lli->lli_lsm_md) { + lmv_free_memmd(lli->lli_lsm_md); + lli->lli_lsm_md = NULL; } - /* set directory layout */ - if (!lli->lli_lsm_md) { - struct cl_attr *attr; + rc = ll_init_lsm_md(inode, md); + up_write(&lli->lli_lsm_sem); - if (lsm->lsm_md_magic == LMV_MAGIC_FOREIGN) { - /* set md->lmv to NULL, so the following free lustre_md - * will not free this lsm */ - md->lmv = NULL; - lli->lli_lsm_md = lsm; - up_write(&lli->lli_lsm_sem); - RETURN(0); - } + if (rc) + RETURN(rc); - rc = ll_init_lsm_md(inode, md); - up_write(&lli->lli_lsm_sem); - if (rc != 0) - RETURN(rc); + /* set md->lmv to NULL, so the following free lustre_md will not free + * this lsm. + */ + md->lmv = NULL; - /* set md->lmv to NULL, so the following free lustre_md - * will not free this lsm */ - md->lmv = NULL; + /* md_merge_attr() may take long, since lsm is already set, switch to + * read lock. + */ + down_read(&lli->lli_lsm_sem); - /* - * md_merge_attr() may take long, since lsm is already set, - * switch to read lock. 
- */ - down_read(&lli->lli_lsm_sem); + if (!lmv_dir_striped(lli->lli_lsm_md)) + GOTO(unlock, rc = 0); - OBD_ALLOC_PTR(attr); - if (attr == NULL) - GOTO(unlock, rc = -ENOMEM); - - /* validate the lsm */ - rc = md_merge_attr(ll_i2mdexp(inode), lsm, attr, - ll_md_blocking_ast); - if (rc != 0) { - OBD_FREE_PTR(attr); - GOTO(unlock, rc); - } + OBD_ALLOC_PTR(attr); + if (!attr) + GOTO(unlock, rc = -ENOMEM); + /* validate the lsm */ + rc = md_merge_attr(ll_i2mdexp(inode), lli->lli_lsm_md, attr, + ll_md_blocking_ast); + if (!rc) { if (md->body->mbo_valid & OBD_MD_FLNLINK) md->body->mbo_nlink = attr->cat_nlink; if (md->body->mbo_valid & OBD_MD_FLSIZE) @@ -1505,13 +1722,14 @@ static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md) md->body->mbo_ctime = attr->cat_ctime; if (md->body->mbo_valid & OBD_MD_FLMTIME) md->body->mbo_mtime = attr->cat_mtime; - - OBD_FREE_PTR(attr); } + + OBD_FREE_PTR(attr); + GOTO(unlock, rc); unlock: up_read(&lli->lli_lsm_sem); - RETURN(rc); + return rc; } void ll_clear_inode(struct inode *inode) @@ -1554,13 +1772,8 @@ void ll_clear_inode(struct inode *inode) ll_xattr_cache_destroy(inode); -#ifdef CONFIG_FS_POSIX_ACL forget_all_cached_acls(inode); - if (lli->lli_posix_acl) { - posix_acl_release(lli->lli_posix_acl); - lli->lli_posix_acl = NULL; - } -#endif + lli_clear_acl(lli); lli->lli_inode_magic = LLI_INODE_DEAD; if (S_ISDIR(inode->i_mode)) @@ -1574,6 +1787,8 @@ void ll_clear_inode(struct inode *inode) */ cl_inode_fini(inode); + llcrypt_put_encryption_info(inode); + EXIT; } @@ -1611,8 +1826,8 @@ static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data) RETURN(rc); } - rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp, - sbi->ll_md_exp, &md); + rc = md_get_lustre_md(sbi->ll_md_exp, &request->rq_pill, sbi->ll_dt_exp, + sbi->ll_md_exp, &md); if (rc) { ptlrpc_req_finished(request); RETURN(rc); @@ -1635,6 +1850,162 @@ static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data) RETURN(rc); } +/** + * Zero portion of page that is part of @inode. 
+ * This implies, if necessary: + * - taking cl_lock on range corresponding to concerned page + * - grabbing vm page + * - associating cl_page + * - proceeding to clio read + * - zeroing range in page + * - proceeding to cl_page flush + * - releasing cl_lock + * + * \param[in] inode inode + * \param[in] index page index + * \param[in] offset offset in page to start zero from + * \param[in] len len to zero + * + * \retval 0 on success + * \retval negative errno on failure + */ +int ll_io_zero_page(struct inode *inode, pgoff_t index, pgoff_t offset, + unsigned len) +{ + struct ll_inode_info *lli = ll_i2info(inode); + struct cl_object *clob = lli->lli_clob; + __u16 refcheck; + struct lu_env *env = NULL; + struct cl_io *io = NULL; + struct cl_page *clpage = NULL; + struct page *vmpage = NULL; + unsigned from = index << PAGE_SHIFT; + struct cl_lock *lock = NULL; + struct cl_lock_descr *descr = NULL; + struct cl_2queue *queue = NULL; + struct cl_sync_io *anchor = NULL; + bool holdinglock = false; + bool lockedbymyself = true; + int rc; + + ENTRY; + + env = cl_env_get(&refcheck); + if (IS_ERR(env)) + RETURN(PTR_ERR(env)); + + io = vvp_env_thread_io(env); + io->ci_obj = clob; + rc = cl_io_rw_init(env, io, CIT_WRITE, from, PAGE_SIZE); + if (rc) + GOTO(putenv, rc); + + lock = vvp_env_lock(env); + descr = &lock->cll_descr; + descr->cld_obj = io->ci_obj; + descr->cld_start = cl_index(io->ci_obj, from); + descr->cld_end = cl_index(io->ci_obj, from + PAGE_SIZE - 1); + descr->cld_mode = CLM_WRITE; + descr->cld_enq_flags = CEF_MUST | CEF_NONBLOCK; + + /* request lock for page */ + rc = cl_lock_request(env, io, lock); + /* -ECANCELED indicates a matching lock with a different extent + * was already present, and -EEXIST indicates a matching lock + * on exactly the same extent was already present. + * In both cases it means we are covered. + */ + if (rc == -ECANCELED || rc == -EEXIST) + rc = 0; + else if (rc < 0) + GOTO(iofini, rc); + else + holdinglock = true; + + /* grab page */ + vmpage = grab_cache_page_nowait(inode->i_mapping, index); + if (vmpage == NULL) + GOTO(rellock, rc = -EOPNOTSUPP); + + if (!PageDirty(vmpage)) { + /* associate cl_page */ + clpage = cl_page_find(env, clob, vmpage->index, + vmpage, CPT_CACHEABLE); + if (IS_ERR(clpage)) + GOTO(pagefini, rc = PTR_ERR(clpage)); + + cl_page_assume(env, io, clpage); + } + + if (!PageUptodate(vmpage) && !PageDirty(vmpage) && + !PageWriteback(vmpage)) { + /* read page */ + /* set PagePrivate2 to detect special case of empty page + * in osc_brw_fini_request() + */ + SetPagePrivate2(vmpage); + rc = ll_io_read_page(env, io, clpage, NULL); + if (!PagePrivate2(vmpage)) + /* PagePrivate2 was cleared in osc_brw_fini_request() + * meaning we read an empty page. In this case, in order + * to avoid allocating unnecessary block in truncated + * file, we must not zero and write as below. Subsequent + * server-side truncate will handle things correctly. 
+ */ + GOTO(clpfini, rc = 0); + ClearPagePrivate2(vmpage); + if (rc) + GOTO(clpfini, rc); + lockedbymyself = trylock_page(vmpage); + cl_page_assume(env, io, clpage); + } + + /* zero range in page */ + zero_user(vmpage, offset, len); + + if (holdinglock && clpage) { + /* explicitly write newly modified page */ + queue = &io->ci_queue; + cl_2queue_init(queue); + anchor = &vvp_env_info(env)->vti_anchor; + cl_sync_io_init(anchor, 1); + clpage->cp_sync_io = anchor; + cl_2queue_add(queue, clpage); + rc = cl_io_submit_rw(env, io, CRT_WRITE, queue); + if (rc) + GOTO(queuefini1, rc); + rc = cl_sync_io_wait(env, anchor, 0); + if (rc) + GOTO(queuefini2, rc); + cl_page_assume(env, io, clpage); + +queuefini2: + cl_2queue_discard(env, io, queue); +queuefini1: + cl_2queue_disown(env, io, queue); + cl_2queue_fini(env, queue); + } + +clpfini: + if (clpage) + cl_page_put(env, clpage); +pagefini: + if (lockedbymyself) { + unlock_page(vmpage); + put_page(vmpage); + } +rellock: + if (holdinglock) + cl_lock_release(env, lock); +iofini: + cl_io_fini(env, io); +putenv: + if (env) + cl_env_put(env, &refcheck); + + RETURN(rc); +} + /* If this inode has objects allocated to it (lsm != NULL), then the OST * object(s) determine the file size and mtime. Otherwise, the MDS will * keep these values until such a time that objects are allocated for it. @@ -1656,6 +2027,7 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, struct inode *inode = dentry->d_inode; struct ll_inode_info *lli = ll_i2info(inode); struct md_op_data *op_data = NULL; + ktime_t kstart = ktime_get(); int rc = 0; ENTRY; @@ -1688,7 +2060,7 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, /* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */ if (attr->ia_valid & TIMES_SET_FLAGS) { if ((!uid_eq(current_fsuid(), inode->i_uid)) && - !cfs_capable(CFS_CAP_FOWNER)) + !capable(CAP_FOWNER)) RETURN(-EPERM); } @@ -1714,11 +2086,8 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, (s64)attr->ia_mtime.tv_sec, (s64)attr->ia_ctime.tv_sec, ktime_get_real_seconds()); - if (S_ISREG(inode->i_mode)) { - if (attr->ia_valid & ATTR_SIZE) - inode_dio_write_done(inode); + if (S_ISREG(inode->i_mode)) inode_unlock(inode); - } /* We always do an MDS RPC, even if we're only changing the size; * only the MDS knows whether truncate() should fail with -ETXTBUSY */ @@ -1733,11 +2102,11 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, */ xvalid |= OP_XVALID_OWNEROVERRIDE; op_data->op_bias |= MDS_DATA_MODIFIED; - ll_file_clear_flag(lli, LLIF_DATA_MODIFIED); + clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags); } if (attr->ia_valid & ATTR_FILE) { - struct ll_file_data *fd = LUSTRE_FPRIVATE(attr->ia_file); + struct ll_file_data *fd = attr->ia_file->private_data; if (fd->fd_lease_och) op_data->op_bias |= MDS_TRUNC_KEEP_LEASE; @@ -1768,6 +2137,8 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, GOTO(out, rc); } } else { + unsigned int flags = 0; + /* For truncate and utimes sending attributes to OSTs, * setting mtime/atime to the past will be performed * under PW [0:EOF] extent lock (new_size:EOF for @@ -1776,7 +2147,30 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, * it is necessary due to possible time * de-synchronization between MDT inode and OST objects */ - rc = cl_setattr_ost(lli->lli_clob, attr, xvalid, 0); + if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) { + xvalid |= OP_XVALID_FLAGS; + flags = LUSTRE_ENCRYPT_FL; + /* Call to ll_io_zero_page is not necessary if + * truncating on 
PAGE_SIZE boundary, because + * whole pages will be wiped. + * In case of Direct IO, all we need is to set + * new size. + */ + if (attr->ia_valid & ATTR_SIZE && + attr->ia_size & ~PAGE_MASK && + !(attr->ia_valid & ATTR_FILE && + attr->ia_file->f_flags & O_DIRECT)) { + pgoff_t offset = + attr->ia_size & (PAGE_SIZE - 1); + + rc = ll_io_zero_page(inode, + attr->ia_size >> PAGE_SHIFT, + offset, PAGE_SIZE - offset); + if (rc) + GOTO(out, rc); + } + } + rc = cl_setattr_ost(lli->lli_clob, attr, xvalid, flags); } } @@ -1793,7 +2187,7 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, * LLIF_DATA_MODIFIED is not set(see vvp_io_setattr_fini()). * This way we can save an RPC for common open + trunc * operation. */ - if (ll_file_test_and_clear_flag(lli, LLIF_DATA_MODIFIED)) { + if (test_and_clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags)) { struct hsm_state_set hss = { .hss_valid = HSS_SETMASK, .hss_setmask = HS_DIRTY, @@ -1828,8 +2222,10 @@ out: inode_has_no_xattr(inode); } - ll_stats_ops_tally(ll_i2sbi(inode), (attr->ia_valid & ATTR_SIZE) ? - LPROC_LL_TRUNC : LPROC_LL_SETATTR, 1); + if (!rc) + ll_stats_ops_tally(ll_i2sbi(inode), attr->ia_valid & ATTR_SIZE ? + LPROC_LL_TRUNC : LPROC_LL_SETATTR, + ktime_us_delta(ktime_get(), kstart)); return rc; } @@ -1838,6 +2234,11 @@ int ll_setattr(struct dentry *de, struct iattr *attr) { int mode = de->d_inode->i_mode; enum op_xvalid xvalid = 0; + int rc; + + rc = llcrypt_prepare_setattr(de, attr); + if (rc) + return rc; if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) == (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) @@ -1873,7 +2274,10 @@ int ll_statfs_internal(struct ll_sb_info *sbi, struct obd_statfs *osfs, int rc; ENTRY; - max_age = ktime_get_seconds() - OBD_STATFS_CACHE_SECONDS; + max_age = ktime_get_seconds() - sbi->ll_statfs_max_age; + + if (sbi->ll_flags & LL_SBI_LAZYSTATFS) + flags |= OBD_STATFS_NODELAY; rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags); if (rc) @@ -1884,12 +2288,9 @@ int ll_statfs_internal(struct ll_sb_info *sbi, struct obd_statfs *osfs, CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n", osfs->os_bavail, osfs->os_blocks, osfs->os_ffree, osfs->os_files); - if (osfs->os_state & OS_STATE_SUM) + if (osfs->os_state & OS_STATFS_SUM) GOTO(out, rc); - if (sbi->ll_flags & LL_SBI_LAZYSTATFS) - flags |= OBD_STATFS_NODELAY; - rc = obd_statfs(NULL, sbi->ll_dt_exp, &obd_osfs, max_age, flags); if (rc) /* Possibly a filesystem with no OSTs. Report MDT totals. */ GOTO(out, rc = 0); @@ -1917,15 +2318,62 @@ int ll_statfs_internal(struct ll_sb_info *sbi, struct obd_statfs *osfs, out: RETURN(rc); } + +static int ll_statfs_project(struct inode *inode, struct kstatfs *sfs) +{ + struct if_quotactl qctl = { + .qc_cmd = LUSTRE_Q_GETQUOTA, + .qc_type = PRJQUOTA, + .qc_valid = QC_GENERAL, + }; + u64 limit, curblock; + int ret; + + qctl.qc_id = ll_i2info(inode)->lli_projid; + ret = quotactl_ioctl(ll_i2sbi(inode), &qctl); + if (ret) { + /* ignore errors if project ID does not have + * a quota limit or feature unsupported. + */ + if (ret == -ESRCH || ret == -EOPNOTSUPP) + ret = 0; + return ret; + } + + limit = ((qctl.qc_dqblk.dqb_bsoftlimit ? + qctl.qc_dqblk.dqb_bsoftlimit : + qctl.qc_dqblk.dqb_bhardlimit) * 1024) / sfs->f_bsize; + if (limit && sfs->f_blocks > limit) { + curblock = (qctl.qc_dqblk.dqb_curspace + + sfs->f_bsize - 1) / sfs->f_bsize; + sfs->f_blocks = limit; + sfs->f_bfree = sfs->f_bavail = + (sfs->f_blocks > curblock) ? + (sfs->f_blocks - curblock) : 0; + } + + limit = qctl.qc_dqblk.dqb_isoftlimit ? 
+ qctl.qc_dqblk.dqb_isoftlimit : + qctl.qc_dqblk.dqb_ihardlimit; + if (limit && sfs->f_files > limit) { + sfs->f_files = limit; + sfs->f_ffree = (sfs->f_files > + qctl.qc_dqblk.dqb_curinodes) ? + (sfs->f_files - qctl.qc_dqblk.dqb_curinodes) : 0; + } + + return 0; +} + int ll_statfs(struct dentry *de, struct kstatfs *sfs) { struct super_block *sb = de->d_sb; struct obd_statfs osfs; __u64 fsid = huge_encode_dev(sb->s_dev); + ktime_t kstart = ktime_get(); int rc; - CDEBUG(D_VFSTRACE, "VFS Op: at %llu jiffies\n", get_jiffies_64()); - ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STAFS, 1); + CDEBUG(D_VFSTRACE, "VFS Op:sb=%s (%p)\n", sb->s_id, sb); /* Some amount of caching on the client is allowed */ rc = ll_statfs_internal(ll_s2sbi(sb), &osfs, OBD_STATFS_SUM); @@ -1948,11 +2396,17 @@ int ll_statfs(struct dentry *de, struct kstatfs *sfs) } } - sfs->f_blocks = osfs.os_blocks; - sfs->f_bfree = osfs.os_bfree; - sfs->f_bavail = osfs.os_bavail; + sfs->f_blocks = osfs.os_blocks; + sfs->f_bfree = osfs.os_bfree; + sfs->f_bavail = osfs.os_bavail; sfs->f_fsid.val[0] = (__u32)fsid; sfs->f_fsid.val[1] = (__u32)(fsid >> 32); + if (ll_i2info(de->d_inode)->lli_projid) + return ll_statfs_project(de->d_inode, sfs); + + ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STATFS, + ktime_us_delta(ktime_get(), kstart)); + return 0; } @@ -1974,13 +2428,15 @@ void ll_inode_size_unlock(struct inode *inode) mutex_unlock(&lli->lli_size_mutex); } -void ll_update_inode_flags(struct inode *inode, int ext_flags) +void ll_update_inode_flags(struct inode *inode, unsigned int ext_flags) { + /* do not clear encryption flag */ + ext_flags |= ll_inode_to_ext_flags(inode->i_flags) & LUSTRE_ENCRYPT_FL; inode->i_flags = ll_ext_to_inode_flags(ext_flags); if (ext_flags & LUSTRE_PROJINHERIT_FL) - ll_file_set_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT); + set_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags); else - ll_file_clear_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT); + clear_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags); } int ll_update_inode(struct inode *inode, struct lustre_md *md) @@ -2002,15 +2458,9 @@ int ll_update_inode(struct inode *inode, struct lustre_md *md) return rc; } -#ifdef CONFIG_FS_POSIX_ACL - if (body->mbo_valid & OBD_MD_FLACL) { - spin_lock(&lli->lli_lock); - if (lli->lli_posix_acl) - posix_acl_release(lli->lli_posix_acl); - lli->lli_posix_acl = md->posix_acl; - spin_unlock(&lli->lli_lock); - } -#endif + if (body->mbo_valid & OBD_MD_FLACL) + lli_replace_acl(lli, md); + inode->i_ino = cl_fid_build_ino(&body->mbo_fid1, sbi->ll_flags & LL_SBI_32BIT_API); inode->i_generation = cl_fid_build_gen(&body->mbo_fid1); @@ -2038,6 +2488,9 @@ int ll_update_inode(struct inode *inode, struct lustre_md *md) lli->lli_ctime = body->mbo_ctime; } + if (body->mbo_valid & OBD_MD_FLBTIME) + lli->lli_btime = body->mbo_btime; + /* Clear i_flags to remove S_NOSEC before permissions are updated */ if (body->mbo_valid & OBD_MD_FLFLAGS) ll_update_inode_flags(inode, body->mbo_flags); @@ -2050,12 +2503,6 @@ int ll_update_inode(struct inode *inode, struct lustre_md *md) (body->mbo_mode & S_IFMT); LASSERT(inode->i_mode != 0); - if (S_ISREG(inode->i_mode)) - inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1, - LL_MAX_BLKSIZE_BITS); - else - inode->i_blkbits = inode->i_sb->s_blocksize_bits; - if (body->mbo_valid & OBD_MD_FLUID) inode->i_uid = make_kuid(&init_user_ns, body->mbo_uid); if (body->mbo_valid & OBD_MD_FLGID) @@ -2082,6 +2529,7 @@ int ll_update_inode(struct inode *inode, struct lustre_md *md) LASSERT(fid_seq(&lli->lli_fid) != 0); + 
lli->lli_attr_valid = body->mbo_valid; if (body->mbo_valid & OBD_MD_FLSIZE) { i_size_write(inode, body->mbo_size); @@ -2091,6 +2539,11 @@ int ll_update_inode(struct inode *inode, struct lustre_md *md) if (body->mbo_valid & OBD_MD_FLBLOCKS) inode->i_blocks = body->mbo_blocks; + } else { + if (body->mbo_valid & OBD_MD_FLLAZYSIZE) + lli->lli_lazysize = body->mbo_size; + if (body->mbo_valid & OBD_MD_FLLAZYBLOCKS) + lli->lli_lazyblocks = body->mbo_blocks; } if (body->mbo_valid & OBD_MD_TSTATE) { @@ -2099,14 +2552,42 @@ int ll_update_inode(struct inode *inode, struct lustre_md *md) * glimpsing updated attrs */ if (body->mbo_t_state & MS_RESTORE) - ll_file_set_flag(lli, LLIF_FILE_RESTORING); + set_bit(LLIF_FILE_RESTORING, &lli->lli_flags); else - ll_file_clear_flag(lli, LLIF_FILE_RESTORING); + clear_bit(LLIF_FILE_RESTORING, &lli->lli_flags); } return 0; } +void ll_truncate_inode_pages_final(struct inode *inode) +{ + struct address_space *mapping = &inode->i_data; + unsigned long nrpages; + unsigned long flags; + + truncate_inode_pages_final(mapping); + + /* Workaround for LU-118: Note nrpages may not be totally updated when + * truncate_inode_pages() returns, as there can be a page in the process + * of deletion (inside __delete_from_page_cache()) in the specified + * range. Thus mapping->nrpages can be non-zero when this function + * returns even after truncation of the whole mapping. Only do this if + * npages isn't already zero. + */ + nrpages = mapping->nrpages; + if (nrpages) { + ll_xa_lock_irqsave(&mapping->i_pages, flags); + nrpages = mapping->nrpages; + ll_xa_unlock_irqrestore(&mapping->i_pages, flags); + } /* Workaround end */ + + LASSERTF(nrpages == 0, "%s: inode="DFID"(%p) nrpages=%lu, " + "see https://jira.whamcloud.com/browse/LU-118\n", + ll_i2sbi(inode)->ll_fsname, + PFID(ll_inode2fid(inode)), inode, nrpages); +} + int ll_read_inode2(struct inode *inode, void *opaque) { struct lustre_md *md = opaque; @@ -2164,8 +2645,6 @@ int ll_read_inode2(struct inode *inode, void *opaque) void ll_delete_inode(struct inode *inode) { struct ll_inode_info *lli = ll_i2info(inode); - struct address_space *mapping = &inode->i_data; - unsigned long nrpages; ENTRY; if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL) { @@ -2179,30 +2658,9 @@ void ll_delete_inode(struct inode *inode) cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, inode->i_nlink ? CL_FSYNC_LOCAL : CL_FSYNC_DISCARD, 1); } - truncate_inode_pages_final(mapping); - - /* Workaround for LU-118: Note nrpages may not be totally updated when - * truncate_inode_pages() returns, as there can be a page in the process - * of deletion (inside __delete_from_page_cache()) in the specified - * range. Thus mapping->nrpages can be non-zero when this function - * returns even after truncation of the whole mapping. Only do this if - * npages isn't already zero. 
- */ - nrpages = mapping->nrpages; - if (nrpages) { - xa_lock_irq(&mapping->i_pages); - nrpages = mapping->nrpages; - xa_unlock_irq(&mapping->i_pages); - } /* Workaround end */ - - LASSERTF(nrpages == 0, "%s: inode="DFID"(%p) nrpages=%lu, " - "see https://jira.whamcloud.com/browse/LU-118\n", - ll_i2sbi(inode)->ll_fsname, - PFID(ll_inode2fid(inode)), inode, nrpages); -#ifdef HAVE_SBOPS_EVICT_INODE + ll_truncate_inode_pages_final(inode); ll_clear_inode(inode); -#endif clear_inode(inode); EXIT; @@ -2258,7 +2716,8 @@ int ll_iocontrol(struct inode *inode, struct file *file, if (flags & LUSTRE_PROJINHERIT_FL) fa.fsx_xflags = FS_XFLAG_PROJINHERIT; - rc = ll_ioctl_check_project(inode, &fa); + rc = ll_ioctl_check_project(inode, fa.fsx_xflags, + fa.fsx_projid); if (rc) RETURN(rc); @@ -2319,8 +2778,7 @@ void ll_umount_begin(struct super_block *sb) struct ll_sb_info *sbi = ll_s2sbi(sb); struct obd_device *obd; struct obd_ioctl_data *ioc_data; - struct l_wait_info lwi; - wait_queue_head_t waitq; + int cnt; ENTRY; CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb, @@ -2335,16 +2793,16 @@ void ll_umount_begin(struct super_block *sb) } obd->obd_force = 1; - obd = class_exp2obd(sbi->ll_dt_exp); - if (obd == NULL) { + obd = class_exp2obd(sbi->ll_dt_exp); + if (obd == NULL) { CERROR("Invalid LOV connection handle %#llx\n", - sbi->ll_dt_exp->exp_handle.h_cookie); - EXIT; - return; - } - obd->obd_force = 1; + sbi->ll_dt_exp->exp_handle.h_cookie); + EXIT; + return; + } + obd->obd_force = 1; - OBD_ALLOC_PTR(ioc_data); + OBD_ALLOC_PTR(ioc_data); if (ioc_data) { obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp, sizeof *ioc_data, ioc_data, NULL); @@ -2359,44 +2817,46 @@ void ll_umount_begin(struct super_block *sb) * and then continue. For now, we just periodically checking for vfs * to decrement mnt_cnt and hope to finish it within 10sec. */ - init_waitqueue_head(&waitq); - lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(10), - cfs_time_seconds(1), NULL, NULL); - l_wait_event(waitq, may_umount(sbi->ll_mnt.mnt), &lwi); + cnt = 10; + while (cnt > 0 && + !may_umount(sbi->ll_mnt.mnt)) { + ssleep(1); + cnt -= 1; + } EXIT; } int ll_remount_fs(struct super_block *sb, int *flags, char *data) { - struct ll_sb_info *sbi = ll_s2sbi(sb); - char *profilenm = get_profile_name(sb); - int err; - __u32 read_only; - - if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) { - read_only = *flags & MS_RDONLY; - err = obd_set_info_async(NULL, sbi->ll_md_exp, - sizeof(KEY_READ_ONLY), - KEY_READ_ONLY, sizeof(read_only), - &read_only, NULL); - if (err) { - LCONSOLE_WARN("Failed to remount %s %s (%d)\n", - profilenm, read_only ? - "read-only" : "read-write", err); - return err; - } + struct ll_sb_info *sbi = ll_s2sbi(sb); + char *profilenm = get_profile_name(sb); + int err; + __u32 read_only; + + if ((*flags & MS_RDONLY) != (sb->s_flags & SB_RDONLY)) { + read_only = *flags & MS_RDONLY; + err = obd_set_info_async(NULL, sbi->ll_md_exp, + sizeof(KEY_READ_ONLY), + KEY_READ_ONLY, sizeof(read_only), + &read_only, NULL); + if (err) { + LCONSOLE_WARN("Failed to remount %s %s (%d)\n", + profilenm, read_only ? + "read-only" : "read-write", err); + return err; + } - if (read_only) - sb->s_flags |= MS_RDONLY; - else - sb->s_flags &= ~MS_RDONLY; + if (read_only) + sb->s_flags |= SB_RDONLY; + else + sb->s_flags &= ~SB_RDONLY; - if (sbi->ll_flags & LL_SBI_VERBOSE) - LCONSOLE_WARN("Remounted %s %s\n", profilenm, - read_only ? 
"read-only" : "read-write"); - } - return 0; + if (sbi->ll_flags & LL_SBI_VERBOSE) + LCONSOLE_WARN("Remounted %s %s\n", profilenm, + read_only ? "read-only" : "read-write"); + } + return 0; } /** @@ -2414,7 +2874,7 @@ int ll_remount_fs(struct super_block *sb, int *flags, char *data) * \param[in] sb super block for this file-system * \param[in] open_req pointer to the original open request */ -void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req) +void ll_open_cleanup(struct super_block *sb, struct req_capsule *pill) { struct mdt_body *body; struct md_op_data *op_data; @@ -2422,7 +2882,7 @@ void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req) struct obd_export *exp = ll_s2sbi(sb)->ll_md_exp; ENTRY; - body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY); + body = req_capsule_server_get(pill, &RMF_MDT_BODY); OBD_ALLOC_PTR(op_data); if (op_data == NULL) { CWARN("%s: cannot allocate op_data to release open handle for " @@ -2441,21 +2901,32 @@ void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req) EXIT; } -int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req, +int ll_prep_inode(struct inode **inode, struct req_capsule *pill, struct super_block *sb, struct lookup_intent *it) { struct ll_sb_info *sbi = NULL; struct lustre_md md = { NULL }; + bool default_lmv_deleted = false; int rc; + ENTRY; LASSERT(*inode || sb); sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode); - rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp, + rc = md_get_lustre_md(sbi->ll_md_exp, pill, sbi->ll_dt_exp, sbi->ll_md_exp, &md); if (rc != 0) GOTO(out, rc); + /* + * clear default_lmv only if intent_getattr reply doesn't contain it. + * but it needs to be done after iget, check this early because + * ll_update_lsm_md() may change md. + */ + if (it && (it->it_op & (IT_LOOKUP | IT_GETATTR)) && + S_ISDIR(md.body->mbo_mode) && !md.default_lmv) + default_lmv_deleted = true; + if (*inode) { rc = ll_update_inode(*inode, &md); if (rc != 0) @@ -2478,12 +2949,7 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req, sbi->ll_flags & LL_SBI_32BIT_API), &md); if (IS_ERR(*inode)) { -#ifdef CONFIG_FS_POSIX_ACL - if (md.posix_acl) { - posix_acl_release(md.posix_acl); - md.posix_acl = NULL; - } -#endif + lmd_clear_acl(&md); rc = IS_ERR(*inode) ? PTR_ERR(*inode) : -ENOMEM; *inode = NULL; CERROR("new_inode -fatal: rc %d\n", rc); @@ -2519,58 +2985,70 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req, LDLM_LOCK_PUT(lock); } + if (default_lmv_deleted) + ll_update_default_lsm_md(*inode, &md); + + /* we may want to apply some policy for foreign file/dir */ + if (ll_sbi_has_foreign_symlink(sbi)) { + rc = ll_manage_foreign(*inode, &md); + if (rc < 0) + GOTO(out, rc); + } + GOTO(out, rc = 0); out: /* cleanup will be done if necessary */ md_free_lustre_md(sbi->ll_md_exp, &md); - if (rc != 0 && it != NULL && it->it_op & IT_OPEN) - ll_open_cleanup(sb != NULL ? sb : (*inode)->i_sb, req); + if (rc != 0 && it != NULL && it->it_op & IT_OPEN) { + ll_intent_drop_lock(it); + ll_open_cleanup(sb != NULL ? 
sb : (*inode)->i_sb, pill); + } return rc; } int ll_obd_statfs(struct inode *inode, void __user *arg) { - struct ll_sb_info *sbi = NULL; - struct obd_export *exp; - char *buf = NULL; - struct obd_ioctl_data *data = NULL; - __u32 type; - int len = 0, rc; - - if (!inode || !(sbi = ll_i2sbi(inode))) - GOTO(out_statfs, rc = -EINVAL); - - rc = obd_ioctl_getdata(&buf, &len, arg); - if (rc) - GOTO(out_statfs, rc); - - data = (void*)buf; - if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 || - !data->ioc_pbuf1 || !data->ioc_pbuf2) - GOTO(out_statfs, rc = -EINVAL); - - if (data->ioc_inllen1 != sizeof(__u32) || - data->ioc_inllen2 != sizeof(__u32) || - data->ioc_plen1 != sizeof(struct obd_statfs) || - data->ioc_plen2 != sizeof(struct obd_uuid)) - GOTO(out_statfs, rc = -EINVAL); - - memcpy(&type, data->ioc_inlbuf1, sizeof(__u32)); + struct ll_sb_info *sbi = NULL; + struct obd_export *exp; + struct obd_ioctl_data *data = NULL; + __u32 type; + int len = 0, rc; + + if (inode) + sbi = ll_i2sbi(inode); + if (!sbi) + GOTO(out_statfs, rc = -EINVAL); + + rc = obd_ioctl_getdata(&data, &len, arg); + if (rc) + GOTO(out_statfs, rc); + + if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 || + !data->ioc_pbuf1 || !data->ioc_pbuf2) + GOTO(out_statfs, rc = -EINVAL); + + if (data->ioc_inllen1 != sizeof(__u32) || + data->ioc_inllen2 != sizeof(__u32) || + data->ioc_plen1 != sizeof(struct obd_statfs) || + data->ioc_plen2 != sizeof(struct obd_uuid)) + GOTO(out_statfs, rc = -EINVAL); + + memcpy(&type, data->ioc_inlbuf1, sizeof(__u32)); if (type & LL_STATFS_LMV) - exp = sbi->ll_md_exp; + exp = sbi->ll_md_exp; else if (type & LL_STATFS_LOV) - exp = sbi->ll_dt_exp; - else - GOTO(out_statfs, rc = -ENODEV); + exp = sbi->ll_dt_exp; + else + GOTO(out_statfs, rc = -ENODEV); - rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, NULL); - if (rc) - GOTO(out_statfs, rc); + rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, data, NULL); + if (rc) + GOTO(out_statfs, rc); out_statfs: - OBD_FREE_LARGE(buf, len); + OBD_FREE_LARGE(data, len); return rc; } @@ -2581,12 +3059,12 @@ out_statfs: void ll_unlock_md_op_lsm(struct md_op_data *op_data) { if (op_data->op_mea2_sem) { - up_read(op_data->op_mea2_sem); + up_read_non_owner(op_data->op_mea2_sem); op_data->op_mea2_sem = NULL; } if (op_data->op_mea1_sem) { - up_read(op_data->op_mea1_sem); + up_read_non_owner(op_data->op_mea1_sem); op_data->op_mea1_sem = NULL; } } @@ -2595,7 +3073,8 @@ void ll_unlock_md_op_lsm(struct md_op_data *op_data) struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data, struct inode *i1, struct inode *i2, const char *name, size_t namelen, - __u32 mode, __u32 opc, void *data) + __u32 mode, enum md_op_code opc, + void *data) { LASSERT(i1 != NULL); @@ -2607,7 +3086,9 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data, if (namelen > ll_i2sbi(i1)->ll_namelen) return ERR_PTR(-ENAMETOOLONG); - if (!lu_name_is_valid_2(name, namelen)) + /* "/" is not valid name, but it's allowed */ + if (!lu_name_is_valid_2(name, namelen) && + strncmp("/", name, namelen) != 0) return ERR_PTR(-EINVAL); } @@ -2619,22 +3100,23 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data, ll_i2gids(op_data->op_suppgids, i1, i2); op_data->op_fid1 = *ll_inode2fid(i1); - op_data->op_default_stripe_offset = -1; + op_data->op_code = opc; if (S_ISDIR(i1->i_mode)) { - down_read(&ll_i2info(i1)->lli_lsm_sem); + down_read_non_owner(&ll_i2info(i1)->lli_lsm_sem); op_data->op_mea1_sem = &ll_i2info(i1)->lli_lsm_sem; op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md; - if (opc == LUSTRE_OPC_MKDIR) - 
op_data->op_default_stripe_offset = - ll_i2info(i1)->lli_def_stripe_offset; + op_data->op_default_mea1 = ll_i2info(i1)->lli_default_lsm_md; } if (i2) { op_data->op_fid2 = *ll_inode2fid(i2); if (S_ISDIR(i2->i_mode)) { if (i2 != i1) { - down_read(&ll_i2info(i2)->lli_lsm_sem); + /* i2 is typically a child of i1, and MUST be + * further from the root to avoid deadlocks. + */ + down_read_non_owner(&ll_i2info(i2)->lli_lsm_sem); op_data->op_mea2_sem = &ll_i2info(i2)->lli_lsm_sem; } @@ -2670,41 +3152,38 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data, void ll_finish_md_op_data(struct md_op_data *op_data) { ll_unlock_md_op_lsm(op_data); - security_release_secctx(op_data->op_file_secctx, - op_data->op_file_secctx_size); - OBD_FREE_PTR(op_data); + ll_security_release_secctx(op_data->op_file_secctx, + op_data->op_file_secctx_size); + llcrypt_free_ctx(op_data->op_file_encctx, op_data->op_file_encctx_size); + OBD_FREE_PTR(op_data); } -#ifdef HAVE_SUPEROPS_USE_DENTRY int ll_show_options(struct seq_file *seq, struct dentry *dentry) -#else -int ll_show_options(struct seq_file *seq, struct vfsmount *vfs) -#endif { - struct ll_sb_info *sbi; + struct ll_sb_info *sbi; -#ifdef HAVE_SUPEROPS_USE_DENTRY - LASSERT((seq != NULL) && (dentry != NULL)); + LASSERT(seq && dentry); sbi = ll_s2sbi(dentry->d_sb); -#else - LASSERT((seq != NULL) && (vfs != NULL)); - sbi = ll_s2sbi(vfs->mnt_sb); -#endif - - if (sbi->ll_flags & LL_SBI_NOLCK) - seq_puts(seq, ",nolock"); - if (sbi->ll_flags & LL_SBI_FLOCK) - seq_puts(seq, ",flock"); + if (sbi->ll_flags & LL_SBI_NOLCK) + seq_puts(seq, ",nolock"); - if (sbi->ll_flags & LL_SBI_LOCALFLOCK) - seq_puts(seq, ",localflock"); + /* "flock" is the default since 2.13, but it wasn't for many years, + * so it is still useful to print this to show it is enabled. + * Start to print "noflock" so it is now clear when flock is disabled. 
+ */ + if (sbi->ll_flags & LL_SBI_FLOCK) + seq_puts(seq, ",flock"); + else if (sbi->ll_flags & LL_SBI_LOCALFLOCK) + seq_puts(seq, ",localflock"); + else + seq_puts(seq, ",noflock"); - if (sbi->ll_flags & LL_SBI_USER_XATTR) - seq_puts(seq, ",user_xattr"); + if (sbi->ll_flags & LL_SBI_USER_XATTR) + seq_puts(seq, ",user_xattr"); - if (sbi->ll_flags & LL_SBI_LAZYSTATFS) - seq_puts(seq, ",lazystatfs"); + if (sbi->ll_flags & LL_SBI_LAZYSTATFS) + seq_puts(seq, ",lazystatfs"); if (sbi->ll_flags & LL_SBI_USER_FID2PATH) seq_puts(seq, ",user_fid2path"); @@ -2712,7 +3191,20 @@ int ll_show_options(struct seq_file *seq, struct vfsmount *vfs) if (sbi->ll_flags & LL_SBI_ALWAYS_PING) seq_puts(seq, ",always_ping"); - RETURN(0); + if (ll_sbi_has_test_dummy_encryption(sbi)) + seq_puts(seq, ",test_dummy_encryption"); + + if (ll_sbi_has_encrypt(sbi)) + seq_puts(seq, ",encrypt"); + else + seq_puts(seq, ",noencrypt"); + + if (sbi->ll_flags & LL_SBI_FOREIGN_SYMLINK) { + seq_puts(seq, ",foreign_symlink="); + seq_puts(seq, sbi->ll_foreign_symlink_prefix); + } + + RETURN(0); } /** @@ -2724,7 +3216,7 @@ int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg) struct obd_device *obd; ENTRY; - if (cmd == OBD_IOC_GETDTNAME) + if (cmd == OBD_IOC_GETNAME_OLD || cmd == OBD_IOC_GETDTNAME) obd = class_exp2obd(sbi->ll_dt_exp); else if (cmd == OBD_IOC_GETMDNAME) obd = class_exp2obd(sbi->ll_md_exp); @@ -2769,6 +3261,7 @@ void ll_dirty_page_discard_warn(struct page *page, int ioret) path = ll_d_path(dentry, buf, PAGE_SIZE); } + /* The below message is checked in recovery-small.sh test_24b */ CDEBUG(D_WARNING, "%s: dirty page discard: %s/fid: "DFID"/%s may get corrupted " "(rc %d)\n", ll_i2sbi(inode)->ll_fsname, @@ -2797,12 +3290,12 @@ ssize_t ll_copy_user_md(const struct lov_user_md __user *md, if (lum_size < 0) RETURN(lum_size); - OBD_ALLOC(*kbuf, lum_size); + OBD_ALLOC_LARGE(*kbuf, lum_size); if (*kbuf == NULL) RETURN(-ENOMEM); if (copy_from_user(*kbuf, md, lum_size) != 0) { - OBD_FREE(*kbuf, lum_size); + OBD_FREE_LARGE(*kbuf, lum_size); RETURN(-EFAULT); } @@ -2821,7 +3314,7 @@ void ll_compute_rootsquash_state(struct ll_sb_info *sbi) struct lnet_process_id id; /* Update norootsquash flag */ - down_write(&squash->rsi_sem); + spin_lock(&squash->rsi_lock); if (list_empty(&squash->rsi_nosquash_nids)) sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH; else { @@ -2830,7 +3323,7 @@ void ll_compute_rootsquash_state(struct ll_sb_info *sbi) matched = false; i = 0; while (LNetGetId(i++, &id) != -ENOENT) { - if (LNET_NETTYP(LNET_NIDNET(id.nid)) == LOLND) + if (id.nid == LNET_NID_LO_0) continue; if (cfs_match_nid(id.nid, &squash->rsi_nosquash_nids)) { matched = true; @@ -2842,7 +3335,7 @@ void ll_compute_rootsquash_state(struct ll_sb_info *sbi) else sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH; } - up_write(&squash->rsi_sem); + spin_unlock(&squash->rsi_lock); } /** @@ -2913,7 +3406,7 @@ int ll_getparent(struct file *file, struct getparent __user *arg) ENTRY; - if (!cfs_capable(CFS_CAP_DAC_READ_SEARCH) && + if (!capable(CAP_DAC_READ_SEARCH) && !(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH)) RETURN(-EPERM); @@ -2934,13 +3427,8 @@ int ll_getparent(struct file *file, struct getparent __user *arg) if (rc < 0) GOTO(ldata_free, rc); -#ifdef HAVE_XATTR_HANDLER_FLAGS rc = ll_xattr_list(inode, XATTR_NAME_LINK, XATTR_TRUSTED_T, buf.lb_buf, buf.lb_len, OBD_MD_FLXATTR); -#else - rc = ll_getxattr(file_dentry(file), XATTR_NAME_LINK, buf.lb_buf, - buf.lb_len); -#endif /* HAVE_XATTR_HANDLER_FLAGS */ if (rc < 0) GOTO(lb_free, rc);
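
The ll_umount_begin() hunk above replaces the old l_wait_event()/LWI_TIMEOUT_INTERVAL() wait with a plain bounded polling loop: check may_umount() once per second and give up after roughly ten seconds. The following user-space sketch illustrates the same bounded-poll pattern under stated assumptions; condition_ready() is a hypothetical stand-in for may_umount() and sleep() stands in for the kernel's ssleep(), so this is an analogue of the technique, not the client code itself.

/* Minimal user-space sketch of the bounded polling loop adopted by the
 * ll_umount_begin() hunk: poll a condition once per second, give up
 * after 10 tries.  condition_ready() is a hypothetical stand-in for
 * may_umount(); sleep() stands in for the kernel's ssleep(). */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int busy_refs = 3;	/* pretend references held by other users */

static bool condition_ready(void)
{
	/* each poll, one busy reference goes away */
	if (busy_refs > 0)
		busy_refs--;
	return busy_refs == 0;
}

int main(void)
{
	int cnt = 10;		/* ~10s budget, same as the patch */

	while (cnt > 0 && !condition_ready()) {
		sleep(1);	/* user-space analogue of ssleep(1) */
		cnt -= 1;
	}

	printf("%s after %d second(s)\n",
	       cnt > 0 ? "condition met" : "timed out", 10 - cnt);
	return cnt > 0 ? 0 : 1;
}

The loop trades the removed wait-queue machinery for simplicity: no waitqueue to initialize, and the worst-case delay is still bounded by the same ten-second budget.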
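
The ll_prep_md_op_data() hunk documents a lock-ordering rule in its new comment: i2 is typically a child of i1 and must be locked after it, so every task acquires the pair of lli_lsm_sem semaphores in the same root-to-leaf order and no two tasks can deadlock on the pair. Below is a small pthread-based sketch of that ordering rule under the same assumption; parent_lock, child_lock and lock_pair() are illustrative names, not Lustre identifiers.

/* Sketch of the lock-ordering rule noted in ll_prep_md_op_data():
 * when two related objects must both be read-locked, always lock the
 * one nearer the root (the "parent") first, then the "child".  If every
 * code path follows the same order, no two tasks can each hold one lock
 * while waiting for the other.  The rwlocks below are stand-ins for the
 * per-inode lli_lsm_sem semaphores. */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t parent_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_rwlock_t child_lock  = PTHREAD_RWLOCK_INITIALIZER;

static void lock_pair(const char *who)
{
	/* parent first, child second -- never the other way around */
	pthread_rwlock_rdlock(&parent_lock);
	pthread_rwlock_rdlock(&child_lock);
	printf("%s: holds parent+child read locks\n", who);
	pthread_rwlock_unlock(&child_lock);
	pthread_rwlock_unlock(&parent_lock);
}

static void *worker(void *arg)
{
	lock_pair((const char *)arg);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, "task 1");
	pthread_create(&t2, NULL, worker, "task 2");
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}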