4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
31 * lustre/llite/llite_lib.c
33 * Lustre Light Super operations
36 #define DEBUG_SUBSYSTEM S_LLITE
38 #include <linux/cpu.h>
39 #include <linux/module.h>
40 #include <linux/random.h>
41 #include <linux/statfs.h>
42 #include <linux/time.h>
43 #include <linux/types.h>
44 #include <libcfs/linux/linux-uuid.h>
45 #include <linux/version.h>
47 #include <linux/user_namespace.h>
48 #include <linux/delay.h>
49 #include <linux/uidgid.h>
50 #include <linux/fs_struct.h>
52 #ifndef HAVE_CPUS_READ_LOCK
53 #include <libcfs/linux/linux-cpu.h>
55 #include <uapi/linux/lustre/lustre_ioctl.h>
56 #ifdef HAVE_UAPI_LINUX_MOUNT_H
57 #include <uapi/linux/mount.h>
60 #include <lustre_ha.h>
61 #include <lustre_dlm.h>
62 #include <lprocfs_status.h>
63 #include <lustre_disk.h>
64 #include <uapi/linux/lustre/lustre_param.h>
65 #include <lustre_log.h>
66 #include <cl_object.h>
67 #include <obd_cksum.h>
68 #include "llite_internal.h"
70 struct kmem_cache *ll_file_data_slab;
73 #define log2(n) ffz(~(n))
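/* editorial note: ffz(~(n)) returns the index of the lowest set bit of n,
 * which equals log2(n) only when n is a power of two, e.g. log2(4096) = 12;
 * callers are expected to pass power-of-two block sizes */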
77 * If only one core is visible to Lustre, async readahead is
78 * disabled.  To avoid massive oversubscription we use 1/2 of the
79 * active cores as the default maximum number of async readahead
80 * requests.
82 static inline unsigned int ll_get_ra_async_max_active(void)
84 return cfs_cpt_weight(cfs_cpt_tab, CFS_CPT_ANY) >> 1;
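/* editorial example: with 16 cores visible in the CPU partition table,
 * cfs_cpt_weight() returns 16 and the default ra_async_max_active is 8;
 * with a single visible core it is 0, which disables async readahead as
 * described above */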
87 static struct ll_sb_info *ll_init_sbi(void)
89 struct ll_sb_info *sbi = NULL;
91 unsigned long lru_page_max;
100 RETURN(ERR_PTR(-ENOMEM));
102 rc = pcc_super_init(&sbi->ll_pcc_super);
106 spin_lock_init(&sbi->ll_lock);
107 mutex_init(&sbi->ll_lco.lco_lock);
108 spin_lock_init(&sbi->ll_pp_extent_lock);
109 spin_lock_init(&sbi->ll_process_lock);
110 sbi->ll_rw_stats_on = 0;
111 sbi->ll_statfs_max_age = OBD_STATFS_CACHE_SECONDS;
114 pages = si.totalram - si.totalhigh;
115 lru_page_max = pages / 2;
117 sbi->ll_ra_info.ra_async_max_active = ll_get_ra_async_max_active();
118 sbi->ll_ra_info.ll_readahead_wq =
119 cfs_cpt_bind_workqueue("ll-readahead-wq", cfs_cpt_tab,
121 sbi->ll_ra_info.ra_async_max_active);
122 if (IS_ERR(sbi->ll_ra_info.ll_readahead_wq))
123 GOTO(out_pcc, rc = PTR_ERR(sbi->ll_ra_info.ll_readahead_wq));
125 /* initialize ll_cache data */
126 sbi->ll_cache = cl_cache_init(lru_page_max);
127 if (sbi->ll_cache == NULL)
128 GOTO(out_destroy_ra, rc = -ENOMEM);
130 /* initialize foreign symlink prefix path */
131 OBD_ALLOC(sbi->ll_foreign_symlink_prefix, sizeof("/mnt/"));
132 if (sbi->ll_foreign_symlink_prefix == NULL)
133 GOTO(out_destroy_ra, rc = -ENOMEM);
134 memcpy(sbi->ll_foreign_symlink_prefix, "/mnt/", sizeof("/mnt/"));
135 sbi->ll_foreign_symlink_prefix_size = sizeof("/mnt/");
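/* editorial note: sizeof("/mnt/") is 6, so the stored prefix size
 * includes the trailing NUL */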
137 /* initialize foreign symlink upcall path, none by default */
138 OBD_ALLOC(sbi->ll_foreign_symlink_upcall, sizeof("none"));
139 if (sbi->ll_foreign_symlink_upcall == NULL)
140 GOTO(out_destroy_ra, rc = -ENOMEM);
141 memcpy(sbi->ll_foreign_symlink_upcall, "none", sizeof("none"));
142 sbi->ll_foreign_symlink_upcall_items = NULL;
143 sbi->ll_foreign_symlink_upcall_nb_items = 0;
144 init_rwsem(&sbi->ll_foreign_symlink_sem);
145 /* foreign symlink support (LL_SBI_FOREIGN_SYMLINK in ll_flags)
146 * not enabled by default
149 sbi->ll_ra_info.ra_max_pages =
150 min(pages / 32, SBI_DEFAULT_READ_AHEAD_MAX);
151 sbi->ll_ra_info.ra_max_pages_per_file =
152 min(sbi->ll_ra_info.ra_max_pages / 4,
153 SBI_DEFAULT_READ_AHEAD_PER_FILE_MAX);
154 sbi->ll_ra_info.ra_async_pages_per_file_threshold =
155 sbi->ll_ra_info.ra_max_pages_per_file;
156 sbi->ll_ra_info.ra_range_pages = SBI_DEFAULT_RA_RANGE_PAGES;
157 sbi->ll_ra_info.ra_max_read_ahead_whole_pages = -1;
158 atomic_set(&sbi->ll_ra_info.ra_async_inflight, 0);
160 sbi->ll_flags |= LL_SBI_VERBOSE;
161 #ifdef ENABLE_CHECKSUM
162 sbi->ll_flags |= LL_SBI_CHECKSUM;
165 sbi->ll_flags |= LL_SBI_FLOCK;
168 #ifdef HAVE_LRU_RESIZE_SUPPORT
169 sbi->ll_flags |= LL_SBI_LRU_RESIZE;
171 sbi->ll_flags |= LL_SBI_LAZYSTATFS;
173 for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
174 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
176 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
180 /* metadata statahead is enabled by default */
181 sbi->ll_sa_running_max = LL_SA_RUNNING_DEF;
182 sbi->ll_sa_max = LL_SA_RPC_DEF;
183 atomic_set(&sbi->ll_sa_total, 0);
184 atomic_set(&sbi->ll_sa_wrong, 0);
185 atomic_set(&sbi->ll_sa_running, 0);
186 atomic_set(&sbi->ll_agl_total, 0);
187 sbi->ll_flags |= LL_SBI_AGL_ENABLED;
188 sbi->ll_flags |= LL_SBI_FAST_READ;
189 sbi->ll_flags |= LL_SBI_TINY_WRITE;
190 sbi->ll_flags |= LL_SBI_PARALLEL_DIO;
191 ll_sbi_set_encrypt(sbi, true);
194 sbi->ll_squash.rsi_uid = 0;
195 sbi->ll_squash.rsi_gid = 0;
196 INIT_LIST_HEAD(&sbi->ll_squash.rsi_nosquash_nids);
197 spin_lock_init(&sbi->ll_squash.rsi_lock);
199 /* Per-filesystem file heat */
200 sbi->ll_heat_decay_weight = SBI_DEFAULT_HEAT_DECAY_WEIGHT;
201 sbi->ll_heat_period_second = SBI_DEFAULT_HEAT_PERIOD_SECOND;
203 /* Per-fs open heat level before requesting open lock */
204 sbi->ll_oc_thrsh_count = SBI_DEFAULT_OPENCACHE_THRESHOLD_COUNT;
205 sbi->ll_oc_max_ms = SBI_DEFAULT_OPENCACHE_THRESHOLD_MAX_MS;
206 sbi->ll_oc_thrsh_ms = SBI_DEFAULT_OPENCACHE_THRESHOLD_MS;
209 if (sbi->ll_foreign_symlink_prefix)
210 OBD_FREE(sbi->ll_foreign_symlink_prefix, sizeof("/mnt/"));
212 cl_cache_decref(sbi->ll_cache);
213 sbi->ll_cache = NULL;
215 destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
217 pcc_super_fini(&sbi->ll_pcc_super);
223 static void ll_free_sbi(struct super_block *sb)
225 struct ll_sb_info *sbi = ll_s2sbi(sb);
229 if (!list_empty(&sbi->ll_squash.rsi_nosquash_nids))
230 cfs_free_nidlist(&sbi->ll_squash.rsi_nosquash_nids);
231 if (sbi->ll_ra_info.ll_readahead_wq)
232 destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
233 if (sbi->ll_cache != NULL) {
234 cl_cache_decref(sbi->ll_cache);
235 sbi->ll_cache = NULL;
237 if (sbi->ll_foreign_symlink_prefix) {
238 OBD_FREE(sbi->ll_foreign_symlink_prefix,
239 sbi->ll_foreign_symlink_prefix_size);
240 sbi->ll_foreign_symlink_prefix = NULL;
242 if (sbi->ll_foreign_symlink_upcall) {
243 OBD_FREE(sbi->ll_foreign_symlink_upcall,
244 strlen(sbi->ll_foreign_symlink_upcall) +
246 sbi->ll_foreign_symlink_upcall = NULL;
248 if (sbi->ll_foreign_symlink_upcall_items) {
250 int nb_items = sbi->ll_foreign_symlink_upcall_nb_items;
251 struct ll_foreign_symlink_upcall_item *items =
252 sbi->ll_foreign_symlink_upcall_items;
254 for (i = 0 ; i < nb_items; i++)
255 if (items[i].type == STRING_TYPE)
256 OBD_FREE(items[i].string,
259 OBD_FREE_LARGE(items, nb_items *
260 sizeof(struct ll_foreign_symlink_upcall_item));
261 sbi->ll_foreign_symlink_upcall_items = NULL;
263 pcc_super_fini(&sbi->ll_pcc_super);
264 OBD_FREE(sbi, sizeof(*sbi));
269 static int client_common_fill_super(struct super_block *sb, char *md, char *dt)
271 struct inode *root = NULL;
272 struct ll_sb_info *sbi = ll_s2sbi(sb);
273 struct obd_statfs *osfs = NULL;
274 struct ptlrpc_request *request = NULL;
275 struct obd_connect_data *data = NULL;
276 struct obd_uuid *uuid;
277 struct md_op_data *op_data;
278 struct lustre_md lmd;
280 int size, err, checksum;
283 sbi->ll_md_obd = class_name2obd(md);
284 if (!sbi->ll_md_obd) {
285 CERROR("MD %s: not setup or attached\n", md);
299 /* pass client page size via ocd_grant_blkbits; the server should report
300 * back its backend blocksize for grant calculation purposes */
301 data->ocd_grant_blkbits = PAGE_SHIFT;
303 /* indicate MDT features supported by this client */
304 data->ocd_connect_flags = OBD_CONNECT_IBITS | OBD_CONNECT_NODEVOH |
305 OBD_CONNECT_ATTRFID | OBD_CONNECT_GRANT |
306 OBD_CONNECT_VERSION | OBD_CONNECT_BRW_SIZE |
307 OBD_CONNECT_SRVLOCK |
308 OBD_CONNECT_MDS_CAPA | OBD_CONNECT_OSS_CAPA |
309 OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
310 OBD_CONNECT_AT | OBD_CONNECT_LOV_V3 |
311 OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
312 OBD_CONNECT_64BITHASH |
313 OBD_CONNECT_EINPROGRESS |
314 OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
315 OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS|
316 OBD_CONNECT_MAX_EASIZE |
317 OBD_CONNECT_FLOCK_DEAD |
318 OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK |
319 OBD_CONNECT_OPEN_BY_FID |
320 OBD_CONNECT_DIR_STRIPE |
321 OBD_CONNECT_BULK_MBITS | OBD_CONNECT_CKSUM |
322 OBD_CONNECT_SUBTREE |
323 OBD_CONNECT_MULTIMODRPCS |
324 OBD_CONNECT_GRANT_PARAM |
325 OBD_CONNECT_SHORTIO | OBD_CONNECT_FLAGS2;
327 data->ocd_connect_flags2 = OBD_CONNECT2_DIR_MIGRATE |
328 OBD_CONNECT2_SUM_STATFS |
329 OBD_CONNECT2_OVERSTRIPING |
331 OBD_CONNECT2_LOCK_CONVERT |
332 OBD_CONNECT2_ARCHIVE_ID_ARRAY |
333 OBD_CONNECT2_INC_XID |
335 OBD_CONNECT2_ASYNC_DISCARD |
337 OBD_CONNECT2_CRUSH | OBD_CONNECT2_LSEEK |
338 OBD_CONNECT2_GETATTR_PFID |
339 OBD_CONNECT2_DOM_LVB |
340 OBD_CONNECT2_REP_MBITS |
341 OBD_CONNECT2_ATOMIC_OPEN_LOCK;
343 #ifdef HAVE_LRU_RESIZE_SUPPORT
344 if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
345 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
347 data->ocd_connect_flags |= OBD_CONNECT_ACL_FLAGS;
349 data->ocd_cksum_types = obd_cksum_types_supported_client();
351 if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
352 /* flag mdc connection as lightweight, only used for test
353 * purposes; use with care */
354 data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
356 data->ocd_ibits_known = MDS_INODELOCK_FULL;
357 data->ocd_version = LUSTRE_VERSION_CODE;
359 if (sb->s_flags & SB_RDONLY)
360 data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
361 if (sbi->ll_flags & LL_SBI_USER_XATTR)
362 data->ocd_connect_flags |= OBD_CONNECT_XATTR;
365 /* Setting this indicates we correctly support S_NOSEC (See kernel
366 * commit 9e1f1de02c2275d7172e18dc4e7c2065777611bf)
368 sb->s_flags |= SB_NOSEC;
370 sbi->ll_fop = ll_select_file_operations(sbi);
372 /* always ping even if server suppress_pings */
373 if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
374 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
376 obd_connect_set_secctx(data);
377 if (ll_sbi_has_encrypt(sbi))
378 obd_connect_set_enc(data);
380 #if defined(CONFIG_SECURITY)
381 data->ocd_connect_flags2 |= OBD_CONNECT2_SELINUX_POLICY;
384 data->ocd_brw_size = MD_MAX_BRW_SIZE;
386 err = obd_connect(NULL, &sbi->ll_md_exp, sbi->ll_md_obd,
387 &sbi->ll_sb_uuid, data, sbi->ll_cache);
389 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
390 "recovery, of which this client is not a "
391 "part. Please wait for recovery to complete,"
392 " abort, or time out.\n", md);
395 CERROR("cannot connect to %s: rc = %d\n", md, err);
399 sbi->ll_md_exp->exp_connect_data = *data;
401 err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
402 LUSTRE_SEQ_METADATA);
404 CERROR("%s: Can't init metadata layer FID infrastructure, "
405 "rc = %d\n", sbi->ll_md_exp->exp_obd->obd_name, err);
409 /* For mount, we only need fs info from MDT0, and also in DNE, it
410 * can make sure the client can be mounted as long as MDT0 is
412 err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
413 ktime_get_seconds() - sbi->ll_statfs_max_age,
414 OBD_STATFS_FOR_MDT0);
416 GOTO(out_md_fid, err);
418 /* This needs to be after statfs to ensure connect has finished.
419 * Note that "data" does NOT contain the valid connect reply.
420 * If connecting to a 1.8 server there will be no LMV device, so
421 * we can access the MDC export directly and exp_connect_flags will
422 * be non-zero, but if accessing an upgraded 2.1 server it will
423 * have the correct flags filled in.
424 * XXX: fill in the LMV exp_connect_flags from MDC(s). */
425 valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
426 if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
427 valid != CLIENT_CONNECT_MDT_REQD) {
430 OBD_ALLOC_WAIT(buf, PAGE_SIZE);
431 obd_connect_flags2str(buf, PAGE_SIZE,
432 valid ^ CLIENT_CONNECT_MDT_REQD, 0, ",");
433 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support "
434 "feature(s) needed for correct operation "
435 "of this client (%s). Please upgrade "
436 "server or downgrade client.\n",
437 sbi->ll_md_exp->exp_obd->obd_name, buf);
438 OBD_FREE(buf, PAGE_SIZE);
439 GOTO(out_md_fid, err = -EPROTO);
442 size = sizeof(*data);
443 err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
444 KEY_CONN_DATA, &size, data);
446 CERROR("%s: Get connect data failed: rc = %d\n",
447 sbi->ll_md_exp->exp_obd->obd_name, err);
448 GOTO(out_md_fid, err);
451 LASSERT(osfs->os_bsize);
452 sb->s_blocksize = osfs->os_bsize;
453 sb->s_blocksize_bits = log2(osfs->os_bsize);
454 sb->s_magic = LL_SUPER_MAGIC;
455 sb->s_maxbytes = MAX_LFS_FILESIZE;
456 sbi->ll_namelen = osfs->os_namelen;
457 sbi->ll_mnt.mnt = current->fs->root.mnt;
459 if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
460 !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
461 LCONSOLE_INFO("Disabling user_xattr feature because "
462 "it is not supported on the server\n");
463 sbi->ll_flags &= ~LL_SBI_USER_XATTR;
466 if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
468 sb->s_flags |= SB_POSIXACL;
470 sbi->ll_flags |= LL_SBI_ACL;
472 LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
474 sb->s_flags &= ~SB_POSIXACL;
476 sbi->ll_flags &= ~LL_SBI_ACL;
479 if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
480 sbi->ll_flags |= LL_SBI_64BIT_HASH;
482 if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK)
483 sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;
485 if (obd_connect_has_secctx(data))
486 sbi->ll_flags |= LL_SBI_FILE_SECCTX;
488 if (ll_sbi_has_encrypt(sbi) && !obd_connect_has_enc(data)) {
489 if (ll_sbi_has_test_dummy_encryption(sbi))
490 LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
492 sbi->ll_md_exp->exp_obd->obd_name);
493 ll_sbi_set_encrypt(sbi, false);
496 if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
497 if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
498 LCONSOLE_INFO("%s: disabling xattr cache due to "
499 "unknown maximum xattr size.\n", dt);
500 } else if (!sbi->ll_xattr_cache_set) {
501 /* If xattr_cache was already set (whether to 0 or 1)
502 * while processing the llog, it won't be enabled here. */
503 sbi->ll_flags |= LL_SBI_XATTR_CACHE;
504 sbi->ll_xattr_cache_enabled = 1;
508 sbi->ll_dt_obd = class_name2obd(dt);
509 if (!sbi->ll_dt_obd) {
510 CERROR("DT %s: not setup or attached\n", dt);
511 GOTO(out_md_fid, err = -ENODEV);
514 /* pass client page size via ocd_grant_blkbits; the server should report
515 * back its backend blocksize for grant calculation purposes */
516 data->ocd_grant_blkbits = PAGE_SHIFT;
518 /* indicate OST features supported by this client */
519 data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
520 OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
521 OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
522 OBD_CONNECT_SRVLOCK |
523 OBD_CONNECT_AT | OBD_CONNECT_OSS_CAPA |
524 OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
525 OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
526 OBD_CONNECT_EINPROGRESS |
527 OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
528 OBD_CONNECT_LAYOUTLOCK |
529 OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK |
530 OBD_CONNECT_BULK_MBITS | OBD_CONNECT_SHORTIO |
531 OBD_CONNECT_FLAGS2 | OBD_CONNECT_GRANT_SHRINK;
532 data->ocd_connect_flags2 = OBD_CONNECT2_LOCKAHEAD |
533 OBD_CONNECT2_INC_XID | OBD_CONNECT2_LSEEK |
534 OBD_CONNECT2_REP_MBITS;
536 if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_GRANT_PARAM))
537 data->ocd_connect_flags |= OBD_CONNECT_GRANT_PARAM;
539 /* OBD_CONNECT_CKSUM should always be set, even if checksums are
540 * disabled by default, because it can still be enabled on the
541 * fly via /sys. As a consequence, we still need to come to an
542 * agreement on the supported algorithms at connect time
544 data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
546 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
547 data->ocd_cksum_types = OBD_CKSUM_ADLER;
549 data->ocd_cksum_types = obd_cksum_types_supported_client();
551 #ifdef HAVE_LRU_RESIZE_SUPPORT
552 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
554 /* always ping even if server suppress_pings */
555 if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
556 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
558 if (ll_sbi_has_encrypt(sbi))
559 obd_connect_set_enc(data);
561 CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d "
562 "ocd_grant: %d\n", data->ocd_connect_flags,
563 data->ocd_version, data->ocd_grant);
565 sbi->ll_dt_obd->obd_upcall.onu_owner = &sbi->ll_lco;
566 sbi->ll_dt_obd->obd_upcall.onu_upcall = cl_ocd_update;
568 data->ocd_brw_size = DT_MAX_BRW_SIZE;
570 err = obd_connect(NULL, &sbi->ll_dt_exp, sbi->ll_dt_obd,
571 &sbi->ll_sb_uuid, data, sbi->ll_cache);
573 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
574 "recovery, of which this client is not a "
575 "part. Please wait for recovery to "
576 "complete, abort, or time out.\n", dt);
579 CERROR("%s: Cannot connect to %s: rc = %d\n",
580 sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
584 if (ll_sbi_has_encrypt(sbi) &&
585 !obd_connect_has_enc(&sbi->ll_dt_obd->u.lov.lov_ocd)) {
586 if (ll_sbi_has_test_dummy_encryption(sbi))
587 LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
589 ll_sbi_set_encrypt(sbi, false);
590 } else if (ll_sbi_has_test_dummy_encryption(sbi)) {
591 LCONSOLE_WARN("Test dummy encryption mode enabled\n");
594 sbi->ll_dt_exp->exp_connect_data = *data;
596 /* Don't change value if it was specified in the config log */
597 if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages == -1) {
598 sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
599 max_t(unsigned long, SBI_DEFAULT_READ_AHEAD_WHOLE_MAX,
600 (data->ocd_brw_size >> PAGE_SHIFT));
601 if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages >
602 sbi->ll_ra_info.ra_max_pages_per_file)
603 sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
604 sbi->ll_ra_info.ra_max_pages_per_file;
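/* editorial example: assuming the server reports a 4 MiB ocd_brw_size and
 * PAGE_SHIFT is 12, ocd_brw_size >> PAGE_SHIFT is 1024 pages; the larger
 * of that and SBI_DEFAULT_READ_AHEAD_WHOLE_MAX is used, then clamped to
 * ra_max_pages_per_file above */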
607 err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
608 LUSTRE_SEQ_METADATA);
610 CERROR("%s: Can't init data layer FID infrastructure, "
611 "rc = %d\n", sbi->ll_dt_exp->exp_obd->obd_name, err);
615 mutex_lock(&sbi->ll_lco.lco_lock);
616 sbi->ll_lco.lco_flags = data->ocd_connect_flags;
617 sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
618 sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
619 mutex_unlock(&sbi->ll_lco.lco_lock);
621 fid_zero(&sbi->ll_root_fid);
622 err = md_get_root(sbi->ll_md_exp, get_mount_fileset(sb),
625 CERROR("cannot mds_connect: rc = %d\n", err);
626 GOTO(out_lock_cn_cb, err);
628 if (!fid_is_sane(&sbi->ll_root_fid)) {
629 CERROR("%s: Invalid root fid "DFID" during mount\n",
630 sbi->ll_md_exp->exp_obd->obd_name,
631 PFID(&sbi->ll_root_fid));
632 GOTO(out_lock_cn_cb, err = -EINVAL);
634 CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
636 sb->s_op = &lustre_super_operations;
637 sb->s_xattr = ll_xattr_handlers;
638 #if THREAD_SIZE >= 8192 /*b=17630*/
639 sb->s_export_op = &lustre_export_operations;
641 #ifdef HAVE_LUSTRE_CRYPTO
642 llcrypt_set_ops(sb, &lustre_cryptops);
646 * XXX: move this to after cbd setup? */
647 valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE;
648 if (sbi->ll_flags & LL_SBI_ACL)
649 valid |= OBD_MD_FLACL;
651 OBD_ALLOC_PTR(op_data);
653 GOTO(out_lock_cn_cb, err = -ENOMEM);
655 op_data->op_fid1 = sbi->ll_root_fid;
656 op_data->op_mode = 0;
657 op_data->op_valid = valid;
659 err = md_getattr(sbi->ll_md_exp, op_data, &request);
661 OBD_FREE_PTR(op_data);
663 CERROR("%s: md_getattr failed for root: rc = %d\n",
664 sbi->ll_md_exp->exp_obd->obd_name, err);
665 GOTO(out_lock_cn_cb, err);
668 err = md_get_lustre_md(sbi->ll_md_exp, &request->rq_pill,
669 sbi->ll_dt_exp, sbi->ll_md_exp, &lmd);
671 CERROR("failed to understand root inode md: rc = %d\n", err);
672 ptlrpc_req_finished(request);
673 GOTO(out_lock_cn_cb, err);
676 LASSERT(fid_is_sane(&sbi->ll_root_fid));
677 root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid,
678 sbi->ll_flags & LL_SBI_32BIT_API),
680 md_free_lustre_md(sbi->ll_md_exp, &lmd);
681 ptlrpc_req_finished(request);
685 err = IS_ERR(root) ? PTR_ERR(root) : -EBADF;
687 CERROR("%s: bad ll_iget() for root: rc = %d\n",
688 sbi->ll_fsname, err);
692 checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
693 if (sbi->ll_checksum_set) {
694 err = obd_set_info_async(NULL, sbi->ll_dt_exp,
695 sizeof(KEY_CHECKSUM), KEY_CHECKSUM,
696 sizeof(checksum), &checksum, NULL);
698 CERROR("%s: Set checksum failed: rc = %d\n",
699 sbi->ll_dt_exp->exp_obd->obd_name, err);
705 sb->s_root = d_make_root(root);
706 if (sb->s_root == NULL) {
708 CERROR("%s: can't make root dentry: rc = %d\n",
709 sbi->ll_fsname, err);
713 sbi->ll_sdev_orig = sb->s_dev;
715 /* We set sb->s_dev equal on all lustre clients in order to support
716 * NFS export clustering. NFSD requires that the FSID be the same
718 /* s_dev is also used in lt_compare() to compare two fs, but that is
719 * only a node-local comparison. */
720 uuid = obd_get_uuid(sbi->ll_md_exp);
722 sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
729 if (sbi->ll_dt_obd) {
730 err = sysfs_create_link(&sbi->ll_kset.kobj,
731 &sbi->ll_dt_obd->obd_kset.kobj,
732 sbi->ll_dt_obd->obd_type->typ_name);
734 CERROR("%s: could not register %s in llite: rc = %d\n",
735 dt, sbi->ll_fsname, err);
740 if (sbi->ll_md_obd) {
741 err = sysfs_create_link(&sbi->ll_kset.kobj,
742 &sbi->ll_md_obd->obd_kset.kobj,
743 sbi->ll_md_obd->obd_type->typ_name);
745 CERROR("%s: could not register %s in llite: rc = %d\n",
746 md, sbi->ll_fsname, err);
755 obd_fid_fini(sbi->ll_dt_exp->exp_obd);
757 obd_disconnect(sbi->ll_dt_exp);
758 sbi->ll_dt_exp = NULL;
759 sbi->ll_dt_obd = NULL;
761 obd_fid_fini(sbi->ll_md_exp->exp_obd);
763 obd_disconnect(sbi->ll_md_exp);
764 sbi->ll_md_exp = NULL;
765 sbi->ll_md_obd = NULL;
774 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
778 size = sizeof(*lmmsize);
779 rc = obd_get_info(NULL, sbi->ll_dt_exp, sizeof(KEY_MAX_EASIZE),
780 KEY_MAX_EASIZE, &size, lmmsize);
782 CERROR("%s: cannot get max LOV EA size: rc = %d\n",
783 sbi->ll_dt_exp->exp_obd->obd_name, rc);
787 CDEBUG(D_INFO, "max LOV ea size: %d\n", *lmmsize);
790 rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
791 KEY_MAX_EASIZE, &size, lmmsize);
793 CERROR("Get max mdsize error rc %d\n", rc);
795 CDEBUG(D_INFO, "max LMV ea size: %d\n", *lmmsize);
801 * Get the value of the default_easize parameter.
803 * \see client_obd::cl_default_mds_easize
805 * \param[in] sbi superblock info for this filesystem
806 * \param[out] lmmsize pointer to storage location for value
808 * \retval 0 on success
809 * \retval negative negated errno on failure
811 int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
816 rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
817 KEY_DEFAULT_EASIZE, &size, lmmsize);
819 CERROR("Get default mdsize error rc %d\n", rc);
825 * Set the default_easize parameter to the given value.
827 * \see client_obd::cl_default_mds_easize
829 * \param[in] sbi superblock info for this filesystem
830 * \param[in] lmmsize the size to set
832 * \retval 0 on success
833 * \retval negative negated errno on failure
835 int ll_set_default_mdsize(struct ll_sb_info *sbi, int lmmsize)
839 if (lmmsize < sizeof(struct lov_mds_md) ||
840 lmmsize > OBD_MAX_DEFAULT_EA_SIZE)
843 rc = obd_set_info_async(NULL, sbi->ll_md_exp,
844 sizeof(KEY_DEFAULT_EASIZE), KEY_DEFAULT_EASIZE,
845 sizeof(int), &lmmsize, NULL);
850 static void client_common_put_super(struct super_block *sb)
852 struct ll_sb_info *sbi = ll_s2sbi(sb);
857 obd_fid_fini(sbi->ll_dt_exp->exp_obd);
858 obd_disconnect(sbi->ll_dt_exp);
859 sbi->ll_dt_exp = NULL;
861 ll_debugfs_unregister_super(sb);
863 obd_fid_fini(sbi->ll_md_exp->exp_obd);
864 obd_disconnect(sbi->ll_md_exp);
865 sbi->ll_md_exp = NULL;
870 void ll_kill_super(struct super_block *sb)
872 struct ll_sb_info *sbi;
876 if (!(sb->s_flags & SB_ACTIVE))
880 /* we need to restore s_dev from the value changed for clustered NFS
881 * before put_super, because new kernels cache s_dev, and changing
882 * sb->s_dev in put_super does not affect the real device removal */
884 sb->s_dev = sbi->ll_sdev_orig;
886 /* wait for running statahead threads to quit */
887 while (atomic_read(&sbi->ll_sa_running) > 0)
888 schedule_timeout_uninterruptible(
889 cfs_time_seconds(1) >> 3);
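/* editorial note: cfs_time_seconds(1) >> 3 is roughly 1/8 s, so the
 * statahead counter is polled about eight times per second */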
895 static inline int ll_set_opt(const char *opt, char *data, int fl)
897 if (strncmp(opt, data, strlen(opt)) != 0)
903 /* non-client-specific mount options are parsed in lmd_parse */
904 static int ll_options(char *options, struct ll_sb_info *sbi)
907 char *s1 = options, *s2;
908 int *flags = &sbi->ll_flags;
914 CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
917 CDEBUG(D_SUPER, "next opt=%s\n", s1);
918 tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
923 tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
925 *flags = (*flags & ~LL_SBI_LOCALFLOCK) | tmp;
928 tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
930 *flags = (*flags & ~LL_SBI_FLOCK) | tmp;
933 tmp = ll_set_opt("noflock", s1, LL_SBI_FLOCK|LL_SBI_LOCALFLOCK);
938 tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
943 tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
948 tmp = ll_set_opt("context", s1, 1);
951 tmp = ll_set_opt("fscontext", s1, 1);
954 tmp = ll_set_opt("defcontext", s1, 1);
957 tmp = ll_set_opt("rootcontext", s1, 1);
960 tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
965 tmp = ll_set_opt("nouser_fid2path", s1, LL_SBI_USER_FID2PATH);
971 tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
974 sbi->ll_checksum_set = 1;
977 tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
980 sbi->ll_checksum_set = 1;
983 tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
988 tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
993 tmp = ll_set_opt("lazystatfs", s1, LL_SBI_LAZYSTATFS);
998 tmp = ll_set_opt("nolazystatfs", s1, LL_SBI_LAZYSTATFS);
1003 tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
1008 tmp = ll_set_opt("verbose", s1, LL_SBI_VERBOSE);
1013 tmp = ll_set_opt("noverbose", s1, LL_SBI_VERBOSE);
1018 tmp = ll_set_opt("always_ping", s1, LL_SBI_ALWAYS_PING);
1023 tmp = ll_set_opt("test_dummy_encryption", s1,
1024 LL_SBI_TEST_DUMMY_ENCRYPTION);
1026 #ifdef HAVE_LUSTRE_CRYPTO
1029 LCONSOLE_WARN("Test dummy encryption mount option ignored: encryption not supported\n");
1033 tmp = ll_set_opt("noencrypt", s1, LL_SBI_ENCRYPT);
1035 #ifdef HAVE_LUSTRE_CRYPTO
1038 LCONSOLE_WARN("noencrypt mount option ignored: encryption not supported\n");
1042 tmp = ll_set_opt("foreign_symlink", s1, LL_SBI_FOREIGN_SYMLINK);
1044 int prefix_pos = sizeof("foreign_symlink=") - 1;
1045 int equal_pos = sizeof("foreign_symlink=") - 2;
1047 /* non-default prefix provided ? */
1048 if (strlen(s1) >= sizeof("foreign_symlink=") &&
1049 *(s1 + equal_pos) == '=') {
1050 char *old = sbi->ll_foreign_symlink_prefix;
1052 sbi->ll_foreign_symlink_prefix_size;
1054 /* path must be absolute */
1055 if (*(s1 + sizeof("foreign_symlink=")
1057 LCONSOLE_ERROR_MSG(0x152,
1058 "foreign prefix '%s' must be an absolute path\n",
1063 s2 = strchrnul(s1 + prefix_pos, ',');
1065 if (sbi->ll_foreign_symlink_prefix) {
1066 sbi->ll_foreign_symlink_prefix = NULL;
1067 sbi->ll_foreign_symlink_prefix_size = 0;
1069 /* alloc for path length and '\0' */
1070 OBD_ALLOC(sbi->ll_foreign_symlink_prefix,
1071 s2 - (s1 + prefix_pos) + 1);
1072 if (!sbi->ll_foreign_symlink_prefix) {
1073 /* restore previous */
1074 sbi->ll_foreign_symlink_prefix = old;
1075 sbi->ll_foreign_symlink_prefix_size =
1080 OBD_FREE(old, old_len);
1081 strncpy(sbi->ll_foreign_symlink_prefix,
1083 s2 - (s1 + prefix_pos));
1084 sbi->ll_foreign_symlink_prefix_size =
1085 s2 - (s1 + prefix_pos) + 1;
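/* editorial example (hypothetical option string): for
 * "foreign_symlink=/scratch/" the stored prefix is "/scratch/" and the
 * stored size is 10, i.e. the 9 path characters plus the trailing NUL */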
1087 LCONSOLE_ERROR_MSG(0x152,
1088 "invalid %s option\n", s1);
1090 /* enable foreign symlink support */
1094 LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
1100 s2 = strchr(s1, ',');
1108 void ll_lli_init(struct ll_inode_info *lli)
1110 lli->lli_inode_magic = LLI_INODE_MAGIC;
1112 spin_lock_init(&lli->lli_lock);
1113 lli->lli_posix_acl = NULL;
1114 /* Do not set lli_fid, it has been initialized already. */
1115 fid_zero(&lli->lli_pfid);
1116 lli->lli_mds_read_och = NULL;
1117 lli->lli_mds_write_och = NULL;
1118 lli->lli_mds_exec_och = NULL;
1119 lli->lli_open_fd_read_count = 0;
1120 lli->lli_open_fd_write_count = 0;
1121 lli->lli_open_fd_exec_count = 0;
1122 mutex_init(&lli->lli_och_mutex);
1123 spin_lock_init(&lli->lli_agl_lock);
1124 spin_lock_init(&lli->lli_layout_lock);
1125 ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
1126 lli->lli_clob = NULL;
1128 init_rwsem(&lli->lli_xattrs_list_rwsem);
1129 mutex_init(&lli->lli_xattrs_enq_lock);
1131 LASSERT(lli->lli_vfs_inode.i_mode != 0);
1132 if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
1133 lli->lli_opendir_key = NULL;
1134 lli->lli_sai = NULL;
1135 spin_lock_init(&lli->lli_sa_lock);
1136 lli->lli_opendir_pid = 0;
1137 lli->lli_sa_enabled = 0;
1138 init_rwsem(&lli->lli_lsm_sem);
1140 mutex_init(&lli->lli_size_mutex);
1141 mutex_init(&lli->lli_setattr_mutex);
1142 lli->lli_symlink_name = NULL;
1143 ll_trunc_sem_init(&lli->lli_trunc_sem);
1144 range_lock_tree_init(&lli->lli_write_tree);
1145 init_rwsem(&lli->lli_glimpse_sem);
1146 lli->lli_glimpse_time = ktime_set(0, 0);
1147 INIT_LIST_HEAD(&lli->lli_agl_list);
1148 lli->lli_agl_index = 0;
1149 lli->lli_async_rc = 0;
1150 spin_lock_init(&lli->lli_heat_lock);
1151 obd_heat_clear(lli->lli_heat_instances, OBD_HEAT_COUNT);
1152 lli->lli_heat_flags = 0;
1153 mutex_init(&lli->lli_pcc_lock);
1154 lli->lli_pcc_state = PCC_STATE_FL_NONE;
1155 lli->lli_pcc_inode = NULL;
1156 lli->lli_pcc_dsflags = PCC_DATASET_INVALID;
1157 lli->lli_pcc_generation = 0;
1158 mutex_init(&lli->lli_group_mutex);
1159 lli->lli_group_users = 0;
1160 lli->lli_group_gid = 0;
1162 mutex_init(&lli->lli_layout_mutex);
1163 memset(lli->lli_jobid, 0, sizeof(lli->lli_jobid));
1166 #define MAX_STRING_SIZE 128
1168 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1170 #define LSI_BDI_INITIALIZED 0x00400000
1172 #ifndef HAVE_BDI_CAP_MAP_COPY
1173 # define BDI_CAP_MAP_COPY 0
1176 static int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
1178 struct lustre_sb_info *lsi = s2lsi(sb);
1179 char buf[MAX_STRING_SIZE];
1183 err = bdi_init(&lsi->lsi_bdi);
1187 lsi->lsi_flags |= LSI_BDI_INITIALIZED;
1188 lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY;
1189 lsi->lsi_bdi.name = "lustre";
1190 va_start(args, fmt);
1191 vsnprintf(buf, MAX_STRING_SIZE, fmt, args);
1193 err = bdi_register(&lsi->lsi_bdi, NULL, "%s", buf);
1196 sb->s_bdi = &lsi->lsi_bdi;
1200 #endif /* !HAVE_SUPER_SETUP_BDI_NAME */
1202 int ll_fill_super(struct super_block *sb)
1204 struct lustre_profile *lprof = NULL;
1205 struct lustre_sb_info *lsi = s2lsi(sb);
1206 struct ll_sb_info *sbi = NULL;
1207 char *dt = NULL, *md = NULL;
1208 char *profilenm = get_profile_name(sb);
1209 struct config_llog_instance *cfg;
1210 /* %p for void* in printf needs 16+2 characters: 0xffffffffffffffff */
1211 const int instlen = LUSTRE_MAXINSTANCE + 2;
1212 unsigned long cfg_instance = ll_get_cfg_instance(sb);
1213 char name[MAX_STRING_SIZE];
1222 /* for ASLR, to map between cfg_instance and hashed ptr */
1223 CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1224 profilenm, cfg_instance, sb);
1226 OBD_RACE(OBD_FAIL_LLITE_RACE_MOUNT);
1230 GOTO(out_free_cfg, err = -ENOMEM);
1232 /* client additional sb info */
1233 lsi->lsi_llsbi = sbi = ll_init_sbi();
1235 GOTO(out_free_cfg, err = PTR_ERR(sbi));
1237 err = ll_options(lsi->lsi_lmd->lmd_opts, sbi);
1239 GOTO(out_free_cfg, err);
1241 /* kernels >= 2.6.38 store dentry operations in sb->s_d_op. */
1242 sb->s_d_op = &ll_d_ops;
1245 generate_random_uuid(uuid.b);
1246 snprintf(sbi->ll_sb_uuid.uuid, sizeof(sbi->ll_sb_uuid), "%pU", uuid.b);
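/* editorial note: "%pU" prints the 16 random bytes as the standard
 * 36-character xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx UUID string */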
1248 CDEBUG(D_CONFIG, "llite sb uuid: %s\n", sbi->ll_sb_uuid.uuid);
1251 len = strlen(profilenm);
1252 ptr = strrchr(profilenm, '-');
1253 if (ptr && (strcmp(ptr, "-client") == 0))
1256 if (len > LUSTRE_MAXFSNAME) {
1257 if (unlikely(len >= MAX_STRING_SIZE))
1258 len = MAX_STRING_SIZE - 1;
1259 strncpy(name, profilenm, len);
1261 err = -ENAMETOOLONG;
1262 CERROR("%s: fsname longer than %u characters: rc = %d\n",
1263 name, LUSTRE_MAXFSNAME, err);
1264 GOTO(out_free_cfg, err);
1266 strncpy(sbi->ll_fsname, profilenm, len);
1267 sbi->ll_fsname[len] = '\0';
1270 snprintf(name, sizeof(name), "%.*s-%016lx", len,
1271 profilenm, cfg_instance);
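/* editorial example: for profile "lustre-client" this yields a name such
 * as "lustre-ffff8800aabbccdd" (fsname plus hashed cfg_instance), used
 * for both the bdi and the debugfs entry below */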
1273 err = super_setup_bdi_name(sb, "%s", name);
1275 GOTO(out_free_cfg, err);
1277 /* Call ll_debugfs_register_super() before lustre_process_log()
1278 * so that "llite.*.*" params can be processed correctly.
1280 err = ll_debugfs_register_super(sb, name);
1282 CERROR("%s: could not register mountpoint in llite: rc = %d\n",
1283 sbi->ll_fsname, err);
1287 /* The cfg_instance is a value unique to this super, in case some
1288 * joker tries to mount the same fs at two mount points.
1290 cfg->cfg_instance = cfg_instance;
1291 cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
1292 cfg->cfg_callback = class_config_llog_handler;
1293 cfg->cfg_sub_clds = CONFIG_SUB_CLIENT;
1294 /* set up client obds */
1295 err = lustre_process_log(sb, profilenm, cfg);
1297 GOTO(out_debugfs, err);
1299 /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
1300 lprof = class_get_profile(profilenm);
1301 if (lprof == NULL) {
1302 LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be"
1303 " read from the MGS. Does that filesystem "
1304 "exist?\n", profilenm);
1305 GOTO(out_debugfs, err = -EINVAL);
1307 CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
1308 lprof->lp_md, lprof->lp_dt);
1310 dt_len = strlen(lprof->lp_dt) + instlen + 2;
1311 OBD_ALLOC(dt, dt_len);
1313 GOTO(out_profile, err = -ENOMEM);
1314 snprintf(dt, dt_len - 1, "%s-%016lx", lprof->lp_dt, cfg_instance);
1316 md_len = strlen(lprof->lp_md) + instlen + 2;
1317 OBD_ALLOC(md, md_len);
1319 GOTO(out_free_dt, err = -ENOMEM);
1320 snprintf(md, md_len - 1, "%s-%016lx", lprof->lp_md, cfg_instance);
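/* editorial note (assumed profile naming): the dt/md names typically end
 * up like "<fsname>-clilov-<cfg_instance>" and "<fsname>-clilmv-<cfg_instance>",
 * matching the obds set up from the client profile above */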
1322 /* connections, registrations, sb setup */
1323 err = client_common_fill_super(sb, md, dt);
1325 GOTO(out_free_md, err);
1327 sbi->ll_client_common_fill_super_succeeded = 1;
1331 OBD_FREE(md, md_len);
1334 OBD_FREE(dt, dt_len);
1337 class_put_profile(lprof);
1340 ll_debugfs_unregister_super(sb);
1347 else if (sbi->ll_flags & LL_SBI_VERBOSE)
1348 LCONSOLE_WARN("Mounted %s\n", profilenm);
1350 } /* ll_fill_super */
1352 void ll_put_super(struct super_block *sb)
1354 struct config_llog_instance cfg, params_cfg;
1355 struct obd_device *obd;
1356 struct lustre_sb_info *lsi = s2lsi(sb);
1357 struct ll_sb_info *sbi = ll_s2sbi(sb);
1358 char *profilenm = get_profile_name(sb);
1359 unsigned long cfg_instance = ll_get_cfg_instance(sb);
1361 int next, force = 1, rc = 0;
1365 GOTO(out_no_sbi, 0);
1367 /* Should replace instance_id with something better for ASLR */
1368 CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1369 profilenm, cfg_instance, sb);
1371 cfg.cfg_instance = cfg_instance;
1372 lustre_end_log(sb, profilenm, &cfg);
1374 params_cfg.cfg_instance = cfg_instance;
1375 lustre_end_log(sb, PARAMS_FILENAME, ¶ms_cfg);
1377 if (sbi->ll_md_exp) {
1378 obd = class_exp2obd(sbi->ll_md_exp);
1380 force = obd->obd_force;
1383 /* Wait for unstable pages to be committed to stable storage */
1385 rc = l_wait_event_abortable(
1386 sbi->ll_cache->ccc_unstable_waitq,
1387 atomic_long_read(&sbi->ll_cache->ccc_unstable_nr) == 0);
1390 ccc_count = atomic_long_read(&sbi->ll_cache->ccc_unstable_nr);
1391 if (force == 0 && rc != -ERESTARTSYS)
1392 LASSERTF(ccc_count == 0, "count: %li\n", ccc_count);
1394 /* We need to set force before the lov_disconnect in
1395 * lustre_common_put_super, since l_d cleans up osc's as well.
1399 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
1401 obd->obd_force = force;
1405 if (sbi->ll_client_common_fill_super_succeeded) {
1406 /* Only if client_common_fill_super succeeded */
1407 client_common_put_super(sb);
1411 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)))
1412 class_manual_cleanup(obd);
1414 if (sbi->ll_flags & LL_SBI_VERBOSE)
1415 LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");
1418 class_del_profile(profilenm);
1420 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1421 if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
1422 bdi_destroy(&lsi->lsi_bdi);
1423 lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
1428 lsi->lsi_llsbi = NULL;
1430 lustre_common_put_super(sb);
1432 cl_env_cache_purge(~0);
1435 } /* client_put_super */
1437 struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
1439 struct inode *inode = NULL;
1441 /* NOTE: we depend on atomic igrab() -bzzz */
1442 lock_res_and_lock(lock);
1443 if (lock->l_resource->lr_lvb_inode) {
1444 struct ll_inode_info * lli;
1445 lli = ll_i2info(lock->l_resource->lr_lvb_inode);
1446 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1447 inode = igrab(lock->l_resource->lr_lvb_inode);
1449 inode = lock->l_resource->lr_lvb_inode;
1450 LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ? D_INFO :
1451 D_WARNING, lock, "lr_lvb_inode %p is "
1452 "bogus: magic %08x",
1453 lock->l_resource->lr_lvb_inode,
1454 lli->lli_inode_magic);
1458 unlock_res_and_lock(lock);
1462 void ll_dir_clear_lsm_md(struct inode *inode)
1464 struct ll_inode_info *lli = ll_i2info(inode);
1466 LASSERT(S_ISDIR(inode->i_mode));
1468 if (lli->lli_lsm_md) {
1469 lmv_free_memmd(lli->lli_lsm_md);
1470 lli->lli_lsm_md = NULL;
1473 if (lli->lli_default_lsm_md) {
1474 lmv_free_memmd(lli->lli_default_lsm_md);
1475 lli->lli_default_lsm_md = NULL;
1479 static struct inode *ll_iget_anon_dir(struct super_block *sb,
1480 const struct lu_fid *fid,
1481 struct lustre_md *md)
1483 struct ll_sb_info *sbi = ll_s2sbi(sb);
1484 struct ll_inode_info *lli;
1485 struct mdt_body *body = md->body;
1486 struct inode *inode;
1492 ino = cl_fid_build_ino(fid, sbi->ll_flags & LL_SBI_32BIT_API);
1493 inode = iget_locked(sb, ino);
1494 if (inode == NULL) {
1495 CERROR("%s: failed get simple inode "DFID": rc = -ENOENT\n",
1496 sbi->ll_fsname, PFID(fid));
1497 RETURN(ERR_PTR(-ENOENT));
1500 lli = ll_i2info(inode);
1501 if (inode->i_state & I_NEW) {
1502 inode->i_mode = (inode->i_mode & ~S_IFMT) |
1503 (body->mbo_mode & S_IFMT);
1504 LASSERTF(S_ISDIR(inode->i_mode), "Not slave inode "DFID"\n",
1507 inode->i_mtime.tv_sec = 0;
1508 inode->i_atime.tv_sec = 0;
1509 inode->i_ctime.tv_sec = 0;
1512 #ifdef HAVE_BACKING_DEV_INFO
1513 /* initializing backing dev info. */
1514 inode->i_mapping->backing_dev_info =
1515 &s2lsi(inode->i_sb)->lsi_bdi;
1517 inode->i_op = &ll_dir_inode_operations;
1518 inode->i_fop = &ll_dir_operations;
1519 lli->lli_fid = *fid;
1522 /* master object FID */
1523 lli->lli_pfid = body->mbo_fid1;
1524 CDEBUG(D_INODE, "lli %p slave "DFID" master "DFID"\n",
1525 lli, PFID(fid), PFID(&lli->lli_pfid));
1526 unlock_new_inode(inode);
1528 /* in directory restripe/auto-split, a plain directory will be
1529 * transformed into a stripe; set its pfid here, otherwise
1530 * ll_lock_cancel_bits() can't find the master inode.
1532 lli->lli_pfid = body->mbo_fid1;
1538 static int ll_init_lsm_md(struct inode *inode, struct lustre_md *md)
1541 struct lmv_stripe_md *lsm = md->lmv;
1542 struct ll_inode_info *lli = ll_i2info(inode);
1545 LASSERT(lsm != NULL);
1547 CDEBUG(D_INODE, "%s: "DFID" set dir layout:\n",
1548 ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1549 lsm_md_dump(D_INODE, lsm);
1551 if (!lmv_dir_striped(lsm))
1554 /* XXX sigh, this lsm_root initialization should be in the
1555 * LMV layer, but it needs ll_iget right now, so we
1556 * put it here for now. */
1557 for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
1558 fid = &lsm->lsm_md_oinfo[i].lmo_fid;
1559 LASSERT(lsm->lsm_md_oinfo[i].lmo_root == NULL);
1561 if (!fid_is_sane(fid))
1564 /* Unfortunately ll_iget will call ll_update_inode,
1565 * where the initialization of a slave inode is slightly
1566 * different, so it resets lsm_md to NULL to avoid
1567 * initializing the lsm for the slave inode. */
1568 lsm->lsm_md_oinfo[i].lmo_root =
1569 ll_iget_anon_dir(inode->i_sb, fid, md);
1570 if (IS_ERR(lsm->lsm_md_oinfo[i].lmo_root)) {
1571 int rc = PTR_ERR(lsm->lsm_md_oinfo[i].lmo_root);
1573 lsm->lsm_md_oinfo[i].lmo_root = NULL;
1575 iput(lsm->lsm_md_oinfo[i].lmo_root);
1576 lsm->lsm_md_oinfo[i].lmo_root = NULL;
1582 lli->lli_lsm_md = lsm;
1587 static void ll_update_default_lsm_md(struct inode *inode, struct lustre_md *md)
1589 struct ll_inode_info *lli = ll_i2info(inode);
1591 if (!md->default_lmv) {
1592 /* clear default lsm */
1593 if (lli->lli_default_lsm_md) {
1594 down_write(&lli->lli_lsm_sem);
1595 if (lli->lli_default_lsm_md) {
1596 lmv_free_memmd(lli->lli_default_lsm_md);
1597 lli->lli_default_lsm_md = NULL;
1599 up_write(&lli->lli_lsm_sem);
1604 if (lli->lli_default_lsm_md) {
1605 /* do nothing if the default lsm isn't changed */
1606 down_read(&lli->lli_lsm_sem);
1607 if (lli->lli_default_lsm_md &&
1608 lsm_md_eq(lli->lli_default_lsm_md, md->default_lmv)) {
1609 up_read(&lli->lli_lsm_sem);
1612 up_read(&lli->lli_lsm_sem);
1615 down_write(&lli->lli_lsm_sem);
1616 if (lli->lli_default_lsm_md)
1617 lmv_free_memmd(lli->lli_default_lsm_md);
1618 lli->lli_default_lsm_md = md->default_lmv;
1619 lsm_md_dump(D_INODE, md->default_lmv);
1620 md->default_lmv = NULL;
1621 up_write(&lli->lli_lsm_sem);
1624 static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
1626 struct ll_inode_info *lli = ll_i2info(inode);
1627 struct lmv_stripe_md *lsm = md->lmv;
1628 struct cl_attr *attr;
1633 LASSERT(S_ISDIR(inode->i_mode));
1634 CDEBUG(D_INODE, "update lsm %p of "DFID"\n", lli->lli_lsm_md,
1635 PFID(ll_inode2fid(inode)));
1637 /* update default LMV */
1638 if (md->default_lmv)
1639 ll_update_default_lsm_md(inode, md);
1641 /* after dir migration/restripe, a stripe may be turned into a
1642 * directory; in this case, zero out its lli_pfid.
1644 if (unlikely(fid_is_norm(&lli->lli_pfid)))
1645 fid_zero(&lli->lli_pfid);
1648 * no striped information from request, lustre_md from req does not
1649 * include stripeEA, see ll_md_setattr()
1655 * normally the dir layout doesn't change, so take only the read lock
1656 * to check it, to avoid blocking other MD operations.
1658 down_read(&lli->lli_lsm_sem);
1660 /* some concurrent lookup initialized the lsm, and it is unchanged */
1661 if (lli->lli_lsm_md && lsm_md_eq(lli->lli_lsm_md, lsm))
1662 GOTO(unlock, rc = 0);
1664 /* if dir layout doesn't match, check whether version is increased,
1665 * which means layout is changed, this happens in dir split/merge and
1668 * foreign LMV should not change.
1670 if (lli->lli_lsm_md && lmv_dir_striped(lli->lli_lsm_md) &&
1671 lsm->lsm_md_layout_version <=
1672 lli->lli_lsm_md->lsm_md_layout_version) {
1673 CERROR("%s: "DFID" dir layout mismatch:\n",
1674 ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1675 lsm_md_dump(D_ERROR, lli->lli_lsm_md);
1676 lsm_md_dump(D_ERROR, lsm);
1677 GOTO(unlock, rc = -EINVAL);
1680 up_read(&lli->lli_lsm_sem);
1681 down_write(&lli->lli_lsm_sem);
1682 /* clear existing lsm */
1683 if (lli->lli_lsm_md) {
1684 lmv_free_memmd(lli->lli_lsm_md);
1685 lli->lli_lsm_md = NULL;
1688 rc = ll_init_lsm_md(inode, md);
1689 up_write(&lli->lli_lsm_sem);
1694 /* set md->lmv to NULL, so the following free lustre_md will not free
1699 /* md_merge_attr() may take long, since lsm is already set, switch to
1702 down_read(&lli->lli_lsm_sem);
1704 if (!lmv_dir_striped(lli->lli_lsm_md))
1705 GOTO(unlock, rc = 0);
1707 OBD_ALLOC_PTR(attr);
1709 GOTO(unlock, rc = -ENOMEM);
1711 /* validate the lsm */
1712 rc = md_merge_attr(ll_i2mdexp(inode), lli->lli_lsm_md, attr,
1713 ll_md_blocking_ast);
1715 if (md->body->mbo_valid & OBD_MD_FLNLINK)
1716 md->body->mbo_nlink = attr->cat_nlink;
1717 if (md->body->mbo_valid & OBD_MD_FLSIZE)
1718 md->body->mbo_size = attr->cat_size;
1719 if (md->body->mbo_valid & OBD_MD_FLATIME)
1720 md->body->mbo_atime = attr->cat_atime;
1721 if (md->body->mbo_valid & OBD_MD_FLCTIME)
1722 md->body->mbo_ctime = attr->cat_ctime;
1723 if (md->body->mbo_valid & OBD_MD_FLMTIME)
1724 md->body->mbo_mtime = attr->cat_mtime;
1730 up_read(&lli->lli_lsm_sem);
1735 void ll_clear_inode(struct inode *inode)
1737 struct ll_inode_info *lli = ll_i2info(inode);
1738 struct ll_sb_info *sbi = ll_i2sbi(inode);
1742 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
1743 PFID(ll_inode2fid(inode)), inode);
1745 if (S_ISDIR(inode->i_mode)) {
1746 /* these should have been cleared in ll_file_release */
1747 LASSERT(lli->lli_opendir_key == NULL);
1748 LASSERT(lli->lli_sai == NULL);
1749 LASSERT(lli->lli_opendir_pid == 0);
1751 pcc_inode_free(inode);
1754 md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
1756 LASSERT(!lli->lli_open_fd_write_count);
1757 LASSERT(!lli->lli_open_fd_read_count);
1758 LASSERT(!lli->lli_open_fd_exec_count);
1760 if (lli->lli_mds_write_och)
1761 ll_md_real_close(inode, FMODE_WRITE);
1762 if (lli->lli_mds_exec_och)
1763 ll_md_real_close(inode, FMODE_EXEC);
1764 if (lli->lli_mds_read_och)
1765 ll_md_real_close(inode, FMODE_READ);
1767 if (S_ISLNK(inode->i_mode) && lli->lli_symlink_name) {
1768 OBD_FREE(lli->lli_symlink_name,
1769 strlen(lli->lli_symlink_name) + 1);
1770 lli->lli_symlink_name = NULL;
1773 ll_xattr_cache_destroy(inode);
1775 forget_all_cached_acls(inode);
1777 lli->lli_inode_magic = LLI_INODE_DEAD;
1779 if (S_ISDIR(inode->i_mode))
1780 ll_dir_clear_lsm_md(inode);
1781 else if (S_ISREG(inode->i_mode) && !is_bad_inode(inode))
1782 LASSERT(list_empty(&lli->lli_agl_list));
1785 * XXX This has to be done before lsm is freed below, because
1786 * cl_object still uses inode lsm.
1788 cl_inode_fini(inode);
1790 llcrypt_put_encryption_info(inode);
1795 static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data)
1797 struct lustre_md md;
1798 struct inode *inode = dentry->d_inode;
1799 struct ll_sb_info *sbi = ll_i2sbi(inode);
1800 struct ptlrpc_request *request = NULL;
1804 op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
1805 LUSTRE_OPC_ANY, NULL);
1806 if (IS_ERR(op_data))
1807 RETURN(PTR_ERR(op_data));
1809 rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &request);
1811 ptlrpc_req_finished(request);
1812 if (rc == -ENOENT) {
1814 /* Unlinked special device node? Or just a race?
1815 * Pretend we did everything. */
1816 if (!S_ISREG(inode->i_mode) &&
1817 !S_ISDIR(inode->i_mode)) {
1818 ia_valid = op_data->op_attr.ia_valid;
1819 op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
1820 rc = simple_setattr(dentry, &op_data->op_attr);
1821 op_data->op_attr.ia_valid = ia_valid;
1823 } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
1824 CERROR("md_setattr fails: rc = %d\n", rc);
1829 rc = md_get_lustre_md(sbi->ll_md_exp, &request->rq_pill, sbi->ll_dt_exp,
1830 sbi->ll_md_exp, &md);
1832 ptlrpc_req_finished(request);
1836 ia_valid = op_data->op_attr.ia_valid;
1837 /* inode size will be set in ll_setattr_ost; can't do it now since the
1838 * dirty cache is not cleared yet. */
1839 op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
1840 if (S_ISREG(inode->i_mode))
1842 rc = simple_setattr(dentry, &op_data->op_attr);
1843 if (S_ISREG(inode->i_mode))
1844 inode_unlock(inode);
1845 op_data->op_attr.ia_valid = ia_valid;
1847 rc = ll_update_inode(inode, &md);
1848 ptlrpc_req_finished(request);
1854 * Zero the portion of the page that is part of @inode.
1855 * This implies, if necessary:
1856 * - taking cl_lock on range corresponding to concerned page
1857 * - grabbing vm page
1858 * - associating cl_page
1859 * - proceeding to clio read
1860 * - zeroing range in page
1861 * - proceeding to cl_page flush
1862 * - releasing cl_lock
1864 * \param[in] inode inode
1865 * \param[in] index page index
1866 * \param[in] offset offset in page to start zero from
1867 * \param[in] len len to zero
1869 * \retval 0 on success
1870 * \retval negative errno on failure
1872 int ll_io_zero_page(struct inode *inode, pgoff_t index, pgoff_t offset,
1875 struct ll_inode_info *lli = ll_i2info(inode);
1876 struct cl_object *clob = lli->lli_clob;
1878 struct lu_env *env = NULL;
1879 struct cl_io *io = NULL;
1880 struct cl_page *clpage = NULL;
1881 struct page *vmpage = NULL;
1882 unsigned from = index << PAGE_SHIFT;
1883 struct cl_lock *lock = NULL;
1884 struct cl_lock_descr *descr = NULL;
1885 struct cl_2queue *queue = NULL;
1886 struct cl_sync_io *anchor = NULL;
1887 bool holdinglock = false;
1888 bool lockedbymyself = true;
1893 env = cl_env_get(&refcheck);
1895 RETURN(PTR_ERR(env));
1897 io = vvp_env_thread_io(env);
1899 rc = cl_io_rw_init(env, io, CIT_WRITE, from, PAGE_SIZE);
1903 lock = vvp_env_lock(env);
1904 descr = &lock->cll_descr;
1905 descr->cld_obj = io->ci_obj;
1906 descr->cld_start = cl_index(io->ci_obj, from);
1907 descr->cld_end = cl_index(io->ci_obj, from + PAGE_SIZE - 1);
1908 descr->cld_mode = CLM_WRITE;
1909 descr->cld_enq_flags = CEF_MUST | CEF_NONBLOCK;
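/* the descriptor covers exactly the one page at 'index': cld_start and
 * cld_end are the cl_object indices of byte offsets 'from' and
 * 'from + PAGE_SIZE - 1' respectively */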
1911 /* request lock for page */
1912 rc = cl_lock_request(env, io, lock);
1913 /* -ECANCELED indicates a matching lock with a different extent
1914 * was already present, and -EEXIST indicates a matching lock
1915 * on exactly the same extent was already present.
1916 * In both cases it means we are covered.
1918 if (rc == -ECANCELED || rc == -EEXIST)
1926 vmpage = grab_cache_page_nowait(inode->i_mapping, index);
1928 GOTO(rellock, rc = -EOPNOTSUPP);
1930 if (!PageDirty(vmpage)) {
1931 /* associate cl_page */
1932 clpage = cl_page_find(env, clob, vmpage->index,
1933 vmpage, CPT_CACHEABLE);
1935 GOTO(pagefini, rc = PTR_ERR(clpage));
1937 cl_page_assume(env, io, clpage);
1940 if (!PageUptodate(vmpage) && !PageDirty(vmpage) &&
1941 !PageWriteback(vmpage)) {
1943 /* set PagePrivate2 to detect special case of empty page
1944 * in osc_brw_fini_request()
1946 SetPagePrivate2(vmpage);
1947 rc = ll_io_read_page(env, io, clpage, NULL);
1948 if (!PagePrivate2(vmpage))
1949 /* PagePrivate2 was cleared in osc_brw_fini_request()
1950 * meaning we read an empty page. In this case, in order
1951 * to avoid allocating unnecessary block in truncated
1952 * file, we must not zero and write as below. Subsequent
1953 * server-side truncate will handle things correctly.
1955 GOTO(clpfini, rc = 0);
1956 ClearPagePrivate2(vmpage);
1959 lockedbymyself = trylock_page(vmpage);
1960 cl_page_assume(env, io, clpage);
1963 /* zero range in page */
1964 zero_user(vmpage, offset, len);
1966 if (holdinglock && clpage) {
1967 /* explicitly write newly modified page */
1968 queue = &io->ci_queue;
1969 cl_2queue_init(queue);
1970 anchor = &vvp_env_info(env)->vti_anchor;
1971 cl_sync_io_init(anchor, 1);
1972 clpage->cp_sync_io = anchor;
1973 cl_2queue_add(queue, clpage, true);
1974 rc = cl_io_submit_rw(env, io, CRT_WRITE, queue);
1976 GOTO(queuefini1, rc);
1977 rc = cl_sync_io_wait(env, anchor, 0);
1979 GOTO(queuefini2, rc);
1980 cl_page_assume(env, io, clpage);
1983 cl_2queue_discard(env, io, queue);
1985 cl_2queue_disown(env, io, queue);
1986 cl_2queue_fini(env, queue);
1991 cl_page_put(env, clpage);
1993 if (lockedbymyself) {
1994 unlock_page(vmpage);
1999 cl_lock_release(env, lock);
2001 cl_io_fini(env, io);
2004 cl_env_put(env, &refcheck);
2009 /* If this inode has objects allocated to it (lsm != NULL), then the OST
2010 * object(s) determine the file size and mtime. Otherwise, the MDS will
2011 * keep these values until such a time that objects are allocated for it.
2012 * We do the MDS operations first, as it is checking permissions for us.
2013 * We don't do the MDS RPC if there is nothing that we want to store there,
2014 * otherwise there is no harm in updating mtime/atime on the MDS if we are
2015 * going to do an RPC anyway.
2017 * If we are doing a truncate, we will send the mtime and ctime updates
2018 * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
2019 * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
2022 * In case of HSMimport, we only set attr on MDS.
2024 int ll_setattr_raw(struct dentry *dentry, struct iattr *attr,
2025 enum op_xvalid xvalid, bool hsm_import)
2027 struct inode *inode = dentry->d_inode;
2028 struct ll_inode_info *lli = ll_i2info(inode);
2029 struct md_op_data *op_data = NULL;
2030 ktime_t kstart = ktime_get();
2035 CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, "
2036 "valid %x, hsm_import %d\n",
2037 ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid),
2038 inode, i_size_read(inode), attr->ia_size, attr->ia_valid,
2041 if (attr->ia_valid & ATTR_SIZE) {
2042 /* Check new size against VFS/VM file size limit and rlimit */
2043 rc = inode_newsize_ok(inode, attr->ia_size);
2047 /* The maximum Lustre file size is variable, based on the
2048 * OST maximum object size and number of stripes. This
2049 * needs another check in addition to the VFS check above. */
2050 if (attr->ia_size > ll_file_maxbytes(inode)) {
2051 CDEBUG(D_INODE,"file "DFID" too large %llu > %llu\n",
2052 PFID(&lli->lli_fid), attr->ia_size,
2053 ll_file_maxbytes(inode));
2057 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
2060 /* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
2061 if (attr->ia_valid & TIMES_SET_FLAGS) {
2062 if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
2063 !capable(CAP_FOWNER))
2067 /* We mark all of the fields "set" so MDS/OST does not re-set them */
2068 if (!(xvalid & OP_XVALID_CTIME_SET) &&
2069 (attr->ia_valid & ATTR_CTIME)) {
2070 attr->ia_ctime = current_time(inode);
2071 xvalid |= OP_XVALID_CTIME_SET;
2073 if (!(attr->ia_valid & ATTR_ATIME_SET) &&
2074 (attr->ia_valid & ATTR_ATIME)) {
2075 attr->ia_atime = current_time(inode);
2076 attr->ia_valid |= ATTR_ATIME_SET;
2078 if (!(attr->ia_valid & ATTR_MTIME_SET) &&
2079 (attr->ia_valid & ATTR_MTIME)) {
2080 attr->ia_mtime = current_time(inode);
2081 attr->ia_valid |= ATTR_MTIME_SET;
2084 if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
2085 CDEBUG(D_INODE, "setting mtime %lld, ctime %lld, now = %lld\n",
2086 (s64)attr->ia_mtime.tv_sec, (s64)attr->ia_ctime.tv_sec,
2087 ktime_get_real_seconds());
2089 if (S_ISREG(inode->i_mode))
2090 inode_unlock(inode);
2092 /* We always do an MDS RPC, even if we're only changing the size;
2093 * only the MDS knows whether truncate() should fail with -ETXTBUSY */
2095 OBD_ALLOC_PTR(op_data);
2096 if (op_data == NULL)
2097 GOTO(out, rc = -ENOMEM);
2099 if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
2100 /* If we are changing file size, file content is
2101 * modified, flag it.
2103 xvalid |= OP_XVALID_OWNEROVERRIDE;
2104 op_data->op_bias |= MDS_DATA_MODIFIED;
2105 clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
2108 if (attr->ia_valid & ATTR_FILE) {
2109 struct ll_file_data *fd = attr->ia_file->private_data;
2111 if (fd->fd_lease_och)
2112 op_data->op_bias |= MDS_TRUNC_KEEP_LEASE;
2115 op_data->op_attr = *attr;
2116 op_data->op_xvalid = xvalid;
2118 rc = ll_md_setattr(dentry, op_data);
2122 if (!S_ISREG(inode->i_mode) || hsm_import)
2125 if (attr->ia_valid & (ATTR_SIZE | ATTR_ATIME | ATTR_ATIME_SET |
2126 ATTR_MTIME | ATTR_MTIME_SET | ATTR_CTIME) ||
2127 xvalid & OP_XVALID_CTIME_SET) {
2128 bool cached = false;
2130 rc = pcc_inode_setattr(inode, attr, &cached);
2133 CERROR("%s: PCC inode "DFID" setattr failed: "
2135 ll_i2sbi(inode)->ll_fsname,
2136 PFID(&lli->lli_fid), rc);
2140 unsigned int flags = 0;
2142 /* For truncate and utimes sending attributes to OSTs,
2143 * setting mtime/atime to the past will be performed
2144 * under PW [0:EOF] extent lock (new_size:EOF for
2145 * truncate). It may seem excessive to send mtime/atime
2146 * updates to OSTs when not setting times to past, but
2147 * it is necessary due to possible time
2148 * de-synchronization between MDT inode and OST objects
2150 if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) {
2151 xvalid |= OP_XVALID_FLAGS;
2152 flags = LUSTRE_ENCRYPT_FL;
2153 /* Call to ll_io_zero_page is not necessary if
2154 * truncating on PAGE_SIZE boundary, because
2155 * whole pages will be wiped.
2156 * In case of Direct IO, all we need is to set
2159 if (attr->ia_valid & ATTR_SIZE &&
2160 attr->ia_size & ~PAGE_MASK &&
2161 !(attr->ia_valid & ATTR_FILE &&
2162 attr->ia_file->f_flags & O_DIRECT)) {
2164 attr->ia_size & (PAGE_SIZE - 1);
2166 rc = ll_io_zero_page(inode,
2167 attr->ia_size >> PAGE_SHIFT,
2168 offset, PAGE_SIZE - offset);
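/* editorial example: truncating an encrypted file to 10000 bytes with
 * 4 KiB pages gives index 2 and offset 1808, so bytes 1808..4095 of the
 * last page are zeroed before the size change is sent via
 * cl_setattr_ost() below */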
2173 rc = cl_setattr_ost(lli->lli_clob, attr, xvalid, flags);
2177 /* If the file was restored, we need to set the dirty flag again.
2179 * We've already sent the MDS_DATA_MODIFIED flag in
2180 * ll_md_setattr() for truncate. However, the MDT refuses to
2181 * set the HS_DIRTY flag on released files, so we have to set
2182 * it again if the file has been restored. Please check how
2183 * LLIF_DATA_MODIFIED is set in vvp_io_setattr_fini().
2185 * Note that if the file is not released, the previous
2186 * MDS_DATA_MODIFIED has taken effect and usually
2187 * LLIF_DATA_MODIFIED is not set (see vvp_io_setattr_fini()).
2188 * This way we save an RPC for the common open + truncate case. */
2190 if (test_and_clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags)) {
2191 struct hsm_state_set hss = {
2192 .hss_valid = HSS_SETMASK,
2193 .hss_setmask = HS_DIRTY,
2197 rc2 = ll_hsm_state_set(inode, &hss);
2198 /* truncate and write can happen at the same time, so the
2199 * file can be marked modified even though it was not
2200 * restored from released state; in that case ll_hsm_state_set()
2201 * is not applicable for the file and rc2 < 0 is normal. */
2204 CDEBUG(D_INFO, DFID "HSM set dirty failed: rc2 = %d\n",
2205 PFID(ll_inode2fid(inode)), rc2);
2210 if (op_data != NULL)
2211 ll_finish_md_op_data(op_data);
2213 if (S_ISREG(inode->i_mode)) {
2215 if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
2216 inode_dio_wait(inode);
2217 /* Once we've got the i_mutex, it's safe to set the S_NOSEC
2218 * flag. ll_update_inode (called from ll_md_setattr) clears
2219 * inode flags, so there is a gap where S_NOSEC is not set.
2220 * This can cause a writer to take the i_mutex unnecessarily,
2221 * but this is safe to do and should be rare. */
2222 inode_has_no_xattr(inode);
2226 ll_stats_ops_tally(ll_i2sbi(inode), attr->ia_valid & ATTR_SIZE ?
2227 LPROC_LL_TRUNC : LPROC_LL_SETATTR,
2228 ktime_us_delta(ktime_get(), kstart));
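/*
 * Plain setattr wrapper: decide which extra xvalid bits and ATTR_* kill
 * flags to pass down (owner override for a combined ctime/size/mode
 * change, ATTR_FORCE and the ATTR_KILL_S[UG]ID flags when a mode change
 * drops the setuid/setgid bits), then let ll_setattr_raw() do the work.
 */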
2233 int ll_setattr(struct dentry *de, struct iattr *attr)
2235 int mode = de->d_inode->i_mode;
2236 enum op_xvalid xvalid = 0;
2239 rc = llcrypt_prepare_setattr(de, attr);
2243 if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
2244 (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
2245 xvalid |= OP_XVALID_OWNEROVERRIDE;
2247 if (((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
2248 (ATTR_SIZE|ATTR_MODE)) &&
2249 (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
2250 (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2251 !(attr->ia_mode & S_ISGID))))
2252 attr->ia_valid |= ATTR_FORCE;
2254 if ((attr->ia_valid & ATTR_MODE) &&
2256 !(attr->ia_mode & S_ISUID) &&
2257 !(attr->ia_valid & ATTR_KILL_SUID))
2258 attr->ia_valid |= ATTR_KILL_SUID;
2260 if ((attr->ia_valid & ATTR_MODE) &&
2261 ((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2262 !(attr->ia_mode & S_ISGID) &&
2263 !(attr->ia_valid & ATTR_KILL_SGID))
2264 attr->ia_valid |= ATTR_KILL_SGID;
2266 return ll_setattr_raw(de, attr, xvalid, false);
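/*
 * Gather filesystem statistics: query the MDC export first, then the
 * DT (data) export, allowing results up to ll_statfs_max_age seconds old
 * to be served from cache.  Block counters come from the OSTs, while the
 * inode counters are adjusted so "inodes in use" stays consistent with
 * the number of free OST objects (see the comment further below).
 */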
2269 int ll_statfs_internal(struct ll_sb_info *sbi, struct obd_statfs *osfs,
2272 struct obd_statfs obd_osfs = { 0 };
2277 max_age = ktime_get_seconds() - sbi->ll_statfs_max_age;
2279 if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
2280 flags |= OBD_STATFS_NODELAY;
2282 rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
2286 osfs->os_type = LL_SUPER_MAGIC;
2288 CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n",
2289 osfs->os_bavail, osfs->os_blocks, osfs->os_ffree, osfs->os_files);
2291 if (osfs->os_state & OS_STATFS_SUM)
2294 rc = obd_statfs(NULL, sbi->ll_dt_exp, &obd_osfs, max_age, flags);
2295 if (rc) /* Possibly a filesystem with no OSTs. Report MDT totals. */
2298 CDEBUG(D_SUPER, "OSC blocks %llu/%llu objects %llu/%llu\n",
2299 obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
2302 osfs->os_bsize = obd_osfs.os_bsize;
2303 osfs->os_blocks = obd_osfs.os_blocks;
2304 osfs->os_bfree = obd_osfs.os_bfree;
2305 osfs->os_bavail = obd_osfs.os_bavail;
2307 /* If we have _some_ OSTs, but don't have as many free objects on the
2308 * OSTs as inodes on the MDTs, reduce the reported number of inodes
2309 * to compensate, so that the "inodes in use" number is correct.
2310 * This should be kept in sync with lod_statfs() behaviour.
2312 if (obd_osfs.os_files && obd_osfs.os_ffree < osfs->os_ffree) {
2313 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
2315 osfs->os_ffree = obd_osfs.os_ffree;
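/*
 * Clamp the statfs results to the project quota limits of the file's
 * project ID, so the reported capacity reflects the quota rather than the
 * whole filesystem.  The quota code reports block limits in KiB; they are
 * converted to f_bsize units below.  For illustration (hypothetical
 * numbers): a 1 GiB soft limit (dqb_bsoftlimit = 1048576 KiB) with
 * f_bsize = 4096 gives limit = 1048576 * 1024 / 4096 = 262144 blocks.
 */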
2322 static int ll_statfs_project(struct inode *inode, struct kstatfs *sfs)
2324 struct if_quotactl qctl = {
2325 .qc_cmd = LUSTRE_Q_GETQUOTA,
2326 .qc_type = PRJQUOTA,
2327 .qc_valid = QC_GENERAL,
2329 u64 limit, curblock;
2332 qctl.qc_id = ll_i2info(inode)->lli_projid;
2333 ret = quotactl_ioctl(ll_i2sbi(inode), &qctl);
2335 /* ignore errors if the project ID does not have
2336 * a quota limit or the feature is unsupported. */
2338 if (ret == -ESRCH || ret == -EOPNOTSUPP)
2343 limit = ((qctl.qc_dqblk.dqb_bsoftlimit ?
2344 qctl.qc_dqblk.dqb_bsoftlimit :
2345 qctl.qc_dqblk.dqb_bhardlimit) * 1024) / sfs->f_bsize;
2346 if (limit && sfs->f_blocks > limit) {
2347 curblock = (qctl.qc_dqblk.dqb_curspace +
2348 sfs->f_bsize - 1) / sfs->f_bsize;
2349 sfs->f_blocks = limit;
2350 sfs->f_bfree = sfs->f_bavail =
2351 (sfs->f_blocks > curblock) ?
2352 (sfs->f_blocks - curblock) : 0;
2355 limit = qctl.qc_dqblk.dqb_isoftlimit ?
2356 qctl.qc_dqblk.dqb_isoftlimit :
2357 qctl.qc_dqblk.dqb_ihardlimit;
2358 if (limit && sfs->f_files > limit) {
2359 sfs->f_files = limit;
2360 sfs->f_ffree = (sfs->f_files >
2361 qctl.qc_dqblk.dqb_curinodes) ?
2362 (sfs->f_files - qctl.qc_dqblk.dqb_curinodes) : 0;
2368 int ll_statfs(struct dentry *de, struct kstatfs *sfs)
2370 struct super_block *sb = de->d_sb;
2371 struct obd_statfs osfs;
2372 __u64 fsid = huge_encode_dev(sb->s_dev);
2373 ktime_t kstart = ktime_get();
2376 CDEBUG(D_VFSTRACE, "VFS Op:sb=%s (%p)\n", sb->s_id, sb);
2378 /* Some amount of caching on the client is allowed */
2379 rc = ll_statfs_internal(ll_s2sbi(sb), &osfs, OBD_STATFS_SUM);
2383 statfs_unpack(sfs, &osfs);
2385 /* We need to downshift for all 32-bit kernels, because we can't
2386 * tell if the kernel is being called via sys_statfs64() or not.
2387 * Stop before overflowing f_bsize - in which case it is better
2388 * to just risk EOVERFLOW if caller is using old sys_statfs(). */
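/* For example (hypothetical numbers): 2^33 blocks of 1 KiB will not fit
 * in a 32-bit f_blocks; after two passes of the halving loop below there
 * are 2^31 blocks of 4 KiB, which does fit. */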
2389 if (sizeof(long) < 8) {
2390 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
2393 osfs.os_blocks >>= 1;
2394 osfs.os_bfree >>= 1;
2395 osfs.os_bavail >>= 1;
2399 sfs->f_blocks = osfs.os_blocks;
2400 sfs->f_bfree = osfs.os_bfree;
2401 sfs->f_bavail = osfs.os_bavail;
2402 sfs->f_fsid.val[0] = (__u32)fsid;
2403 sfs->f_fsid.val[1] = (__u32)(fsid >> 32);
2404 if (ll_i2info(de->d_inode)->lli_projid)
2405 return ll_statfs_project(de->d_inode, sfs);
2407 ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STATFS,
2408 ktime_us_delta(ktime_get(), kstart));
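/*
 * Serialize updates of i_size via lli_size_mutex.  This is only used for
 * non-directory inodes, hence the assertion below.
 */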
2413 void ll_inode_size_lock(struct inode *inode)
2415 struct ll_inode_info *lli;
2417 LASSERT(!S_ISDIR(inode->i_mode));
2419 lli = ll_i2info(inode);
2420 mutex_lock(&lli->lli_size_mutex);
2423 void ll_inode_size_unlock(struct inode *inode)
2425 struct ll_inode_info *lli;
2427 lli = ll_i2info(inode);
2428 mutex_unlock(&lli->lli_size_mutex);
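/*
 * Translate Lustre attribute flags from the MDT into VFS inode flags,
 * preserving the locally-set encryption flag and mirroring
 * LUSTRE_PROJINHERIT_FL into lli_flags.
 */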
2431 void ll_update_inode_flags(struct inode *inode, unsigned int ext_flags)
2433 /* do not clear encryption flag */
2434 ext_flags |= ll_inode_to_ext_flags(inode->i_flags) & LUSTRE_ENCRYPT_FL;
2435 inode->i_flags = ll_ext_to_inode_flags(ext_flags);
2436 if (ext_flags & LUSTRE_PROJINHERIT_FL)
2437 set_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags);
2439 clear_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags);
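/*
 * Apply the attributes carried in an MDT reply (struct lustre_md) to the
 * VFS inode: file layout or directory striping, ACLs, timestamps (which
 * only ever move forward here), ownership, size/blocks and the HSM
 * restore state.
 */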
2442 int ll_update_inode(struct inode *inode, struct lustre_md *md)
2444 struct ll_inode_info *lli = ll_i2info(inode);
2445 struct mdt_body *body = md->body;
2446 struct ll_sb_info *sbi = ll_i2sbi(inode);
2449 if (body->mbo_valid & OBD_MD_FLEASIZE) {
2450 rc = cl_file_inode_init(inode, md);
2455 if (S_ISDIR(inode->i_mode)) {
2456 rc = ll_update_lsm_md(inode, md);
2461 if (body->mbo_valid & OBD_MD_FLACL)
2462 lli_replace_acl(lli, md);
2464 inode->i_ino = cl_fid_build_ino(&body->mbo_fid1,
2465 sbi->ll_flags & LL_SBI_32BIT_API);
2466 inode->i_generation = cl_fid_build_gen(&body->mbo_fid1);
2468 if (body->mbo_valid & OBD_MD_FLATIME) {
2469 if (body->mbo_atime > inode->i_atime.tv_sec)
2470 inode->i_atime.tv_sec = body->mbo_atime;
2471 lli->lli_atime = body->mbo_atime;
2474 if (body->mbo_valid & OBD_MD_FLMTIME) {
2475 if (body->mbo_mtime > inode->i_mtime.tv_sec) {
2477 "setting ino %lu mtime from %lld to %llu\n",
2478 inode->i_ino, (s64)inode->i_mtime.tv_sec,
2480 inode->i_mtime.tv_sec = body->mbo_mtime;
2482 lli->lli_mtime = body->mbo_mtime;
2485 if (body->mbo_valid & OBD_MD_FLCTIME) {
2486 if (body->mbo_ctime > inode->i_ctime.tv_sec)
2487 inode->i_ctime.tv_sec = body->mbo_ctime;
2488 lli->lli_ctime = body->mbo_ctime;
2491 if (body->mbo_valid & OBD_MD_FLBTIME)
2492 lli->lli_btime = body->mbo_btime;
2494 /* Clear i_flags to remove S_NOSEC before permissions are updated */
2495 if (body->mbo_valid & OBD_MD_FLFLAGS)
2496 ll_update_inode_flags(inode, body->mbo_flags);
2497 if (body->mbo_valid & OBD_MD_FLMODE)
2498 inode->i_mode = (inode->i_mode & S_IFMT) |
2499 (body->mbo_mode & ~S_IFMT);
2501 if (body->mbo_valid & OBD_MD_FLTYPE)
2502 inode->i_mode = (inode->i_mode & ~S_IFMT) |
2503 (body->mbo_mode & S_IFMT);
2505 LASSERT(inode->i_mode != 0);
2506 if (body->mbo_valid & OBD_MD_FLUID)
2507 inode->i_uid = make_kuid(&init_user_ns, body->mbo_uid);
2508 if (body->mbo_valid & OBD_MD_FLGID)
2509 inode->i_gid = make_kgid(&init_user_ns, body->mbo_gid);
2510 if (body->mbo_valid & OBD_MD_FLPROJID)
2511 lli->lli_projid = body->mbo_projid;
2512 if (body->mbo_valid & OBD_MD_FLNLINK)
2513 set_nlink(inode, body->mbo_nlink);
2514 if (body->mbo_valid & OBD_MD_FLRDEV)
2515 inode->i_rdev = old_decode_dev(body->mbo_rdev);
2517 if (body->mbo_valid & OBD_MD_FLID) {
2518 /* FID shouldn't be changed! */
2519 if (fid_is_sane(&lli->lli_fid)) {
2520 LASSERTF(lu_fid_eq(&lli->lli_fid, &body->mbo_fid1),
2521 "Trying to change FID "DFID
2522 " to the "DFID", inode "DFID"(%p)\n",
2523 PFID(&lli->lli_fid), PFID(&body->mbo_fid1),
2524 PFID(ll_inode2fid(inode)), inode);
2526 lli->lli_fid = body->mbo_fid1;
2530 LASSERT(fid_seq(&lli->lli_fid) != 0);
2532 lli->lli_attr_valid = body->mbo_valid;
2533 if (body->mbo_valid & OBD_MD_FLSIZE) {
2534 i_size_write(inode, body->mbo_size);
2536 CDEBUG(D_VFSTRACE, "inode="DFID", updating i_size %llu\n",
2537 PFID(ll_inode2fid(inode)),
2538 (unsigned long long)body->mbo_size);
2540 if (body->mbo_valid & OBD_MD_FLBLOCKS)
2541 inode->i_blocks = body->mbo_blocks;
2543 if (body->mbo_valid & OBD_MD_FLLAZYSIZE)
2544 lli->lli_lazysize = body->mbo_size;
2545 if (body->mbo_valid & OBD_MD_FLLAZYBLOCKS)
2546 lli->lli_lazyblocks = body->mbo_blocks;
2549 if (body->mbo_valid & OBD_MD_TSTATE) {
2550 /* Set LLIF_FILE_RESTORING if a restore is ongoing, and
2551 * clear it when done, so that we start glimpsing the
2552 * updated attributes again. */
2554 if (body->mbo_t_state & MS_RESTORE)
2555 set_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
2557 clear_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
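/*
 * Drop all page-cache pages attached to the inode and assert that none
 * remain.  nrpages is re-read under the i_pages lock to cope with the
 * LU-118 race described below.
 */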
2563 void ll_truncate_inode_pages_final(struct inode *inode)
2565 struct address_space *mapping = &inode->i_data;
2566 unsigned long nrpages;
2567 unsigned long flags;
2569 truncate_inode_pages_final(mapping);
2571 /* Workaround for LU-118: Note nrpages may not be totally updated when
2572 * truncate_inode_pages() returns, as there can be a page in the process
2573 * of deletion (inside __delete_from_page_cache()) in the specified
2574 * range. Thus mapping->nrpages can be non-zero when this function
2575 * returns even after truncation of the whole mapping. Only do this if
2576 * nrpages isn't already zero. */
2578 nrpages = mapping->nrpages;
2580 ll_xa_lock_irqsave(&mapping->i_pages, flags);
2581 nrpages = mapping->nrpages;
2582 ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
2583 } /* Workaround end */
2585 LASSERTF(nrpages == 0, "%s: inode="DFID"(%p) nrpages=%lu, "
2586 "see https://jira.whamcloud.com/browse/LU-118\n",
2587 ll_i2sbi(inode)->ll_fsname,
2588 PFID(ll_inode2fid(inode)), inode, nrpages);
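/*
 * Initialize a freshly created inode from the MDT reply passed in
 * @opaque: zero the timestamps, apply the reply attributes, then install
 * the inode/file/address-space operations matching the file type.
 */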
2591 int ll_read_inode2(struct inode *inode, void *opaque)
2593 struct lustre_md *md = opaque;
2594 struct ll_inode_info *lli = ll_i2info(inode);
2598 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
2599 PFID(&lli->lli_fid), inode);
2601 /* Core attributes from the MDS first. This is a new inode, and
2602 * the VFS doesn't zero times in the core inode so we have to do
2603 * it ourselves. They will be overwritten by either MDS or OST
2604 * attributes - we just need to make sure they aren't newer. */
2606 inode->i_mtime.tv_sec = 0;
2607 inode->i_atime.tv_sec = 0;
2608 inode->i_ctime.tv_sec = 0;
2610 rc = ll_update_inode(inode, md);
2614 /* OIDEBUG(inode); */
2616 #ifdef HAVE_BACKING_DEV_INFO
2617 /* initializing backing dev info. */
2618 inode->i_mapping->backing_dev_info = &s2lsi(inode->i_sb)->lsi_bdi;
2620 if (S_ISREG(inode->i_mode)) {
2621 struct ll_sb_info *sbi = ll_i2sbi(inode);
2622 inode->i_op = &ll_file_inode_operations;
2623 inode->i_fop = sbi->ll_fop;
2624 inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
2626 } else if (S_ISDIR(inode->i_mode)) {
2627 inode->i_op = &ll_dir_inode_operations;
2628 inode->i_fop = &ll_dir_operations;
2630 } else if (S_ISLNK(inode->i_mode)) {
2631 inode->i_op = &ll_fast_symlink_inode_operations;
2634 inode->i_op = &ll_special_inode_operations;
2636 init_special_inode(inode, inode->i_mode,
2645 void ll_delete_inode(struct inode *inode)
2647 struct ll_inode_info *lli = ll_i2info(inode);
2650 if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL) {
2651 /* This is the last chance to write out dirty pages;
2652 * otherwise we may lose data on umount.
2654 * If i_nlink is 0 then just discard the data. This is safe because
2655 * the local inode gets i_nlink 0 from the server only for the last
2656 * unlink, so the file cannot be open anywhere else. */
2658 cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, inode->i_nlink ?
2659 CL_FSYNC_LOCAL : CL_FSYNC_DISCARD, 1);
2662 ll_truncate_inode_pages_final(inode);
2663 ll_clear_inode(inode);
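/*
 * Handle flag-related ioctls: FS_IOC_GETFLAGS fetches the current flags
 * from the MDT, while FS_IOC_SETFLAGS pushes the new flags to the MDT
 * and, if the file has a data object, to the OSTs as well.
 */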
2669 int ll_iocontrol(struct inode *inode, struct file *file,
2670 unsigned int cmd, unsigned long arg)
2672 struct ll_sb_info *sbi = ll_i2sbi(inode);
2673 struct ptlrpc_request *req = NULL;
2678 case FS_IOC_GETFLAGS: {
2679 struct mdt_body *body;
2680 struct md_op_data *op_data;
2682 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
2683 0, 0, LUSTRE_OPC_ANY,
2685 if (IS_ERR(op_data))
2686 RETURN(PTR_ERR(op_data));
2688 op_data->op_valid = OBD_MD_FLFLAGS;
2689 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
2690 ll_finish_md_op_data(op_data);
2692 CERROR("%s: failure inode "DFID": rc = %d\n",
2693 sbi->ll_md_exp->exp_obd->obd_name,
2694 PFID(ll_inode2fid(inode)), rc);
2698 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
2700 flags = body->mbo_flags;
2702 ptlrpc_req_finished(req);
2704 RETURN(put_user(flags, (int __user *)arg));
2706 case FS_IOC_SETFLAGS: {
2708 struct md_op_data *op_data;
2709 struct cl_object *obj;
2710 struct fsxattr fa = { 0 };
2712 if (get_user(flags, (int __user *)arg))
2715 fa.fsx_projid = ll_i2info(inode)->lli_projid;
2716 if (flags & LUSTRE_PROJINHERIT_FL)
2717 fa.fsx_xflags = FS_XFLAG_PROJINHERIT;
2719 rc = ll_ioctl_check_project(inode, fa.fsx_xflags,
2724 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
2725 LUSTRE_OPC_ANY, NULL);
2726 if (IS_ERR(op_data))
2727 RETURN(PTR_ERR(op_data));
2729 op_data->op_attr_flags = flags;
2730 op_data->op_xvalid |= OP_XVALID_FLAGS;
2731 rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &req);
2732 ll_finish_md_op_data(op_data);
2733 ptlrpc_req_finished(req);
2737 ll_update_inode_flags(inode, flags);
2739 obj = ll_i2info(inode)->lli_clob;
2743 OBD_ALLOC_PTR(attr);
2747 rc = cl_setattr_ost(obj, attr, OP_XVALID_FLAGS, flags);
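/*
 * Ask both the metadata (MDC) and data (OSC/LOV) exports to flush the
 * security context of the current user.
 */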
2759 int ll_flush_ctx(struct inode *inode)
2761 struct ll_sb_info *sbi = ll_i2sbi(inode);
2763 CDEBUG(D_SEC, "flush context for user %d\n",
2764 from_kuid(&init_user_ns, current_uid()));
2766 obd_set_info_async(NULL, sbi->ll_md_exp,
2767 sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2769 obd_set_info_async(NULL, sbi->ll_dt_exp,
2770 sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2775 /* umount -f client means force down, don't save state */
2776 void ll_umount_begin(struct super_block *sb)
2778 struct ll_sb_info *sbi = ll_s2sbi(sb);
2779 struct obd_device *obd;
2780 struct obd_ioctl_data *ioc_data;
2784 CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
2785 sb->s_count, atomic_read(&sb->s_active));
2787 obd = class_exp2obd(sbi->ll_md_exp);
2789 CERROR("Invalid MDC connection handle %#llx\n",
2790 sbi->ll_md_exp->exp_handle.h_cookie);
2796 obd = class_exp2obd(sbi->ll_dt_exp);
2798 CERROR("Invalid LOV connection handle %#llx\n",
2799 sbi->ll_dt_exp->exp_handle.h_cookie);
2805 OBD_ALLOC_PTR(ioc_data);
2807 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
2808 sizeof *ioc_data, ioc_data, NULL);
2810 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
2811 sizeof *ioc_data, ioc_data, NULL);
2813 OBD_FREE_PTR(ioc_data);
2816 /* Really, we'd like to wait until there are no requests outstanding,
2817 * and then continue. For now, we just periodically check for the VFS
2818 * to decrement mnt_cnt and hope to finish within 10 seconds. */
2822 !may_umount(sbi->ll_mnt.mnt)) {
2830 int ll_remount_fs(struct super_block *sb, int *flags, char *data)
2832 struct ll_sb_info *sbi = ll_s2sbi(sb);
2833 char *profilenm = get_profile_name(sb);
2837 if ((*flags & MS_RDONLY) != (sb->s_flags & SB_RDONLY)) {
2838 read_only = *flags & MS_RDONLY;
2839 err = obd_set_info_async(NULL, sbi->ll_md_exp,
2840 sizeof(KEY_READ_ONLY),
2841 KEY_READ_ONLY, sizeof(read_only),
2844 LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
2845 profilenm, read_only ?
2846 "read-only" : "read-write", err);
2851 sb->s_flags |= SB_RDONLY;
2853 sb->s_flags &= ~SB_RDONLY;
2855 if (sbi->ll_flags & LL_SBI_VERBOSE)
2856 LCONSOLE_WARN("Remounted %s %s\n", profilenm,
2857 read_only ? "read-only" : "read-write");
2863 * Clean up the open handle that is cached on the MDT side.
2865 * In the open case, the client-side open handling thread may hit an
2866 * error after the MDT has granted the open. In that case the client
2867 * should send a close RPC to the MDT as cleanup; otherwise the open
2868 * handle is leaked on the MDT until the client unmounts or is evicted.
2870 * Furthermore, if someone unlinks the file, the open handle still holds
2871 * a reference on the file/object, which blocks subsequent threads that
2872 * want to locate the object via FID.
2874 * \param[in] sb super block for this file-system
2875 * \param[in] open_req pointer to the original open request
2877 void ll_open_cleanup(struct super_block *sb, struct req_capsule *pill)
2879 struct mdt_body *body;
2880 struct md_op_data *op_data;
2881 struct ptlrpc_request *close_req = NULL;
2882 struct obd_export *exp = ll_s2sbi(sb)->ll_md_exp;
2885 body = req_capsule_server_get(pill, &RMF_MDT_BODY);
2886 OBD_ALLOC_PTR(op_data);
2887 if (op_data == NULL) {
2888 CWARN("%s: cannot allocate op_data to release open handle for "
2889 DFID"\n", ll_s2sbi(sb)->ll_fsname, PFID(&body->mbo_fid1));
2894 op_data->op_fid1 = body->mbo_fid1;
2895 op_data->op_open_handle = body->mbo_open_handle;
2896 op_data->op_mod_time = ktime_get_real_seconds();
2897 md_close(exp, op_data, NULL, &close_req);
2898 ptlrpc_req_finished(close_req);
2899 ll_finish_md_op_data(op_data);
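/*
 * Unpack an MDT reply into struct lustre_md and use it to either update
 * an existing inode or instantiate a new one.  A layout lock piggybacked
 * on the getattr/open reply is applied here, and if an open intent fails
 * at this stage the MDT-side open handle is released via ll_open_cleanup().
 */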
2904 int ll_prep_inode(struct inode **inode, struct req_capsule *pill,
2905 struct super_block *sb, struct lookup_intent *it)
2907 struct ll_sb_info *sbi = NULL;
2908 struct lustre_md md = { NULL };
2909 bool default_lmv_deleted = false;
2914 LASSERT(*inode || sb);
2915 sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
2916 rc = md_get_lustre_md(sbi->ll_md_exp, pill, sbi->ll_dt_exp,
2917 sbi->ll_md_exp, &md);
2922 * clear default_lmv only if intent_getattr reply doesn't contain it.
2923 * but it needs to be done after iget, check this early because
2924 * ll_update_lsm_md() may change md.
2926 if (it && (it->it_op & (IT_LOOKUP | IT_GETATTR)) &&
2927 S_ISDIR(md.body->mbo_mode) && !md.default_lmv)
2928 default_lmv_deleted = true;
2931 rc = ll_update_inode(*inode, &md);
2935 LASSERT(sb != NULL);
2938 * At this point the server returns to the client the same FID that
2939 * the client generated for the create, so using ->fid1 is okay here.
2941 if (!fid_is_sane(&md.body->mbo_fid1)) {
2942 CERROR("%s: Fid is insane "DFID"\n",
2944 PFID(&md.body->mbo_fid1));
2945 GOTO(out, rc = -EINVAL);
2948 *inode = ll_iget(sb, cl_fid_build_ino(&md.body->mbo_fid1,
2949 sbi->ll_flags & LL_SBI_32BIT_API),
2951 if (IS_ERR(*inode)) {
2953 rc = IS_ERR(*inode) ? PTR_ERR(*inode) : -ENOMEM;
2955 CERROR("new_inode -fatal: rc %d\n", rc);
2960 /* Handling piggyback layout lock.
2961 * Layout lock can be piggybacked by getattr and open request.
2962 * The lsm can be applied to inode only if it comes with a layout lock
2963 * otherwise correct layout may be overwritten, for example:
2964 * 1. proc1: mdt returns a lsm but not granting layout
2965 * 2. layout was changed by another client
2966 * 3. proc2: refresh layout and layout lock granted
2967 * 4. proc1: would now apply a stale layout */
2968 if (it != NULL && it->it_lock_mode != 0) {
2969 struct lustre_handle lockh;
2970 struct ldlm_lock *lock;
2972 lockh.cookie = it->it_lock_handle;
2973 lock = ldlm_handle2lock(&lockh);
2974 LASSERT(lock != NULL);
2975 if (ldlm_has_layout(lock)) {
2976 struct cl_object_conf conf;
2978 memset(&conf, 0, sizeof(conf));
2979 conf.coc_opc = OBJECT_CONF_SET;
2980 conf.coc_inode = *inode;
2981 conf.coc_lock = lock;
2982 conf.u.coc_layout = md.layout;
2983 (void)ll_layout_conf(*inode, &conf);
2985 LDLM_LOCK_PUT(lock);
2988 if (default_lmv_deleted)
2989 ll_update_default_lsm_md(*inode, &md);
2991 /* we may want to apply some policy for foreign file/dir */
2992 if (ll_sbi_has_foreign_symlink(sbi)) {
2993 rc = ll_manage_foreign(*inode, &md);
3001 /* cleanup will be done if necessary */
3002 md_free_lustre_md(sbi->ll_md_exp, &md);
3004 if (rc != 0 && it != NULL && it->it_op & IT_OPEN) {
3005 ll_intent_drop_lock(it);
3006 ll_open_cleanup(sb != NULL ? sb : (*inode)->i_sb, pill);
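/*
 * IOC_OBD_STATFS handler: validate the user-supplied ioctl buffers, pick
 * the LMV (metadata) or LOV (data) export according to the requested
 * type, and forward the request through obd_iocontrol().
 */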
3012 int ll_obd_statfs(struct inode *inode, void __user *arg)
3014 struct ll_sb_info *sbi = NULL;
3015 struct obd_export *exp;
3016 struct obd_ioctl_data *data = NULL;
3021 sbi = ll_i2sbi(inode);
3023 GOTO(out_statfs, rc = -EINVAL);
3025 rc = obd_ioctl_getdata(&data, &len, arg);
3027 GOTO(out_statfs, rc);
3029 if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
3030 !data->ioc_pbuf1 || !data->ioc_pbuf2)
3031 GOTO(out_statfs, rc = -EINVAL);
3033 if (data->ioc_inllen1 != sizeof(__u32) ||
3034 data->ioc_inllen2 != sizeof(__u32) ||
3035 data->ioc_plen1 != sizeof(struct obd_statfs) ||
3036 data->ioc_plen2 != sizeof(struct obd_uuid))
3037 GOTO(out_statfs, rc = -EINVAL);
3039 memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
3040 if (type & LL_STATFS_LMV)
3041 exp = sbi->ll_md_exp;
3042 else if (type & LL_STATFS_LOV)
3043 exp = sbi->ll_dt_exp;
3045 GOTO(out_statfs, rc = -ENODEV);
3047 rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, data, NULL);
3049 GOTO(out_statfs, rc);
3051 OBD_FREE_LARGE(data, len);
3056 * This is normally called from ll_finish_md_op_data(), but sometimes it
3057 * needs to be called early to avoid deadlock.
3059 void ll_unlock_md_op_lsm(struct md_op_data *op_data)
3061 if (op_data->op_mea2_sem) {
3062 up_read_non_owner(op_data->op_mea2_sem);
3063 op_data->op_mea2_sem = NULL;
3066 if (op_data->op_mea1_sem) {
3067 up_read_non_owner(op_data->op_mea1_sem);
3068 op_data->op_mea1_sem = NULL;
3072 /* This function prepares the md_op_data hint that is passed down to the MD stack. */
3073 struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
3074 struct inode *i1, struct inode *i2,
3075 const char *name, size_t namelen,
3076 __u32 mode, enum md_op_code opc,
3079 LASSERT(i1 != NULL);
3082 /* Do not reuse namelen for something else. */
3084 return ERR_PTR(-EINVAL);
3086 if (namelen > ll_i2sbi(i1)->ll_namelen)
3087 return ERR_PTR(-ENAMETOOLONG);
3089 /* "/" is not a valid name, but it is allowed */
3090 if (!lu_name_is_valid_2(name, namelen) &&
3091 strncmp("/", name, namelen) != 0)
3092 return ERR_PTR(-EINVAL);
3095 if (op_data == NULL)
3096 OBD_ALLOC_PTR(op_data);
3098 if (op_data == NULL)
3099 return ERR_PTR(-ENOMEM);
3101 ll_i2gids(op_data->op_suppgids, i1, i2);
3102 op_data->op_fid1 = *ll_inode2fid(i1);
3103 op_data->op_code = opc;
3105 if (S_ISDIR(i1->i_mode)) {
3106 down_read_non_owner(&ll_i2info(i1)->lli_lsm_sem);
3107 op_data->op_mea1_sem = &ll_i2info(i1)->lli_lsm_sem;
3108 op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
3109 op_data->op_default_mea1 = ll_i2info(i1)->lli_default_lsm_md;
3113 op_data->op_fid2 = *ll_inode2fid(i2);
3114 if (S_ISDIR(i2->i_mode)) {
3116 /* i2 is typically a child of i1, and MUST be
3117 * further from the root to avoid deadlocks.
3119 down_read_non_owner(&ll_i2info(i2)->lli_lsm_sem);
3120 op_data->op_mea2_sem =
3121 &ll_i2info(i2)->lli_lsm_sem;
3123 op_data->op_mea2 = ll_i2info(i2)->lli_lsm_md;
3126 fid_zero(&op_data->op_fid2);
3129 if (ll_i2sbi(i1)->ll_flags & LL_SBI_64BIT_HASH)
3130 op_data->op_cli_flags |= CLI_HASH64;
3132 if (ll_need_32bit_api(ll_i2sbi(i1)))
3133 op_data->op_cli_flags |= CLI_API32;
3135 op_data->op_name = name;
3136 op_data->op_namelen = namelen;
3137 op_data->op_mode = mode;
3138 op_data->op_mod_time = ktime_get_real_seconds();
3139 op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
3140 op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
3141 op_data->op_cap = cfs_curproc_cap_pack();
3142 op_data->op_mds = 0;
3143 if ((opc == LUSTRE_OPC_CREATE) && (name != NULL) &&
3144 filename_is_volatile(name, namelen, &op_data->op_mds)) {
3145 op_data->op_bias |= MDS_CREATE_VOLATILE;
3147 op_data->op_data = data;
3152 void ll_finish_md_op_data(struct md_op_data *op_data)
3154 ll_unlock_md_op_lsm(op_data);
3155 ll_security_release_secctx(op_data->op_file_secctx,
3156 op_data->op_file_secctx_size);
3157 llcrypt_free_ctx(op_data->op_file_encctx, op_data->op_file_encctx_size);
3158 OBD_FREE_PTR(op_data);
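/*
 * ->show_options() handler: print the mount options implied by
 * sbi->ll_flags (lock/flock mode, user_xattr, lazystatfs, encryption,
 * foreign_symlink prefix, ...) so /proc/mounts reflects the active state.
 */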
3161 int ll_show_options(struct seq_file *seq, struct dentry *dentry)
3163 struct ll_sb_info *sbi;
3165 LASSERT(seq && dentry);
3166 sbi = ll_s2sbi(dentry->d_sb);
3168 if (sbi->ll_flags & LL_SBI_NOLCK)
3169 seq_puts(seq, ",nolock");
3171 /* "flock" is the default since 2.13, but it wasn't for many years,
3172 * so it is still useful to print this to show it is enabled.
3173 * Print "noflock" explicitly so it is clear when flock is disabled. */
3175 if (sbi->ll_flags & LL_SBI_FLOCK)
3176 seq_puts(seq, ",flock");
3177 else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
3178 seq_puts(seq, ",localflock");
3180 seq_puts(seq, ",noflock");
3182 if (sbi->ll_flags & LL_SBI_USER_XATTR)
3183 seq_puts(seq, ",user_xattr");
3185 if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
3186 seq_puts(seq, ",lazystatfs");
3188 if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
3189 seq_puts(seq, ",user_fid2path");
3191 if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
3192 seq_puts(seq, ",always_ping");
3194 if (ll_sbi_has_test_dummy_encryption(sbi))
3195 seq_puts(seq, ",test_dummy_encryption");
3197 if (ll_sbi_has_encrypt(sbi))
3198 seq_puts(seq, ",encrypt");
3200 seq_puts(seq, ",noencrypt");
3202 if (sbi->ll_flags & LL_SBI_FOREIGN_SYMLINK) {
3203 seq_puts(seq, ",foreign_symlink=");
3204 seq_puts(seq, sbi->ll_foreign_symlink_prefix);
3211 * Get the obd name for the given cmd and copy it out to user space
3213 int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
3215 struct ll_sb_info *sbi = ll_i2sbi(inode);
3216 struct obd_device *obd;
3219 if (cmd == OBD_IOC_GETNAME_OLD || cmd == OBD_IOC_GETDTNAME)
3220 obd = class_exp2obd(sbi->ll_dt_exp);
3221 else if (cmd == OBD_IOC_GETMDNAME)
3222 obd = class_exp2obd(sbi->ll_md_exp);
3229 if (copy_to_user((void __user *)arg, obd->obd_name,
3230 strlen(obd->obd_name) + 1))
3236 static char* ll_d_path(struct dentry *dentry, char *buf, int bufsize)
3243 p.mnt = current->fs->root.mnt;
3245 path = d_path(&p, buf, bufsize);
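/*
 * Warn on the console that a dirty page is being discarded (typically
 * after a write error), naming the filesystem, device, FID and, when it
 * can be resolved, the path of the affected file.
 */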
3250 void ll_dirty_page_discard_warn(struct page *page, int ioret)
3252 char *buf, *path = NULL;
3253 struct dentry *dentry = NULL;
3254 struct inode *inode = page->mapping->host;
3256 /* This can be called inside a spin lock, so use GFP_ATOMIC. */
3257 buf = (char *)__get_free_page(GFP_ATOMIC);
3259 dentry = d_find_alias(page->mapping->host);
3261 path = ll_d_path(dentry, buf, PAGE_SIZE);
3264 /* The below message is checked in recovery-small.sh test_24b */
3266 "%s: dirty page discard: %s/fid: "DFID"/%s may get corrupted "
3267 "(rc %d)\n", ll_i2sbi(inode)->ll_fsname,
3268 s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
3269 PFID(ll_inode2fid(inode)),
3270 (path && !IS_ERR(path)) ? path : "", ioret);
3276 free_page((unsigned long)buf);
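/*
 * Copy a lov_user_md from user space into a kernel buffer of the correct
 * size: read the fixed header first to learn the full layout size via
 * ll_lov_user_md_size(), then allocate and copy the whole structure.
 */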
3279 ssize_t ll_copy_user_md(const struct lov_user_md __user *md,
3280 struct lov_user_md **kbuf)
3282 struct lov_user_md lum;
3286 if (copy_from_user(&lum, md, sizeof(lum)))
3289 lum_size = ll_lov_user_md_size(&lum);
3293 OBD_ALLOC_LARGE(*kbuf, lum_size);
3297 if (copy_from_user(*kbuf, md, lum_size) != 0) {
3298 OBD_FREE_LARGE(*kbuf, lum_size);
3306 * Compute llite root squash state after a change of root squash
3307 * configuration setting or the addition/removal of an LNet NID
3309 void ll_compute_rootsquash_state(struct ll_sb_info *sbi)
3311 struct root_squash_info *squash = &sbi->ll_squash;
3314 struct lnet_process_id id;
3316 /* Update norootsquash flag */
3317 spin_lock(&squash->rsi_lock);
3318 if (list_empty(&squash->rsi_nosquash_nids))
3319 sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
3321 /* Do not apply root squash as soon as one of our NIDs is
3322 * in the nosquash_nids list */
3325 while (LNetGetId(i++, &id) != -ENOENT) {
3326 if (id.nid == LNET_NID_LO_0)
3328 if (cfs_match_nid(id.nid, &squash->rsi_nosquash_nids)) {
3334 sbi->ll_flags |= LL_SBI_NOROOTSQUASH;
3336 sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
3338 spin_unlock(&squash->rsi_lock);
3342 * Parse linkea content to extract information about a given hardlink
3344 * \param[in] ldata - Initialized linkea data
3345 * \param[in] linkno - Link identifier
3346 * \param[out] parent_fid - The entry's parent FID
3347 * \param[out] ln - Entry name destination buffer
3349 * \retval 0 on success
3350 * \retval Appropriate negative error code on failure
3352 static int ll_linkea_decode(struct linkea_data *ldata, unsigned int linkno,
3353 struct lu_fid *parent_fid, struct lu_name *ln)
3359 rc = linkea_init_with_rec(ldata);
3363 if (linkno >= ldata->ld_leh->leh_reccount)
3364 /* beyond last link */
3367 linkea_first_entry(ldata);
3368 for (idx = 0; ldata->ld_lee != NULL; idx++) {
3369 linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen, ln,
3374 linkea_next_entry(ldata);
3384 * Get parent FID and name of an identified link. Operation is performed for
3385 * a given link number, letting the caller iterate over linkno to list one or
3386 * all links of an entry.
3388 * \param[in] file - File descriptor against which to perform the operation
3389 * \param[in,out] arg - User-filled structure containing the linkno to operate
3390 * on and the available size. It is eventually filled with
3391 * the requested information or left untouched on error
3393 * \retval - 0 on success
3394 * \retval - Appropriate negative error code on failure
3396 int ll_getparent(struct file *file, struct getparent __user *arg)
3398 struct inode *inode = file_inode(file);
3399 struct linkea_data *ldata;
3400 struct lu_buf buf = LU_BUF_NULL;
3402 struct lu_fid parent_fid;
3409 if (!capable(CAP_DAC_READ_SEARCH) &&
3410 !(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
3413 if (get_user(name_size, &arg->gp_name_size))
3416 if (get_user(linkno, &arg->gp_linkno))
3419 if (name_size > PATH_MAX)
3422 OBD_ALLOC(ldata, sizeof(*ldata));
3426 rc = linkea_data_new(ldata, &buf);
3428 GOTO(ldata_free, rc);
3430 rc = ll_xattr_list(inode, XATTR_NAME_LINK, XATTR_TRUSTED_T, buf.lb_buf,
3431 buf.lb_len, OBD_MD_FLXATTR);
3435 rc = ll_linkea_decode(ldata, linkno, &parent_fid, &ln);
3439 if (ln.ln_namelen >= name_size)
3440 GOTO(lb_free, rc = -EOVERFLOW);
3442 if (copy_to_user(&arg->gp_fid, &parent_fid, sizeof(arg->gp_fid)))
3443 GOTO(lb_free, rc = -EFAULT);
3445 if (copy_to_user(&arg->gp_name, ln.ln_name, ln.ln_namelen))
3446 GOTO(lb_free, rc = -EFAULT);
3448 if (put_user('\0', arg->gp_name + ln.ln_namelen))
3449 GOTO(lb_free, rc = -EFAULT);
3454 OBD_FREE(ldata, sizeof(*ldata));