4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or have any questions.
27 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2013, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/llite/llite_lib.c
38 * Lustre Light Super operations
41 #define DEBUG_SUBSYSTEM S_LLITE
43 #include <linux/module.h>
44 #include <linux/types.h>
45 #include <linux/version.h>
48 #include <lustre_lite.h>
49 #include <lustre_ha.h>
50 #include <lustre_dlm.h>
51 #include <lprocfs_status.h>
52 #include <lustre_disk.h>
53 #include <lustre_param.h>
54 #include <lustre_log.h>
55 #include <cl_object.h>
56 #include <obd_cksum.h>
57 #include "llite_internal.h"
59 cfs_mem_cache_t *ll_file_data_slab;
61 CFS_LIST_HEAD(ll_super_blocks);
62 DEFINE_SPINLOCK(ll_sb_lock);
64 #ifndef MS_HAS_NEW_AOPS
65 extern struct address_space_operations ll_aops;
67 extern struct address_space_operations_ext ll_aops;
71 #define log2(n) ffz(~(n))
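/* Allocate and initialize the per-mount ll_sb_info: locks, the client-side
 * LRU page cache accounting, default readahead limits, a freshly generated
 * superblock UUID and the default mount flags (statahead/AGL enabled,
 * checksums if configured). */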
74 static struct ll_sb_info *ll_init_sbi(void)
76 struct ll_sb_info *sbi = NULL;
78 unsigned long lru_page_max;
84 OBD_ALLOC(sbi, sizeof(*sbi));
88 spin_lock_init(&sbi->ll_lock);
89 mutex_init(&sbi->ll_lco.lco_lock);
90 spin_lock_init(&sbi->ll_pp_extent_lock);
91 spin_lock_init(&sbi->ll_process_lock);
92 sbi->ll_rw_stats_on = 0;
95 pages = si.totalram - si.totalhigh;
96 if (pages >> (20 - CFS_PAGE_SHIFT) < 512) {
97 lru_page_max = pages / 2;
99 lru_page_max = (pages / 4) * 3;
102 /* initialize lru data */
103 cfs_atomic_set(&sbi->ll_cache.ccc_users, 0);
104 sbi->ll_cache.ccc_lru_max = lru_page_max;
105 cfs_atomic_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
106 spin_lock_init(&sbi->ll_cache.ccc_lru_lock);
107 CFS_INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru);
109 sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
110 SBI_DEFAULT_READAHEAD_MAX);
111 sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
112 sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
113 SBI_DEFAULT_READAHEAD_WHOLE_MAX;
114 CFS_INIT_LIST_HEAD(&sbi->ll_conn_chain);
115 CFS_INIT_LIST_HEAD(&sbi->ll_orphan_dentry_list);
117 ll_generate_random_uuid(uuid);
118 class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
119 CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);
121 spin_lock(&ll_sb_lock);
122 cfs_list_add_tail(&sbi->ll_list, &ll_super_blocks);
123 spin_unlock(&ll_sb_lock);
125 sbi->ll_flags |= LL_SBI_VERBOSE;
126 #ifdef ENABLE_CHECKSUM
127 sbi->ll_flags |= LL_SBI_CHECKSUM;
130 #ifdef HAVE_LRU_RESIZE_SUPPORT
131 sbi->ll_flags |= LL_SBI_LRU_RESIZE;
134 for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
135 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
137 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
141 /* metadata statahead is enabled by default */
142 sbi->ll_sa_max = LL_SA_RPC_DEF;
143 cfs_atomic_set(&sbi->ll_sa_total, 0);
144 cfs_atomic_set(&sbi->ll_sa_wrong, 0);
145 cfs_atomic_set(&sbi->ll_agl_total, 0);
146 sbi->ll_flags |= LL_SBI_AGL_ENABLED;
151 void ll_free_sbi(struct super_block *sb)
153 struct ll_sb_info *sbi = ll_s2sbi(sb);
157 spin_lock(&ll_sb_lock);
158 cfs_list_del(&sbi->ll_list);
159 spin_unlock(&ll_sb_lock);
160 OBD_FREE(sbi, sizeof(*sbi));
165 static struct dentry_operations ll_d_root_ops = {
166 .d_compare = ll_dcompare,
167 .d_revalidate = ll_revalidate_nd,
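/* Second stage of the client mount: connect to the MD (MDC/LMV) and DT
 * (OSC/LOV) devices named by the config profile, negotiate connect flags,
 * fetch the root FID and its attributes, and finish setting up the VFS
 * super_block (s_op, s_root, s_dev). */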
170 static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
171 struct vfsmount *mnt)
173 struct inode *root = 0;
174 struct ll_sb_info *sbi = ll_s2sbi(sb);
175 struct obd_device *obd;
176 struct obd_capa *oc = NULL;
177 struct obd_statfs *osfs = NULL;
178 struct ptlrpc_request *request = NULL;
179 struct obd_connect_data *data = NULL;
180 struct obd_uuid *uuid;
181 struct md_op_data *op_data;
182 struct lustre_md lmd;
184 int size, err, checksum;
187 obd = class_name2obd(md);
189 CERROR("MD %s: not setup or attached\n", md);
203 if (proc_lustre_fs_root) {
204 err = lprocfs_register_mountpoint(proc_lustre_fs_root, sb,
207 CERROR("could not register mount in /proc/fs/lustre\n");
210 /* indicate the features supported by this client */
211 data->ocd_connect_flags = OBD_CONNECT_IBITS | OBD_CONNECT_NODEVOH |
212 OBD_CONNECT_ATTRFID |
213 OBD_CONNECT_VERSION | OBD_CONNECT_BRW_SIZE |
214 OBD_CONNECT_MDS_CAPA | OBD_CONNECT_OSS_CAPA |
215 OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
216 OBD_CONNECT_AT | OBD_CONNECT_LOV_V3 |
217 OBD_CONNECT_RMT_CLIENT | OBD_CONNECT_VBR |
218 OBD_CONNECT_FULL20 | OBD_CONNECT_64BITHASH|
219 OBD_CONNECT_EINPROGRESS |
220 OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
221 OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS;
223 if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
224 data->ocd_connect_flags |= OBD_CONNECT_SOM;
226 #ifdef HAVE_LRU_RESIZE_SUPPORT
227 if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
228 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
230 #ifdef CONFIG_FS_POSIX_ACL
231 data->ocd_connect_flags |= OBD_CONNECT_ACL | OBD_CONNECT_UMASK;
234 if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
235 /* flag mdc connection as lightweight, only used for test
236 * purpose, use with care */
237 data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
239 data->ocd_ibits_known = MDS_INODELOCK_FULL;
240 data->ocd_version = LUSTRE_VERSION_CODE;
242 if (sb->s_flags & MS_RDONLY)
243 data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
244 if (sbi->ll_flags & LL_SBI_USER_XATTR)
245 data->ocd_connect_flags |= OBD_CONNECT_XATTR;
247 #ifdef HAVE_MS_FLOCK_LOCK
248 /* force vfs to use lustre handler for flock() calls - bug 10743 */
249 sb->s_flags |= MS_FLOCK_LOCK;
251 #ifdef MS_HAS_NEW_AOPS
252 sb->s_flags |= MS_HAS_NEW_AOPS;
255 if (sbi->ll_flags & LL_SBI_FLOCK)
256 sbi->ll_fop = &ll_file_operations_flock;
257 else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
258 sbi->ll_fop = &ll_file_operations;
260 sbi->ll_fop = &ll_file_operations_noflock;
263 data->ocd_connect_flags |= OBD_CONNECT_REAL;
264 if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
265 data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT_FORCE;
267 data->ocd_brw_size = MD_MAX_BRW_SIZE;
269 err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid, data, NULL);
271 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
272 "recovery, of which this client is not a "
273 "part. Please wait for recovery to complete,"
274 " abort, or time out.\n", md);
277 CERROR("cannot connect to %s: rc = %d\n", md, err);
281 sbi->ll_md_exp->exp_connect_data = *data;
283 err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
284 LUSTRE_SEQ_METADATA);
286 CERROR("%s: Can't init metadata layer FID infrastructure, "
287 "rc = %d\n", sbi->ll_md_exp->exp_obd->obd_name, err);
291 /* For mount, we only need fs info from MDT0, and also in DNE, it
292 * can make sure the client can be mounted as long as MDT0 is available. */
294 err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
295 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
296 OBD_STATFS_FOR_MDT0);
298 GOTO(out_md_fid, err);
300 /* This needs to be after statfs to ensure connect has finished.
301 * Note that "data" does NOT contain the valid connect reply.
302 * If connecting to a 1.8 server there will be no LMV device, so
303 * we can access the MDC export directly and exp_connect_flags will
304 * be non-zero, but if accessing an upgraded 2.1 server it will
305 * have the correct flags filled in.
306 * XXX: fill in the LMV exp_connect_flags from MDC(s). */
307 valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
308 if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
309 valid != CLIENT_CONNECT_MDT_REQD) {
312 OBD_ALLOC_WAIT(buf, CFS_PAGE_SIZE);
313 obd_connect_flags2str(buf, CFS_PAGE_SIZE,
314 valid ^ CLIENT_CONNECT_MDT_REQD, ",");
315 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support "
316 "feature(s) needed for correct operation "
317 "of this client (%s). Please upgrade "
318 "server or downgrade client.\n",
319 sbi->ll_md_exp->exp_obd->obd_name, buf);
320 OBD_FREE(buf, CFS_PAGE_SIZE);
321 GOTO(out_md_fid, err = -EPROTO);
324 size = sizeof(*data);
325 err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
326 KEY_CONN_DATA, &size, data, NULL);
328 CERROR("%s: Get connect data failed: rc = %d\n",
329 sbi->ll_md_exp->exp_obd->obd_name, err);
330 GOTO(out_md_fid, err);
333 LASSERT(osfs->os_bsize);
334 sb->s_blocksize = osfs->os_bsize;
335 sb->s_blocksize_bits = log2(osfs->os_bsize);
336 sb->s_magic = LL_SUPER_MAGIC;
337 sb->s_maxbytes = MAX_LFS_FILESIZE;
338 sbi->ll_namelen = osfs->os_namelen;
339 sbi->ll_max_rw_chunk = LL_DEFAULT_MAX_RW_CHUNK;
341 if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
342 !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
343 LCONSOLE_INFO("Disabling user_xattr feature because "
344 "it is not supported on the server\n");
345 sbi->ll_flags &= ~LL_SBI_USER_XATTR;
348 if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
350 sb->s_flags |= MS_POSIXACL;
352 sbi->ll_flags |= LL_SBI_ACL;
354 LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
356 sb->s_flags &= ~MS_POSIXACL;
358 sbi->ll_flags &= ~LL_SBI_ACL;
361 if (data->ocd_connect_flags & OBD_CONNECT_RMT_CLIENT) {
362 if (!(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {
363 sbi->ll_flags |= LL_SBI_RMT_CLIENT;
364 LCONSOLE_INFO("client is set as remote by default.\n");
367 if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
368 sbi->ll_flags &= ~LL_SBI_RMT_CLIENT;
369 LCONSOLE_INFO("client claims to be remote, but server "
370 "rejected, forced to be local.\n");
374 if (data->ocd_connect_flags & OBD_CONNECT_MDS_CAPA) {
375 LCONSOLE_INFO("client enabled MDS capability!\n");
376 sbi->ll_flags |= LL_SBI_MDS_CAPA;
379 if (data->ocd_connect_flags & OBD_CONNECT_OSS_CAPA) {
380 LCONSOLE_INFO("client enabled OSS capability!\n");
381 sbi->ll_flags |= LL_SBI_OSS_CAPA;
384 if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
385 sbi->ll_flags |= LL_SBI_64BIT_HASH;
387 if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
388 sbi->ll_md_brw_size = data->ocd_brw_size;
390 sbi->ll_md_brw_size = CFS_PAGE_SIZE;
392 if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) {
393 LCONSOLE_INFO("Layout lock feature supported.\n");
394 sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;
397 obd = class_name2obd(dt);
399 CERROR("DT %s: not setup or attached\n", dt);
400 GOTO(out_md_fid, err = -ENODEV);
403 data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
404 OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
405 OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
406 OBD_CONNECT_SRVLOCK | OBD_CONNECT_TRUNCLOCK|
407 OBD_CONNECT_AT | OBD_CONNECT_RMT_CLIENT |
408 OBD_CONNECT_OSS_CAPA | OBD_CONNECT_VBR|
409 OBD_CONNECT_FULL20 | OBD_CONNECT_64BITHASH |
410 OBD_CONNECT_MAXBYTES |
411 OBD_CONNECT_EINPROGRESS |
412 OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
413 OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS;
415 if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
416 data->ocd_connect_flags |= OBD_CONNECT_SOM;
418 if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM)) {
419 /* OBD_CONNECT_CKSUM should always be set, even if checksums are
420 * disabled by default, because it can still be enabled on the
421 * fly via /proc. As a consequence, we still need to come to an
422 * agreement on the supported algorithms at connect time */
423 data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
425 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
426 data->ocd_cksum_types = OBD_CKSUM_ADLER;
428 data->ocd_cksum_types = cksum_types_supported_client();
431 #ifdef HAVE_LRU_RESIZE_SUPPORT
432 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
434 if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
435 data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT_FORCE;
437 CDEBUG(D_RPCTRACE, "ocd_connect_flags: "LPX64" ocd_version: %d "
438 "ocd_grant: %d\n", data->ocd_connect_flags,
439 data->ocd_version, data->ocd_grant);
441 obd->obd_upcall.onu_owner = &sbi->ll_lco;
442 obd->obd_upcall.onu_upcall = cl_ocd_update;
444 data->ocd_brw_size = DT_MAX_BRW_SIZE;
446 err = obd_connect(NULL, &sbi->ll_dt_exp, obd, &sbi->ll_sb_uuid, data,
449 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
450 "recovery, of which this client is not a "
451 "part. Please wait for recovery to "
452 "complete, abort, or time out.\n", dt);
455 CERROR("%s: Cannot connect to %s: rc = %d\n",
456 sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
460 sbi->ll_dt_exp->exp_connect_data = *data;
462 err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
463 LUSTRE_SEQ_METADATA);
465 CERROR("%s: Can't init data layer FID infrastructure, "
466 "rc = %d\n", sbi->ll_dt_exp->exp_obd->obd_name, err);
470 mutex_lock(&sbi->ll_lco.lco_lock);
471 sbi->ll_lco.lco_flags = data->ocd_connect_flags;
472 sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
473 sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
474 mutex_unlock(&sbi->ll_lco.lco_lock);
476 fid_zero(&sbi->ll_root_fid);
477 err = md_getstatus(sbi->ll_md_exp, &sbi->ll_root_fid, &oc);
479 CERROR("cannot mds_connect: rc = %d\n", err);
480 GOTO(out_lock_cn_cb, err);
482 if (!fid_is_sane(&sbi->ll_root_fid)) {
483 CERROR("%s: Invalid root fid "DFID" during mount\n",
484 sbi->ll_md_exp->exp_obd->obd_name,
485 PFID(&sbi->ll_root_fid));
486 GOTO(out_lock_cn_cb, err = -EINVAL);
488 CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
490 sb->s_op = &lustre_super_operations;
491 #if THREAD_SIZE >= 8192 /*b=17630*/
492 sb->s_export_op = &lustre_export_operations;
496 /* XXX: move this to after cbd setup? */
497 valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMDSCAPA;
498 if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
499 valid |= OBD_MD_FLRMTPERM;
500 else if (sbi->ll_flags & LL_SBI_ACL)
501 valid |= OBD_MD_FLACL;
503 OBD_ALLOC_PTR(op_data);
505 GOTO(out_lock_cn_cb, err = -ENOMEM);
507 op_data->op_fid1 = sbi->ll_root_fid;
508 op_data->op_mode = 0;
509 op_data->op_capa1 = oc;
510 op_data->op_valid = valid;
512 err = md_getattr(sbi->ll_md_exp, op_data, &request);
515 OBD_FREE_PTR(op_data);
517 CERROR("%s: md_getattr failed for root: rc = %d\n",
518 sbi->ll_md_exp->exp_obd->obd_name, err);
519 GOTO(out_lock_cn_cb, err);
522 err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
523 sbi->ll_md_exp, &lmd);
525 CERROR("failed to understand root inode md: rc = %d\n", err);
526 ptlrpc_req_finished(request);
527 GOTO(out_lock_cn_cb, err);
530 LASSERT(fid_is_sane(&sbi->ll_root_fid));
531 root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid,
532 ll_need_32bit_api(sbi)),
534 md_free_lustre_md(sbi->ll_md_exp, &lmd);
535 ptlrpc_req_finished(request);
537 if (root == NULL || IS_ERR(root)) {
539 obd_free_memmd(sbi->ll_dt_exp, &lmd.lsm);
540 #ifdef CONFIG_FS_POSIX_ACL
542 posix_acl_release(lmd.posix_acl);
543 lmd.posix_acl = NULL;
546 err = IS_ERR(root) ? PTR_ERR(root) : -EBADF;
548 CERROR("lustre_lite: bad iget4 for root\n");
552 err = ll_close_thread_start(&sbi->ll_lcq);
554 CERROR("cannot start close thread: rc %d\n", err);
558 #ifdef CONFIG_FS_POSIX_ACL
559 if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
560 rct_init(&sbi->ll_rct);
561 et_init(&sbi->ll_et);
565 checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
566 err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
567 KEY_CHECKSUM, sizeof(checksum), &checksum,
571 err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CACHE_SET),
572 KEY_CACHE_SET, sizeof(sbi->ll_cache),
573 &sbi->ll_cache, NULL);
575 sb->s_root = d_make_root(root);
576 if (sb->s_root == NULL) {
577 CERROR("%s: can't make root dentry\n",
578 ll_get_fsname(sb, NULL, 0));
579 GOTO(out_root, err = -ENOMEM);
582 #ifdef HAVE_DCACHE_LOCK
583 sb->s_root->d_op = &ll_d_root_ops;
585 /* kernel >= 2.6.38 stores dentry operations in sb->s_d_op. */
586 d_set_d_op(sb->s_root, &ll_d_root_ops);
587 sb->s_d_op = &ll_d_ops;
590 sbi->ll_sdev_orig = sb->s_dev;
592 /* We set sb->s_dev equal on all lustre clients in order to support
593 * NFS export clustering. NFSD requires that the FSID be the same on all clients. */
595 /* s_dev is also used in lt_compare() to compare two fs, but that is
596 * only a node-local comparison. */
597 uuid = obd_get_uuid(sbi->ll_md_exp);
599 sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
611 obd_fid_fini(sbi->ll_dt_exp->exp_obd);
613 obd_disconnect(sbi->ll_dt_exp);
614 sbi->ll_dt_exp = NULL;
615 /* Make sure all OSCs are gone, since cl_cache is accessing sbi. */
616 obd_zombie_barrier();
618 obd_fid_fini(sbi->ll_md_exp->exp_obd);
620 obd_disconnect(sbi->ll_md_exp);
621 sbi->ll_md_exp = NULL;
627 lprocfs_unregister_mountpoint(sbi);
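/* Return in *lmmsize the largest layout EA the client may receive: start
 * from the default striping size, then query KEY_MAX_EASIZE from the MD
 * export. */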
631 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
635 *lmmsize = obd_size_diskmd(sbi->ll_dt_exp, NULL);
637 rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
638 KEY_MAX_EASIZE, &size, lmmsize, NULL);
640 CERROR("Get max mdsize error rc %d \n", rc);
645 void ll_dump_inode(struct inode *inode)
647 struct ll_d_hlist_node *tmp;
648 int dentry_count = 0;
650 LASSERT(inode != NULL);
652 ll_d_hlist_for_each(tmp, &inode->i_dentry)
655 CERROR("inode %p dump: dev=%s ino=%lu mode=%o count=%u, %d dentries\n",
656 inode, ll_i2mdexp(inode)->exp_obd->obd_name, inode->i_ino,
657 inode->i_mode, atomic_read(&inode->i_count), dentry_count);
660 void lustre_dump_dentry(struct dentry *dentry, int recur)
662 struct list_head *tmp;
665 LASSERT(dentry != NULL);
667 list_for_each(tmp, &dentry->d_subdirs)
670 CERROR("dentry %p dump: name=%.*s parent=%.*s (%p), inode=%p, count=%u,"
671 " flags=0x%x, fsdata=%p, %d subdirs\n", dentry,
672 dentry->d_name.len, dentry->d_name.name,
673 dentry->d_parent->d_name.len, dentry->d_parent->d_name.name,
674 dentry->d_parent, dentry->d_inode, d_refcount(dentry),
675 dentry->d_flags, dentry->d_fsdata, subdirs);
676 if (dentry->d_inode != NULL)
677 ll_dump_inode(dentry->d_inode);
682 list_for_each(tmp, &dentry->d_subdirs) {
683 struct dentry *d = list_entry(tmp, struct dentry, d_u.d_child);
684 lustre_dump_dentry(d, recur - 1);
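/* Tear down what client_common_fill_super() set up: stop the close thread,
 * disconnect the DT and MD exports and unregister the /proc entries for
 * this mount. */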
688 void client_common_put_super(struct super_block *sb)
690 struct ll_sb_info *sbi = ll_s2sbi(sb);
693 #ifdef CONFIG_FS_POSIX_ACL
694 if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
695 et_fini(&sbi->ll_et);
696 rct_fini(&sbi->ll_rct);
700 ll_close_thread_shutdown(sbi->ll_lcq);
704 cfs_list_del(&sbi->ll_conn_chain);
706 obd_fid_fini(sbi->ll_dt_exp->exp_obd);
707 obd_disconnect(sbi->ll_dt_exp);
708 sbi->ll_dt_exp = NULL;
709 /* wait till all OSCs are gone, since cl_cache is accessing sbi. */
711 obd_zombie_barrier();
713 lprocfs_unregister_mountpoint(sbi);
715 obd_fid_fini(sbi->ll_md_exp->exp_obd);
716 obd_disconnect(sbi->ll_md_exp);
717 sbi->ll_md_exp = NULL;
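/* Called while the superblock is being killed: restore the s_dev value
 * saved at mount time (it was overridden for NFS export clustering) and
 * flag the superblock as umounting. */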
722 void ll_kill_super(struct super_block *sb)
724 struct ll_sb_info *sbi;
729 if (!(sb->s_flags & MS_ACTIVE))
733 /* We need to restore s_dev from the value changed for clustered NFS
734 * before put_super, because new kernels cache s_dev and changing
735 * sb->s_dev in put_super does not affect the real device removal. */
737 sb->s_dev = sbi->ll_sdev_orig;
738 sbi->ll_umounting = 1;
743 char *ll_read_opt(const char *opt, char *data)
749 CDEBUG(D_SUPER, "option: %s, data %s\n", opt, data);
750 if (strncmp(opt, data, strlen(opt)))
752 if ((value = strchr(data, '=')) == NULL)
756 OBD_ALLOC(retval, strlen(value) + 1);
758 CERROR("out of memory!\n");
762 memcpy(retval, value, strlen(value)+1);
763 CDEBUG(D_SUPER, "Assigned option: %s, value %s\n", opt, retval);
767 static inline int ll_set_opt(const char *opt, char *data, int fl)
769 if (strncmp(opt, data, strlen(opt)) != 0)
775 /* non-client-specific mount options are parsed in lmd_parse */
776 static int ll_options(char *options, int *flags)
779 char *s1 = options, *s2;
785 CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
788 CDEBUG(D_SUPER, "next opt=%s\n", s1);
789 tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
794 tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
799 tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
804 tmp = ll_set_opt("noflock", s1, LL_SBI_FLOCK|LL_SBI_LOCALFLOCK);
809 tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
814 tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
819 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 5, 50, 0)
820 tmp = ll_set_opt("acl", s1, LL_SBI_ACL);
822 /* Ignore deprecated mount option. The client will
823 * always try to mount with ACL support, whether this
824 * is used depends on whether server supports it. */
825 LCONSOLE_ERROR_MSG(0x152, "Ignoring deprecated "
826 "mount option 'acl'.\n");
829 tmp = ll_set_opt("noacl", s1, LL_SBI_ACL);
831 LCONSOLE_ERROR_MSG(0x152, "Ignoring deprecated "
832 "mount option 'noacl'.\n");
836 #warning "{no}acl options have been deprecated since 1.8, please remove them"
838 tmp = ll_set_opt("remote_client", s1, LL_SBI_RMT_CLIENT);
843 tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
848 tmp = ll_set_opt("nouser_fid2path", s1, LL_SBI_USER_FID2PATH);
854 tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
859 tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
864 tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
869 tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
874 tmp = ll_set_opt("lazystatfs", s1, LL_SBI_LAZYSTATFS);
879 tmp = ll_set_opt("nolazystatfs", s1, LL_SBI_LAZYSTATFS);
884 tmp = ll_set_opt("som_preview", s1, LL_SBI_SOM_PREVIEW);
889 tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
894 tmp = ll_set_opt("verbose", s1, LL_SBI_VERBOSE);
899 tmp = ll_set_opt("noverbose", s1, LL_SBI_VERBOSE);
904 LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
910 s2 = strchr(s1, ',');
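/* Initialize a freshly allocated ll_inode_info: locks, lists, open handle
 * counters and statahead/AGL state, with directory- or file-specific
 * fields chosen from the inode mode. */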
918 void ll_lli_init(struct ll_inode_info *lli)
920 lli->lli_inode_magic = LLI_INODE_MAGIC;
922 lli->lli_ioepoch = 0;
923 lli->lli_maxbytes = MAX_LFS_FILESIZE;
924 spin_lock_init(&lli->lli_lock);
925 lli->lli_posix_acl = NULL;
926 lli->lli_remote_perms = NULL;
927 mutex_init(&lli->lli_rmtperm_mutex);
928 /* Do not set lli_fid, it has been initialized already. */
929 fid_zero(&lli->lli_pfid);
930 CFS_INIT_LIST_HEAD(&lli->lli_close_list);
931 CFS_INIT_LIST_HEAD(&lli->lli_oss_capas);
932 cfs_atomic_set(&lli->lli_open_count, 0);
933 lli->lli_mds_capa = NULL;
934 lli->lli_rmtperm_time = 0;
935 lli->lli_pending_och = NULL;
936 lli->lli_mds_read_och = NULL;
937 lli->lli_mds_write_och = NULL;
938 lli->lli_mds_exec_och = NULL;
939 lli->lli_open_fd_read_count = 0;
940 lli->lli_open_fd_write_count = 0;
941 lli->lli_open_fd_exec_count = 0;
942 mutex_init(&lli->lli_och_mutex);
943 spin_lock_init(&lli->lli_agl_lock);
944 lli->lli_has_smd = false;
945 lli->lli_layout_gen = LL_LAYOUT_GEN_NONE;
946 lli->lli_clob = NULL;
948 LASSERT(lli->lli_vfs_inode.i_mode != 0);
949 if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
950 mutex_init(&lli->lli_readdir_mutex);
951 lli->lli_opendir_key = NULL;
953 lli->lli_def_acl = NULL;
954 spin_lock_init(&lli->lli_sa_lock);
955 lli->lli_opendir_pid = 0;
957 sema_init(&lli->lli_size_sem, 1);
958 lli->lli_size_sem_owner = NULL;
959 lli->lli_symlink_name = NULL;
960 init_rwsem(&lli->lli_trunc_sem);
961 mutex_init(&lli->lli_write_mutex);
962 init_rwsem(&lli->lli_glimpse_sem);
963 lli->lli_glimpse_time = 0;
964 CFS_INIT_LIST_HEAD(&lli->lli_agl_list);
965 lli->lli_agl_index = 0;
966 lli->lli_async_rc = 0;
967 lli->lli_volatile = false;
969 mutex_init(&lli->lli_layout_mutex);
972 static inline int ll_bdi_register(struct backing_dev_info *bdi)
974 #ifdef HAVE_BDI_REGISTER
975 static atomic_t ll_bdi_num = ATOMIC_INIT(0);
978 bdi->name = "lustre";
980 return bdi_register(bdi, NULL, "lustre-%d",
981 atomic_inc_return(&ll_bdi_num));
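/* Top-level mount entry point: allocate the ll_sb_info, parse the client
 * mount options, set up the backing_dev_info, process the MGS config log
 * for this profile and finally call client_common_fill_super() with the
 * resulting MD and DT device names. */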
987 int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
989 struct lustre_profile *lprof = NULL;
990 struct lustre_sb_info *lsi = s2lsi(sb);
991 struct ll_sb_info *sbi;
992 char *dt = NULL, *md = NULL;
993 char *profilenm = get_profile_name(sb);
994 struct config_llog_instance *cfg;
995 /* %p for void* in printf needs 16+2 characters: 0xffffffffffffffff */
996 const int instlen = sizeof(cfg->cfg_instance) * 2 + 2;
1000 CDEBUG(D_VFSTRACE, "VFS Op: sb %p\n", sb);
1008 /* client additional sb info */
1009 lsi->lsi_llsbi = sbi = ll_init_sbi();
1011 cfs_module_put(THIS_MODULE);
1016 err = ll_options(lsi->lsi_lmd->lmd_opts, &sbi->ll_flags);
1018 GOTO(out_free, err);
1020 err = bdi_init(&lsi->lsi_bdi);
1022 GOTO(out_free, err);
1023 lsi->lsi_flags |= LSI_BDI_INITIALIZED;
1024 lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY;
1025 err = ll_bdi_register(&lsi->lsi_bdi);
1027 GOTO(out_free, err);
1030 sb->s_bdi = &lsi->lsi_bdi;
1033 /* Generate a string unique to this super, in case some joker tries
1034 to mount the same fs at two mount points.
1035 Use the address of the super itself. */
1036 cfg->cfg_instance = sb;
1037 cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
1038 cfg->cfg_callback = class_config_llog_handler;
1039 /* set up client obds */
1040 err = lustre_process_log(sb, profilenm, cfg);
1042 CERROR("Unable to process log: %d\n", err);
1043 GOTO(out_free, err);
1046 /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
1047 lprof = class_get_profile(profilenm);
1048 if (lprof == NULL) {
1049 LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be"
1050 " read from the MGS. Does that filesystem "
1051 "exist?\n", profilenm);
1052 GOTO(out_free, err = -EINVAL);
1054 CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
1055 lprof->lp_md, lprof->lp_dt);
1057 OBD_ALLOC(dt, strlen(lprof->lp_dt) + instlen + 2);
1059 GOTO(out_free, err = -ENOMEM);
1060 sprintf(dt, "%s-%p", lprof->lp_dt, cfg->cfg_instance);
1062 OBD_ALLOC(md, strlen(lprof->lp_md) + instlen + 2);
1064 GOTO(out_free, err = -ENOMEM);
1065 sprintf(md, "%s-%p", lprof->lp_md, cfg->cfg_instance);
1067 /* connections, registrations, sb setup */
1068 err = client_common_fill_super(sb, md, dt, mnt);
1072 OBD_FREE(md, strlen(lprof->lp_md) + instlen + 2);
1074 OBD_FREE(dt, strlen(lprof->lp_dt) + instlen + 2);
1077 else if (sbi->ll_flags & LL_SBI_VERBOSE)
1078 LCONSOLE_WARN("Mounted %s\n", profilenm);
1082 } /* ll_fill_super */
1085 void lu_context_keys_dump(void);
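/* Unmount entry point: end the config log watch, clean up every obd
 * device in this superblock's group, free the ll_sb_info and the
 * backing_dev_info, and drop the module reference taken at mount time. */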
1087 void ll_put_super(struct super_block *sb)
1089 struct config_llog_instance cfg;
1090 struct obd_device *obd;
1091 struct lustre_sb_info *lsi = s2lsi(sb);
1092 struct ll_sb_info *sbi = ll_s2sbi(sb);
1093 char *profilenm = get_profile_name(sb);
1094 int force = 1, next;
1097 CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);
1099 ll_print_capa_stat(sbi);
1101 cfg.cfg_instance = sb;
1102 lustre_end_log(sb, profilenm, &cfg);
1104 if (sbi->ll_md_exp) {
1105 obd = class_exp2obd(sbi->ll_md_exp);
1107 force = obd->obd_force;
1110 /* We need to set force before the lov_disconnect in
1111 lustre_common_put_super, since l_d cleans up osc's as well. */
1114 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
1116 obd->obd_force = force;
1121 /* Only if client_common_fill_super succeeded */
1122 client_common_put_super(sb);
1126 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)) !=NULL) {
1127 class_manual_cleanup(obd);
1130 if (sbi->ll_flags & LL_SBI_VERBOSE)
1131 LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");
1134 class_del_profile(profilenm);
1136 if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
1137 bdi_destroy(&lsi->lsi_bdi);
1138 lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
1142 lsi->lsi_llsbi = NULL;
1144 lustre_common_put_super(sb);
1146 cfs_module_put(THIS_MODULE);
1149 } /* client_put_super */
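/* Return an igrab()'ed inode attached to the lock's resource via
 * lr_lvb_inode, or NULL if none is attached or its magic is bogus. */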
1151 struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
1153 struct inode *inode = NULL;
1155 /* NOTE: we depend on atomic igrab() -bzzz */
1156 lock_res_and_lock(lock);
1157 if (lock->l_resource->lr_lvb_inode) {
1158 struct ll_inode_info * lli;
1159 lli = ll_i2info(lock->l_resource->lr_lvb_inode);
1160 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1161 inode = igrab(lock->l_resource->lr_lvb_inode);
1163 inode = lock->l_resource->lr_lvb_inode;
1164 LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ? D_INFO :
1165 D_WARNING, lock, "lr_lvb_inode %p is "
1166 "bogus: magic %08x",
1167 lock->l_resource->lr_lvb_inode,
1168 lli->lli_inode_magic);
1172 unlock_res_and_lock(lock);
1176 struct inode *ll_inode_from_lock(struct ldlm_lock *lock)
1178 struct inode *inode = NULL;
1179 /* NOTE: we depend on atomic igrab() -bzzz */
1180 lock_res_and_lock(lock);
1181 if (lock->l_ast_data) {
1182 struct ll_inode_info *lli = ll_i2info(lock->l_ast_data);
1183 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1184 inode = igrab(lock->l_ast_data);
1186 inode = lock->l_ast_data;
1187 LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ? D_INFO :
1188 D_WARNING, lock, "l_ast_data %p is "
1189 "bogus: magic %08x", lock->l_ast_data,
1190 lli->lli_inode_magic);
1194 unlock_res_and_lock(lock);
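/* Final cleanup when an inode is evicted: close leftover MDS open handles,
 * free the symlink name, ACLs, remote permissions and capabilities, and
 * finish the cl_object attached to the inode. */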
1198 void ll_clear_inode(struct inode *inode)
1200 struct ll_inode_info *lli = ll_i2info(inode);
1201 struct ll_sb_info *sbi = ll_i2sbi(inode);
1204 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
1205 inode->i_generation, inode);
1207 if (S_ISDIR(inode->i_mode)) {
1208 /* these should have been cleared in ll_file_release */
1209 LASSERT(lli->lli_opendir_key == NULL);
1210 LASSERT(lli->lli_sai == NULL);
1211 LASSERT(lli->lli_opendir_pid == 0);
1214 ll_i2info(inode)->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
1215 md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
1217 LASSERT(!lli->lli_open_fd_write_count);
1218 LASSERT(!lli->lli_open_fd_read_count);
1219 LASSERT(!lli->lli_open_fd_exec_count);
1221 if (lli->lli_mds_write_och)
1222 ll_md_real_close(inode, FMODE_WRITE);
1223 if (lli->lli_mds_exec_och)
1224 ll_md_real_close(inode, FMODE_EXEC);
1225 if (lli->lli_mds_read_och)
1226 ll_md_real_close(inode, FMODE_READ);
1228 if (S_ISLNK(inode->i_mode) && lli->lli_symlink_name) {
1229 OBD_FREE(lli->lli_symlink_name,
1230 strlen(lli->lli_symlink_name) + 1);
1231 lli->lli_symlink_name = NULL;
1234 if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
1235 LASSERT(lli->lli_posix_acl == NULL);
1236 if (lli->lli_remote_perms) {
1237 free_rmtperm_hash(lli->lli_remote_perms);
1238 lli->lli_remote_perms = NULL;
1241 #ifdef CONFIG_FS_POSIX_ACL
1242 else if (lli->lli_posix_acl) {
1243 LASSERT(cfs_atomic_read(&lli->lli_posix_acl->a_refcount) == 1);
1244 LASSERT(lli->lli_remote_perms == NULL);
1245 posix_acl_release(lli->lli_posix_acl);
1246 lli->lli_posix_acl = NULL;
1249 lli->lli_inode_magic = LLI_INODE_DEAD;
1251 ll_clear_inode_capas(inode);
1252 if (!S_ISDIR(inode->i_mode))
1253 LASSERT(cfs_list_empty(&lli->lli_agl_list));
1256 /* XXX This has to be done before lsm is freed below, because
1257 * cl_object still uses inode lsm. */
1259 cl_inode_fini(inode);
1260 lli->lli_has_smd = false;
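/* Send the setattr RPC to the MDS and apply the reply to the local inode;
 * the IO epoch data is saved in op_data for ll_setattr_done_writing(). */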
1265 int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
1266 struct md_open_data **mod)
1268 struct lustre_md md;
1269 struct inode *inode = dentry->d_inode;
1270 struct ll_sb_info *sbi = ll_i2sbi(inode);
1271 struct ptlrpc_request *request = NULL;
1275 op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
1276 LUSTRE_OPC_ANY, NULL);
1277 if (IS_ERR(op_data))
1278 RETURN(PTR_ERR(op_data));
1280 rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, NULL, 0,
1283 ptlrpc_req_finished(request);
1284 if (rc == -ENOENT) {
1286 /* Unlinked special device node? Or just a race?
1287 * Pretend we have done everything. */
1288 if (!S_ISREG(inode->i_mode) &&
1289 !S_ISDIR(inode->i_mode)) {
1290 ia_valid = op_data->op_attr.ia_valid;
1291 op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
1292 rc = simple_setattr(dentry, &op_data->op_attr);
1293 op_data->op_attr.ia_valid = ia_valid;
1295 } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
1296 CERROR("md_setattr fails: rc = %d\n", rc);
1301 rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
1302 sbi->ll_md_exp, &md);
1304 ptlrpc_req_finished(request);
1308 ia_valid = op_data->op_attr.ia_valid;
1309 /* inode size will be in ll_setattr_ost, can't do it now since dirty
1310 * cache is not cleared yet. */
1311 op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
1312 rc = simple_setattr(dentry, &op_data->op_attr);
1313 op_data->op_attr.ia_valid = ia_valid;
1315 /* Extract epoch data if obtained. */
1316 op_data->op_handle = md.body->handle;
1317 op_data->op_ioepoch = md.body->ioepoch;
1319 ll_update_inode(inode, &md);
1320 ptlrpc_req_finished(request);
1325 /* Close IO epoch and send Size-on-MDS attribute update. */
1326 static int ll_setattr_done_writing(struct inode *inode,
1327 struct md_op_data *op_data,
1328 struct md_open_data *mod)
1330 struct ll_inode_info *lli = ll_i2info(inode);
1334 LASSERT(op_data != NULL);
1335 if (!S_ISREG(inode->i_mode))
1338 CDEBUG(D_INODE, "Epoch "LPU64" closed on "DFID" for truncate\n",
1339 op_data->op_ioepoch, PFID(&lli->lli_fid));
1341 op_data->op_flags = MF_EPOCH_CLOSE;
1342 ll_done_writing_attr(inode, op_data);
1343 ll_pack_inode2opdata(inode, op_data, NULL);
1345 rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, mod);
1346 if (rc == -EAGAIN) {
1347 /* MDS has instructed us to obtain Size-on-MDS attribute
1348 * from OSTs and send setattr back to the MDS. */
1349 rc = ll_som_update(inode, op_data);
1351 CERROR("inode %lu mdc truncate failed: rc = %d\n",
1357 static int ll_setattr_ost(struct inode *inode, struct iattr *attr)
1359 struct obd_capa *capa;
1362 if (attr->ia_valid & ATTR_SIZE)
1363 capa = ll_osscapa_get(inode, CAPA_OPC_OSS_TRUNC);
1365 capa = ll_mdscapa_get(inode);
1367 rc = cl_setattr_ost(inode, attr, capa);
1369 if (attr->ia_valid & ATTR_SIZE)
1370 ll_truncate_free_capa(capa);
1377 #ifndef HAVE_VFS_INODE_NEWSIZE_OK
1379 /** inode_newsize_ok - may this inode be truncated to a given size
1380 * @inode: the inode to be truncated
1381 * @offset: the new size to assign to the inode
1382 * @Returns: 0 on success, -ve errno on failure
1384 * inode_newsize_ok will check filesystem limits and ulimits to check that the
1385 * new inode size is within limits. inode_newsize_ok will also send SIGXFSZ
1386 * when necessary. Caller must not proceed with inode size change if failure is
1387 * returned. @inode must be a file (not directory), with appropriate
1388 * permissions to allow truncate (inode_newsize_ok does NOT check these conditions).
1391 * inode_newsize_ok must be called with i_mutex held. */
1393 int inode_newsize_ok(const struct inode *inode, loff_t offset)
1395 if (inode->i_size < offset) {
1396 unsigned long limit;
1398 limit = rlimit(RLIMIT_FSIZE);
1399 if (limit != RLIM_INFINITY && offset > limit)
1401 if (offset > inode->i_sb->s_maxbytes)
1405 /* truncation of in-use swapfiles is disallowed - it would
1406 * cause subsequent swapout to scribble on the now-freed blocks. */
1409 if (IS_SWAPFILE(inode))
1415 send_sig(SIGXFSZ, current, 0);
1421 /* If this inode has objects allocated to it (lsm != NULL), then the OST
1422 * object(s) determine the file size and mtime. Otherwise, the MDS will
1423 * keep these values until such a time that objects are allocated for it.
1424 * We do the MDS operations first, as it is checking permissions for us.
1425 * We don't do the MDS RPC if there is nothing that we want to store there,
1426 * otherwise there is no harm in updating mtime/atime on the MDS if we are
1427 * going to do an RPC anyways.
1429 * If we are doing a truncate, we will send the mtime and ctime updates
1430 * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
1431 * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE in the same call. */
1434 int ll_setattr_raw(struct dentry *dentry, struct iattr *attr)
1436 struct inode *inode = dentry->d_inode;
1437 struct ll_inode_info *lli = ll_i2info(inode);
1438 struct md_op_data *op_data = NULL;
1439 struct md_open_data *mod = NULL;
1440 int rc = 0, rc1 = 0;
1443 CDEBUG(D_VFSTRACE, "%s: setattr inode %p/fid:"DFID" from %llu to %llu, "
1444 "valid %x\n", ll_get_fsname(inode->i_sb, NULL, 0), inode,
1445 PFID(&lli->lli_fid), i_size_read(inode), attr->ia_size,
1448 if (attr->ia_valid & ATTR_SIZE) {
1449 /* Check new size against VFS/VM file size limit and rlimit */
1450 rc = inode_newsize_ok(inode, attr->ia_size);
1454 /* The maximum Lustre file size is variable, based on the
1455 * OST maximum object size and number of stripes. This
1456 * needs another check in addition to the VFS check above. */
1457 if (attr->ia_size > ll_file_maxbytes(inode)) {
1458 CDEBUG(D_INODE,"file "DFID" too large %llu > "LPU64"\n",
1459 PFID(&lli->lli_fid), attr->ia_size,
1460 ll_file_maxbytes(inode));
1464 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
1467 /* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
1468 if (attr->ia_valid & TIMES_SET_FLAGS) {
1469 if (cfs_curproc_fsuid() != inode->i_uid &&
1470 !cfs_capable(CFS_CAP_FOWNER))
1474 /* We mark all of the fields "set" so MDS/OST does not re-set them */
1475 if (attr->ia_valid & ATTR_CTIME) {
1476 attr->ia_ctime = CFS_CURRENT_TIME;
1477 attr->ia_valid |= ATTR_CTIME_SET;
1479 if (!(attr->ia_valid & ATTR_ATIME_SET) &&
1480 (attr->ia_valid & ATTR_ATIME)) {
1481 attr->ia_atime = CFS_CURRENT_TIME;
1482 attr->ia_valid |= ATTR_ATIME_SET;
1484 if (!(attr->ia_valid & ATTR_MTIME_SET) &&
1485 (attr->ia_valid & ATTR_MTIME)) {
1486 attr->ia_mtime = CFS_CURRENT_TIME;
1487 attr->ia_valid |= ATTR_MTIME_SET;
1490 if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
1491 CDEBUG(D_INODE, "setting mtime %lu, ctime %lu, now = %lu\n",
1492 LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
1493 cfs_time_current_sec());
1495 /* If we are changing file size, file content is modified, flag it. */
1496 if (attr->ia_valid & ATTR_SIZE) {
1497 attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
1498 spin_lock(&lli->lli_lock);
1499 lli->lli_flags |= LLIF_DATA_MODIFIED;
1500 spin_unlock(&lli->lli_lock);
1503 /* We always do an MDS RPC, even if we're only changing the size;
1504 * only the MDS knows whether truncate() should fail with -ETXTBUSY */
1506 OBD_ALLOC_PTR(op_data);
1507 if (op_data == NULL)
1510 if (!S_ISDIR(inode->i_mode)) {
1511 if (attr->ia_valid & ATTR_SIZE)
1512 inode_dio_write_done(inode);
1513 mutex_unlock(&inode->i_mutex);
1514 down_write(&lli->lli_trunc_sem);
1517 memcpy(&op_data->op_attr, attr, sizeof(*attr));
1519 /* Open epoch for truncate. */
1520 if (exp_connect_som(ll_i2mdexp(inode)) &&
1521 (attr->ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MTIME_SET)))
1522 op_data->op_flags = MF_EPOCH_OPEN;
1524 rc = ll_md_setattr(dentry, op_data, &mod);
1528 /* RPC to MDT is sent, cancel data modification flag */
1529 if (rc == 0 && (op_data->op_bias & MDS_DATA_MODIFIED)) {
1530 spin_lock(&lli->lli_lock);
1531 lli->lli_flags &= ~LLIF_DATA_MODIFIED;
1532 spin_unlock(&lli->lli_lock);
1535 ll_ioepoch_open(lli, op_data->op_ioepoch);
1536 if (!S_ISREG(inode->i_mode))
1539 if (attr->ia_valid & (ATTR_SIZE |
1540 ATTR_ATIME | ATTR_ATIME_SET |
1541 ATTR_MTIME | ATTR_MTIME_SET))
1542 /* For truncate and utimes sending attributes to OSTs, setting
1543 * mtime/atime to the past will be performed under PW [0:EOF]
1544 * extent lock (new_size:EOF for truncate). It may seem
1545 * excessive to send mtime/atime updates to OSTs when not
1546 * setting times to past, but it is necessary due to possible
1547 * time de-synchronization between MDT inode and OST objects */
1548 rc = ll_setattr_ost(inode, attr);
1552 if (op_data->op_ioepoch) {
1553 rc1 = ll_setattr_done_writing(inode, op_data, mod);
1557 ll_finish_md_op_data(op_data);
1559 if (!S_ISDIR(inode->i_mode)) {
1560 up_write(&lli->lli_trunc_sem);
1561 mutex_lock(&inode->i_mutex);
1562 if (attr->ia_valid & ATTR_SIZE)
1563 inode_dio_wait(inode);
1566 ll_stats_ops_tally(ll_i2sbi(inode), (attr->ia_valid & ATTR_SIZE) ?
1567 LPROC_LL_TRUNC : LPROC_LL_SETATTR, 1);
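/* VFS ->setattr entry point: set the owner-override and kill-SUID/SGID
 * flags as needed, then hand off to ll_setattr_raw(). */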
1572 int ll_setattr(struct dentry *de, struct iattr *attr)
1574 int mode = de->d_inode->i_mode;
1576 if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
1577 (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
1578 attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
1580 if (((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
1581 (ATTR_SIZE|ATTR_MODE)) &&
1582 (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
1583 (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
1584 !(attr->ia_mode & S_ISGID))))
1585 attr->ia_valid |= ATTR_FORCE;
1587 if ((mode & S_ISUID) &&
1588 !(attr->ia_mode & S_ISUID) &&
1589 !(attr->ia_valid & ATTR_KILL_SUID))
1590 attr->ia_valid |= ATTR_KILL_SUID;
1592 if (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
1593 !(attr->ia_mode & S_ISGID) &&
1594 !(attr->ia_valid & ATTR_KILL_SGID))
1595 attr->ia_valid |= ATTR_KILL_SGID;
1597 return ll_setattr_raw(de, attr);
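/* Aggregate statfs data for the filesystem: inode counts come from the
 * MDS, block counts from the OSTs, with the file total reduced if the
 * OSTs have fewer free objects than the MDS has free inodes. */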
1600 int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
1601 __u64 max_age, __u32 flags)
1603 struct ll_sb_info *sbi = ll_s2sbi(sb);
1604 struct obd_statfs obd_osfs;
1608 rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
1610 CERROR("md_statfs fails: rc = %d\n", rc);
1614 osfs->os_type = sb->s_magic;
1616 CDEBUG(D_SUPER, "MDC blocks "LPU64"/"LPU64" objects "LPU64"/"LPU64"\n",
1617 osfs->os_bavail, osfs->os_blocks, osfs->os_ffree,osfs->os_files);
1619 if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
1620 flags |= OBD_STATFS_NODELAY;
1622 rc = obd_statfs_rqset(sbi->ll_dt_exp, &obd_osfs, max_age, flags);
1624 CERROR("obd_statfs fails: rc = %d\n", rc);
1628 CDEBUG(D_SUPER, "OSC blocks "LPU64"/"LPU64" objects "LPU64"/"LPU64"\n",
1629 obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
1632 osfs->os_bsize = obd_osfs.os_bsize;
1633 osfs->os_blocks = obd_osfs.os_blocks;
1634 osfs->os_bfree = obd_osfs.os_bfree;
1635 osfs->os_bavail = obd_osfs.os_bavail;
1637 /* If we don't have as many objects free on the OST as inodes
1638 * on the MDS, we reduce the total number of inodes to
1639 * compensate, so that the "inodes in use" number is correct. */
1641 if (obd_osfs.os_ffree < osfs->os_ffree) {
1642 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
1644 osfs->os_ffree = obd_osfs.os_ffree;
1649 int ll_statfs(struct dentry *de, struct kstatfs *sfs)
1651 struct super_block *sb = de->d_sb;
1652 struct obd_statfs osfs;
1655 CDEBUG(D_VFSTRACE, "VFS Op: at "LPU64" jiffies\n", get_jiffies_64());
1656 ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STAFS, 1);
1658 /* Some amount of caching on the client is allowed */
1659 rc = ll_statfs_internal(sb, &osfs,
1660 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
1665 statfs_unpack(sfs, &osfs);
1667 /* We need to downshift for all 32-bit kernels, because we can't
1668 * tell if the kernel is being called via sys_statfs64() or not.
1669 * Stop before overflowing f_bsize - in which case it is better
1670 * to just risk EOVERFLOW if caller is using old sys_statfs(). */
1671 if (sizeof(long) < 8) {
1672 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
1675 osfs.os_blocks >>= 1;
1676 osfs.os_bfree >>= 1;
1677 osfs.os_bavail >>= 1;
1681 sfs->f_blocks = osfs.os_blocks;
1682 sfs->f_bfree = osfs.os_bfree;
1683 sfs->f_bavail = osfs.os_bavail;
1688 void ll_inode_size_lock(struct inode *inode)
1690 struct ll_inode_info *lli;
1692 LASSERT(!S_ISDIR(inode->i_mode));
1694 lli = ll_i2info(inode);
1695 LASSERT(lli->lli_size_sem_owner != current);
1696 down(&lli->lli_size_sem);
1697 LASSERT(lli->lli_size_sem_owner == NULL);
1698 lli->lli_size_sem_owner = current;
1701 void ll_inode_size_unlock(struct inode *inode)
1703 struct ll_inode_info *lli;
1705 lli = ll_i2info(inode);
1706 LASSERT(lli->lli_size_sem_owner == current);
1707 lli->lli_size_sem_owner = NULL;
1708 up(&lli->lli_size_sem);
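/* Apply the attributes in an MDS reply (struct lustre_md) to the VFS
 * inode: times, ownership, mode, nlink, FID, ACLs, capabilities and,
 * subject to the Size-on-MDS rules below, i_size. */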
1711 void ll_update_inode(struct inode *inode, struct lustre_md *md)
1713 struct ll_inode_info *lli = ll_i2info(inode);
1714 struct mdt_body *body = md->body;
1715 struct lov_stripe_md *lsm = md->lsm;
1716 struct ll_sb_info *sbi = ll_i2sbi(inode);
1718 LASSERT ((lsm != NULL) == ((body->valid & OBD_MD_FLEASIZE) != 0));
1720 if (!lli->lli_has_smd &&
1721 !(sbi->ll_flags & LL_SBI_LAYOUT_LOCK))
1722 cl_file_inode_init(inode, md);
1724 lli->lli_maxbytes = lsm->lsm_maxbytes;
1725 if (lli->lli_maxbytes > MAX_LFS_FILESIZE)
1726 lli->lli_maxbytes = MAX_LFS_FILESIZE;
1729 if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
1730 if (body->valid & OBD_MD_FLRMTPERM)
1731 ll_update_remote_perm(inode, md->remote_perm);
1733 #ifdef CONFIG_FS_POSIX_ACL
1734 else if (body->valid & OBD_MD_FLACL) {
1735 spin_lock(&lli->lli_lock);
1736 if (lli->lli_posix_acl)
1737 posix_acl_release(lli->lli_posix_acl);
1738 lli->lli_posix_acl = md->posix_acl;
1739 spin_unlock(&lli->lli_lock);
1742 inode->i_ino = cl_fid_build_ino(&body->fid1, ll_need_32bit_api(sbi));
1743 inode->i_generation = cl_fid_build_gen(&body->fid1);
1745 if (body->valid & OBD_MD_FLATIME) {
1746 if (body->atime > LTIME_S(inode->i_atime))
1747 LTIME_S(inode->i_atime) = body->atime;
1748 lli->lli_lvb.lvb_atime = body->atime;
1750 if (body->valid & OBD_MD_FLMTIME) {
1751 if (body->mtime > LTIME_S(inode->i_mtime)) {
1752 CDEBUG(D_INODE, "setting ino %lu mtime from %lu "
1753 "to "LPU64"\n", inode->i_ino,
1754 LTIME_S(inode->i_mtime), body->mtime);
1755 LTIME_S(inode->i_mtime) = body->mtime;
1757 lli->lli_lvb.lvb_mtime = body->mtime;
1759 if (body->valid & OBD_MD_FLCTIME) {
1760 if (body->ctime > LTIME_S(inode->i_ctime))
1761 LTIME_S(inode->i_ctime) = body->ctime;
1762 lli->lli_lvb.lvb_ctime = body->ctime;
1764 if (body->valid & OBD_MD_FLMODE)
1765 inode->i_mode = (inode->i_mode & S_IFMT)|(body->mode & ~S_IFMT);
1766 if (body->valid & OBD_MD_FLTYPE)
1767 inode->i_mode = (inode->i_mode & ~S_IFMT)|(body->mode & S_IFMT);
1768 LASSERT(inode->i_mode != 0);
1769 if (S_ISREG(inode->i_mode)) {
1770 inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1, LL_MAX_BLKSIZE_BITS);
1772 inode->i_blkbits = inode->i_sb->s_blocksize_bits;
1774 if (body->valid & OBD_MD_FLUID)
1775 inode->i_uid = body->uid;
1776 if (body->valid & OBD_MD_FLGID)
1777 inode->i_gid = body->gid;
1778 if (body->valid & OBD_MD_FLFLAGS)
1779 inode->i_flags = ll_ext_to_inode_flags(body->flags);
1780 if (body->valid & OBD_MD_FLNLINK)
1781 set_nlink(inode, body->nlink);
1782 if (body->valid & OBD_MD_FLRDEV)
1783 inode->i_rdev = old_decode_dev(body->rdev);
1785 if (body->valid & OBD_MD_FLID) {
1786 /* FID shouldn't be changed! */
1787 if (fid_is_sane(&lli->lli_fid)) {
1788 LASSERTF(lu_fid_eq(&lli->lli_fid, &body->fid1),
1789 "Trying to change FID "DFID
1790 " to the "DFID", inode %lu/%u(%p)\n",
1791 PFID(&lli->lli_fid), PFID(&body->fid1),
1792 inode->i_ino, inode->i_generation, inode);
1794 lli->lli_fid = body->fid1;
1797 LASSERT(fid_seq(&lli->lli_fid) != 0);
1799 if (body->valid & OBD_MD_FLSIZE) {
1800 if (exp_connect_som(ll_i2mdexp(inode)) &&
1801 S_ISREG(inode->i_mode)) {
1802 struct lustre_handle lockh;
1805 /* As it is possible a blocking ast has been processed
1806 * by this time, we need to check there is an UPDATE
1807 * lock on the client and set LLIF_MDS_SIZE_LOCK holding it. */
1809 mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE,
1810 &lockh, LDLM_FL_CBPENDING);
1812 if (lli->lli_flags & (LLIF_DONE_WRITING |
1813 LLIF_EPOCH_PENDING |
1815 CERROR("ino %lu flags %u still has "
1816 "size authority! do not trust "
1817 "the size got from MDS\n",
1818 inode->i_ino, lli->lli_flags);
1820 /* Use old size assignment to avoid
1821 * deadlock bz14138 & bz14326 */
1822 i_size_write(inode, body->size);
1823 lli->lli_flags |= LLIF_MDS_SIZE_LOCK;
1825 ldlm_lock_decref(&lockh, mode);
1828 /* Use old size assignment to avoid
1829 * deadlock bz14138 & bz14326 */
1830 i_size_write(inode, body->size);
1832 CDEBUG(D_VFSTRACE, "inode=%lu, updating i_size %llu\n",
1833 inode->i_ino, (unsigned long long)body->size);
1836 if (body->valid & OBD_MD_FLBLOCKS)
1837 inode->i_blocks = body->blocks;
1840 if (body->valid & OBD_MD_FLMDSCAPA) {
1841 LASSERT(md->mds_capa);
1842 ll_add_capa(inode, md->mds_capa);
1844 if (body->valid & OBD_MD_FLOSSCAPA) {
1845 LASSERT(md->oss_capa);
1846 ll_add_capa(inode, md->oss_capa);
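/* Fill a brand new inode from the MDS metadata and install the inode,
 * file and address-space operations that match its file type. */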
1850 void ll_read_inode2(struct inode *inode, void *opaque)
1852 struct lustre_md *md = opaque;
1853 struct ll_inode_info *lli = ll_i2info(inode);
1856 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
1857 PFID(&lli->lli_fid), inode);
1859 LASSERT(!lli->lli_has_smd);
1861 /* Core attributes from the MDS first. This is a new inode, and
1862 * the VFS doesn't zero times in the core inode so we have to do
1863 * it ourselves. They will be overwritten by either MDS or OST
1864 * attributes - we just need to make sure they aren't newer. */
1865 LTIME_S(inode->i_mtime) = 0;
1866 LTIME_S(inode->i_atime) = 0;
1867 LTIME_S(inode->i_ctime) = 0;
1869 ll_update_inode(inode, md);
1871 /* OIDEBUG(inode); */
1873 /* initializing backing dev info. */
1874 inode->i_mapping->backing_dev_info = &s2lsi(inode->i_sb)->lsi_bdi;
1877 if (S_ISREG(inode->i_mode)) {
1878 struct ll_sb_info *sbi = ll_i2sbi(inode);
1879 inode->i_op = &ll_file_inode_operations;
1880 inode->i_fop = sbi->ll_fop;
1881 inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
1883 } else if (S_ISDIR(inode->i_mode)) {
1884 inode->i_op = &ll_dir_inode_operations;
1885 inode->i_fop = &ll_dir_operations;
1887 } else if (S_ISLNK(inode->i_mode)) {
1888 inode->i_op = &ll_fast_symlink_inode_operations;
1891 inode->i_op = &ll_special_inode_operations;
1893 init_special_inode(inode, inode->i_mode,
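/* Discard dirty pages and truncate the page cache before the inode is
 * destroyed; the LU-118 workaround below asserts the cache is empty. */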
1900 void ll_delete_inode(struct inode *inode)
1902 struct cl_inode_info *lli = cl_i2info(inode);
1905 if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL)
1906 /* discard all dirty pages before truncating them, required by
1907 * osc_extent implementation at LU-1030. */
1908 cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
1909 CL_FSYNC_DISCARD, 1);
1911 truncate_inode_pages(&inode->i_data, 0);
1913 /* Workaround for LU-118 */
1914 if (inode->i_data.nrpages) {
1915 TREE_READ_LOCK_IRQ(&inode->i_data);
1916 TREE_READ_UNLOCK_IRQ(&inode->i_data);
1917 LASSERTF(inode->i_data.nrpages == 0,
1918 "inode=%lu/%u(%p) nrpages=%lu, see "
1919 "http://jira.whamcloud.com/browse/LU-118\n",
1920 inode->i_ino, inode->i_generation, inode,
1921 inode->i_data.nrpages);
1923 /* Workaround end */
1925 #ifdef HAVE_SBOPS_EVICT_INODE
1926 ll_clear_inode(inode);
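/* Handle the FSFILT_IOC_GETFLAGS/SETFLAGS ioctls: flags are read from
 * the MDS and, on set, propagated to both the MDS and the OST objects. */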
1933 int ll_iocontrol(struct inode *inode, struct file *file,
1934 unsigned int cmd, unsigned long arg)
1936 struct ll_sb_info *sbi = ll_i2sbi(inode);
1937 struct ptlrpc_request *req = NULL;
1942 case FSFILT_IOC_GETFLAGS: {
1943 struct mdt_body *body;
1944 struct md_op_data *op_data;
1946 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
1947 0, 0, LUSTRE_OPC_ANY,
1949 if (IS_ERR(op_data))
1950 RETURN(PTR_ERR(op_data));
1952 op_data->op_valid = OBD_MD_FLFLAGS;
1953 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
1954 ll_finish_md_op_data(op_data);
1956 CERROR("failure %d inode %lu\n", rc, inode->i_ino);
1960 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
1962 flags = body->flags;
1964 ptlrpc_req_finished(req);
1966 RETURN(put_user(flags, (int *)arg));
1968 case FSFILT_IOC_SETFLAGS: {
1969 struct lov_stripe_md *lsm;
1970 struct obd_info oinfo = { { { 0 } } };
1971 struct md_op_data *op_data;
1973 if (get_user(flags, (int *)arg))
1976 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
1977 LUSTRE_OPC_ANY, NULL);
1978 if (IS_ERR(op_data))
1979 RETURN(PTR_ERR(op_data));
1981 ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags = flags;
1982 op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
1983 rc = md_setattr(sbi->ll_md_exp, op_data,
1984 NULL, 0, NULL, 0, &req, NULL);
1985 ll_finish_md_op_data(op_data);
1986 ptlrpc_req_finished(req);
1990 inode->i_flags = ll_ext_to_inode_flags(flags);
1992 lsm = ccc_inode_lsm_get(inode);
1996 OBDO_ALLOC(oinfo.oi_oa);
1998 ccc_inode_lsm_put(inode, lsm);
2002 oinfo.oi_oa->o_oi = lsm->lsm_oi;
2003 oinfo.oi_oa->o_flags = flags;
2004 oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS |
2006 oinfo.oi_capa = ll_mdscapa_get(inode);
2007 obdo_set_parent_fid(oinfo.oi_oa, &ll_i2info(inode)->lli_fid);
2008 rc = obd_setattr_rqset(sbi->ll_dt_exp, &oinfo, NULL);
2009 capa_put(oinfo.oi_capa);
2010 OBDO_FREE(oinfo.oi_oa);
2011 ccc_inode_lsm_put(inode, lsm);
2013 if (rc && rc != -EPERM && rc != -EACCES)
2014 CERROR("osc_setattr_async fails: rc = %d\n", rc);
2025 int ll_flush_ctx(struct inode *inode)
2027 struct ll_sb_info *sbi = ll_i2sbi(inode);
2029 CDEBUG(D_SEC, "flush context for user %d\n", cfs_curproc_uid());
2031 obd_set_info_async(NULL, sbi->ll_md_exp,
2032 sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2034 obd_set_info_async(NULL, sbi->ll_dt_exp,
2035 sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2040 /* umount -f client means force down, don't save state */
2041 #ifdef HAVE_UMOUNTBEGIN_VFSMOUNT
2042 void ll_umount_begin(struct vfsmount *vfsmnt, int flags)
2044 struct super_block *sb = vfsmnt->mnt_sb;
2046 void ll_umount_begin(struct super_block *sb)
2049 struct ll_sb_info *sbi = ll_s2sbi(sb);
2050 struct obd_device *obd;
2051 struct obd_ioctl_data *ioc_data;
2054 #ifdef HAVE_UMOUNTBEGIN_VFSMOUNT
2055 if (!(flags & MNT_FORCE)) {
2061 CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
2062 sb->s_count, atomic_read(&sb->s_active));
2064 obd = class_exp2obd(sbi->ll_md_exp);
2066 CERROR("Invalid MDC connection handle "LPX64"\n",
2067 sbi->ll_md_exp->exp_handle.h_cookie);
2073 obd = class_exp2obd(sbi->ll_dt_exp);
2075 CERROR("Invalid LOV connection handle "LPX64"\n",
2076 sbi->ll_dt_exp->exp_handle.h_cookie);
2082 OBD_ALLOC_PTR(ioc_data);
2084 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
2085 sizeof *ioc_data, ioc_data, NULL);
2087 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
2088 sizeof *ioc_data, ioc_data, NULL);
2090 OBD_FREE_PTR(ioc_data);
2093 /* Really, we'd like to wait until there are no requests outstanding,
2094 * and then continue. For now, we just invalidate the requests,
2095 * schedule() and sleep one second if needed, and hope. */
2098 #ifdef HAVE_UMOUNTBEGIN_VFSMOUNT
2099 if (atomic_read(&vfsmnt->mnt_count) > 2) {
2100 cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
2101 cfs_time_seconds(1));
2102 if (atomic_read(&vfsmnt->mnt_count) > 2)
2103 LCONSOLE_WARN("Mount still busy with %d refs! You "
2104 "may try to umount it a bit later\n",
2105 atomic_read(&vfsmnt->mnt_count));
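/* Handle read-only/read-write remount by telling the MDS about the new
 * state and updating sb->s_flags to match. */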
2112 int ll_remount_fs(struct super_block *sb, int *flags, char *data)
2114 struct ll_sb_info *sbi = ll_s2sbi(sb);
2115 char *profilenm = get_profile_name(sb);
2119 if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
2120 read_only = *flags & MS_RDONLY;
2121 err = obd_set_info_async(NULL, sbi->ll_md_exp,
2122 sizeof(KEY_READ_ONLY),
2123 KEY_READ_ONLY, sizeof(read_only),
2126 LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
2127 profilenm, read_only ?
2128 "read-only" : "read-write", err);
2133 sb->s_flags |= MS_RDONLY;
2135 sb->s_flags &= ~MS_RDONLY;
2137 if (sbi->ll_flags & LL_SBI_VERBOSE)
2138 LCONSOLE_WARN("Remounted %s %s\n", profilenm,
2139 read_only ? "read-only" : "read-write");
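/* Unpack the metadata from an MDS reply and either update the existing
 * inode or instantiate a new one; a layout lock piggybacked on the
 * lookup intent is applied to the inode as well. */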
2144 int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
2145 struct super_block *sb, struct lookup_intent *it)
2147 struct ll_sb_info *sbi = NULL;
2148 struct lustre_md md;
2152 LASSERT(*inode || sb);
2153 sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
2154 rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
2155 sbi->ll_md_exp, &md);
2160 ll_update_inode(*inode, &md);
2162 LASSERT(sb != NULL);
2165 /* At this point the server returns the same FID that the client
2166 * generated at create time, so using ->fid1 is okay here. */
2168 LASSERT(fid_is_sane(&md.body->fid1));
2170 *inode = ll_iget(sb, cl_fid_build_ino(&md.body->fid1,
2171 ll_need_32bit_api(sbi)),
2173 if (*inode == NULL || IS_ERR(*inode)) {
2174 #ifdef CONFIG_FS_POSIX_ACL
2176 posix_acl_release(md.posix_acl);
2177 md.posix_acl = NULL;
2180 rc = IS_ERR(*inode) ? PTR_ERR(*inode) : -ENOMEM;
2182 CERROR("new_inode -fatal: rc %d\n", rc);
2187 /* Handling piggyback layout lock.
2188 * Layout lock can be piggybacked by getattr and open request.
2189 * The lsm can be applied to inode only if it comes with a layout lock
2190 * otherwise correct layout may be overwritten, for example:
2191 * 1. proc1: mdt returns a lsm but not granting layout
2192 * 2. layout was changed by another client
2193 * 3. proc2: refresh layout and layout lock granted
2194 * 4. proc1: to apply a stale layout */
2195 if (it != NULL && it->d.lustre.it_lock_mode != 0) {
2196 struct lustre_handle lockh;
2197 struct ldlm_lock *lock;
2199 lockh.cookie = it->d.lustre.it_lock_handle;
2200 lock = ldlm_handle2lock(&lockh);
2201 LASSERT(lock != NULL);
2202 if (ldlm_has_layout(lock)) {
2203 struct cl_object_conf conf;
2205 memset(&conf, 0, sizeof(conf));
2206 conf.coc_opc = OBJECT_CONF_SET;
2207 conf.coc_inode = *inode;
2208 conf.coc_lock = lock;
2209 conf.u.coc_md = &md;
2210 (void)ll_layout_conf(*inode, &conf);
2212 LDLM_LOCK_PUT(lock);
2217 obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
2218 md_free_lustre_md(sbi->ll_md_exp, &md);
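
/*
 * IOC_OBD_STATFS ioctl helper: validate the user-supplied obd_ioctl_data and
 * forward the statfs request to the MDC or LOV export selected by the
 * LL_STATFS_LMV / LL_STATFS_LOV type flag.
 */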
int ll_obd_statfs(struct inode *inode, void *arg)
{
        struct ll_sb_info *sbi = NULL;
        struct obd_export *exp;
        char *buf = NULL;
        struct obd_ioctl_data *data = NULL;
        __u32 type;
        __u32 flags;
        int len = 0, rc;

        if (!inode || !(sbi = ll_i2sbi(inode)))
                GOTO(out_statfs, rc = -EINVAL);

        rc = obd_ioctl_getdata(&buf, &len, arg);
        if (rc)
                GOTO(out_statfs, rc);

        data = (void *)buf;
        if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
            !data->ioc_pbuf1 || !data->ioc_pbuf2)
                GOTO(out_statfs, rc = -EINVAL);

        if (data->ioc_inllen1 != sizeof(__u32) ||
            data->ioc_inllen2 != sizeof(__u32) ||
            data->ioc_plen1 != sizeof(struct obd_statfs) ||
            data->ioc_plen2 != sizeof(struct obd_uuid))
                GOTO(out_statfs, rc = -EINVAL);

        memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
        if (type & LL_STATFS_LMV)
                exp = sbi->ll_md_exp;
        else if (type & LL_STATFS_LOV)
                exp = sbi->ll_dt_exp;
        else
                GOTO(out_statfs, rc = -ENODEV);

        flags = (type & LL_STATFS_NODELAY) ? OBD_STATFS_NODELAY : 0;
        rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, &flags);
        if (rc)
                GOTO(out_statfs, rc);
out_statfs:
        if (buf)
                obd_ioctl_freedata(buf, len);
        return rc;
}
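
/*
 * Apply a configuration log record (e.g. a parameter set with lctl
 * conf_param) to a mounted client: recover the superblock pointer encoded
 * in the instance name and pass the record to the llite proc handlers.
 */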
int ll_process_config(struct lustre_cfg *lcfg)
{
        char *ptr;
        void *sb;
        struct lprocfs_static_vars lvars;
        unsigned long x;
        int rc = 0;

        lprocfs_llite_init_vars(&lvars);

        /* The instance name contains the sb: lustre-client-aacfe000 */
        ptr = strrchr(lustre_cfg_string(lcfg, 0), '-');
        if (!ptr || !*(++ptr))
                return -EINVAL;
        if (sscanf(ptr, "%lx", &x) != 1)
                return -EINVAL;
        sb = (void *)x;
        /* This better be a real Lustre superblock! */
        LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic == LMD_MAGIC);

        /* Note we have not called client_common_fill_super yet, so
         * proc fns must be able to handle that! */
        rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars,
                                      lcfg, sb);
        if (rc > 0)
                rc = 0;
        return rc;
}

/* This function prepares the md_op_data hint for passing it down to the MD stack. */
struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
                                      struct inode *i1, struct inode *i2,
                                      const char *name, int namelen,
                                      int mode, __u32 opc, void *data)
{
        LASSERT(i1 != NULL);

        if (namelen > ll_i2sbi(i1)->ll_namelen)
                return ERR_PTR(-ENAMETOOLONG);

        if (op_data == NULL)
                OBD_ALLOC_PTR(op_data);

        if (op_data == NULL)
                return ERR_PTR(-ENOMEM);

        ll_i2gids(op_data->op_suppgids, i1, i2);
        op_data->op_fid1 = *ll_inode2fid(i1);
        op_data->op_capa1 = ll_mdscapa_get(i1);

        if (i2) {
                op_data->op_fid2 = *ll_inode2fid(i2);
                op_data->op_capa2 = ll_mdscapa_get(i2);
        } else {
                fid_zero(&op_data->op_fid2);
                op_data->op_capa2 = NULL;
        }

        op_data->op_name = name;
        op_data->op_namelen = namelen;
        op_data->op_mode = mode;
        op_data->op_mod_time = cfs_time_current_sec();
        op_data->op_fsuid = cfs_curproc_fsuid();
        op_data->op_fsgid = cfs_curproc_fsgid();
        op_data->op_cap = cfs_curproc_cap_pack();
        op_data->op_bias = 0;
        op_data->op_cli_flags = 0;
        if ((opc == LUSTRE_OPC_CREATE) && (name != NULL) &&
            filename_is_volatile(name, namelen, NULL))
                op_data->op_bias |= MDS_CREATE_VOLATILE;
        op_data->op_opc = opc;
        op_data->op_mds = 0;
        op_data->op_data = data;

        /* If the file is being opened after mknod() (normally due to NFS)
         * try to use the default stripe data from the parent directory when
         * allocating OST objects.  Try to pass the parent FID to the MDS. */
        if (opc == LUSTRE_OPC_CREATE && i1 == i2 && S_ISREG(i2->i_mode) &&
            !ll_i2info(i2)->lli_has_smd) {
                struct ll_inode_info *lli = ll_i2info(i2);

                spin_lock(&lli->lli_lock);
                if (likely(!lli->lli_has_smd && !fid_is_zero(&lli->lli_pfid)))
                        op_data->op_fid1 = lli->lli_pfid;
                spin_unlock(&lli->lli_lock);
                /* We ignore the parent's capability for now. */
        }

        /* When called by ll_setattr_raw, the file is i1. */
        if (LLIF_DATA_MODIFIED & ll_i2info(i1)->lli_flags)
                op_data->op_bias |= MDS_DATA_MODIFIED;

        return op_data;
}
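
/*
 * Release the capabilities taken in ll_prep_md_op_data() and free the
 * md_op_data structure.
 */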
void ll_finish_md_op_data(struct md_op_data *op_data)
{
        capa_put(op_data->op_capa1);
        capa_put(op_data->op_capa2);
        OBD_FREE_PTR(op_data);
}
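
/*
 * ->show_options() handler: emit the llite-specific mount options recorded
 * in sbi->ll_flags for /proc/mounts.
 */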
#ifdef HAVE_SUPEROPS_USE_DENTRY
int ll_show_options(struct seq_file *seq, struct dentry *dentry)
#else
int ll_show_options(struct seq_file *seq, struct vfsmount *vfs)
#endif
{
        struct ll_sb_info *sbi;

#ifdef HAVE_SUPEROPS_USE_DENTRY
        LASSERT((seq != NULL) && (dentry != NULL));
        sbi = ll_s2sbi(dentry->d_sb);
#else
        LASSERT((seq != NULL) && (vfs != NULL));
        sbi = ll_s2sbi(vfs->mnt_sb);
#endif

        if (sbi->ll_flags & LL_SBI_NOLCK)
                seq_puts(seq, ",nolock");

        if (sbi->ll_flags & LL_SBI_FLOCK)
                seq_puts(seq, ",flock");

        if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
                seq_puts(seq, ",localflock");

        if (sbi->ll_flags & LL_SBI_USER_XATTR)
                seq_puts(seq, ",user_xattr");

        if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
                seq_puts(seq, ",lazystatfs");

        if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
                seq_puts(seq, ",user_fid2path");

        RETURN(0);
}

/**
 * Get obd name by cmd, and copy out to user space
 */
int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
{
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct obd_device *obd;
        ENTRY;

        if (cmd == OBD_IOC_GETDTNAME)
                obd = class_exp2obd(sbi->ll_dt_exp);
        else if (cmd == OBD_IOC_GETMDNAME)
                obd = class_exp2obd(sbi->ll_md_exp);
        else
                RETURN(-EINVAL);

        if (obd == NULL)
                RETURN(-ENOENT);

        if (cfs_copy_to_user((void *)arg, obd->obd_name,
                             strlen(obd->obd_name) + 1))
                RETURN(-EFAULT);

        RETURN(0);
}

/**
 * Get the Lustre file system name from \a sb. If \a buf is provided
 * (non-NULL), the fsname will be returned in this buffer; otherwise, a
 * static buffer will be used to store the fsname and returned to the caller.
 */
char *ll_get_fsname(struct super_block *sb, char *buf, int buflen)
{
        static char fsname_static[MTI_NAME_MAXLEN];
        struct lustre_sb_info *lsi = s2lsi(sb);
        char *ptr;
        int len;

        if (buf == NULL) {
                /* this means the caller wants to use a static buffer
                 * and does not care about races. Usually this is
                 * in the error reporting path */
                buf = fsname_static;
                buflen = sizeof(fsname_static);
        }

        len = strlen(lsi->lsi_lmd->lmd_profile);
        ptr = strrchr(lsi->lsi_lmd->lmd_profile, '-');
        if (ptr && (strcmp(ptr, "-client") == 0))
                len -= 7;

        if (unlikely(len >= buflen))
                len = buflen - 1;
        strncpy(buf, lsi->lsi_lmd->lmd_profile, len);
        buf[len] = '\0';

        return buf;
}
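
/*
 * Resolve \a dentry to a path name in \a buf relative to the current
 * process's filesystem root; used by the dirty page discard warning below.
 */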
static char *ll_d_path(struct dentry *dentry, char *buf, int bufsize)
{
        char *path = NULL;

#ifdef HAVE_FS_STRUCT_USE_PATH
        struct path p;

        p.dentry = dentry;
        p.mnt = current->fs->root.mnt;
        path_get(&p);
        path = d_path(&p, buf, bufsize);
        path_put(&p);
#else
        path = d_path(dentry, current->fs->rootmnt, buf, bufsize);
#endif

        return path;
}
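
/*
 * Warn that a dirty page is being discarded due to an I/O error, logging the
 * filesystem name, device, FID and (if it can be resolved) the path of the
 * affected file.
 */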
void ll_dirty_page_discard_warn(cfs_page_t *page, int ioret)
{
        char *buf, *path = NULL;
        struct dentry *dentry = NULL;
        struct ccc_object *obj = cl_inode2ccc(page->mapping->host);

        /* this can be called inside spin lock so use GFP_ATOMIC. */
        buf = (char *)__get_free_page(GFP_ATOMIC);
        if (buf != NULL) {
                dentry = d_find_alias(page->mapping->host);
                if (dentry != NULL)
                        path = ll_d_path(dentry, buf, PAGE_SIZE);
        }

        CWARN("%s: dirty page discard: %s/fid: "DFID"/%s may get corrupted "
              "(rc %d)\n", ll_get_fsname(page->mapping->host->i_sb, NULL, 0),
              s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
              PFID(&obj->cob_header.coh_lu.loh_fid),
              (path && !IS_ERR(path)) ? path : "", ioret);

        if (dentry != NULL)
                dput(dentry);

        if (buf != NULL)
                free_page((unsigned long)buf);
}