1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/llite/llite_lib.c
33  *
34  * Lustre Light Super operations
35  */
36
37 #define DEBUG_SUBSYSTEM S_LLITE
38
39 #include <linux/cpu.h>
40 #include <linux/module.h>
41 #include <linux/random.h>
42 #include <linux/statfs.h>
43 #include <linux/time.h>
44 #include <linux/types.h>
45 #include <libcfs/linux/linux-uuid.h>
46 #include <linux/version.h>
47 #include <linux/mm.h>
48 #include <linux/user_namespace.h>
49 #include <linux/delay.h>
50 #include <linux/uidgid.h>
51 #include <linux/security.h>
52
53 #ifndef HAVE_CPUS_READ_LOCK
54 #include <libcfs/linux/linux-cpu.h>
55 #endif
56 #include <uapi/linux/lustre/lustre_ioctl.h>
57 #ifdef HAVE_UAPI_LINUX_MOUNT_H
58 #include <uapi/linux/mount.h>
59 #endif
60
61 #include <lustre_ha.h>
62 #include <lustre_dlm.h>
63 #include <lprocfs_status.h>
64 #include <lustre_disk.h>
65 #include <uapi/linux/lustre/lustre_param.h>
66 #include <lustre_log.h>
67 #include <cl_object.h>
68 #include <obd_cksum.h>
69 #include "llite_internal.h"
70
71 struct kmem_cache *ll_file_data_slab;
72
73 #ifndef log2
74 #define log2(n) ffz(~(n))
75 #endif
76
77 /**
78  * If only one core is visible to Lustre, async readahead will be
79  * disabled.  To avoid massive oversubscription, use 1/2 of the
80  * active cores as the default maximum number of async readahead
81  * requests.
82  */
83 static inline unsigned int ll_get_ra_async_max_active(void)
84 {
85         return cfs_cpt_weight(cfs_cpt_tab, CFS_CPT_ANY) >> 1;
86 }
87
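/*
 * Allocate and initialize the per-mount ll_sb_info: PCC state, locks,
 * an LRU page cache sized to half of low memory, the readahead
 * workqueue, statahead defaults, and the root-squash and file-heat
 * settings.  Returns the new sbi, or an ERR_PTR on failure.
 */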
88 static struct ll_sb_info *ll_init_sbi(void)
89 {
90         struct ll_sb_info *sbi = NULL;
91         unsigned long pages;
92         unsigned long lru_page_max;
93         struct sysinfo si;
94         int rc;
95         int i;
96
97         ENTRY;
98
99         OBD_ALLOC_PTR(sbi);
100         if (sbi == NULL)
101                 RETURN(ERR_PTR(-ENOMEM));
102
103         rc = pcc_super_init(&sbi->ll_pcc_super);
104         if (rc < 0)
105                 GOTO(out_sbi, rc);
106
107         spin_lock_init(&sbi->ll_lock);
108         mutex_init(&sbi->ll_lco.lco_lock);
109         spin_lock_init(&sbi->ll_pp_extent_lock);
110         spin_lock_init(&sbi->ll_process_lock);
111         sbi->ll_rw_stats_on = 0;
112         sbi->ll_statfs_max_age = OBD_STATFS_CACHE_SECONDS;
113
114         si_meminfo(&si);
115         pages = si.totalram - si.totalhigh;
116         lru_page_max = pages / 2;
117
118         sbi->ll_ra_info.ra_async_max_active = ll_get_ra_async_max_active();
119         sbi->ll_ra_info.ll_readahead_wq =
120                 cfs_cpt_bind_workqueue("ll-readahead-wq", cfs_cpt_tab,
121                                        0, CFS_CPT_ANY,
122                                        sbi->ll_ra_info.ra_async_max_active);
123         if (IS_ERR(sbi->ll_ra_info.ll_readahead_wq))
124                 GOTO(out_pcc, rc = PTR_ERR(sbi->ll_ra_info.ll_readahead_wq));
125
126         /* initialize ll_cache data */
127         sbi->ll_cache = cl_cache_init(lru_page_max);
128         if (sbi->ll_cache == NULL)
129                 GOTO(out_destroy_ra, rc = -ENOMEM);
130
131         sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
132                                                     SBI_DEFAULT_READ_AHEAD_MAX);
133         sbi->ll_ra_info.ra_async_pages_per_file_threshold =
134                                 sbi->ll_ra_info.ra_max_pages_per_file;
135         sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
136         sbi->ll_ra_info.ra_max_read_ahead_whole_pages = -1;
137         atomic_set(&sbi->ll_ra_info.ra_async_inflight, 0);
138
139         sbi->ll_flags |= LL_SBI_VERBOSE;
140 #ifdef ENABLE_CHECKSUM
141         sbi->ll_flags |= LL_SBI_CHECKSUM;
142 #endif
143 #ifdef ENABLE_FLOCK
144         sbi->ll_flags |= LL_SBI_FLOCK;
145 #endif
146
147 #ifdef HAVE_LRU_RESIZE_SUPPORT
148         sbi->ll_flags |= LL_SBI_LRU_RESIZE;
149 #endif
150         sbi->ll_flags |= LL_SBI_LAZYSTATFS;
151
152         for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
153                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
154                                pp_r_hist.oh_lock);
155                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
156                                pp_w_hist.oh_lock);
157         }
158
159         /* metadata statahead is enabled by default */
160         sbi->ll_sa_running_max = LL_SA_RUNNING_DEF;
161         sbi->ll_sa_max = LL_SA_RPC_DEF;
162         atomic_set(&sbi->ll_sa_total, 0);
163         atomic_set(&sbi->ll_sa_wrong, 0);
164         atomic_set(&sbi->ll_sa_running, 0);
165         atomic_set(&sbi->ll_agl_total, 0);
166         sbi->ll_flags |= LL_SBI_AGL_ENABLED;
167         sbi->ll_flags |= LL_SBI_FAST_READ;
168         sbi->ll_flags |= LL_SBI_TINY_WRITE;
169         ll_sbi_set_encrypt(sbi, true);
170
171         /* root squash */
172         sbi->ll_squash.rsi_uid = 0;
173         sbi->ll_squash.rsi_gid = 0;
174         INIT_LIST_HEAD(&sbi->ll_squash.rsi_nosquash_nids);
175         spin_lock_init(&sbi->ll_squash.rsi_lock);
176
177         /* Per-filesystem file heat */
178         sbi->ll_heat_decay_weight = SBI_DEFAULT_HEAT_DECAY_WEIGHT;
179         sbi->ll_heat_period_second = SBI_DEFAULT_HEAT_PERIOD_SECOND;
180         RETURN(sbi);
181 out_destroy_ra:
182         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
183 out_pcc:
184         pcc_super_fini(&sbi->ll_pcc_super);
185 out_sbi:
186         OBD_FREE_PTR(sbi);
187         RETURN(ERR_PTR(rc));
188 }
189
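/*
 * Tear down what ll_init_sbi() set up: the nosquash NID list, the
 * readahead workqueue, the cl_cache reference and the PCC state,
 * then free the ll_sb_info itself.
 */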
190 static void ll_free_sbi(struct super_block *sb)
191 {
192         struct ll_sb_info *sbi = ll_s2sbi(sb);
193         ENTRY;
194
195         if (sbi != NULL) {
196                 if (!list_empty(&sbi->ll_squash.rsi_nosquash_nids))
197                         cfs_free_nidlist(&sbi->ll_squash.rsi_nosquash_nids);
198                 if (sbi->ll_ra_info.ll_readahead_wq)
199                         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
200                 if (sbi->ll_cache != NULL) {
201                         cl_cache_decref(sbi->ll_cache);
202                         sbi->ll_cache = NULL;
203                 }
204                 pcc_super_fini(&sbi->ll_pcc_super);
205                 OBD_FREE(sbi, sizeof(*sbi));
206         }
207         EXIT;
208 }
209
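/*
 * Connect this super block to its metadata (md) and data (dt) targets,
 * negotiate the connect flags with both, fetch the root FID and its
 * attributes, and build the root dentry.  Called from ll_fill_super()
 * once the config llog has set up the client obd devices.
 */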
210 static int client_common_fill_super(struct super_block *sb, char *md, char *dt)
211 {
212         struct inode *root = NULL;
213         struct ll_sb_info *sbi = ll_s2sbi(sb);
214         struct obd_statfs *osfs = NULL;
215         struct ptlrpc_request *request = NULL;
216         struct obd_connect_data *data = NULL;
217         struct obd_uuid *uuid;
218         struct md_op_data *op_data;
219         struct lustre_md lmd;
220         u64 valid;
221         int size, err, checksum;
222
223         ENTRY;
224         sbi->ll_md_obd = class_name2obd(md);
225         if (!sbi->ll_md_obd) {
226                 CERROR("MD %s: not setup or attached\n", md);
227                 RETURN(-EINVAL);
228         }
229
230         OBD_ALLOC_PTR(data);
231         if (data == NULL)
232                 RETURN(-ENOMEM);
233
234         OBD_ALLOC_PTR(osfs);
235         if (osfs == NULL) {
236                 OBD_FREE_PTR(data);
237                 RETURN(-ENOMEM);
238         }
239
240         /* pass client page size via ocd_grant_blkbits; the server should
241          * report back its backend blocksize for grant calculation purposes */
242         data->ocd_grant_blkbits = PAGE_SHIFT;
243
244         /* indicate MDT features supported by this client */
245         data->ocd_connect_flags = OBD_CONNECT_IBITS    | OBD_CONNECT_NODEVOH  |
246                                   OBD_CONNECT_ATTRFID  | OBD_CONNECT_GRANT |
247                                   OBD_CONNECT_VERSION  | OBD_CONNECT_BRW_SIZE |
248                                   OBD_CONNECT_SRVLOCK  | OBD_CONNECT_TRUNCLOCK|
249                                   OBD_CONNECT_MDS_CAPA | OBD_CONNECT_OSS_CAPA |
250                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID     |
251                                   OBD_CONNECT_AT       | OBD_CONNECT_LOV_V3   |
252                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
253                                   OBD_CONNECT_64BITHASH |
254                                   OBD_CONNECT_EINPROGRESS |
255                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
256                                   OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS|
257                                   OBD_CONNECT_MAX_EASIZE |
258                                   OBD_CONNECT_FLOCK_DEAD |
259                                   OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK |
260                                   OBD_CONNECT_OPEN_BY_FID |
261                                   OBD_CONNECT_DIR_STRIPE |
262                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_CKSUM |
263                                   OBD_CONNECT_SUBTREE |
264                                   OBD_CONNECT_MULTIMODRPCS |
265                                   OBD_CONNECT_GRANT_PARAM |
266                                   OBD_CONNECT_SHORTIO | OBD_CONNECT_FLAGS2;
267
268         data->ocd_connect_flags2 = OBD_CONNECT2_DIR_MIGRATE |
269                                    OBD_CONNECT2_SUM_STATFS |
270                                    OBD_CONNECT2_OVERSTRIPING |
271                                    OBD_CONNECT2_FLR |
272                                    OBD_CONNECT2_LOCK_CONVERT |
273                                    OBD_CONNECT2_ARCHIVE_ID_ARRAY |
274                                    OBD_CONNECT2_INC_XID |
275                                    OBD_CONNECT2_LSOM |
276                                    OBD_CONNECT2_ASYNC_DISCARD |
277                                    OBD_CONNECT2_PCC |
278                                    OBD_CONNECT2_CRUSH;
279
280 #ifdef HAVE_LRU_RESIZE_SUPPORT
281         if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
282                 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
283 #endif
284         data->ocd_connect_flags |= OBD_CONNECT_ACL_FLAGS;
285
286         data->ocd_cksum_types = obd_cksum_types_supported_client();
287
288         if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
289                 /* flag mdc connection as lightweight, only used for test
290                  * purposes, use with care */
291                 data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
292
293         data->ocd_ibits_known = MDS_INODELOCK_FULL;
294         data->ocd_version = LUSTRE_VERSION_CODE;
295
296         if (sb->s_flags & SB_RDONLY)
297                 data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
298         if (sbi->ll_flags & LL_SBI_USER_XATTR)
299                 data->ocd_connect_flags |= OBD_CONNECT_XATTR;
300
301 #ifdef SB_NOSEC
302         /* Setting this indicates we correctly support S_NOSEC (See kernel
303          * commit 9e1f1de02c2275d7172e18dc4e7c2065777611bf)
304          */
305         sb->s_flags |= SB_NOSEC;
306 #endif
307
308         if (sbi->ll_flags & LL_SBI_FLOCK)
309                 sbi->ll_fop = &ll_file_operations_flock;
310         else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
311                 sbi->ll_fop = &ll_file_operations;
312         else
313                 sbi->ll_fop = &ll_file_operations_noflock;
314
315         /* always ping even if server suppress_pings */
316         if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
317                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
318
319         obd_connect_set_secctx(data);
320         if (ll_sbi_has_encrypt(sbi))
321                 obd_connect_set_enc(data);
322
323 #if defined(CONFIG_SECURITY)
324         data->ocd_connect_flags2 |= OBD_CONNECT2_SELINUX_POLICY;
325 #endif
326
327         data->ocd_brw_size = MD_MAX_BRW_SIZE;
328
329         err = obd_connect(NULL, &sbi->ll_md_exp, sbi->ll_md_obd,
330                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
331         if (err == -EBUSY) {
332                 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
333                                    "recovery, of which this client is not a "
334                                    "part. Please wait for recovery to complete,"
335                                    " abort, or time out.\n", md);
336                 GOTO(out, err);
337         } else if (err) {
338                 CERROR("cannot connect to %s: rc = %d\n", md, err);
339                 GOTO(out, err);
340         }
341
342         sbi->ll_md_exp->exp_connect_data = *data;
343
344         err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
345                            LUSTRE_SEQ_METADATA);
346         if (err) {
347                 CERROR("%s: Can't init metadata layer FID infrastructure, "
348                        "rc = %d\n", sbi->ll_md_exp->exp_obd->obd_name, err);
349                 GOTO(out_md, err);
350         }
351
352         /* For mount, we only need fs info from MDT0; in DNE this also
353          * ensures the client can be mounted as long as MDT0 is
354          * available */
355         err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
356                         ktime_get_seconds() - sbi->ll_statfs_max_age,
357                         OBD_STATFS_FOR_MDT0);
358         if (err)
359                 GOTO(out_md_fid, err);
360
361         /* This needs to be after statfs to ensure connect has finished.
362          * Note that "data" does NOT contain the valid connect reply.
363          * If connecting to a 1.8 server there will be no LMV device, so
364          * we can access the MDC export directly and exp_connect_flags will
365          * be non-zero, but if accessing an upgraded 2.1 server it will
366          * have the correct flags filled in.
367          * XXX: fill in the LMV exp_connect_flags from MDC(s). */
368         valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
369         if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
370             valid != CLIENT_CONNECT_MDT_REQD) {
371                 char *buf;
372
373                 OBD_ALLOC_WAIT(buf, PAGE_SIZE);
374                 obd_connect_flags2str(buf, PAGE_SIZE,
375                                       valid ^ CLIENT_CONNECT_MDT_REQD, 0, ",");
376                 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support "
377                                    "feature(s) needed for correct operation "
378                                    "of this client (%s). Please upgrade "
379                                    "server or downgrade client.\n",
380                                    sbi->ll_md_exp->exp_obd->obd_name, buf);
381                 OBD_FREE(buf, PAGE_SIZE);
382                 GOTO(out_md_fid, err = -EPROTO);
383         }
384
385         size = sizeof(*data);
386         err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
387                            KEY_CONN_DATA,  &size, data);
388         if (err) {
389                 CERROR("%s: Get connect data failed: rc = %d\n",
390                        sbi->ll_md_exp->exp_obd->obd_name, err);
391                 GOTO(out_md_fid, err);
392         }
393
394         LASSERT(osfs->os_bsize);
395         sb->s_blocksize = osfs->os_bsize;
396         sb->s_blocksize_bits = log2(osfs->os_bsize);
397         sb->s_magic = LL_SUPER_MAGIC;
398         sb->s_maxbytes = MAX_LFS_FILESIZE;
399         sbi->ll_namelen = osfs->os_namelen;
400         sbi->ll_mnt.mnt = current->fs->root.mnt;
401
402         if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
403             !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
404                 LCONSOLE_INFO("Disabling user_xattr feature because "
405                               "it is not supported on the server\n");
406                 sbi->ll_flags &= ~LL_SBI_USER_XATTR;
407         }
408
409         if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
410 #ifdef SB_POSIXACL
411                 sb->s_flags |= SB_POSIXACL;
412 #endif
413                 sbi->ll_flags |= LL_SBI_ACL;
414         } else {
415                 LCONSOLE_INFO("client wants to enable acl, but mdt does not support it!\n");
416 #ifdef SB_POSIXACL
417                 sb->s_flags &= ~SB_POSIXACL;
418 #endif
419                 sbi->ll_flags &= ~LL_SBI_ACL;
420         }
421
422         if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
423                 sbi->ll_flags |= LL_SBI_64BIT_HASH;
424
425         if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK)
426                 sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;
427
428         if (obd_connect_has_secctx(data))
429                 sbi->ll_flags |= LL_SBI_FILE_SECCTX;
430
431         if (ll_sbi_has_encrypt(sbi) && !obd_connect_has_enc(data)) {
432                 if (ll_sbi_has_test_dummy_encryption(sbi))
433                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
434                                       sbi->ll_fsname,
435                                       sbi->ll_md_exp->exp_obd->obd_name);
436                 ll_sbi_set_encrypt(sbi, false);
437         }
438
439         if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
440                 if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
441                         LCONSOLE_INFO("%s: disabling xattr cache due to "
442                                       "unknown maximum xattr size.\n", dt);
443                 } else if (!sbi->ll_xattr_cache_set) {
444                         /* If xattr_cache is already set (no matter 0 or 1)
445                          * while processing the llog, it won't be enabled here. */
446                         sbi->ll_flags |= LL_SBI_XATTR_CACHE;
447                         sbi->ll_xattr_cache_enabled = 1;
448                 }
449         }
450
451         sbi->ll_dt_obd = class_name2obd(dt);
452         if (!sbi->ll_dt_obd) {
453                 CERROR("DT %s: not setup or attached\n", dt);
454                 GOTO(out_md_fid, err = -ENODEV);
455         }
456
457         /* pass client page size via ocd_grant_blkbits; the server should
458          * report back its backend blocksize for grant calculation purposes */
459         data->ocd_grant_blkbits = PAGE_SHIFT;
460
461         /* indicate OST features supported by this client */
462         data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
463                                   OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
464                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
465                                   OBD_CONNECT_SRVLOCK | OBD_CONNECT_TRUNCLOCK|
466                                   OBD_CONNECT_AT | OBD_CONNECT_OSS_CAPA |
467                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
468                                   OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
469                                   OBD_CONNECT_EINPROGRESS |
470                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
471                                   OBD_CONNECT_LAYOUTLOCK |
472                                   OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK |
473                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_SHORTIO |
474                                   OBD_CONNECT_FLAGS2 | OBD_CONNECT_GRANT_SHRINK;
475         data->ocd_connect_flags2 = OBD_CONNECT2_LOCKAHEAD |
476                                    OBD_CONNECT2_INC_XID;
477
478         if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_GRANT_PARAM))
479                 data->ocd_connect_flags |= OBD_CONNECT_GRANT_PARAM;
480
481         /* OBD_CONNECT_CKSUM should always be set, even if checksums are
482          * disabled by default, because it can still be enabled on the
483          * fly via /sys. As a consequence, we still need to come to an
484          * agreement on the supported algorithms at connect time
485          */
486         data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
487
488         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
489                 data->ocd_cksum_types = OBD_CKSUM_ADLER;
490         else
491                 data->ocd_cksum_types = obd_cksum_types_supported_client();
492
493 #ifdef HAVE_LRU_RESIZE_SUPPORT
494         data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
495 #endif
496         /* always ping even if server suppress_pings */
497         if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
498                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
499
500         if (ll_sbi_has_encrypt(sbi))
501                 obd_connect_set_enc(data);
502
503         CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d "
504                "ocd_grant: %d\n", data->ocd_connect_flags,
505                data->ocd_version, data->ocd_grant);
506
507         sbi->ll_dt_obd->obd_upcall.onu_owner = &sbi->ll_lco;
508         sbi->ll_dt_obd->obd_upcall.onu_upcall = cl_ocd_update;
509
510         data->ocd_brw_size = DT_MAX_BRW_SIZE;
511
512         err = obd_connect(NULL, &sbi->ll_dt_exp, sbi->ll_dt_obd,
513                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
514         if (err == -EBUSY) {
515                 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
516                                    "recovery, of which this client is not a "
517                                    "part.  Please wait for recovery to "
518                                    "complete, abort, or time out.\n", dt);
519                 GOTO(out_md, err);
520         } else if (err) {
521                 CERROR("%s: Cannot connect to %s: rc = %d\n",
522                        sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
523                 GOTO(out_md, err);
524         }
525
526         if (ll_sbi_has_encrypt(sbi) &&
527             !obd_connect_has_enc(&sbi->ll_dt_obd->u.lov.lov_ocd)) {
528                 if (ll_sbi_has_test_dummy_encryption(sbi))
529                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
530                                       sbi->ll_fsname, dt);
531                 ll_sbi_set_encrypt(sbi, false);
532         } else if (ll_sbi_has_test_dummy_encryption(sbi)) {
533                 LCONSOLE_WARN("Test dummy encryption mode enabled\n");
534         }
535
536         sbi->ll_dt_exp->exp_connect_data = *data;
537
538         /* Don't change value if it was specified in the config log */
539         if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages == -1) {
540                 sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
541                         max_t(unsigned long, SBI_DEFAULT_READ_AHEAD_WHOLE_MAX,
542                               (data->ocd_brw_size >> PAGE_SHIFT));
543                 if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages >
544                     sbi->ll_ra_info.ra_max_pages_per_file)
545                         sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
546                                 sbi->ll_ra_info.ra_max_pages_per_file;
547         }
548
549         err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
550                            LUSTRE_SEQ_METADATA);
551         if (err) {
552                 CERROR("%s: Can't init data layer FID infrastructure, "
553                        "rc = %d\n", sbi->ll_dt_exp->exp_obd->obd_name, err);
554                 GOTO(out_dt, err);
555         }
556
557         mutex_lock(&sbi->ll_lco.lco_lock);
558         sbi->ll_lco.lco_flags = data->ocd_connect_flags;
559         sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
560         sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
561         mutex_unlock(&sbi->ll_lco.lco_lock);
562
563         fid_zero(&sbi->ll_root_fid);
564         err = md_get_root(sbi->ll_md_exp, get_mount_fileset(sb),
565                            &sbi->ll_root_fid);
566         if (err) {
567                 CERROR("cannot mds_connect: rc = %d\n", err);
568                 GOTO(out_lock_cn_cb, err);
569         }
570         if (!fid_is_sane(&sbi->ll_root_fid)) {
571                 CERROR("%s: Invalid root fid "DFID" during mount\n",
572                        sbi->ll_md_exp->exp_obd->obd_name,
573                        PFID(&sbi->ll_root_fid));
574                 GOTO(out_lock_cn_cb, err = -EINVAL);
575         }
576         CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
577
578         sb->s_op = &lustre_super_operations;
579         sb->s_xattr = ll_xattr_handlers;
580 #if THREAD_SIZE >= 8192 /*b=17630*/
581         sb->s_export_op = &lustre_export_operations;
582 #endif
583 #ifdef HAVE_LUSTRE_CRYPTO
584         llcrypt_set_ops(sb, &lustre_cryptops);
585 #endif
586
587         /* make root inode
588          * XXX: move this to after cbd setup? */
589         valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE;
590         if (sbi->ll_flags & LL_SBI_ACL)
591                 valid |= OBD_MD_FLACL;
592
593         OBD_ALLOC_PTR(op_data);
594         if (op_data == NULL)
595                 GOTO(out_lock_cn_cb, err = -ENOMEM);
596
597         op_data->op_fid1 = sbi->ll_root_fid;
598         op_data->op_mode = 0;
599         op_data->op_valid = valid;
600
601         err = md_getattr(sbi->ll_md_exp, op_data, &request);
602
603         OBD_FREE_PTR(op_data);
604         if (err) {
605                 CERROR("%s: md_getattr failed for root: rc = %d\n",
606                        sbi->ll_md_exp->exp_obd->obd_name, err);
607                 GOTO(out_lock_cn_cb, err);
608         }
609
610         err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
611                                sbi->ll_md_exp, &lmd);
612         if (err) {
613                 CERROR("failed to understand root inode md: rc = %d\n", err);
614                 ptlrpc_req_finished(request);
615                 GOTO(out_lock_cn_cb, err);
616         }
617
618         LASSERT(fid_is_sane(&sbi->ll_root_fid));
619         root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid,
620                                             sbi->ll_flags & LL_SBI_32BIT_API),
621                        &lmd);
622         md_free_lustre_md(sbi->ll_md_exp, &lmd);
623         ptlrpc_req_finished(request);
624
625         if (IS_ERR(root)) {
626                 lmd_clear_acl(&lmd);
627                 err = IS_ERR(root) ? PTR_ERR(root) : -EBADF;
628                 root = NULL;
629                 CERROR("%s: bad ll_iget() for root: rc = %d\n",
630                        sbi->ll_fsname, err);
631                 GOTO(out_root, err);
632         }
633
634         checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
635         if (sbi->ll_checksum_set) {
636                 err = obd_set_info_async(NULL, sbi->ll_dt_exp,
637                                          sizeof(KEY_CHECKSUM), KEY_CHECKSUM,
638                                          sizeof(checksum), &checksum, NULL);
639                 if (err) {
640                         CERROR("%s: Set checksum failed: rc = %d\n",
641                                sbi->ll_dt_exp->exp_obd->obd_name, err);
642                         GOTO(out_root, err);
643                 }
644         }
645         cl_sb_init(sb);
646
647         sb->s_root = d_make_root(root);
648         if (sb->s_root == NULL) {
649                 err = -ENOMEM;
650                 CERROR("%s: can't make root dentry: rc = %d\n",
651                        sbi->ll_fsname, err);
652                 GOTO(out_root, err);
653         }
654
655         sbi->ll_sdev_orig = sb->s_dev;
656
657         /* We set sb->s_dev equal on all lustre clients in order to support
658          * NFS export clustering.  NFSD requires that the FSID be the same
659          * on all clients. */
660         /* s_dev is also used in lt_compare() to compare two fs, but that is
661          * only a node-local comparison. */
662         uuid = obd_get_uuid(sbi->ll_md_exp);
663         if (uuid != NULL)
664                 sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
665
666         if (data != NULL)
667                 OBD_FREE_PTR(data);
668         if (osfs != NULL)
669                 OBD_FREE_PTR(osfs);
670
671         if (sbi->ll_dt_obd) {
672                 err = sysfs_create_link(&sbi->ll_kset.kobj,
673                                         &sbi->ll_dt_obd->obd_kset.kobj,
674                                         sbi->ll_dt_obd->obd_type->typ_name);
675                 if (err < 0) {
676                         CERROR("%s: could not register %s in llite: rc = %d\n",
677                                dt, sbi->ll_fsname, err);
678                         err = 0;
679                 }
680         }
681
682         if (sbi->ll_md_obd) {
683                 err = sysfs_create_link(&sbi->ll_kset.kobj,
684                                         &sbi->ll_md_obd->obd_kset.kobj,
685                                         sbi->ll_md_obd->obd_type->typ_name);
686                 if (err < 0) {
687                         CERROR("%s: could not register %s in llite: rc = %d\n",
688                                md, sbi->ll_fsname, err);
689                         err = 0;
690                 }
691         }
692
693         RETURN(err);
694 out_root:
695         if (root)
696                 iput(root);
697 out_lock_cn_cb:
698         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
699 out_dt:
700         obd_disconnect(sbi->ll_dt_exp);
701         sbi->ll_dt_exp = NULL;
702         sbi->ll_dt_obd = NULL;
703 out_md_fid:
704         obd_fid_fini(sbi->ll_md_exp->exp_obd);
705 out_md:
706         obd_disconnect(sbi->ll_md_exp);
707         sbi->ll_md_exp = NULL;
708         sbi->ll_md_obd = NULL;
709 out:
710         if (data != NULL)
711                 OBD_FREE_PTR(data);
712         if (osfs != NULL)
713                 OBD_FREE_PTR(osfs);
714         return err;
715 }
716
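/**
 * Get the maximum supported EA size.
 *
 * Queries the maximum LOV EA size from the data export, then the
 * maximum LMV EA size from the metadata export; the latter is what
 * \a lmmsize holds on return.
 *
 * \param[in] sbi       superblock info for this filesystem
 * \param[out] lmmsize  pointer to storage location for value
 *
 * \retval 0            on success
 * \retval negative     negated errno on failure
 */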
717 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
718 {
719         int size, rc;
720
721         size = sizeof(*lmmsize);
722         rc = obd_get_info(NULL, sbi->ll_dt_exp, sizeof(KEY_MAX_EASIZE),
723                           KEY_MAX_EASIZE, &size, lmmsize);
724         if (rc != 0) {
725                 CERROR("%s: cannot get max LOV EA size: rc = %d\n",
726                        sbi->ll_dt_exp->exp_obd->obd_name, rc);
727                 RETURN(rc);
728         }
729
730         CDEBUG(D_INFO, "max LOV ea size: %d\n", *lmmsize);
731
732         size = sizeof(int);
733         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
734                           KEY_MAX_EASIZE, &size, lmmsize);
735         if (rc)
736                 CERROR("Get max mdsize error rc %d\n", rc);
737
738         CDEBUG(D_INFO, "max LMV ea size: %d\n", *lmmsize);
739
740         RETURN(rc);
741 }
742
743 /**
744  * Get the value of the default_easize parameter.
745  *
746  * \see client_obd::cl_default_mds_easize
747  *
748  * \param[in] sbi       superblock info for this filesystem
749  * \param[out] lmmsize  pointer to storage location for value
750  *
751  * \retval 0            on success
752  * \retval negative     negated errno on failure
753  */
754 int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
755 {
756         int size, rc;
757
758         size = sizeof(int);
759         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
760                          KEY_DEFAULT_EASIZE, &size, lmmsize);
761         if (rc)
762                 CERROR("Get default mdsize error rc %d\n", rc);
763
764         RETURN(rc);
765 }
766
767 /**
768  * Set the default_easize parameter to the given value.
769  *
770  * \see client_obd::cl_default_mds_easize
771  *
772  * \param[in] sbi       superblock info for this filesystem
773  * \param[in] lmmsize   the size to set
774  *
775  * \retval 0            on success
776  * \retval negative     negated errno on failure
777  */
778 int ll_set_default_mdsize(struct ll_sb_info *sbi, int lmmsize)
779 {
780         int rc;
781
782         if (lmmsize < sizeof(struct lov_mds_md) ||
783             lmmsize > OBD_MAX_DEFAULT_EA_SIZE)
784                 return -EINVAL;
785
786         rc = obd_set_info_async(NULL, sbi->ll_md_exp,
787                                 sizeof(KEY_DEFAULT_EASIZE), KEY_DEFAULT_EASIZE,
788                                 sizeof(int), &lmmsize, NULL);
789
790         RETURN(rc);
791 }
792
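/*
 * Undo client_common_fill_super(): shut down the cl_object layer,
 * release the FID clients and disconnect from the data and metadata
 * exports.
 */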
793 static void client_common_put_super(struct super_block *sb)
794 {
795         struct ll_sb_info *sbi = ll_s2sbi(sb);
796         ENTRY;
797
798         cl_sb_fini(sb);
799
800         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
801         obd_disconnect(sbi->ll_dt_exp);
802         sbi->ll_dt_exp = NULL;
803
804         ll_debugfs_unregister_super(sb);
805
806         obd_fid_fini(sbi->ll_md_exp->exp_obd);
807         obd_disconnect(sbi->ll_md_exp);
808         sbi->ll_md_exp = NULL;
809
810         EXIT;
811 }
812
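/*
 * Called while the super block is being killed: restore the original
 * s_dev (it was overridden for NFS export clustering) and wait for any
 * statahead threads started on this mount to exit.
 */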
813 void ll_kill_super(struct super_block *sb)
814 {
815         struct ll_sb_info *sbi;
816         ENTRY;
817
818         /* sb not initialized? */
819         if (!(sb->s_flags & SB_ACTIVE))
820                 return;
821
822         sbi = ll_s2sbi(sb);
823         /* we need to restore s_dev, changed for clustered NFS, before put_super
824          * because new kernels cache s_dev, and changing sb->s_dev in
825          * put_super does not affect the real device removal */
826         if (sbi) {
827                 sb->s_dev = sbi->ll_sdev_orig;
828
829                 /* wait for running statahead threads to quit */
830                 while (atomic_read(&sbi->ll_sa_running) > 0)
831                         schedule_timeout_uninterruptible(
832                                 cfs_time_seconds(1) >> 3);
833         }
834
835         EXIT;
836 }
837
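/*
 * Return 'fl' if the mount option string 'data' begins with 'opt',
 * otherwise 0.  For example, ll_set_opt("flock", "flock,user_xattr",
 * LL_SBI_FLOCK) yields LL_SBI_FLOCK.
 */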
838 static inline int ll_set_opt(const char *opt, char *data, int fl)
839 {
840         if (strncmp(opt, data, strlen(opt)) != 0)
841                 return 0;
842         else
843                 return fl;
844 }
845
846 /* non-client-specific mount options are parsed in lmd_parse */
847 static int ll_options(char *options, struct ll_sb_info *sbi)
848 {
849         int tmp;
850         char *s1 = options, *s2;
851         int *flags = &sbi->ll_flags;
852         ENTRY;
853
854         if (!options)
855                 RETURN(0);
856
857         CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
858
859         while (*s1) {
860                 CDEBUG(D_SUPER, "next opt=%s\n", s1);
861                 tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
862                 if (tmp) {
863                         *flags |= tmp;
864                         goto next;
865                 }
866                 tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
867                 if (tmp) {
868                         *flags = (*flags & ~LL_SBI_LOCALFLOCK) | tmp;
869                         goto next;
870                 }
871                 tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
872                 if (tmp) {
873                         *flags = (*flags & ~LL_SBI_FLOCK) | tmp;
874                         goto next;
875                 }
876                 tmp = ll_set_opt("noflock", s1, LL_SBI_FLOCK|LL_SBI_LOCALFLOCK);
877                 if (tmp) {
878                         *flags &= ~tmp;
879                         goto next;
880                 }
881                 tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
882                 if (tmp) {
883                         *flags |= tmp;
884                         goto next;
885                 }
886                 tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
887                 if (tmp) {
888                         *flags &= ~tmp;
889                         goto next;
890                 }
891                 tmp = ll_set_opt("context", s1, 1);
892                 if (tmp)
893                         goto next;
894                 tmp = ll_set_opt("fscontext", s1, 1);
895                 if (tmp)
896                         goto next;
897                 tmp = ll_set_opt("defcontext", s1, 1);
898                 if (tmp)
899                         goto next;
900                 tmp = ll_set_opt("rootcontext", s1, 1);
901                 if (tmp)
902                         goto next;
903                 tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
904                 if (tmp) {
905                         *flags |= tmp;
906                         goto next;
907                 }
908                 tmp = ll_set_opt("nouser_fid2path", s1, LL_SBI_USER_FID2PATH);
909                 if (tmp) {
910                         *flags &= ~tmp;
911                         goto next;
912                 }
913
914                 tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
915                 if (tmp) {
916                         *flags |= tmp;
917                         sbi->ll_checksum_set = 1;
918                         goto next;
919                 }
920                 tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
921                 if (tmp) {
922                         *flags &= ~tmp;
923                         sbi->ll_checksum_set = 1;
924                         goto next;
925                 }
926                 tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
927                 if (tmp) {
928                         *flags |= tmp;
929                         goto next;
930                 }
931                 tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
932                 if (tmp) {
933                         *flags &= ~tmp;
934                         goto next;
935                 }
936                 tmp = ll_set_opt("lazystatfs", s1, LL_SBI_LAZYSTATFS);
937                 if (tmp) {
938                         *flags |= tmp;
939                         goto next;
940                 }
941                 tmp = ll_set_opt("nolazystatfs", s1, LL_SBI_LAZYSTATFS);
942                 if (tmp) {
943                         *flags &= ~tmp;
944                         goto next;
945                 }
946                 tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
947                 if (tmp) {
948                         *flags |= tmp;
949                         goto next;
950                 }
951                 tmp = ll_set_opt("verbose", s1, LL_SBI_VERBOSE);
952                 if (tmp) {
953                         *flags |= tmp;
954                         goto next;
955                 }
956                 tmp = ll_set_opt("noverbose", s1, LL_SBI_VERBOSE);
957                 if (tmp) {
958                         *flags &= ~tmp;
959                         goto next;
960                 }
961                 tmp = ll_set_opt("always_ping", s1, LL_SBI_ALWAYS_PING);
962                 if (tmp) {
963                         *flags |= tmp;
964                         goto next;
965                 }
966                 tmp = ll_set_opt("test_dummy_encryption", s1,
967                                  LL_SBI_TEST_DUMMY_ENCRYPTION);
968                 if (tmp) {
969 #ifdef HAVE_LUSTRE_CRYPTO
970                         *flags |= tmp;
971 #else
972                         LCONSOLE_WARN("Test dummy encryption mount option ignored: encryption not supported\n");
973 #endif
974                         goto next;
975                 }
976                 tmp = ll_set_opt("noencrypt", s1, LL_SBI_ENCRYPT);
977                 if (tmp) {
978 #ifdef HAVE_LUSTRE_CRYPTO
979                         *flags &= ~tmp;
980 #else
981                         LCONSOLE_WARN("noencrypt mount option ignored: encryption not supported\n");
982 #endif
983                         goto next;
984                 }
985                 LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
986                                    s1);
987                 RETURN(-EINVAL);
988
989 next:
990                 /* Find next opt */
991                 s2 = strchr(s1, ',');
992                 if (s2 == NULL)
993                         break;
994                 s1 = s2 + 1;
995         }
996         RETURN(0);
997 }
998
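/*
 * Initialize the Lustre-private part of a freshly allocated inode.
 * Directory inodes get the statahead fields; other inodes get the
 * size/truncate, glimpse, heat and PCC state instead.  lli_fid is left
 * untouched because it is set before this is called.
 */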
999 void ll_lli_init(struct ll_inode_info *lli)
1000 {
1001         lli->lli_inode_magic = LLI_INODE_MAGIC;
1002         lli->lli_flags = 0;
1003         spin_lock_init(&lli->lli_lock);
1004         lli->lli_posix_acl = NULL;
1005         /* Do not set lli_fid, it has been initialized already. */
1006         fid_zero(&lli->lli_pfid);
1007         lli->lli_mds_read_och = NULL;
1008         lli->lli_mds_write_och = NULL;
1009         lli->lli_mds_exec_och = NULL;
1010         lli->lli_open_fd_read_count = 0;
1011         lli->lli_open_fd_write_count = 0;
1012         lli->lli_open_fd_exec_count = 0;
1013         mutex_init(&lli->lli_och_mutex);
1014         spin_lock_init(&lli->lli_agl_lock);
1015         spin_lock_init(&lli->lli_layout_lock);
1016         ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
1017         lli->lli_clob = NULL;
1018
1019         init_rwsem(&lli->lli_xattrs_list_rwsem);
1020         mutex_init(&lli->lli_xattrs_enq_lock);
1021
1022         LASSERT(lli->lli_vfs_inode.i_mode != 0);
1023         if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
1024                 lli->lli_opendir_key = NULL;
1025                 lli->lli_sai = NULL;
1026                 spin_lock_init(&lli->lli_sa_lock);
1027                 lli->lli_opendir_pid = 0;
1028                 lli->lli_sa_enabled = 0;
1029                 init_rwsem(&lli->lli_lsm_sem);
1030         } else {
1031                 mutex_init(&lli->lli_size_mutex);
1032                 mutex_init(&lli->lli_setattr_mutex);
1033                 lli->lli_symlink_name = NULL;
1034                 ll_trunc_sem_init(&lli->lli_trunc_sem);
1035                 range_lock_tree_init(&lli->lli_write_tree);
1036                 init_rwsem(&lli->lli_glimpse_sem);
1037                 lli->lli_glimpse_time = ktime_set(0, 0);
1038                 INIT_LIST_HEAD(&lli->lli_agl_list);
1039                 lli->lli_agl_index = 0;
1040                 lli->lli_async_rc = 0;
1041                 spin_lock_init(&lli->lli_heat_lock);
1042                 obd_heat_clear(lli->lli_heat_instances, OBD_HEAT_COUNT);
1043                 lli->lli_heat_flags = 0;
1044                 mutex_init(&lli->lli_pcc_lock);
1045                 lli->lli_pcc_state = PCC_STATE_FL_NONE;
1046                 lli->lli_pcc_inode = NULL;
1047                 lli->lli_pcc_dsflags = PCC_DATASET_INVALID;
1048                 lli->lli_pcc_generation = 0;
1049                 mutex_init(&lli->lli_group_mutex);
1050                 lli->lli_group_users = 0;
1051                 lli->lli_group_gid = 0;
1052         }
1053         mutex_init(&lli->lli_layout_mutex);
1054         memset(lli->lli_jobid, 0, sizeof(lli->lli_jobid));
1055 }
1056
1057 #define MAX_STRING_SIZE 128
1058
1059 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1060
1061 #define LSI_BDI_INITIALIZED     0x00400000
1062
1063 #ifndef HAVE_BDI_CAP_MAP_COPY
1064 # define BDI_CAP_MAP_COPY       0
1065 #endif
1066
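/*
 * Compatibility wrapper for kernels without super_setup_bdi_name():
 * initialize and register the per-superblock backing_dev_info by hand,
 * and record that via LSI_BDI_INITIALIZED so ll_put_super() knows to
 * destroy it.
 */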
1067 static int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
1068 {
1069         struct  lustre_sb_info *lsi = s2lsi(sb);
1070         char buf[MAX_STRING_SIZE];
1071         va_list args;
1072         int err;
1073
1074         err = bdi_init(&lsi->lsi_bdi);
1075         if (err)
1076                 return err;
1077
1078         lsi->lsi_flags |= LSI_BDI_INITIALIZED;
1079         lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY;
1080         lsi->lsi_bdi.name = "lustre";
1081         va_start(args, fmt);
1082         vsnprintf(buf, MAX_STRING_SIZE, fmt, args);
1083         va_end(args);
1084         err = bdi_register(&lsi->lsi_bdi, NULL, "%s", buf);
1085
1086         if (!err)
1087                 sb->s_bdi = &lsi->lsi_bdi;
1088
1089         return err;
1090 }
1091 #endif /* !HAVE_SUPER_SETUP_BDI_NAME */
1092
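/*
 * Fill in the client super block: allocate the ll_sb_info, parse the
 * llite mount options, set up the bdi and debugfs entries, process the
 * client config llog, and finally connect to the metadata and data
 * targets named in the profile via client_common_fill_super().
 */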
1093 int ll_fill_super(struct super_block *sb)
1094 {
1095         struct  lustre_profile *lprof = NULL;
1096         struct  lustre_sb_info *lsi = s2lsi(sb);
1097         struct  ll_sb_info *sbi = NULL;
1098         char    *dt = NULL, *md = NULL;
1099         char    *profilenm = get_profile_name(sb);
1100         struct config_llog_instance *cfg;
1101         /* %p for void* in printf needs 16+2 characters: 0xffffffffffffffff */
1102         const int instlen = LUSTRE_MAXINSTANCE + 2;
1103         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1104         char name[MAX_STRING_SIZE];
1105         int md_len = 0;
1106         int dt_len = 0;
1107         uuid_t uuid;
1108         char *ptr;
1109         int len;
1110         int err;
1111
1112         ENTRY;
1113         /* for ASLR, to map between cfg_instance and hashed ptr */
1114         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1115                profilenm, cfg_instance, sb);
1116
1117         OBD_ALLOC_PTR(cfg);
1118         if (cfg == NULL)
1119                 GOTO(out_free_cfg, err = -ENOMEM);
1120
1121         /* client additional sb info */
1122         lsi->lsi_llsbi = sbi = ll_init_sbi();
1123         if (IS_ERR(sbi))
1124                 GOTO(out_free_cfg, err = PTR_ERR(sbi));
1125
1126         err = ll_options(lsi->lsi_lmd->lmd_opts, sbi);
1127         if (err)
1128                 GOTO(out_free_cfg, err);
1129
1130         /* kernels >= 2.6.38 store dentry operations in sb->s_d_op. */
1131         sb->s_d_op = &ll_d_ops;
1132
1133         /* UUID handling */
1134         generate_random_uuid(uuid.b);
1135         snprintf(sbi->ll_sb_uuid.uuid, sizeof(sbi->ll_sb_uuid), "%pU", uuid.b);
1136
1137         CDEBUG(D_CONFIG, "llite sb uuid: %s\n", sbi->ll_sb_uuid.uuid);
1138
1139         /* Get fsname */
1140         len = strlen(profilenm);
1141         ptr = strrchr(profilenm, '-');
1142         if (ptr && (strcmp(ptr, "-client") == 0))
1143                 len -= 7;
1144
1145         if (len > LUSTRE_MAXFSNAME) {
1146                 if (unlikely(len >= MAX_STRING_SIZE))
1147                         len = MAX_STRING_SIZE - 1;
1148                 strncpy(name, profilenm, len);
1149                 name[len] = '\0';
1150                 err = -ENAMETOOLONG;
1151                 CERROR("%s: fsname longer than %u characters: rc = %d\n",
1152                        name, LUSTRE_MAXFSNAME, err);
1153                 GOTO(out_free_cfg, err);
1154         }
1155         strncpy(sbi->ll_fsname, profilenm, len);
1156         sbi->ll_fsname[len] = '\0';
1157
1158         /* Mount info */
1159         snprintf(name, sizeof(name), "%.*s-%016lx", len,
1160                  profilenm, cfg_instance);
1161
1162         err = super_setup_bdi_name(sb, "%s", name);
1163         if (err)
1164                 GOTO(out_free_cfg, err);
1165
1166         /* Call ll_debugfs_register_super() before lustre_process_log()
1167          * so that "llite.*.*" params can be processed correctly.
1168          */
1169         err = ll_debugfs_register_super(sb, name);
1170         if (err < 0) {
1171                 CERROR("%s: could not register mountpoint in llite: rc = %d\n",
1172                        sbi->ll_fsname, err);
1173                 err = 0;
1174         }
1175
1176         /* The cfg_instance is a value unique to this super, in case some
1177          * joker tries to mount the same fs at two mount points.
1178          */
1179         cfg->cfg_instance = cfg_instance;
1180         cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
1181         cfg->cfg_callback = class_config_llog_handler;
1182         cfg->cfg_sub_clds = CONFIG_SUB_CLIENT;
1183         /* set up client obds */
1184         err = lustre_process_log(sb, profilenm, cfg);
1185         if (err < 0)
1186                 GOTO(out_debugfs, err);
1187
1188         /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
1189         lprof = class_get_profile(profilenm);
1190         if (lprof == NULL) {
1191                 LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be"
1192                                    " read from the MGS.  Does that filesystem "
1193                                    "exist?\n", profilenm);
1194                 GOTO(out_debugfs, err = -EINVAL);
1195         }
1196         CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
1197                lprof->lp_md, lprof->lp_dt);
1198
1199         dt_len = strlen(lprof->lp_dt) + instlen + 2;
1200         OBD_ALLOC(dt, dt_len);
1201         if (!dt)
1202                 GOTO(out_profile, err = -ENOMEM);
1203         snprintf(dt, dt_len - 1, "%s-%016lx", lprof->lp_dt, cfg_instance);
1204
1205         md_len = strlen(lprof->lp_md) + instlen + 2;
1206         OBD_ALLOC(md, md_len);
1207         if (!md)
1208                 GOTO(out_free_dt, err = -ENOMEM);
1209         snprintf(md, md_len - 1, "%s-%016lx", lprof->lp_md, cfg_instance);
1210
1211         /* connections, registrations, sb setup */
1212         err = client_common_fill_super(sb, md, dt);
1213         if (err < 0)
1214                 GOTO(out_free_md, err);
1215
1216         sbi->ll_client_common_fill_super_succeeded = 1;
1217
1218 out_free_md:
1219         if (md)
1220                 OBD_FREE(md, md_len);
1221 out_free_dt:
1222         if (dt)
1223                 OBD_FREE(dt, dt_len);
1224 out_profile:
1225         if (lprof)
1226                 class_put_profile(lprof);
1227 out_debugfs:
1228         if (err < 0)
1229                 ll_debugfs_unregister_super(sb);
1230 out_free_cfg:
1231         if (cfg)
1232                 OBD_FREE_PTR(cfg);
1233
1234         if (err)
1235                 ll_put_super(sb);
1236         else if (sbi->ll_flags & LL_SBI_VERBOSE)
1237                 LCONSOLE_WARN("Mounted %s\n", profilenm);
1238         RETURN(err);
1239 } /* ll_fill_super */
1240
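/*
 * Unmount: end the config and params llog watchers, wait for unstable
 * pages to be committed unless this is a forced unmount, disconnect
 * from the servers, and clean up every obd device created for this
 * super block.
 */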
1241 void ll_put_super(struct super_block *sb)
1242 {
1243         struct config_llog_instance cfg, params_cfg;
1244         struct obd_device *obd;
1245         struct lustre_sb_info *lsi = s2lsi(sb);
1246         struct ll_sb_info *sbi = ll_s2sbi(sb);
1247         char *profilenm = get_profile_name(sb);
1248         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1249         long ccc_count;
1250         int next, force = 1, rc = 0;
1251         ENTRY;
1252
1253         if (IS_ERR(sbi))
1254                 GOTO(out_no_sbi, 0);
1255
1256         /* Should replace instance_id with something better for ASLR */
1257         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1258                profilenm, cfg_instance, sb);
1259
1260         cfg.cfg_instance = cfg_instance;
1261         lustre_end_log(sb, profilenm, &cfg);
1262
1263         params_cfg.cfg_instance = cfg_instance;
1264         lustre_end_log(sb, PARAMS_FILENAME, &params_cfg);
1265
1266         if (sbi->ll_md_exp) {
1267                 obd = class_exp2obd(sbi->ll_md_exp);
1268                 if (obd)
1269                         force = obd->obd_force;
1270         }
1271
1272         /* Wait for unstable pages to be committed to stable storage */
1273         if (force == 0) {
1274                 rc = l_wait_event_abortable(
1275                         sbi->ll_cache->ccc_unstable_waitq,
1276                         atomic_long_read(&sbi->ll_cache->ccc_unstable_nr) == 0);
1277         }
1278
1279         ccc_count = atomic_long_read(&sbi->ll_cache->ccc_unstable_nr);
1280         if (force == 0 && rc != -ERESTARTSYS)
1281                 LASSERTF(ccc_count == 0, "count: %li\n", ccc_count);
1282
1283         /* We need to set force before the lov_disconnect in
1284          * lustre_common_put_super, since l_d cleans up osc's as well.
1285          */
1286         if (force) {
1287                 next = 0;
1288                 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
1289                                                      &next)) != NULL) {
1290                         obd->obd_force = force;
1291                 }
1292         }
1293
1294         if (sbi->ll_client_common_fill_super_succeeded) {
1295                 /* Only if client_common_fill_super succeeded */
1296                 client_common_put_super(sb);
1297         }
1298
1299         next = 0;
1300         while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)))
1301                 class_manual_cleanup(obd);
1302
1303         if (sbi->ll_flags & LL_SBI_VERBOSE)
1304                 LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");
1305
1306         if (profilenm)
1307                 class_del_profile(profilenm);
1308
1309 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1310         if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
1311                 bdi_destroy(&lsi->lsi_bdi);
1312                 lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
1313         }
1314 #endif
1315
1316         ll_free_sbi(sb);
1317         lsi->lsi_llsbi = NULL;
1318 out_no_sbi:
1319         lustre_common_put_super(sb);
1320
1321         cl_env_cache_purge(~0);
1322
1323         module_put(THIS_MODULE);
1324
1325         EXIT;
1326 } /* client_put_super */
1327
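/*
 * Return an extra reference on the inode cached in the lock's resource,
 * or NULL if there is none or the cached pointer looks stale (wrong
 * lli_inode_magic).
 */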
1328 struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
1329 {
1330         struct inode *inode = NULL;
1331
1332         /* NOTE: we depend on atomic igrab() -bzzz */
1333         lock_res_and_lock(lock);
1334         if (lock->l_resource->lr_lvb_inode) {
1335                 struct ll_inode_info * lli;
1336                 lli = ll_i2info(lock->l_resource->lr_lvb_inode);
1337                 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1338                         inode = igrab(lock->l_resource->lr_lvb_inode);
1339                 } else {
1340                         inode = lock->l_resource->lr_lvb_inode;
1341                         LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ?  D_INFO :
1342                                          D_WARNING, lock, "lr_lvb_inode %p is "
1343                                          "bogus: magic %08x",
1344                                          lock->l_resource->lr_lvb_inode,
1345                                          lli->lli_inode_magic);
1346                         inode = NULL;
1347                 }
1348         }
1349         unlock_res_and_lock(lock);
1350         return inode;
1351 }
1352
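/*
 * Release the striped-directory layout and the default layout cached
 * on a directory inode.
 */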
1353 void ll_dir_clear_lsm_md(struct inode *inode)
1354 {
1355         struct ll_inode_info *lli = ll_i2info(inode);
1356
1357         LASSERT(S_ISDIR(inode->i_mode));
1358
1359         if (lli->lli_lsm_md) {
1360                 lmv_free_memmd(lli->lli_lsm_md);
1361                 lli->lli_lsm_md = NULL;
1362         }
1363
1364         if (lli->lli_default_lsm_md) {
1365                 lmv_free_memmd(lli->lli_default_lsm_md);
1366                 lli->lli_default_lsm_md = NULL;
1367         }
1368 }
1369
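/*
 * Get or create an inode for one stripe ("slave") of a striped
 * directory.  The slave inode is instantiated directly from its FID;
 * lli_pfid records the FID of the master object.
 */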
1370 static struct inode *ll_iget_anon_dir(struct super_block *sb,
1371                                       const struct lu_fid *fid,
1372                                       struct lustre_md *md)
1373 {
1374         struct ll_sb_info       *sbi = ll_s2sbi(sb);
1375         struct mdt_body         *body = md->body;
1376         struct inode            *inode;
1377         ino_t                   ino;
1378         ENTRY;
1379
1380         ino = cl_fid_build_ino(fid, sbi->ll_flags & LL_SBI_32BIT_API);
1381         inode = iget_locked(sb, ino);
1382         if (inode == NULL) {
1383                 CERROR("%s: failed get simple inode "DFID": rc = -ENOENT\n",
1384                        sbi->ll_fsname, PFID(fid));
1385                 RETURN(ERR_PTR(-ENOENT));
1386         }
1387
1388         if (inode->i_state & I_NEW) {
1389                 struct ll_inode_info *lli = ll_i2info(inode);
1390                 struct lmv_stripe_md *lsm = md->lmv;
1391
1392                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
1393                                 (body->mbo_mode & S_IFMT);
1394                 LASSERTF(S_ISDIR(inode->i_mode), "Not a slave inode "DFID"\n",
1395                          PFID(fid));
1396
1397                 inode->i_mtime.tv_sec = 0;
1398                 inode->i_atime.tv_sec = 0;
1399                 inode->i_ctime.tv_sec = 0;
1400                 inode->i_rdev = 0;
1401
1402 #ifdef HAVE_BACKING_DEV_INFO
1403                 /* initializing backing dev info. */
1404                 inode->i_mapping->backing_dev_info =
1405                                                 &s2lsi(inode->i_sb)->lsi_bdi;
1406 #endif
1407                 inode->i_op = &ll_dir_inode_operations;
1408                 inode->i_fop = &ll_dir_operations;
1409                 lli->lli_fid = *fid;
1410                 ll_lli_init(lli);
1411
1412                 LASSERT(lsm != NULL);
1413                 /* master object FID */
1414                 lli->lli_pfid = body->mbo_fid1;
1415                 CDEBUG(D_INODE, "lli %p slave "DFID" master "DFID"\n",
1416                        lli, PFID(fid), PFID(&lli->lli_pfid));
1417                 unlock_new_inode(inode);
1418         }
1419
1420         RETURN(inode);
1421 }
1422
1423 static int ll_init_lsm_md(struct inode *inode, struct lustre_md *md)
1424 {
1425         struct lu_fid *fid;
1426         struct lmv_stripe_md *lsm = md->lmv;
1427         struct ll_inode_info *lli = ll_i2info(inode);
1428         int i;
1429
1430         LASSERT(lsm != NULL);
1431
1432         CDEBUG(D_INODE, "%s: "DFID" set dir layout:\n",
1433                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1434         lsm_md_dump(D_INODE, lsm);
1435
1436         if (!lmv_dir_striped(lsm))
1437                 goto out;
1438
1439         /* XXX this lsm_root initialization should be done in the
1440          * LMV layer, but it needs ll_iget(), so keep it here
1441          * for now. */
1442         for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
1443                 fid = &lsm->lsm_md_oinfo[i].lmo_fid;
1444                 LASSERT(lsm->lsm_md_oinfo[i].lmo_root == NULL);
1445
1446                 if (!fid_is_sane(fid))
1447                         continue;
1448
1449                 /* Unfortunately ll_iget will call ll_update_inode,
1450                  * where the initialization of a slave inode is slightly
1451                  * different, so it resets lsm_md to NULL to avoid
1452                  * initializing the lsm for the slave inode. */
1453                 lsm->lsm_md_oinfo[i].lmo_root =
1454                                 ll_iget_anon_dir(inode->i_sb, fid, md);
1455                 if (IS_ERR(lsm->lsm_md_oinfo[i].lmo_root)) {
1456                         int rc = PTR_ERR(lsm->lsm_md_oinfo[i].lmo_root);
1457
1458                         lsm->lsm_md_oinfo[i].lmo_root = NULL;
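                        /* undo: drop the references on the slave inodes
                         * already grabbed for earlier stripes */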
1459                         while (i-- > 0) {
1460                                 iput(lsm->lsm_md_oinfo[i].lmo_root);
1461                                 lsm->lsm_md_oinfo[i].lmo_root = NULL;
1462                         }
1463                         return rc;
1464                 }
1465         }
1466 out:
1467         lli->lli_lsm_md = lsm;
1468
1469         return 0;
1470 }
1471
1472 static void ll_update_default_lsm_md(struct inode *inode, struct lustre_md *md)
1473 {
1474         struct ll_inode_info *lli = ll_i2info(inode);
1475
1476         if (!md->default_lmv) {
1477                 /* clear default lsm */
1478                 if (lli->lli_default_lsm_md) {
1479                         down_write(&lli->lli_lsm_sem);
1480                         if (lli->lli_default_lsm_md) {
1481                                 lmv_free_memmd(lli->lli_default_lsm_md);
1482                                 lli->lli_default_lsm_md = NULL;
1483                         }
1484                         up_write(&lli->lli_lsm_sem);
1485                 }
1486         } else if (lli->lli_default_lsm_md) {
1487                 /* update the default lsm if it has changed */
1488                 down_read(&lli->lli_lsm_sem);
1489                 if (lli->lli_default_lsm_md &&
1490                     !lsm_md_eq(lli->lli_default_lsm_md, md->default_lmv)) {
1491                         up_read(&lli->lli_lsm_sem);
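                        /* the rwsem cannot be upgraded atomically, so
                         * re-check lli_default_lsm_md under the write
                         * lock in case another thread freed it while
                         * the lock was dropped */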
1492                         down_write(&lli->lli_lsm_sem);
1493                         if (lli->lli_default_lsm_md)
1494                                 lmv_free_memmd(lli->lli_default_lsm_md);
1495                         lli->lli_default_lsm_md = md->default_lmv;
1496                         lsm_md_dump(D_INODE, md->default_lmv);
1497                         md->default_lmv = NULL;
1498                         up_write(&lli->lli_lsm_sem);
1499                 } else {
1500                         up_read(&lli->lli_lsm_sem);
1501                 }
1502         } else {
1503                 /* init default lsm */
1504                 down_write(&lli->lli_lsm_sem);
1505                 lli->lli_default_lsm_md = md->default_lmv;
1506                 lsm_md_dump(D_INODE, md->default_lmv);
1507                 md->default_lmv = NULL;
1508                 up_write(&lli->lli_lsm_sem);
1509         }
1510 }
1511
1512 static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
1513 {
1514         struct ll_inode_info *lli = ll_i2info(inode);
1515         struct lmv_stripe_md *lsm = md->lmv;
1516         struct cl_attr  *attr;
1517         int rc = 0;
1518
1519         ENTRY;
1520
1521         LASSERT(S_ISDIR(inode->i_mode));
1522         CDEBUG(D_INODE, "update lsm %p of "DFID"\n", lli->lli_lsm_md,
1523                PFID(ll_inode2fid(inode)));
1524
1525         /* update default LMV */
1526         if (md->default_lmv)
1527                 ll_update_default_lsm_md(inode, md);
1528
1529         /*
1530          * no striping information in the request: the lustre_md from the
1531          * request does not include the stripe EA, see ll_md_setattr()
1532          */
1533         if (!lsm)
1534                 RETURN(0);
1535
1536         /*
1537          * Normally the dir layout doesn't change; take only the read lock
1538          * to check that, so other MD operations are not blocked.
1539          */
1540         down_read(&lli->lli_lsm_sem);
1541
1542         /* a concurrent lookup already initialized the lsm and it is unchanged */
1543         if (lli->lli_lsm_md && lsm_md_eq(lli->lli_lsm_md, lsm))
1544                 GOTO(unlock, rc = 0);
1545
1546         /* If the dir layout doesn't match, check whether the layout version
1547          * has increased, which means the layout changed; this happens during
1548          * dir split/merge and lfsck.
1549          *
1550          * A foreign LMV should not change.
1551          */
1552         if (lli->lli_lsm_md && lmv_dir_striped(lli->lli_lsm_md) &&
1553             lsm->lsm_md_layout_version <=
1554             lli->lli_lsm_md->lsm_md_layout_version) {
1555                 CERROR("%s: "DFID" dir layout mismatch:\n",
1556                        ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1557                 lsm_md_dump(D_ERROR, lli->lli_lsm_md);
1558                 lsm_md_dump(D_ERROR, lsm);
1559                 GOTO(unlock, rc = -EINVAL);
1560         }
1561
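        /* the cached lsm is absent or stale; the rwsem cannot be upgraded
         * in place, so drop the read lock, retake it for writing, and
         * rebuild the lsm from the reply below */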
1562         up_read(&lli->lli_lsm_sem);
1563         down_write(&lli->lli_lsm_sem);
1564         /* clear existing lsm */
1565         if (lli->lli_lsm_md) {
1566                 lmv_free_memmd(lli->lli_lsm_md);
1567                 lli->lli_lsm_md = NULL;
1568         }
1569
1570         rc = ll_init_lsm_md(inode, md);
1571         up_write(&lli->lli_lsm_sem);
1572
1573         if (rc)
1574                 RETURN(rc);
1575
1576         /* set md->lmv to NULL, so that the later freeing of the lustre_md
1577          * will not free this lsm.
1578          */
1579         md->lmv = NULL;
1580
1581         /* md_merge_attr() may take a long time; since the lsm is already
1582          * set, switch back to the read lock.
1583          */
1584         down_read(&lli->lli_lsm_sem);
1585
1586         if (!lmv_dir_striped(lli->lli_lsm_md))
1587                 GOTO(unlock, rc = 0);
1588
1589         OBD_ALLOC_PTR(attr);
1590         if (!attr)
1591                 GOTO(unlock, rc = -ENOMEM);
1592
1593         /* validate the lsm */
1594         rc = md_merge_attr(ll_i2mdexp(inode), &lli->lli_fid, lli->lli_lsm_md,
1595                            attr, ll_md_blocking_ast);
1596         if (!rc) {
1597                 if (md->body->mbo_valid & OBD_MD_FLNLINK)
1598                         md->body->mbo_nlink = attr->cat_nlink;
1599                 if (md->body->mbo_valid & OBD_MD_FLSIZE)
1600                         md->body->mbo_size = attr->cat_size;
1601                 if (md->body->mbo_valid & OBD_MD_FLATIME)
1602                         md->body->mbo_atime = attr->cat_atime;
1603                 if (md->body->mbo_valid & OBD_MD_FLCTIME)
1604                         md->body->mbo_ctime = attr->cat_ctime;
1605                 if (md->body->mbo_valid & OBD_MD_FLMTIME)
1606                         md->body->mbo_mtime = attr->cat_mtime;
1607         }
1608
1609         OBD_FREE_PTR(attr);
1610         GOTO(unlock, rc);
1611 unlock:
1612         up_read(&lli->lli_lsm_sem);
1613
1614         return rc;
1615 }
1616
1617 void ll_clear_inode(struct inode *inode)
1618 {
1619         struct ll_inode_info *lli = ll_i2info(inode);
1620         struct ll_sb_info *sbi = ll_i2sbi(inode);
1621
1622         ENTRY;
1623
1624         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
1625                PFID(ll_inode2fid(inode)), inode);
1626
1627         if (S_ISDIR(inode->i_mode)) {
1628                 /* these should have been cleared in ll_file_release */
1629                 LASSERT(lli->lli_opendir_key == NULL);
1630                 LASSERT(lli->lli_sai == NULL);
1631                 LASSERT(lli->lli_opendir_pid == 0);
1632         } else {
1633                 pcc_inode_free(inode);
1634         }
1635
1636         md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
1637
1638         LASSERT(!lli->lli_open_fd_write_count);
1639         LASSERT(!lli->lli_open_fd_read_count);
1640         LASSERT(!lli->lli_open_fd_exec_count);
1641
1642         if (lli->lli_mds_write_och)
1643                 ll_md_real_close(inode, FMODE_WRITE);
1644         if (lli->lli_mds_exec_och)
1645                 ll_md_real_close(inode, FMODE_EXEC);
1646         if (lli->lli_mds_read_och)
1647                 ll_md_real_close(inode, FMODE_READ);
1648
1649         if (S_ISLNK(inode->i_mode) && lli->lli_symlink_name) {
1650                 OBD_FREE(lli->lli_symlink_name,
1651                          strlen(lli->lli_symlink_name) + 1);
1652                 lli->lli_symlink_name = NULL;
1653         }
1654
1655         ll_xattr_cache_destroy(inode);
1656
1657         forget_all_cached_acls(inode);
1658         lli_clear_acl(lli);
1659         lli->lli_inode_magic = LLI_INODE_DEAD;
1660
1661         if (S_ISDIR(inode->i_mode))
1662                 ll_dir_clear_lsm_md(inode);
1663         else if (S_ISREG(inode->i_mode) && !is_bad_inode(inode))
1664                 LASSERT(list_empty(&lli->lli_agl_list));
1665
1666         /*
1667          * XXX This has to be done before lsm is freed below, because
1668          * cl_object still uses inode lsm.
1669          */
1670         cl_inode_fini(inode);
1671
1672         llcrypt_put_encryption_info(inode);
1673
1674         EXIT;
1675 }
1676
1677 static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data)
1678 {
1679         struct lustre_md md;
1680         struct inode *inode = dentry->d_inode;
1681         struct ll_sb_info *sbi = ll_i2sbi(inode);
1682         struct ptlrpc_request *request = NULL;
1683         int rc, ia_valid;
1684         ENTRY;
1685
1686         op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
1687                                      LUSTRE_OPC_ANY, NULL);
1688         if (IS_ERR(op_data))
1689                 RETURN(PTR_ERR(op_data));
1690
1691         rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &request);
1692         if (rc) {
1693                 ptlrpc_req_finished(request);
1694                 if (rc == -ENOENT) {
1695                         clear_nlink(inode);
1696                         /* Unlinked special device node? Or just a race?
1697                          * Pretend we have done everything. */
1698                         if (!S_ISREG(inode->i_mode) &&
1699                             !S_ISDIR(inode->i_mode)) {
1700                                 ia_valid = op_data->op_attr.ia_valid;
1701                                 op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
1702                                 rc = simple_setattr(dentry, &op_data->op_attr);
1703                                 op_data->op_attr.ia_valid = ia_valid;
1704                         }
1705                 } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
1706                         CERROR("md_setattr fails: rc = %d\n", rc);
1707                 }
1708                 RETURN(rc);
1709         }
1710
1711         rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
1712                               sbi->ll_md_exp, &md);
1713         if (rc) {
1714                 ptlrpc_req_finished(request);
1715                 RETURN(rc);
1716         }
1717
1718         ia_valid = op_data->op_attr.ia_valid;
1719         /* The inode size will be set in ll_setattr_ost(); it can't be done
1720          * now since the dirty cache is not cleared yet. */
1721         op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
1722         if (S_ISREG(inode->i_mode))
1723                 inode_lock(inode);
1724         rc = simple_setattr(dentry, &op_data->op_attr);
1725         if (S_ISREG(inode->i_mode))
1726                 inode_unlock(inode);
1727         op_data->op_attr.ia_valid = ia_valid;
1728
1729         rc = ll_update_inode(inode, &md);
1730         ptlrpc_req_finished(request);
1731
1732         RETURN(rc);
1733 }
1734
1735 /**
1736  * Zero a portion of the page that is part of @inode.
1737  * This implies, if necessary:
1738  * - taking cl_lock on range corresponding to concerned page
1739  * - grabbing vm page
1740  * - associating cl_page
1741  * - proceeding to clio read
1742  * - zeroing range in page
1743  * - proceeding to cl_page flush
1744  * - releasing cl_lock
1745  *
1746  * \param[in] inode     inode
1747  * \param[in] index     page index
1748  * \param[in] offset    offset in page to start zero from
1749  * \param[in] len       len to zero
1750  *
1751  * \retval 0            on success
1752  * \retval negative     errno on failure
1753  */
1754 int ll_io_zero_page(struct inode *inode, pgoff_t index, pgoff_t offset,
1755                     unsigned len)
1756 {
1757         struct ll_inode_info *lli = ll_i2info(inode);
1758         struct cl_object *clob = lli->lli_clob;
1759         __u16 refcheck;
1760         struct lu_env *env = NULL;
1761         struct cl_io *io = NULL;
1762         struct cl_page *clpage = NULL;
1763         struct page *vmpage = NULL;
1764         unsigned from = index << PAGE_SHIFT;
1765         struct cl_lock *lock = NULL;
1766         struct cl_lock_descr *descr = NULL;
1767         struct cl_2queue *queue = NULL;
1768         struct cl_sync_io *anchor = NULL;
1769         bool holdinglock = false;
1770         bool lockedbymyself = true;
1771         int rc;
1772
1773         ENTRY;
1774
1775         env = cl_env_get(&refcheck);
1776         if (IS_ERR(env))
1777                 RETURN(PTR_ERR(env));
1778
1779         io = vvp_env_thread_io(env);
1780         io->ci_obj = clob;
1781         rc = cl_io_rw_init(env, io, CIT_WRITE, from, PAGE_SIZE);
1782         if (rc)
1783                 GOTO(putenv, rc);
1784
1785         lock = vvp_env_lock(env);
1786         descr = &lock->cll_descr;
1787         descr->cld_obj   = io->ci_obj;
1788         descr->cld_start = cl_index(io->ci_obj, from);
1789         descr->cld_end   = cl_index(io->ci_obj, from + PAGE_SIZE - 1);
1790         descr->cld_mode  = CLM_WRITE;
1791         descr->cld_enq_flags = CEF_MUST | CEF_NONBLOCK;
1792
1793         /* request lock for page */
1794         rc = cl_lock_request(env, io, lock);
1795         /* -ECANCELED indicates a matching lock with a different extent
1796          * was already present, and -EEXIST indicates a matching lock
1797          * on exactly the same extent was already present.
1798          * In both cases it means we are covered.
1799          */
1800         if (rc == -ECANCELED || rc == -EEXIST)
1801                 rc = 0;
1802         else if (rc < 0)
1803                 GOTO(iofini, rc);
1804         else
1805                 holdinglock = true;
1806
1807         /* grab page */
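        /* grab_cache_page_nowait() returns the page locked, or NULL if
         * it cannot be allocated or locked without blocking */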
1808         vmpage = grab_cache_page_nowait(inode->i_mapping, index);
1809         if (vmpage == NULL)
1810                 GOTO(rellock, rc = -EOPNOTSUPP);
1811
1812         if (!PageDirty(vmpage)) {
1813                 /* associate cl_page */
1814                 clpage = cl_page_find(env, clob, vmpage->index,
1815                                       vmpage, CPT_CACHEABLE);
1816                 if (IS_ERR(clpage))
1817                         GOTO(pagefini, rc = PTR_ERR(clpage));
1818
1819                 cl_page_assume(env, io, clpage);
1820         }
1821
1822         if (!PageUptodate(vmpage) && !PageDirty(vmpage) &&
1823             !PageWriteback(vmpage)) {
1824                 /* read page */
1825                 /* set PagePrivate2 to detect special case of empty page
1826                  * in osc_brw_fini_request()
1827                  */
1828                 SetPagePrivate2(vmpage);
1829                 rc = ll_io_read_page(env, io, clpage, NULL);
1830                 if (!PagePrivate2(vmpage))
1831                         /* PagePrivate2 was cleared in osc_brw_fini_request(),
1832                          * meaning we read an empty page. In this case, in order
1833                          * to avoid allocating an unnecessary block in a truncated
1834                          * file, we must not zero and write as below. A subsequent
1835                          * server-side truncate will handle things correctly.
1836                          */
1837                         GOTO(clpfini, rc = 0);
1838                 ClearPagePrivate2(vmpage);
1839                 if (rc)
1840                         GOTO(clpfini, rc);
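                /* ll_io_read_page() may have dropped the vm page lock;
                 * try to take it again and remember whether we hold it,
                 * so the page is only unlocked below if it is ours */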
1841                 lockedbymyself = trylock_page(vmpage);
1842                 cl_page_assume(env, io, clpage);
1843         }
1844
1845         /* zero range in page */
1846         zero_user(vmpage, offset, len);
1847
1848         if (holdinglock && clpage) {
1849                 /* explicitly write newly modified page */
1850                 queue = &io->ci_queue;
1851                 cl_2queue_init(queue);
1852                 anchor = &vvp_env_info(env)->vti_anchor;
1853                 cl_sync_io_init(anchor, 1);
1854                 clpage->cp_sync_io = anchor;
1855                 cl_2queue_add(queue, clpage);
1856                 rc = cl_io_submit_rw(env, io, CRT_WRITE, queue);
1857                 if (rc)
1858                         GOTO(queuefini1, rc);
1859                 rc = cl_sync_io_wait(env, anchor, 0);
1860                 if (rc)
1861                         GOTO(queuefini2, rc);
1862                 cl_page_assume(env, io, clpage);
1863
1864 queuefini2:
1865                 cl_2queue_discard(env, io, queue);
1866 queuefini1:
1867                 cl_2queue_disown(env, io, queue);
1868                 cl_2queue_fini(env, queue);
1869         }
1870
1871 clpfini:
1872         if (clpage)
1873                 cl_page_put(env, clpage);
1874 pagefini:
1875         if (lockedbymyself) {
1876                 unlock_page(vmpage);
1877                 put_page(vmpage);
1878         }
1879 rellock:
1880         if (holdinglock)
1881                 cl_lock_release(env, lock);
1882 iofini:
1883         cl_io_fini(env, io);
1884 putenv:
1885         if (env)
1886                 cl_env_put(env, &refcheck);
1887
1888         RETURN(rc);
1889 }
1890
1891 /* If this inode has objects allocated to it (lsm != NULL), then the OST
1892  * object(s) determine the file size and mtime.  Otherwise, the MDS will
1893  * keep these values until such a time that objects are allocated for it.
1894  * We do the MDS operations first, as it is checking permissions for us.
1895  * We don't do the MDS RPC if there is nothing that we want to store there;
1896  * otherwise there is no harm in updating mtime/atime on the MDS if we are
1897  * going to do an RPC anyway.
1898  *
1899  * If we are doing a truncate, we will send the mtime and ctime updates
1900  * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
1901  * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
1902  * at the same time.
1903  *
1904  * In the case of HSM import, we only set the attributes on the MDS.
1905  */
1906 int ll_setattr_raw(struct dentry *dentry, struct iattr *attr,
1907                    enum op_xvalid xvalid, bool hsm_import)
1908 {
1909         struct inode *inode = dentry->d_inode;
1910         struct ll_inode_info *lli = ll_i2info(inode);
1911         struct md_op_data *op_data = NULL;
1912         ktime_t kstart = ktime_get();
1913         int rc = 0;
1914
1915         ENTRY;
1916
1917         CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, "
1918                "valid %x, hsm_import %d\n",
1919                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid),
1920                inode, i_size_read(inode), attr->ia_size, attr->ia_valid,
1921                hsm_import);
1922
1923         if (attr->ia_valid & ATTR_SIZE) {
1924                 /* Check new size against VFS/VM file size limit and rlimit */
1925                 rc = inode_newsize_ok(inode, attr->ia_size);
1926                 if (rc)
1927                         RETURN(rc);
1928
1929                 /* The maximum Lustre file size is variable, based on the
1930                  * OST maximum object size and number of stripes.  This
1931                  * needs another check in addition to the VFS check above. */
1932                 if (attr->ia_size > ll_file_maxbytes(inode)) {
1933                         CDEBUG(D_INODE,"file "DFID" too large %llu > %llu\n",
1934                                PFID(&lli->lli_fid), attr->ia_size,
1935                                ll_file_maxbytes(inode));
1936                         RETURN(-EFBIG);
1937                 }
1938
1939                 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
1940         }
1941
1942         /* POSIX: check permission before applying ATTR_*TIME_SET (from inode_change_ok) */
1943         if (attr->ia_valid & TIMES_SET_FLAGS) {
1944                 if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
1945                     !cfs_capable(CFS_CAP_FOWNER))
1946                         RETURN(-EPERM);
1947         }
1948
1949         /* We mark all of the fields "set" so MDS/OST does not re-set them */
1950         if (!(xvalid & OP_XVALID_CTIME_SET) &&
1951              (attr->ia_valid & ATTR_CTIME)) {
1952                 attr->ia_ctime = current_time(inode);
1953                 xvalid |= OP_XVALID_CTIME_SET;
1954         }
1955         if (!(attr->ia_valid & ATTR_ATIME_SET) &&
1956             (attr->ia_valid & ATTR_ATIME)) {
1957                 attr->ia_atime = current_time(inode);
1958                 attr->ia_valid |= ATTR_ATIME_SET;
1959         }
1960         if (!(attr->ia_valid & ATTR_MTIME_SET) &&
1961             (attr->ia_valid & ATTR_MTIME)) {
1962                 attr->ia_mtime = current_time(inode);
1963                 attr->ia_valid |= ATTR_MTIME_SET;
1964         }
1965
1966         if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
1967                 CDEBUG(D_INODE, "setting mtime %lld, ctime %lld, now = %lld\n",
1968                        (s64)attr->ia_mtime.tv_sec, (s64)attr->ia_ctime.tv_sec,
1969                        ktime_get_real_seconds());
1970
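        /* for regular files the caller holds the inode lock; it is dropped
         * here across the MDS/OST RPCs and re-taken in the out: path below */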
1971         if (S_ISREG(inode->i_mode))
1972                 inode_unlock(inode);
1973
1974         /* We always do an MDS RPC, even if we're only changing the size;
1975          * only the MDS knows whether truncate() should fail with -ETXTBSY */
1976
1977         OBD_ALLOC_PTR(op_data);
1978         if (op_data == NULL)
1979                 GOTO(out, rc = -ENOMEM);
1980
1981         if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
1982                 /* If we are changing file size, file content is
1983                  * modified, flag it.
1984                  */
1985                 xvalid |= OP_XVALID_OWNEROVERRIDE;
1986                 op_data->op_bias |= MDS_DATA_MODIFIED;
1987                 ll_file_clear_flag(lli, LLIF_DATA_MODIFIED);
1988         }
1989
1990         if (attr->ia_valid & ATTR_FILE) {
1991                 struct ll_file_data *fd = attr->ia_file->private_data;
1992
1993                 if (fd->fd_lease_och)
1994                         op_data->op_bias |= MDS_TRUNC_KEEP_LEASE;
1995         }
1996
1997         op_data->op_attr = *attr;
1998         op_data->op_xvalid = xvalid;
1999
2000         rc = ll_md_setattr(dentry, op_data);
2001         if (rc)
2002                 GOTO(out, rc);
2003
2004         if (!S_ISREG(inode->i_mode) || hsm_import)
2005                 GOTO(out, rc = 0);
2006
2007         if (attr->ia_valid & (ATTR_SIZE | ATTR_ATIME | ATTR_ATIME_SET |
2008                               ATTR_MTIME | ATTR_MTIME_SET | ATTR_CTIME) ||
2009             xvalid & OP_XVALID_CTIME_SET) {
2010                 bool cached = false;
2011
2012                 rc = pcc_inode_setattr(inode, attr, &cached);
2013                 if (cached) {
2014                         if (rc) {
2015                                 CERROR("%s: PCC inode "DFID" setattr failed: "
2016                                        "rc = %d\n",
2017                                        ll_i2sbi(inode)->ll_fsname,
2018                                        PFID(&lli->lli_fid), rc);
2019                                 GOTO(out, rc);
2020                         }
2021                 } else {
2022                         unsigned int flags = 0;
2023
2024                         /* For truncate and utimes that send attributes to the
2025                          * OSTs, setting mtime/atime to the past is performed
2026                          * under a PW [0:EOF] extent lock (new_size:EOF for
2027                          * truncate). It may seem excessive to send mtime/atime
2028                          * updates to OSTs when not setting times to the past,
2029                          * but it is necessary due to possible time
2030                          * de-synchronization between the MDT inode and OST
2031                          * objects. */
2032                         if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode) &&
2033                             attr->ia_valid & ATTR_SIZE) {
2034                                 xvalid |= OP_XVALID_FLAGS;
2035                                 flags = LUSTRE_ENCRYPT_FL;
2036                                 if (attr->ia_size & ~PAGE_MASK) {
2037                                         pgoff_t offset =
2038                                                 attr->ia_size & (PAGE_SIZE - 1);
2039
2040                                         rc = ll_io_zero_page(inode,
2041                                                     attr->ia_size >> PAGE_SHIFT,
2042                                                     offset, PAGE_SIZE - offset);
2043                                         if (rc)
2044                                                 GOTO(out, rc);
2045                                 }
2046                         }
2047                         rc = cl_setattr_ost(lli->lli_clob, attr, xvalid, flags);
2048                 }
2049         }
2050
2051         /* If the file was restored, it needs to set the dirty flag.
2052          *
2053          * We've already sent the MDS_DATA_MODIFIED flag in
2054          * ll_md_setattr() for truncate. However, the MDT refuses to
2055          * set the HS_DIRTY flag on released files, so we have to set
2056          * it again if the file has been restored. Please check how
2057          * LLIF_DATA_MODIFIED is set in vvp_io_setattr_fini().
2058          *
2059          * Note that if the file is not released, the previous
2060          * MDS_DATA_MODIFIED has taken effect and usually
2061          * LLIF_DATA_MODIFIED is not set (see vvp_io_setattr_fini()).
2062          * This way we can save an RPC for the common open + truncate
2063          * operation. */
2064         if (ll_file_test_and_clear_flag(lli, LLIF_DATA_MODIFIED)) {
2065                 struct hsm_state_set hss = {
2066                         .hss_valid = HSS_SETMASK,
2067                         .hss_setmask = HS_DIRTY,
2068                 };
2069                 int rc2;
2070
2071                 rc2 = ll_hsm_state_set(inode, &hss);
2072                 /* A truncate and a write can happen at the same time, so the
2073                  * file may be marked modified even though it was not restored
2074                  * from the released state; then ll_hsm_state_set() is not
2075                  * applicable for the file and rc2 < 0 is normal in this
2076                  * case. */
2077                 if (rc2 < 0)
2078                         CDEBUG(D_INFO, DFID "HSM set dirty failed: rc2 = %d\n",
2079                                PFID(ll_inode2fid(inode)), rc2);
2080         }
2081
2082         EXIT;
2083 out:
2084         if (op_data != NULL)
2085                 ll_finish_md_op_data(op_data);
2086
2087         if (S_ISREG(inode->i_mode)) {
2088                 inode_lock(inode);
2089                 if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
2090                         inode_dio_wait(inode);
2091                 /* Once we've got the i_mutex, it's safe to set the S_NOSEC
2092                  * flag.  ll_update_inode() (called from ll_md_setattr()) clears
2093                  * the inode flags, so there is a gap where S_NOSEC is not set.
2094                  * This can cause a writer to take the i_mutex unnecessarily,
2095                  * but this is safe to do and should be rare. */
2096                 inode_has_no_xattr(inode);
2097         }
2098
2099         if (!rc)
2100                 ll_stats_ops_tally(ll_i2sbi(inode), attr->ia_valid & ATTR_SIZE ?
2101                                         LPROC_LL_TRUNC : LPROC_LL_SETATTR,
2102                                    ktime_us_delta(ktime_get(), kstart));
2103
2104         return rc;
2105 }
2106
2107 int ll_setattr(struct dentry *de, struct iattr *attr)
2108 {
2109         int mode = de->d_inode->i_mode;
2110         enum op_xvalid xvalid = 0;
2111         int rc;
2112
2113         rc = llcrypt_prepare_setattr(de, attr);
2114         if (rc)
2115                 return rc;
2116
2117         if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
2118                               (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
2119                 xvalid |= OP_XVALID_OWNEROVERRIDE;
2120
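        /* when a mode change only strips setuid/setgid as part of a size
         * change, add ATTR_FORCE so the mode update is not rejected by
         * the permission checks */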
2121         if (((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
2122                                (ATTR_SIZE|ATTR_MODE)) &&
2123             (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
2124              (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2125               !(attr->ia_mode & S_ISGID))))
2126                 attr->ia_valid |= ATTR_FORCE;
2127
2128         if ((attr->ia_valid & ATTR_MODE) &&
2129             (mode & S_ISUID) &&
2130             !(attr->ia_mode & S_ISUID) &&
2131             !(attr->ia_valid & ATTR_KILL_SUID))
2132                 attr->ia_valid |= ATTR_KILL_SUID;
2133
2134         if ((attr->ia_valid & ATTR_MODE) &&
2135             ((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2136             !(attr->ia_mode & S_ISGID) &&
2137             !(attr->ia_valid & ATTR_KILL_SGID))
2138                 attr->ia_valid |= ATTR_KILL_SGID;
2139
2140         return ll_setattr_raw(de, attr, xvalid, false);
2141 }
2142
2143 int ll_statfs_internal(struct ll_sb_info *sbi, struct obd_statfs *osfs,
2144                        u32 flags)
2145 {
2146         struct obd_statfs obd_osfs = { 0 };
2147         time64_t max_age;
2148         int rc;
2149
2150         ENTRY;
2151         max_age = ktime_get_seconds() - sbi->ll_statfs_max_age;
2152
2153         if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
2154                 flags |= OBD_STATFS_NODELAY;
2155
2156         rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
2157         if (rc)
2158                 RETURN(rc);
2159
2160         osfs->os_type = LL_SUPER_MAGIC;
2161
2162         CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n",
2163               osfs->os_bavail, osfs->os_blocks, osfs->os_ffree, osfs->os_files);
2164
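        /* if the MDT reply already contains summed filesystem-wide
         * statistics, there is no need to also query the OSTs below */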
2165         if (osfs->os_state & OS_STATFS_SUM)
2166                 GOTO(out, rc);
2167
2168         rc = obd_statfs(NULL, sbi->ll_dt_exp, &obd_osfs, max_age, flags);
2169         if (rc) /* Possibly a filesystem with no OSTs.  Report MDT totals. */
2170                 GOTO(out, rc = 0);
2171
2172         CDEBUG(D_SUPER, "OSC blocks %llu/%llu objects %llu/%llu\n",
2173                obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
2174                obd_osfs.os_files);
2175
2176         osfs->os_bsize = obd_osfs.os_bsize;
2177         osfs->os_blocks = obd_osfs.os_blocks;
2178         osfs->os_bfree = obd_osfs.os_bfree;
2179         osfs->os_bavail = obd_osfs.os_bavail;
2180
2181         /* If we have _some_ OSTs, but don't have as many free objects on the
2182          * OSTs as inodes on the MDTs, reduce the reported number of inodes
2183          * to compensate, so that the "inodes in use" number is correct.
2184          * This should be kept in sync with lod_statfs() behaviour.
2185          */
2186         if (obd_osfs.os_files && obd_osfs.os_ffree < osfs->os_ffree) {
2187                 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
2188                                  obd_osfs.os_ffree;
2189                 osfs->os_ffree = obd_osfs.os_ffree;
2190         }
2191
2192 out:
2193         RETURN(rc);
2194 }
2195
2196 int ll_statfs(struct dentry *de, struct kstatfs *sfs)
2197 {
2198         struct super_block *sb = de->d_sb;
2199         struct obd_statfs osfs;
2200         __u64 fsid = huge_encode_dev(sb->s_dev);
2201         ktime_t kstart = ktime_get();
2202         int rc;
2203
2204         CDEBUG(D_VFSTRACE, "VFS Op:sb=%s (%p)\n", sb->s_id, sb);
2205
2206         /* Some amount of caching on the client is allowed */
2207         rc = ll_statfs_internal(ll_s2sbi(sb), &osfs, OBD_STATFS_SUM);
2208         if (rc)
2209                 return rc;
2210
2211         statfs_unpack(sfs, &osfs);
2212
2213         /* We need to downshift for all 32-bit kernels, because we can't
2214          * tell if the kernel is being called via sys_statfs64() or not.
2215          * Stop before overflowing f_bsize; beyond that it is better to
2216          * just risk EOVERFLOW if the caller is using the old sys_statfs(). */
2217         if (sizeof(long) < 8) {
2218                 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
2219                         sfs->f_bsize <<= 1;
2220
2221                         osfs.os_blocks >>= 1;
2222                         osfs.os_bfree >>= 1;
2223                         osfs.os_bavail >>= 1;
2224                 }
2225         }
2226
2227         sfs->f_blocks = osfs.os_blocks;
2228         sfs->f_bfree = osfs.os_bfree;
2229         sfs->f_bavail = osfs.os_bavail;
2230         sfs->f_fsid.val[0] = (__u32)fsid;
2231         sfs->f_fsid.val[1] = (__u32)(fsid >> 32);
2232
2233         ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STATFS,
2234                            ktime_us_delta(ktime_get(), kstart));
2235
2236         return 0;
2237 }
2238
2239 void ll_inode_size_lock(struct inode *inode)
2240 {
2241         struct ll_inode_info *lli;
2242
2243         LASSERT(!S_ISDIR(inode->i_mode));
2244
2245         lli = ll_i2info(inode);
2246         mutex_lock(&lli->lli_size_mutex);
2247 }
2248
2249 void ll_inode_size_unlock(struct inode *inode)
2250 {
2251         struct ll_inode_info *lli;
2252
2253         lli = ll_i2info(inode);
2254         mutex_unlock(&lli->lli_size_mutex);
2255 }
2256
2257 void ll_update_inode_flags(struct inode *inode, int ext_flags)
2258 {
2259         /* do not clear encryption flag */
2260         ext_flags |= ll_inode_to_ext_flags(inode->i_flags) & LUSTRE_ENCRYPT_FL;
2261         inode->i_flags = ll_ext_to_inode_flags(ext_flags);
2262         if (ext_flags & LUSTRE_PROJINHERIT_FL)
2263                 ll_file_set_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT);
2264         else
2265                 ll_file_clear_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT);
2266 }
2267
2268 int ll_update_inode(struct inode *inode, struct lustre_md *md)
2269 {
2270         struct ll_inode_info *lli = ll_i2info(inode);
2271         struct mdt_body *body = md->body;
2272         struct ll_sb_info *sbi = ll_i2sbi(inode);
2273         int rc = 0;
2274
2275         if (body->mbo_valid & OBD_MD_FLEASIZE) {
2276                 rc = cl_file_inode_init(inode, md);
2277                 if (rc)
2278                         return rc;
2279         }
2280
2281         if (S_ISDIR(inode->i_mode)) {
2282                 rc = ll_update_lsm_md(inode, md);
2283                 if (rc != 0)
2284                         return rc;
2285         }
2286
2287         if (body->mbo_valid & OBD_MD_FLACL)
2288                 lli_replace_acl(lli, md);
2289
2290         inode->i_ino = cl_fid_build_ino(&body->mbo_fid1,
2291                                         sbi->ll_flags & LL_SBI_32BIT_API);
2292         inode->i_generation = cl_fid_build_gen(&body->mbo_fid1);
2293
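        /* only move the VFS inode timestamps forward: the client may hold
         * newer local times than the MDS copy (see also ll_read_inode2(),
         * which zeroes them for a new inode) */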
2294         if (body->mbo_valid & OBD_MD_FLATIME) {
2295                 if (body->mbo_atime > inode->i_atime.tv_sec)
2296                         inode->i_atime.tv_sec = body->mbo_atime;
2297                 lli->lli_atime = body->mbo_atime;
2298         }
2299
2300         if (body->mbo_valid & OBD_MD_FLMTIME) {
2301                 if (body->mbo_mtime > inode->i_mtime.tv_sec) {
2302                         CDEBUG(D_INODE,
2303                                "setting ino %lu mtime from %lld to %llu\n",
2304                                inode->i_ino, (s64)inode->i_mtime.tv_sec,
2305                                body->mbo_mtime);
2306                         inode->i_mtime.tv_sec = body->mbo_mtime;
2307                 }
2308                 lli->lli_mtime = body->mbo_mtime;
2309         }
2310
2311         if (body->mbo_valid & OBD_MD_FLCTIME) {
2312                 if (body->mbo_ctime > inode->i_ctime.tv_sec)
2313                         inode->i_ctime.tv_sec = body->mbo_ctime;
2314                 lli->lli_ctime = body->mbo_ctime;
2315         }
2316
2317         if (body->mbo_valid & OBD_MD_FLBTIME)
2318                 lli->lli_btime = body->mbo_btime;
2319
2320         /* Clear i_flags to remove S_NOSEC before permissions are updated */
2321         if (body->mbo_valid & OBD_MD_FLFLAGS)
2322                 ll_update_inode_flags(inode, body->mbo_flags);
2323         if (body->mbo_valid & OBD_MD_FLMODE)
2324                 inode->i_mode = (inode->i_mode & S_IFMT) |
2325                                 (body->mbo_mode & ~S_IFMT);
2326
2327         if (body->mbo_valid & OBD_MD_FLTYPE)
2328                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
2329                                 (body->mbo_mode & S_IFMT);
2330
2331         LASSERT(inode->i_mode != 0);
2332         if (body->mbo_valid & OBD_MD_FLUID)
2333                 inode->i_uid = make_kuid(&init_user_ns, body->mbo_uid);
2334         if (body->mbo_valid & OBD_MD_FLGID)
2335                 inode->i_gid = make_kgid(&init_user_ns, body->mbo_gid);
2336         if (body->mbo_valid & OBD_MD_FLPROJID)
2337                 lli->lli_projid = body->mbo_projid;
2338         if (body->mbo_valid & OBD_MD_FLNLINK)
2339                 set_nlink(inode, body->mbo_nlink);
2340         if (body->mbo_valid & OBD_MD_FLRDEV)
2341                 inode->i_rdev = old_decode_dev(body->mbo_rdev);
2342
2343         if (body->mbo_valid & OBD_MD_FLID) {
2344                 /* FID shouldn't be changed! */
2345                 if (fid_is_sane(&lli->lli_fid)) {
2346                         LASSERTF(lu_fid_eq(&lli->lli_fid, &body->mbo_fid1),
2347                                  "Trying to change FID "DFID
2348                                  " to the "DFID", inode "DFID"(%p)\n",
2349                                  PFID(&lli->lli_fid), PFID(&body->mbo_fid1),
2350                                  PFID(ll_inode2fid(inode)), inode);
2351                 } else {
2352                         lli->lli_fid = body->mbo_fid1;
2353                 }
2354         }
2355
2356         LASSERT(fid_seq(&lli->lli_fid) != 0);
2357
2358         lli->lli_attr_valid = body->mbo_valid;
2359         if (body->mbo_valid & OBD_MD_FLSIZE) {
2360                 i_size_write(inode, body->mbo_size);
2361
2362                 CDEBUG(D_VFSTRACE, "inode="DFID", updating i_size %llu\n",
2363                        PFID(ll_inode2fid(inode)),
2364                        (unsigned long long)body->mbo_size);
2365
2366                 if (body->mbo_valid & OBD_MD_FLBLOCKS)
2367                         inode->i_blocks = body->mbo_blocks;
2368         } else {
2369                 if (body->mbo_valid & OBD_MD_FLLAZYSIZE)
2370                         lli->lli_lazysize = body->mbo_size;
2371                 if (body->mbo_valid & OBD_MD_FLLAZYBLOCKS)
2372                         lli->lli_lazyblocks = body->mbo_blocks;
2373         }
2374
2375         if (body->mbo_valid & OBD_MD_TSTATE) {
2376                 /* Set LLIF_FILE_RESTORING if a restore is ongoing and
2377                  * clear it when done, so that glimpsing of updated
2378                  * attributes starts again.
2379                  */
2380                 if (body->mbo_t_state & MS_RESTORE)
2381                         ll_file_set_flag(lli, LLIF_FILE_RESTORING);
2382                 else
2383                         ll_file_clear_flag(lli, LLIF_FILE_RESTORING);
2384         }
2385
2386         return 0;
2387 }
2388
2389 int ll_read_inode2(struct inode *inode, void *opaque)
2390 {
2391         struct lustre_md *md = opaque;
2392         struct ll_inode_info *lli = ll_i2info(inode);
2393         int     rc;
2394         ENTRY;
2395
2396         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
2397                PFID(&lli->lli_fid), inode);
2398
2399         /* Core attributes from the MDS first.  This is a new inode, and
2400          * the VFS doesn't zero times in the core inode so we have to do
2401          * it ourselves.  They will be overwritten by either MDS or OST
2402          * attributes - we just need to make sure they aren't newer.
2403          */
2404         inode->i_mtime.tv_sec = 0;
2405         inode->i_atime.tv_sec = 0;
2406         inode->i_ctime.tv_sec = 0;
2407         inode->i_rdev = 0;
2408         rc = ll_update_inode(inode, md);
2409         if (rc != 0)
2410                 RETURN(rc);
2411
2412         /* OIDEBUG(inode); */
2413
2414 #ifdef HAVE_BACKING_DEV_INFO
2415         /* initializing backing dev info. */
2416         inode->i_mapping->backing_dev_info = &s2lsi(inode->i_sb)->lsi_bdi;
2417 #endif
2418         if (S_ISREG(inode->i_mode)) {
2419                 struct ll_sb_info *sbi = ll_i2sbi(inode);
2420                 inode->i_op = &ll_file_inode_operations;
2421                 inode->i_fop = sbi->ll_fop;
2422                 inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
2423                 EXIT;
2424         } else if (S_ISDIR(inode->i_mode)) {
2425                 inode->i_op = &ll_dir_inode_operations;
2426                 inode->i_fop = &ll_dir_operations;
2427                 EXIT;
2428         } else if (S_ISLNK(inode->i_mode)) {
2429                 inode->i_op = &ll_fast_symlink_inode_operations;
2430                 EXIT;
2431         } else {
2432                 inode->i_op = &ll_special_inode_operations;
2433
2434                 init_special_inode(inode, inode->i_mode,
2435                                    inode->i_rdev);
2436
2437                 EXIT;
2438         }
2439
2440         return 0;
2441 }
2442
2443 void ll_delete_inode(struct inode *inode)
2444 {
2445         struct ll_inode_info *lli = ll_i2info(inode);
2446         struct address_space *mapping = &inode->i_data;
2447         unsigned long nrpages;
2448         unsigned long flags;
2449
2450         ENTRY;
2451
2452         if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL) {
2453                 /* This is the last chance to write out dirty pages;
2454                  * otherwise we may lose data during umount.
2455                  *
2456                  * If i_nlink is 0 then just discard the data. This is safe
2457                  * because the local inode gets i_nlink 0 from the server only
2458                  * for the last unlink, so the file is not open anywhere else.
2459                  */
2460                 cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, inode->i_nlink ?
2461                                    CL_FSYNC_LOCAL : CL_FSYNC_DISCARD, 1);
2462         }
2463         truncate_inode_pages_final(mapping);
2464
2465         /* Workaround for LU-118: Note nrpages may not be totally updated when
2466          * truncate_inode_pages() returns, as there can be a page in the process
2467          * of deletion (inside __delete_from_page_cache()) in the specified
2468          * range. Thus mapping->nrpages can be non-zero when this function
2469          * returns even after truncation of the whole mapping.  Only do this if
2470          * nrpages isn't already zero.
2471          */
2472         nrpages = mapping->nrpages;
2473         if (nrpages) {
2474                 ll_xa_lock_irqsave(&mapping->i_pages, flags);
2475                 nrpages = mapping->nrpages;
2476                 ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
2477         } /* Workaround end */
2478
2479         LASSERTF(nrpages == 0, "%s: inode="DFID"(%p) nrpages=%lu, "
2480                  "see https://jira.whamcloud.com/browse/LU-118\n",
2481                  ll_i2sbi(inode)->ll_fsname,
2482                  PFID(ll_inode2fid(inode)), inode, nrpages);
2483
2484         ll_clear_inode(inode);
2485         clear_inode(inode);
2486
2487         EXIT;
2488 }
2489
2490 int ll_iocontrol(struct inode *inode, struct file *file,
2491                  unsigned int cmd, unsigned long arg)
2492 {
2493         struct ll_sb_info *sbi = ll_i2sbi(inode);
2494         struct ptlrpc_request *req = NULL;
2495         int rc, flags = 0;
2496         ENTRY;
2497
2498         switch (cmd) {
2499         case FS_IOC_GETFLAGS: {
2500                 struct mdt_body *body;
2501                 struct md_op_data *op_data;
2502
2503                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
2504                                              0, 0, LUSTRE_OPC_ANY,
2505                                              NULL);
2506                 if (IS_ERR(op_data))
2507                         RETURN(PTR_ERR(op_data));
2508
2509                 op_data->op_valid = OBD_MD_FLFLAGS;
2510                 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
2511                 ll_finish_md_op_data(op_data);
2512                 if (rc) {
2513                         CERROR("%s: failure inode "DFID": rc = %d\n",
2514                                sbi->ll_md_exp->exp_obd->obd_name,
2515                                PFID(ll_inode2fid(inode)), rc);
2516                         RETURN(-abs(rc));
2517                 }
2518
2519                 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
2520
2521                 flags = body->mbo_flags;
2522
2523                 ptlrpc_req_finished(req);
2524
2525                 RETURN(put_user(flags, (int __user *)arg));
2526         }
2527         case FS_IOC_SETFLAGS: {
2528                 struct iattr *attr;
2529                 struct md_op_data *op_data;
2530                 struct cl_object *obj;
2531                 struct fsxattr fa = { 0 };
2532
2533                 if (get_user(flags, (int __user *)arg))
2534                         RETURN(-EFAULT);
2535
2536                 fa.fsx_projid = ll_i2info(inode)->lli_projid;
2537                 if (flags & LUSTRE_PROJINHERIT_FL)
2538                         fa.fsx_xflags = FS_XFLAG_PROJINHERIT;
2539
2540                 rc = ll_ioctl_check_project(inode, &fa);
2541                 if (rc)
2542                         RETURN(rc);
2543
2544                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
2545                                              LUSTRE_OPC_ANY, NULL);
2546                 if (IS_ERR(op_data))
2547                         RETURN(PTR_ERR(op_data));
2548
2549                 op_data->op_attr_flags = flags;
2550                 op_data->op_xvalid |= OP_XVALID_FLAGS;
2551                 rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &req);
2552                 ll_finish_md_op_data(op_data);
2553                 ptlrpc_req_finished(req);
2554                 if (rc)
2555                         RETURN(rc);
2556
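                /* update the in-core inode flags, then mirror the flag
                 * change to the OST objects (if the file has any) via
                 * cl_setattr_ost() below */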
2557                 ll_update_inode_flags(inode, flags);
2558
2559                 obj = ll_i2info(inode)->lli_clob;
2560                 if (obj == NULL)
2561                         RETURN(0);
2562
2563                 OBD_ALLOC_PTR(attr);
2564                 if (attr == NULL)
2565                         RETURN(-ENOMEM);
2566
2567                 rc = cl_setattr_ost(obj, attr, OP_XVALID_FLAGS, flags);
2568
2569                 OBD_FREE_PTR(attr);
2570                 RETURN(rc);
2571         }
2572         default:
2573                 RETURN(-ENOSYS);
2574         }
2575
2576         RETURN(0);
2577 }
2578
2579 int ll_flush_ctx(struct inode *inode)
2580 {
2581         struct ll_sb_info  *sbi = ll_i2sbi(inode);
2582
2583         CDEBUG(D_SEC, "flush context for user %d\n",
2584                from_kuid(&init_user_ns, current_uid()));
2585
2586         obd_set_info_async(NULL, sbi->ll_md_exp,
2587                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2588                            0, NULL, NULL);
2589         obd_set_info_async(NULL, sbi->ll_dt_exp,
2590                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2591                            0, NULL, NULL);
2592         return 0;
2593 }
2594
2595 /* umount -f client means force down, don't save state */
2596 void ll_umount_begin(struct super_block *sb)
2597 {
2598         struct ll_sb_info *sbi = ll_s2sbi(sb);
2599         struct obd_device *obd;
2600         struct obd_ioctl_data *ioc_data;
2601         int cnt;
2602         ENTRY;
2603
2604         CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
2605                sb->s_count, atomic_read(&sb->s_active));
2606
2607         obd = class_exp2obd(sbi->ll_md_exp);
2608         if (obd == NULL) {
2609                 CERROR("Invalid MDC connection handle %#llx\n",
2610                        sbi->ll_md_exp->exp_handle.h_cookie);
2611                 EXIT;
2612                 return;
2613         }
2614         obd->obd_force = 1;
2615
2616         obd = class_exp2obd(sbi->ll_dt_exp);
2617         if (obd == NULL) {
2618                 CERROR("Invalid LOV connection handle %#llx\n",
2619                        sbi->ll_dt_exp->exp_handle.h_cookie);
2620                 EXIT;
2621                 return;
2622         }
2623         obd->obd_force = 1;
2624
2625         OBD_ALLOC_PTR(ioc_data);
2626         if (ioc_data) {
2627                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
2628                               sizeof *ioc_data, ioc_data, NULL);
2629
2630                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
2631                               sizeof *ioc_data, ioc_data, NULL);
2632
2633                 OBD_FREE_PTR(ioc_data);
2634         }
2635
2636         /* Really, we'd like to wait until there are no requests outstanding,
2637          * and then continue.  For now, we just periodically check for the VFS
2638          * to decrement mnt_cnt and hope to finish within 10 seconds.
2639          */
2640         cnt = 10;
2641         while (cnt > 0 &&
2642                !may_umount(sbi->ll_mnt.mnt)) {
2643                 ssleep(1);
2644                 cnt -= 1;
2645         }
2646
2647         EXIT;
2648 }
2649
2650 int ll_remount_fs(struct super_block *sb, int *flags, char *data)
2651 {
2652         struct ll_sb_info *sbi = ll_s2sbi(sb);
2653         char *profilenm = get_profile_name(sb);
2654         int err;
2655         __u32 read_only;
2656
2657         if ((*flags & MS_RDONLY) != (sb->s_flags & SB_RDONLY)) {
2658                 read_only = *flags & MS_RDONLY;
2659                 err = obd_set_info_async(NULL, sbi->ll_md_exp,
2660                                          sizeof(KEY_READ_ONLY),
2661                                          KEY_READ_ONLY, sizeof(read_only),
2662                                          &read_only, NULL);
2663                 if (err) {
2664                         LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
2665                                       profilenm, read_only ?
2666                                       "read-only" : "read-write", err);
2667                         return err;
2668                 }
2669
2670                 if (read_only)
2671                         sb->s_flags |= SB_RDONLY;
2672                 else
2673                         sb->s_flags &= ~SB_RDONLY;
2674
2675                 if (sbi->ll_flags & LL_SBI_VERBOSE)
2676                         LCONSOLE_WARN("Remounted %s %s\n", profilenm,
2677                                       read_only ?  "read-only" : "read-write");
2678         }
2679         return 0;
2680 }
2681
2682 /**
2683  * Clean up the open handle that is cached on the MDT side.
2684  *
2685  * In the open case, the client-side open handling thread may hit an error
2686  * after the MDT grants the open. In that case, the client should send a
2687  * close RPC to the MDT as cleanup; otherwise, the open handle on the MDT
2688  * is leaked there until the client unmounts or is evicted.
2689  *
2690  * Furthermore, if someone unlinks the file, the open handle holds a
2691  * reference on that file/object, so it will block subsequent threads
2692  * that want to locate the object via its FID.
2693  *
2694  * \param[in] sb        super block for this file-system
2695  * \param[in] open_req  pointer to the original open request
2696  */
2697 void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req)
2698 {
2699         struct mdt_body                 *body;
2700         struct md_op_data               *op_data;
2701         struct ptlrpc_request           *close_req = NULL;
2702         struct obd_export               *exp       = ll_s2sbi(sb)->ll_md_exp;
2703         ENTRY;
2704
2705         body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
2706         OBD_ALLOC_PTR(op_data);
2707         if (op_data == NULL) {
2708                 CWARN("%s: cannot allocate op_data to release open handle for "
2709                       DFID"\n", ll_s2sbi(sb)->ll_fsname, PFID(&body->mbo_fid1));
2710
2711                 RETURN_EXIT;
2712         }
2713
2714         op_data->op_fid1 = body->mbo_fid1;
2715         op_data->op_open_handle = body->mbo_open_handle;
2716         op_data->op_mod_time = ktime_get_real_seconds();
2717         md_close(exp, op_data, NULL, &close_req);
2718         ptlrpc_req_finished(close_req);
2719         ll_finish_md_op_data(op_data);
2720
2721         EXIT;
2722 }
2723
2724 int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
2725                   struct super_block *sb, struct lookup_intent *it)
2726 {
2727         struct ll_sb_info *sbi = NULL;
2728         struct lustre_md md = { NULL };
2729         bool default_lmv_deleted = false;
2730         int rc;
2731
2732         ENTRY;
2733
2734         LASSERT(*inode || sb);
2735         sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
2736         rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
2737                               sbi->ll_md_exp, &md);
2738         if (rc != 0)
2739                 GOTO(out, rc);
2740
2741         /*
2742          * Clear default_lmv only if the intent_getattr reply doesn't contain
2743          * it, but that needs to be done after iget; check this early because
2744          * ll_update_lsm_md() may change md.
2745          */
2746         if (it && (it->it_op & (IT_LOOKUP | IT_GETATTR)) &&
2747             S_ISDIR(md.body->mbo_mode) && !md.default_lmv)
2748                 default_lmv_deleted = true;
2749
2750         if (*inode) {
2751                 rc = ll_update_inode(*inode, &md);
2752                 if (rc != 0)
2753                         GOTO(out, rc);
2754         } else {
2755                 LASSERT(sb != NULL);
2756
2757                 /*
2758                  * At this point the server returns the same FID that the
2759                  * client generated for the create, so using ->fid1 is okay.
2760                  */
2761                 if (!fid_is_sane(&md.body->mbo_fid1)) {
2762                         CERROR("%s: Fid is insane "DFID"\n",
2763                                 sbi->ll_fsname,
2764                                 PFID(&md.body->mbo_fid1));
2765                         GOTO(out, rc = -EINVAL);
2766                 }
2767
2768                 *inode = ll_iget(sb, cl_fid_build_ino(&md.body->mbo_fid1,
2769                                              sbi->ll_flags & LL_SBI_32BIT_API),
2770                                  &md);
2771                 if (IS_ERR(*inode)) {
2772                         lmd_clear_acl(&md);
2773                         rc = PTR_ERR(*inode);
2774                         *inode = NULL;
2775                         CERROR("new_inode -fatal: rc %d\n", rc);
2776                         GOTO(out, rc);
2777                 }
2778         }
2779
2780         /* Handle a piggybacked layout lock.
2781          * A layout lock can be piggybacked on getattr and open requests.
2782          * The lsm may only be applied to the inode if it comes with a layout
2783          * lock, otherwise a correct layout may be overwritten, for example:
2784          * 1. proc1: MDT returns an lsm but does not grant the layout lock
2785          * 2. the layout is changed by another client
2786          * 3. proc2: refreshes the layout and is granted the layout lock
2787          * 4. proc1: applies the now-stale layout */
2788         if (it != NULL && it->it_lock_mode != 0) {
2789                 struct lustre_handle lockh;
2790                 struct ldlm_lock *lock;
2791
2792                 lockh.cookie = it->it_lock_handle;
2793                 lock = ldlm_handle2lock(&lockh);
2794                 LASSERT(lock != NULL);
2795                 if (ldlm_has_layout(lock)) {
2796                         struct cl_object_conf conf;
2797
2798                         memset(&conf, 0, sizeof(conf));
2799                         conf.coc_opc = OBJECT_CONF_SET;
2800                         conf.coc_inode = *inode;
2801                         conf.coc_lock = lock;
2802                         conf.u.coc_layout = md.layout;
2803                         (void)ll_layout_conf(*inode, &conf);
2804                 }
2805                 LDLM_LOCK_PUT(lock);
2806         }
2807
2808         if (default_lmv_deleted)
2809                 ll_update_default_lsm_md(*inode, &md);
2810
2811         GOTO(out, rc = 0);
2812
2813 out:
2814         /* cleanup will be done if necessary */
2815         md_free_lustre_md(sbi->ll_md_exp, &md);
2816
2817         if (rc != 0 && it != NULL && it->it_op & IT_OPEN) {
2818                 ll_intent_drop_lock(it);
2819                 ll_open_cleanup(sb != NULL ? sb : (*inode)->i_sb, req);
2820         }
2821
2822         return rc;
2823 }
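
/*
 * A minimal usage sketch (hypothetical caller with a parent directory inode
 * and dentry in scope; "req" is an intent reply and "it" the matching lookup
 * intent): ll_prep_inode() either updates *inode in place or instantiates a
 * new inode from the MDT reply.
 *
 *	struct inode *inode = NULL;
 *
 *	rc = ll_prep_inode(&inode, req, parent->i_sb, it);
 *	if (rc == 0)
 *		d_add(dentry, inode);
 */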
2824
2825 int ll_obd_statfs(struct inode *inode, void __user *arg)
2826 {
2827         struct ll_sb_info *sbi = NULL;
2828         struct obd_export *exp;
2829         char *buf = NULL;
2830         struct obd_ioctl_data *data = NULL;
2831         __u32 type;
2832         int len = 0, rc;
2833
2834         if (!inode || !(sbi = ll_i2sbi(inode)))
2835                 GOTO(out_statfs, rc = -EINVAL);
2836
2837         rc = obd_ioctl_getdata(&buf, &len, arg);
2838         if (rc)
2839                 GOTO(out_statfs, rc);
2840
2841         data = (void *)buf;
2842         if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
2843             !data->ioc_pbuf1 || !data->ioc_pbuf2)
2844                 GOTO(out_statfs, rc = -EINVAL);
2845
2846         if (data->ioc_inllen1 != sizeof(__u32) ||
2847             data->ioc_inllen2 != sizeof(__u32) ||
2848             data->ioc_plen1 != sizeof(struct obd_statfs) ||
2849             data->ioc_plen2 != sizeof(struct obd_uuid))
2850                 GOTO(out_statfs, rc = -EINVAL);
2851
2852         memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
2853         if (type & LL_STATFS_LMV)
2854                 exp = sbi->ll_md_exp;
2855         else if (type & LL_STATFS_LOV)
2856                 exp = sbi->ll_dt_exp;
2857         else
2858                 GOTO(out_statfs, rc = -ENODEV);
2859
2860         rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, NULL);
2861         if (rc)
2862                 GOTO(out_statfs, rc);
2863 out_statfs:
2864         OBD_FREE_LARGE(buf, len);
2865         return rc;
2866 }
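
/*
 * From user space this ioctl is normally reached through liblustreapi; a
 * minimal sketch (assuming llapi_obd_statfs() keeps its usual signature,
 * with "path" pointing somewhere inside the Lustre mount):
 *
 *	struct obd_statfs stat_buf = { 0 };
 *	struct obd_uuid uuid_buf = { { 0 } };
 *
 *	rc = llapi_obd_statfs(path, LL_STATFS_LOV, 0, &stat_buf, &uuid_buf);
 */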
2867
2868 /*
2869  * This is normally called in ll_finish_md_op_data(), but sometimes it needs
2870  * to be called early to avoid a deadlock.
2871  */
2872 void ll_unlock_md_op_lsm(struct md_op_data *op_data)
2873 {
2874         if (op_data->op_mea2_sem) {
2875                 up_read(op_data->op_mea2_sem);
2876                 op_data->op_mea2_sem = NULL;
2877         }
2878
2879         if (op_data->op_mea1_sem) {
2880                 up_read(op_data->op_mea1_sem);
2881                 op_data->op_mea1_sem = NULL;
2882         }
2883 }
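
/*
 * A minimal sketch of the early-unlock pattern described above (hypothetical
 * caller): drop the lsm semaphores as soon as the MD request no longer needs
 * them; the later ll_finish_md_op_data() call then finds them already NULL.
 *
 *	rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
 *	ll_unlock_md_op_lsm(op_data);
 *	... do work that might otherwise deadlock on lli_lsm_sem ...
 *	ll_finish_md_op_data(op_data);
 */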
2884
2885 /* Prepare an md_op_data hint for passing down to the MD stack. */
2886 struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
2887                                       struct inode *i1, struct inode *i2,
2888                                       const char *name, size_t namelen,
2889                                       __u32 mode, enum md_op_code opc,
2890                                       void *data)
2891 {
2892         LASSERT(i1 != NULL);
2893
2894         if (name == NULL) {
2895                 /* Do not reuse namelen for something else. */
2896                 if (namelen != 0)
2897                         return ERR_PTR(-EINVAL);
2898         } else {
2899                 if (namelen > ll_i2sbi(i1)->ll_namelen)
2900                         return ERR_PTR(-ENAMETOOLONG);
2901
2902                 if (!lu_name_is_valid_2(name, namelen))
2903                         return ERR_PTR(-EINVAL);
2904         }
2905
2906         if (op_data == NULL)
2907                 OBD_ALLOC_PTR(op_data);
2908
2909         if (op_data == NULL)
2910                 return ERR_PTR(-ENOMEM);
2911
2912         ll_i2gids(op_data->op_suppgids, i1, i2);
2913         op_data->op_fid1 = *ll_inode2fid(i1);
2914         op_data->op_code = opc;
2915
2916         if (S_ISDIR(i1->i_mode)) {
2917                 down_read(&ll_i2info(i1)->lli_lsm_sem);
2918                 op_data->op_mea1_sem = &ll_i2info(i1)->lli_lsm_sem;
2919                 op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
2920                 op_data->op_default_mea1 = ll_i2info(i1)->lli_default_lsm_md;
2921         }
2922
2923         if (i2) {
2924                 op_data->op_fid2 = *ll_inode2fid(i2);
2925                 if (S_ISDIR(i2->i_mode)) {
2926                         if (i2 != i1) {
2927                                 down_read(&ll_i2info(i2)->lli_lsm_sem);
2928                                 op_data->op_mea2_sem =
2929                                                 &ll_i2info(i2)->lli_lsm_sem;
2930                         }
2931                         op_data->op_mea2 = ll_i2info(i2)->lli_lsm_md;
2932                 }
2933         } else {
2934                 fid_zero(&op_data->op_fid2);
2935         }
2936
2937         if (ll_i2sbi(i1)->ll_flags & LL_SBI_64BIT_HASH)
2938                 op_data->op_cli_flags |= CLI_HASH64;
2939
2940         if (ll_need_32bit_api(ll_i2sbi(i1)))
2941                 op_data->op_cli_flags |= CLI_API32;
2942
2943         op_data->op_name = name;
2944         op_data->op_namelen = namelen;
2945         op_data->op_mode = mode;
2946         op_data->op_mod_time = ktime_get_real_seconds();
2947         op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
2948         op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
2949         op_data->op_cap = cfs_curproc_cap_pack();
2950         op_data->op_mds = 0;
2951         if ((opc == LUSTRE_OPC_CREATE) && (name != NULL) &&
2952              filename_is_volatile(name, namelen, &op_data->op_mds)) {
2953                 op_data->op_bias |= MDS_CREATE_VOLATILE;
2954         }
2955         op_data->op_data = data;
2956
2957         return op_data;
2958 }
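
/*
 * A minimal lifecycle sketch (hypothetical caller): op_data is either passed
 * in or allocated here, holds lli_lsm_sem of any directory inode given, and
 * must be released with ll_finish_md_op_data().
 *
 *	op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
 *				     LUSTRE_OPC_ANY, NULL);
 *	if (IS_ERR(op_data))
 *		RETURN(PTR_ERR(op_data));
 *
 *	rc = md_getattr(sbi->ll_md_exp, op_data, &req);
 *	ll_finish_md_op_data(op_data);
 */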
2959
2960 void ll_finish_md_op_data(struct md_op_data *op_data)
2961 {
2962         ll_unlock_md_op_lsm(op_data);
2963         security_release_secctx(op_data->op_file_secctx,
2964                                 op_data->op_file_secctx_size);
2965         OBD_FREE_PTR(op_data);
2966 }
2967
2968 int ll_show_options(struct seq_file *seq, struct dentry *dentry)
2969 {
2970         struct ll_sb_info *sbi;
2971
2972         LASSERT(seq && dentry);
2973         sbi = ll_s2sbi(dentry->d_sb);
2974
2975         if (sbi->ll_flags & LL_SBI_NOLCK)
2976                 seq_puts(seq, ",nolock");
2977
2978         /* "flock" has been the default since 2.13, but it wasn't for many
2979          * years, so it is still useful to print it to show it is enabled.
2980          * Also print "noflock" so it is clear when flock is disabled.
2981          */
2982         if (sbi->ll_flags & LL_SBI_FLOCK)
2983                 seq_puts(seq, ",flock");
2984         else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
2985                 seq_puts(seq, ",localflock");
2986         else
2987                 seq_puts(seq, ",noflock");
2988
2989         if (sbi->ll_flags & LL_SBI_USER_XATTR)
2990                 seq_puts(seq, ",user_xattr");
2991
2992         if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
2993                 seq_puts(seq, ",lazystatfs");
2994
2995         if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
2996                 seq_puts(seq, ",user_fid2path");
2997
2998         if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
2999                 seq_puts(seq, ",always_ping");
3000
3001         if (ll_sbi_has_test_dummy_encryption(sbi))
3002                 seq_puts(seq, ",test_dummy_encryption");
3003
3004         if (ll_sbi_has_encrypt(sbi))
3005                 seq_puts(seq, ",encrypt");
3006         else
3007                 seq_puts(seq, ",noencrypt");
3008
3009         RETURN(0);
3010 }
3011
3012 /**
3013  * Get the obd name for the given cmd and copy it out to user space.
3014  */
3015 int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
3016 {
3017         struct ll_sb_info *sbi = ll_i2sbi(inode);
3018         struct obd_device *obd;
3019         ENTRY;
3020
3021         if (cmd == OBD_IOC_GETDTNAME)
3022                 obd = class_exp2obd(sbi->ll_dt_exp);
3023         else if (cmd == OBD_IOC_GETMDNAME)
3024                 obd = class_exp2obd(sbi->ll_md_exp);
3025         else
3026                 RETURN(-EINVAL);
3027
3028         if (!obd)
3029                 RETURN(-ENOENT);
3030
3031         if (copy_to_user((void __user *)arg, obd->obd_name,
3032                          strlen(obd->obd_name) + 1))
3033                 RETURN(-EFAULT);
3034
3035         RETURN(0);
3036 }
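
/*
 * A minimal user-space sketch (hypothetical; "fd" is an open descriptor on a
 * Lustre mount, and 128 bytes is assumed to be enough for an obd name):
 *
 *	char name[128];
 *
 *	if (ioctl(fd, OBD_IOC_GETMDNAME, name) == 0)
 *		printf("MD obd name: %s\n", name);
 */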
3037
3038 static char *ll_d_path(struct dentry *dentry, char *buf, int bufsize)
3039 {
3040         char *path = NULL;
3041         struct path p;
3042
3044         p.dentry = dentry;
3045         p.mnt = current->fs->root.mnt;
3046         path_get(&p);
3047         path = d_path(&p, buf, bufsize);
3048         path_put(&p);
3049         return path;
3050 }
3051
3052 void ll_dirty_page_discard_warn(struct page *page, int ioret)
3053 {
3054         char *buf, *path = NULL;
3055         struct dentry *dentry = NULL;
3056         struct inode *inode = page->mapping->host;
3057
3058         /* this can be called inside a spin lock, so use GFP_ATOMIC. */
3059         buf = (char *)__get_free_page(GFP_ATOMIC);
3060         if (buf != NULL) {
3061                 dentry = d_find_alias(page->mapping->host);
3062                 if (dentry != NULL)
3063                         path = ll_d_path(dentry, buf, PAGE_SIZE);
3064         }
3065
3066         /* The below message is checked in recovery-small.sh test_24b */
3067         CDEBUG(D_WARNING,
3068                "%s: dirty page discard: %s/fid: "DFID"/%s may get corrupted "
3069                "(rc %d)\n", ll_i2sbi(inode)->ll_fsname,
3070                s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
3071                PFID(ll_inode2fid(inode)),
3072                (path && !IS_ERR(path)) ? path : "", ioret);
3073
3074         if (dentry != NULL)
3075                 dput(dentry);
3076
3077         if (buf != NULL)
3078                 free_page((unsigned long)buf);
3079 }
3080
3081 ssize_t ll_copy_user_md(const struct lov_user_md __user *md,
3082                         struct lov_user_md **kbuf)
3083 {
3084         struct lov_user_md      lum;
3085         ssize_t                 lum_size;
3086         ENTRY;
3087
3088         if (copy_from_user(&lum, md, sizeof(lum)))
3089                 RETURN(-EFAULT);
3090
3091         lum_size = ll_lov_user_md_size(&lum);
3092         if (lum_size < 0)
3093                 RETURN(lum_size);
3094
3095         OBD_ALLOC_LARGE(*kbuf, lum_size);
3096         if (*kbuf == NULL)
3097                 RETURN(-ENOMEM);
3098
3099         if (copy_from_user(*kbuf, md, lum_size) != 0) {
3100                 OBD_FREE_LARGE(*kbuf, lum_size);
3101                 RETURN(-EFAULT);
3102         }
3103
3104         RETURN(lum_size);
3105 }
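
/*
 * A minimal usage sketch (hypothetical ioctl handler): the kernel copy is
 * sized by the layout the user actually passed in, and must be freed with
 * OBD_FREE_LARGE() using the size returned here.
 *
 *	struct lov_user_md *klum;
 *	ssize_t size;
 *
 *	size = ll_copy_user_md(ulum, &klum);
 *	if (size < 0)
 *		RETURN(size);
 *	... use klum ...
 *	OBD_FREE_LARGE(klum, size);
 */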
3106
3107 /*
3108  * Compute the llite root squash state after a change of the root squash
3109  * configuration setting, or after an LNet NID is added or removed.
3110  */
3111 void ll_compute_rootsquash_state(struct ll_sb_info *sbi)
3112 {
3113         struct root_squash_info *squash = &sbi->ll_squash;
3114         int i;
3115         bool matched;
3116         struct lnet_process_id id;
3117
3118         /* Update norootsquash flag */
3119         spin_lock(&squash->rsi_lock);
3120         if (list_empty(&squash->rsi_nosquash_nids))
3121                 sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
3122         else {
3123                 /* Do not apply root squash if any of our NIDs is in the
3124                  * nosquash_nids list */
3125                 matched = false;
3126                 i = 0;
3127                 while (LNetGetId(i++, &id) != -ENOENT) {
3128                         if (id.nid == LNET_NID_LO_0)
3129                                 continue;
3130                         if (cfs_match_nid(id.nid, &squash->rsi_nosquash_nids)) {
3131                                 matched = true;
3132                                 break;
3133                         }
3134                 }
3135                 if (matched)
3136                         sbi->ll_flags |= LL_SBI_NOROOTSQUASH;
3137                 else
3138                         sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
3139         }
3140         spin_unlock(&squash->rsi_lock);
3141 }
3142
3143 /**
3144  * Parse linkea content to extract information about a given hardlink
3145  *
3146  * \param[in]   ldata      - Initialized linkea data
3147  * \param[in]   linkno     - Link identifier
3148  * \param[out]  parent_fid - The entry's parent FID
3149  * \param[out]  ln         - Entry name destination buffer
3150  *
3151  * \retval 0 on success
3152  * \retval Appropriate negative error code on failure
3153  */
3154 static int ll_linkea_decode(struct linkea_data *ldata, unsigned int linkno,
3155                             struct lu_fid *parent_fid, struct lu_name *ln)
3156 {
3157         unsigned int    idx;
3158         int             rc;
3159         ENTRY;
3160
3161         rc = linkea_init_with_rec(ldata);
3162         if (rc < 0)
3163                 RETURN(rc);
3164
3165         if (linkno >= ldata->ld_leh->leh_reccount)
3166                 /* beyond last link */
3167                 RETURN(-ENODATA);
3168
3169         linkea_first_entry(ldata);
3170         for (idx = 0; ldata->ld_lee != NULL; idx++) {
3171                 linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen, ln,
3172                                     parent_fid);
3173                 if (idx == linkno)
3174                         break;
3175
3176                 linkea_next_entry(ldata);
3177         }
3178
3179         if (idx < linkno)
3180                 RETURN(-ENODATA);
3181
3182         RETURN(0);
3183 }
3184
3185 /**
3186  * Get parent FID and name of an identified link. Operation is performed for
3187  * a given link number, letting the caller iterate over linkno to list one or
3188  * all links of an entry.
3189  *
3190  * \param[in]     file - File descriptor against which to perform the operation
3191  * \param[in,out] arg  - User-filled structure containing the linkno to operate
3192  *                       on and the available size. It is eventually filled with
3193  *                       the requested information or left untouched on error
3194  *
3195  * \retval 0 on success
3196  * \retval Appropriate negative error code on failure
3197  */
3198 int ll_getparent(struct file *file, struct getparent __user *arg)
3199 {
3200         struct inode            *inode = file_inode(file);
3201         struct linkea_data      *ldata;
3202         struct lu_buf            buf = LU_BUF_NULL;
3203         struct lu_name           ln;
3204         struct lu_fid            parent_fid;
3205         __u32                    linkno;
3206         __u32                    name_size;
3207         int                      rc;
3208
3209         ENTRY;
3210
3211         if (!cfs_capable(CFS_CAP_DAC_READ_SEARCH) &&
3212             !(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
3213                 RETURN(-EPERM);
3214
3215         if (get_user(name_size, &arg->gp_name_size))
3216                 RETURN(-EFAULT);
3217
3218         if (get_user(linkno, &arg->gp_linkno))
3219                 RETURN(-EFAULT);
3220
3221         if (name_size > PATH_MAX)
3222                 RETURN(-EINVAL);
3223
3224         OBD_ALLOC(ldata, sizeof(*ldata));
3225         if (ldata == NULL)
3226                 RETURN(-ENOMEM);
3227
3228         rc = linkea_data_new(ldata, &buf);
3229         if (rc < 0)
3230                 GOTO(ldata_free, rc);
3231
3232         rc = ll_xattr_list(inode, XATTR_NAME_LINK, XATTR_TRUSTED_T, buf.lb_buf,
3233                            buf.lb_len, OBD_MD_FLXATTR);
3234         if (rc < 0)
3235                 GOTO(lb_free, rc);
3236
3237         rc = ll_linkea_decode(ldata, linkno, &parent_fid, &ln);
3238         if (rc < 0)
3239                 GOTO(lb_free, rc);
3240
3241         if (ln.ln_namelen >= name_size)
3242                 GOTO(lb_free, rc = -EOVERFLOW);
3243
3244         if (copy_to_user(&arg->gp_fid, &parent_fid, sizeof(arg->gp_fid)))
3245                 GOTO(lb_free, rc = -EFAULT);
3246
3247         if (copy_to_user(&arg->gp_name, ln.ln_name, ln.ln_namelen))
3248                 GOTO(lb_free, rc = -EFAULT);
3249
3250         if (put_user('\0', arg->gp_name + ln.ln_namelen))
3251                 GOTO(lb_free, rc = -EFAULT);
3252
3253 lb_free:
3254         lu_buf_free(&buf);
3255 ldata_free:
3256         OBD_FREE(ldata, sizeof(*ldata));
3257
3258         RETURN(rc);
3259 }