LU-16019 llite: fully disable readahead in kernel I/O path
fs/lustre-release.git: lustre/llite/llite_lib.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lustre/llite/llite_lib.c
32  *
33  * Lustre Light Super operations
34  */
35
36 #define DEBUG_SUBSYSTEM S_LLITE
37
38 #include <linux/cpu.h>
39 #include <linux/module.h>
40 #include <linux/random.h>
41 #include <linux/statfs.h>
42 #include <linux/time.h>
43 #include <linux/file.h>
44 #include <linux/types.h>
45 #include <libcfs/linux/linux-uuid.h>
46 #include <linux/version.h>
47 #include <linux/mm.h>
48 #include <linux/user_namespace.h>
49 #include <linux/delay.h>
50 #include <linux/uidgid.h>
51 #include <linux/fs_struct.h>
52
53 #ifndef HAVE_CPUS_READ_LOCK
54 #include <libcfs/linux/linux-cpu.h>
55 #endif
56 #include <libcfs/linux/linux-misc.h>
57 #include <uapi/linux/lustre/lustre_ioctl.h>
58 #ifdef HAVE_UAPI_LINUX_MOUNT_H
59 #include <uapi/linux/mount.h>
60 #endif
61
62 #include <lustre_ha.h>
63 #include <lustre_dlm.h>
64 #include <lprocfs_status.h>
65 #include <lustre_disk.h>
66 #include <uapi/linux/lustre/lustre_param.h>
67 #include <lustre_log.h>
68 #include <cl_object.h>
69 #include <obd_cksum.h>
70 #include "llite_internal.h"
71
72 struct kmem_cache *ll_file_data_slab;
73
74 #ifndef log2
75 #define log2(n) ffz(~(n))
76 #endif
77
78 /**
79  * If only one core is visible to Lustre, async readahead is
80  * disabled.  To avoid massive oversubscription, use half of the
81  * active cores as the default maximum number of in-flight async
82  * readahead requests.
83  */
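/*
 * Worked example (illustrative): with 16 cores visible to Lustre,
 * 16 >> 1 = 8 async readahead work items may run concurrently; a
 * single visible core gives 1 >> 1 = 0, which disables async
 * readahead as described above.
 */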
84 static inline unsigned int ll_get_ra_async_max_active(void)
85 {
86         return cfs_cpt_weight(cfs_cpt_tab, CFS_CPT_ANY) >> 1;
87 }
88
89 static struct ll_sb_info *ll_init_sbi(void)
90 {
91         struct ll_sb_info *sbi = NULL;
92         unsigned long pages;
93         unsigned long lru_page_max;
94         struct sysinfo si;
95         int rc;
96
97         ENTRY;
98
99         OBD_ALLOC_PTR(sbi);
100         if (sbi == NULL)
101                 RETURN(ERR_PTR(-ENOMEM));
102
103         rc = pcc_super_init(&sbi->ll_pcc_super);
104         if (rc < 0)
105                 GOTO(out_sbi, rc);
106
107         spin_lock_init(&sbi->ll_lock);
108         mutex_init(&sbi->ll_lco.lco_lock);
109         spin_lock_init(&sbi->ll_pp_extent_lock);
110         spin_lock_init(&sbi->ll_process_lock);
111         sbi->ll_rw_stats_on = 0;
112         sbi->ll_statfs_max_age = OBD_STATFS_CACHE_SECONDS;
113
114         si_meminfo(&si);
115         pages = si.totalram - si.totalhigh;
116         lru_page_max = pages / 2;
117
118         sbi->ll_ra_info.ra_async_max_active = ll_get_ra_async_max_active();
119         sbi->ll_ra_info.ll_readahead_wq =
120                 cfs_cpt_bind_workqueue("ll-readahead-wq", cfs_cpt_tab,
121                                        0, CFS_CPT_ANY,
122                                        sbi->ll_ra_info.ra_async_max_active);
123         if (IS_ERR(sbi->ll_ra_info.ll_readahead_wq))
124                 GOTO(out_pcc, rc = PTR_ERR(sbi->ll_ra_info.ll_readahead_wq));
125
126         /* initialize ll_cache data */
127         sbi->ll_cache = cl_cache_init(lru_page_max);
128         if (sbi->ll_cache == NULL)
129                 GOTO(out_destroy_ra, rc = -ENOMEM);
130
131         /* initialize foreign symlink prefix path */
132         OBD_ALLOC(sbi->ll_foreign_symlink_prefix, sizeof("/mnt/"));
133         if (sbi->ll_foreign_symlink_prefix == NULL)
134                 GOTO(out_destroy_ra, rc = -ENOMEM);
135         memcpy(sbi->ll_foreign_symlink_prefix, "/mnt/", sizeof("/mnt/"));
136         sbi->ll_foreign_symlink_prefix_size = sizeof("/mnt/");
137
138         /* initialize foreign symlink upcall path, none by default */
139         OBD_ALLOC(sbi->ll_foreign_symlink_upcall, sizeof("none"));
140         if (sbi->ll_foreign_symlink_upcall == NULL)
141                 GOTO(out_destroy_ra, rc = -ENOMEM);
142         memcpy(sbi->ll_foreign_symlink_upcall, "none", sizeof("none"));
143         sbi->ll_foreign_symlink_upcall_items = NULL;
144         sbi->ll_foreign_symlink_upcall_nb_items = 0;
145         init_rwsem(&sbi->ll_foreign_symlink_sem);
146         /* foreign symlink support (LL_SBI_FOREIGN_SYMLINK in ll_flags)
147          * not enabled by default
148          */
149
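        /*
         * Readahead defaults: cap the per-mount readahead pool at 1/32 of
         * low memory (bounded by SBI_DEFAULT_READ_AHEAD_MAX), allow each
         * file at most a quarter of that pool (bounded by
         * SBI_DEFAULT_READ_AHEAD_PER_FILE_MAX), and start the per-file
         * async readahead threshold at the per-file limit.
         */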
150         sbi->ll_ra_info.ra_max_pages =
151                 min(pages / 32, SBI_DEFAULT_READ_AHEAD_MAX);
152         sbi->ll_ra_info.ra_max_pages_per_file =
153                 min(sbi->ll_ra_info.ra_max_pages / 4,
154                     SBI_DEFAULT_READ_AHEAD_PER_FILE_MAX);
155         sbi->ll_ra_info.ra_async_pages_per_file_threshold =
156                                 sbi->ll_ra_info.ra_max_pages_per_file;
157         sbi->ll_ra_info.ra_range_pages = SBI_DEFAULT_RA_RANGE_PAGES;
158         sbi->ll_ra_info.ra_max_read_ahead_whole_pages = -1;
159         atomic_set(&sbi->ll_ra_info.ra_async_inflight, 0);
160
161         set_bit(LL_SBI_VERBOSE, sbi->ll_flags);
162 #ifdef ENABLE_CHECKSUM
163         set_bit(LL_SBI_CHECKSUM, sbi->ll_flags);
164 #endif
165 #ifdef ENABLE_FLOCK
166         set_bit(LL_SBI_FLOCK, sbi->ll_flags);
167 #endif
168
169 #ifdef HAVE_LRU_RESIZE_SUPPORT
170         set_bit(LL_SBI_LRU_RESIZE, sbi->ll_flags);
171 #endif
172         set_bit(LL_SBI_LAZYSTATFS, sbi->ll_flags);
173
174         /* metadata statahead is enabled by default */
175         sbi->ll_sa_running_max = LL_SA_RUNNING_DEF;
176         sbi->ll_sa_max = LL_SA_RPC_DEF;
177         atomic_set(&sbi->ll_sa_total, 0);
178         atomic_set(&sbi->ll_sa_wrong, 0);
179         atomic_set(&sbi->ll_sa_running, 0);
180         atomic_set(&sbi->ll_agl_total, 0);
181         set_bit(LL_SBI_AGL_ENABLED, sbi->ll_flags);
182         set_bit(LL_SBI_FAST_READ, sbi->ll_flags);
183         set_bit(LL_SBI_TINY_WRITE, sbi->ll_flags);
184         set_bit(LL_SBI_PARALLEL_DIO, sbi->ll_flags);
185         ll_sbi_set_encrypt(sbi, true);
186         ll_sbi_set_name_encrypt(sbi, true);
187
188         /* root squash */
189         sbi->ll_squash.rsi_uid = 0;
190         sbi->ll_squash.rsi_gid = 0;
191         INIT_LIST_HEAD(&sbi->ll_squash.rsi_nosquash_nids);
192         spin_lock_init(&sbi->ll_squash.rsi_lock);
193
194         /* Per-filesystem file heat */
195         sbi->ll_heat_decay_weight = SBI_DEFAULT_HEAT_DECAY_WEIGHT;
196         sbi->ll_heat_period_second = SBI_DEFAULT_HEAT_PERIOD_SECOND;
197
198         /* Per-fs open heat level before requesting open lock */
199         sbi->ll_oc_thrsh_count = SBI_DEFAULT_OPENCACHE_THRESHOLD_COUNT;
200         sbi->ll_oc_max_ms = SBI_DEFAULT_OPENCACHE_THRESHOLD_MAX_MS;
201         sbi->ll_oc_thrsh_ms = SBI_DEFAULT_OPENCACHE_THRESHOLD_MS;
202         RETURN(sbi);
203 out_destroy_ra:
204         if (sbi->ll_foreign_symlink_prefix)
205                 OBD_FREE(sbi->ll_foreign_symlink_prefix, sizeof("/mnt/"));
206         if (sbi->ll_cache) {
207                 cl_cache_decref(sbi->ll_cache);
208                 sbi->ll_cache = NULL;
209         }
210         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
211 out_pcc:
212         pcc_super_fini(&sbi->ll_pcc_super);
213 out_sbi:
214         OBD_FREE_PTR(sbi);
215         RETURN(ERR_PTR(rc));
216 }
217
218 static void ll_free_sbi(struct super_block *sb)
219 {
220         struct ll_sb_info *sbi = ll_s2sbi(sb);
221         ENTRY;
222
223         if (sbi != NULL) {
224                 if (!list_empty(&sbi->ll_squash.rsi_nosquash_nids))
225                         cfs_free_nidlist(&sbi->ll_squash.rsi_nosquash_nids);
226                 if (sbi->ll_ra_info.ll_readahead_wq)
227                         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
228                 if (sbi->ll_cache != NULL) {
229                         cl_cache_decref(sbi->ll_cache);
230                         sbi->ll_cache = NULL;
231                 }
232                 if (sbi->ll_foreign_symlink_prefix) {
233                         OBD_FREE(sbi->ll_foreign_symlink_prefix,
234                                  sbi->ll_foreign_symlink_prefix_size);
235                         sbi->ll_foreign_symlink_prefix = NULL;
236                 }
237                 if (sbi->ll_foreign_symlink_upcall) {
238                         OBD_FREE(sbi->ll_foreign_symlink_upcall,
239                                  strlen(sbi->ll_foreign_symlink_upcall) +
240                                        1);
241                         sbi->ll_foreign_symlink_upcall = NULL;
242                 }
243                 if (sbi->ll_foreign_symlink_upcall_items) {
244                         int i;
245                         int nb_items = sbi->ll_foreign_symlink_upcall_nb_items;
246                         struct ll_foreign_symlink_upcall_item *items =
247                                 sbi->ll_foreign_symlink_upcall_items;
248
249                         for (i = 0 ; i < nb_items; i++)
250                                 if (items[i].type == STRING_TYPE)
251                                         OBD_FREE(items[i].string,
252                                                        items[i].size);
253
254                         OBD_FREE_LARGE(items, nb_items *
255                                 sizeof(struct ll_foreign_symlink_upcall_item));
256                         sbi->ll_foreign_symlink_upcall_items = NULL;
257                 }
258                 ll_free_rw_stats_info(sbi);
259                 pcc_super_fini(&sbi->ll_pcc_super);
260                 OBD_FREE(sbi, sizeof(*sbi));
261         }
262         EXIT;
263 }
264
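/*
 * Connect this superblock to its metadata (md) and data (dt) targets:
 * negotiate connect flags with each, fetch the root FID, and build the
 * root inode and dentry.  On failure each step is unwound via the
 * out_* labels below.
 */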
265 static int client_common_fill_super(struct super_block *sb, char *md, char *dt)
266 {
267         struct inode *root = NULL;
268         struct ll_sb_info *sbi = ll_s2sbi(sb);
269         struct obd_statfs *osfs = NULL;
270         struct ptlrpc_request *request = NULL;
271         struct obd_connect_data *data = NULL;
272         struct obd_uuid *uuid;
273         struct md_op_data *op_data;
274         struct lustre_md lmd;
275         u64 valid;
276         int size, err, checksum;
277         bool api32;
278         void *encctx;
279         int encctxlen;
280
281         ENTRY;
282         sbi->ll_md_obd = class_name2obd(md);
283         if (!sbi->ll_md_obd) {
284                 CERROR("MD %s: not setup or attached\n", md);
285                 RETURN(-EINVAL);
286         }
287
288         OBD_ALLOC_PTR(data);
289         if (data == NULL)
290                 RETURN(-ENOMEM);
291
292         OBD_ALLOC_PTR(osfs);
293         if (osfs == NULL) {
294                 OBD_FREE_PTR(data);
295                 RETURN(-ENOMEM);
296         }
297
298         /* pass the client page size via ocd_grant_blkbits; the server will
299          * report back its backend blocksize for grant calculation purposes */
300         data->ocd_grant_blkbits = PAGE_SHIFT;
301
302         /* indicate MDT features supported by this client */
303         data->ocd_connect_flags = OBD_CONNECT_IBITS    | OBD_CONNECT_NODEVOH  |
304                                   OBD_CONNECT_ATTRFID  | OBD_CONNECT_GRANT |
305                                   OBD_CONNECT_VERSION  | OBD_CONNECT_BRW_SIZE |
306                                   OBD_CONNECT_SRVLOCK  |
307                                   OBD_CONNECT_MDS_CAPA | OBD_CONNECT_OSS_CAPA |
308                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID     |
309                                   OBD_CONNECT_AT       | OBD_CONNECT_LOV_V3   |
310                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
311                                   OBD_CONNECT_64BITHASH |
312                                   OBD_CONNECT_EINPROGRESS |
313                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
314                                   OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS|
315                                   OBD_CONNECT_MAX_EASIZE |
316                                   OBD_CONNECT_FLOCK_DEAD |
317                                   OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK |
318                                   OBD_CONNECT_OPEN_BY_FID |
319                                   OBD_CONNECT_DIR_STRIPE |
320                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_CKSUM |
321                                   OBD_CONNECT_SUBTREE |
322                                   OBD_CONNECT_MULTIMODRPCS |
323                                   OBD_CONNECT_GRANT_PARAM |
324                                   OBD_CONNECT_GRANT_SHRINK |
325                                   OBD_CONNECT_SHORTIO | OBD_CONNECT_FLAGS2;
326
327         data->ocd_connect_flags2 = OBD_CONNECT2_DIR_MIGRATE |
328                                    OBD_CONNECT2_SUM_STATFS |
329                                    OBD_CONNECT2_OVERSTRIPING |
330                                    OBD_CONNECT2_FLR |
331                                    OBD_CONNECT2_LOCK_CONVERT |
332                                    OBD_CONNECT2_ARCHIVE_ID_ARRAY |
333                                    OBD_CONNECT2_INC_XID |
334                                    OBD_CONNECT2_LSOM |
335                                    OBD_CONNECT2_ASYNC_DISCARD |
336                                    OBD_CONNECT2_PCC |
337                                    OBD_CONNECT2_CRUSH | OBD_CONNECT2_LSEEK |
338                                    OBD_CONNECT2_GETATTR_PFID |
339                                    OBD_CONNECT2_DOM_LVB |
340                                    OBD_CONNECT2_REP_MBITS |
341                                    OBD_CONNECT2_ATOMIC_OPEN_LOCK;
342
343 #ifdef HAVE_LRU_RESIZE_SUPPORT
344         if (test_bit(LL_SBI_LRU_RESIZE, sbi->ll_flags))
345                 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
346 #endif
347         data->ocd_connect_flags |= OBD_CONNECT_ACL_FLAGS;
348
349         data->ocd_cksum_types = obd_cksum_types_supported_client();
350
351         if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
352                 /* flag the MDC connection as lightweight; only used for
353                  * test purposes, use with care */
354                 data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
355
356         data->ocd_ibits_known = MDS_INODELOCK_FULL;
357         data->ocd_version = LUSTRE_VERSION_CODE;
358
359         if (sb->s_flags & SB_RDONLY)
360                 data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
361         if (test_bit(LL_SBI_USER_XATTR, sbi->ll_flags))
362                 data->ocd_connect_flags |= OBD_CONNECT_XATTR;
363
364 #ifdef SB_NOSEC
365         /* Setting this indicates we correctly support S_NOSEC (See kernel
366          * commit 9e1f1de02c2275d7172e18dc4e7c2065777611bf)
367          */
368         sb->s_flags |= SB_NOSEC;
369 #endif
370         sbi->ll_fop = ll_select_file_operations(sbi);
371
372         /* always ping even if server suppress_pings */
373         if (test_bit(LL_SBI_ALWAYS_PING, sbi->ll_flags))
374                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
375
376         obd_connect_set_secctx(data);
377         if (ll_sbi_has_encrypt(sbi)) {
378                 obd_connect_set_name_enc(data);
379                 obd_connect_set_enc(data);
380         }
381
382 #if defined(CONFIG_SECURITY)
383         data->ocd_connect_flags2 |= OBD_CONNECT2_SELINUX_POLICY;
384 #endif
385
386         data->ocd_brw_size = MD_MAX_BRW_SIZE;
387
388         err = obd_connect(NULL, &sbi->ll_md_exp, sbi->ll_md_obd,
389                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
390         if (err == -EBUSY) {
391                 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
392                                    "recovery, of which this client is not a "
393                                    "part. Please wait for recovery to complete,"
394                                    " abort, or time out.\n", md);
395                 GOTO(out, err);
396         } else if (err) {
397                 CERROR("cannot connect to %s: rc = %d\n", md, err);
398                 GOTO(out, err);
399         }
400
401         sbi->ll_md_exp->exp_connect_data = *data;
402
403         err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
404                            LUSTRE_SEQ_METADATA);
405         if (err) {
406                 CERROR("%s: Can't init metadata layer FID infrastructure, "
407                        "rc = %d\n", sbi->ll_md_exp->exp_obd->obd_name, err);
408                 GOTO(out_md, err);
409         }
410
411         /* For mount, we only need fs info from MDT0; in DNE this also
412          * ensures the client can be mounted as long as MDT0 is
413          * available */
414         err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
415                         ktime_get_seconds() - sbi->ll_statfs_max_age,
416                         OBD_STATFS_FOR_MDT0);
417         if (err)
418                 GOTO(out_md_fid, err);
419
420         /* This needs to be after statfs to ensure connect has finished.
421          * Note that "data" does NOT contain the valid connect reply.
422          * If connecting to a 1.8 server there will be no LMV device, so
423          * we can access the MDC export directly and exp_connect_flags will
424          * be non-zero, but if accessing an upgraded 2.1 server it will
425          * have the correct flags filled in.
426          * XXX: fill in the LMV exp_connect_flags from MDC(s). */
427         valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
428         if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
429             valid != CLIENT_CONNECT_MDT_REQD) {
430                 char *buf;
431
432                 OBD_ALLOC_WAIT(buf, PAGE_SIZE);
433                 obd_connect_flags2str(buf, PAGE_SIZE,
434                                       valid ^ CLIENT_CONNECT_MDT_REQD, 0, ",");
435                 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support "
436                                    "feature(s) needed for correct operation "
437                                    "of this client (%s). Please upgrade "
438                                    "server or downgrade client.\n",
439                                    sbi->ll_md_exp->exp_obd->obd_name, buf);
440                 OBD_FREE(buf, PAGE_SIZE);
441                 GOTO(out_md_fid, err = -EPROTO);
442         }
443
444         size = sizeof(*data);
445         err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
446                            KEY_CONN_DATA,  &size, data);
447         if (err) {
448                 CERROR("%s: Get connect data failed: rc = %d\n",
449                        sbi->ll_md_exp->exp_obd->obd_name, err);
450                 GOTO(out_md_fid, err);
451         }
452
453         LASSERT(osfs->os_bsize);
454         sb->s_blocksize = osfs->os_bsize;
455         sb->s_blocksize_bits = log2(osfs->os_bsize);
456         sb->s_magic = LL_SUPER_MAGIC;
457         sb->s_maxbytes = MAX_LFS_FILESIZE;
458         sbi->ll_inode_cache_enabled = 1;
459         sbi->ll_namelen = osfs->os_namelen;
460         sbi->ll_mnt.mnt = current->fs->root.mnt;
461         sbi->ll_mnt_ns = current->nsproxy->mnt_ns;
462
463         if (test_bit(LL_SBI_USER_XATTR, sbi->ll_flags) &&
464             !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
465                 LCONSOLE_INFO("Disabling user_xattr feature because "
466                               "it is not supported on the server\n");
467                 clear_bit(LL_SBI_USER_XATTR, sbi->ll_flags);
468         }
469
470         if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
471 #ifdef SB_POSIXACL
472                 sb->s_flags |= SB_POSIXACL;
473 #endif
474                 set_bit(LL_SBI_ACL, sbi->ll_flags);
475         } else {
476                 LCONSOLE_INFO("client wants to enable acl, but mdt does not support it!\n");
477 #ifdef SB_POSIXACL
478                 sb->s_flags &= ~SB_POSIXACL;
479 #endif
480                 clear_bit(LL_SBI_ACL, sbi->ll_flags);
481         }
482
483         if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
484                 set_bit(LL_SBI_64BIT_HASH, sbi->ll_flags);
485
486         if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK)
487                 set_bit(LL_SBI_LAYOUT_LOCK, sbi->ll_flags);
488
489         if (obd_connect_has_secctx(data))
490                 set_bit(LL_SBI_FILE_SECCTX, sbi->ll_flags);
491
492         if (ll_sbi_has_encrypt(sbi) && !obd_connect_has_enc(data)) {
493                 if (ll_sb_has_test_dummy_encryption(sb))
494                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
495                                       sbi->ll_fsname,
496                                       sbi->ll_md_exp->exp_obd->obd_name);
497                 ll_sbi_set_encrypt(sbi, false);
498         }
499
500         if (ll_sbi_has_name_encrypt(sbi) && !obd_connect_has_name_enc(data)) {
501                 struct  lustre_sb_info *lsi = s2lsi(sb);
502
503                 if (ll_sb_has_test_dummy_encryption(sb))
504                         LCONSOLE_WARN("%s: server %s does not support name encryption, not using it.\n",
505                                       sbi->ll_fsname,
506                                       sbi->ll_md_exp->exp_obd->obd_name);
507                 lsi->lsi_flags &= ~LSI_FILENAME_ENC;
508                 ll_sbi_set_name_encrypt(sbi, false);
509         }
510
511         if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
512                 if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
513                         LCONSOLE_INFO("%s: disabling xattr cache due to "
514                                       "unknown maximum xattr size.\n", dt);
515                 } else if (!sbi->ll_xattr_cache_set) {
516                         /* If xattr_cache was already set (whether 0 or 1)
517                          * while processing the llog, it won't be enabled here. */
518                         set_bit(LL_SBI_XATTR_CACHE, sbi->ll_flags);
519                         sbi->ll_xattr_cache_enabled = 1;
520                 }
521         }
522
523         sbi->ll_dt_obd = class_name2obd(dt);
524         if (!sbi->ll_dt_obd) {
525                 CERROR("DT %s: not setup or attached\n", dt);
526                 GOTO(out_md_fid, err = -ENODEV);
527         }
528
529         /* pass the client page size via ocd_grant_blkbits; the server will
530          * report back its backend blocksize for grant calculation purposes */
531         data->ocd_grant_blkbits = PAGE_SHIFT;
532
533         /* indicate OST features supported by this client */
534         data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
535                                   OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
536                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
537                                   OBD_CONNECT_SRVLOCK |
538                                   OBD_CONNECT_AT | OBD_CONNECT_OSS_CAPA |
539                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
540                                   OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
541                                   OBD_CONNECT_EINPROGRESS |
542                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
543                                   OBD_CONNECT_LAYOUTLOCK |
544                                   OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK |
545                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_SHORTIO |
546                                   OBD_CONNECT_FLAGS2 | OBD_CONNECT_GRANT_SHRINK;
547         data->ocd_connect_flags2 = OBD_CONNECT2_LOCKAHEAD |
548                                    OBD_CONNECT2_INC_XID | OBD_CONNECT2_LSEEK |
549                                    OBD_CONNECT2_REP_MBITS;
550
551         if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_GRANT_PARAM))
552                 data->ocd_connect_flags |= OBD_CONNECT_GRANT_PARAM;
553
554         /* OBD_CONNECT_CKSUM should always be set, even if checksums are
555          * disabled by default, because it can still be enabled on the
556          * fly via /sys. As a consequence, we still need to come to an
557          * agreement on the supported algorithms at connect time
558          */
559         data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
560
561         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
562                 data->ocd_cksum_types = OBD_CKSUM_ADLER;
563         else
564                 data->ocd_cksum_types = obd_cksum_types_supported_client();
565
566 #ifdef HAVE_LRU_RESIZE_SUPPORT
567         data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
568 #endif
569         /* always ping even if server suppress_pings */
570         if (test_bit(LL_SBI_ALWAYS_PING, sbi->ll_flags))
571                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
572
573         if (ll_sbi_has_encrypt(sbi))
574                 obd_connect_set_enc(data);
575
576         CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d "
577                "ocd_grant: %d\n", data->ocd_connect_flags,
578                data->ocd_version, data->ocd_grant);
579
580         sbi->ll_dt_obd->obd_upcall.onu_owner = &sbi->ll_lco;
581         sbi->ll_dt_obd->obd_upcall.onu_upcall = cl_ocd_update;
582
583         data->ocd_brw_size = DT_MAX_BRW_SIZE;
584
585         err = obd_connect(NULL, &sbi->ll_dt_exp, sbi->ll_dt_obd,
586                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
587         if (err == -EBUSY) {
588                 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
589                                    "recovery, of which this client is not a "
590                                    "part.  Please wait for recovery to "
591                                    "complete, abort, or time out.\n", dt);
592                 GOTO(out_md, err);
593         } else if (err) {
594                 CERROR("%s: Cannot connect to %s: rc = %d\n",
595                        sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
596                 GOTO(out_md, err);
597         }
598
599         if (ll_sbi_has_encrypt(sbi) &&
600             !obd_connect_has_enc(&sbi->ll_dt_obd->u.lov.lov_ocd)) {
601                 if (ll_sb_has_test_dummy_encryption(sb))
602                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
603                                       sbi->ll_fsname, dt);
604                 ll_sbi_set_encrypt(sbi, false);
605         } else if (ll_sb_has_test_dummy_encryption(sb)) {
606                 LCONSOLE_WARN("Test dummy encryption mode enabled\n");
607         }
608
609         sbi->ll_dt_exp->exp_connect_data = *data;
610
611         /* Don't change value if it was specified in the config log */
612         if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages == -1) {
613                 sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
614                         max_t(unsigned long, SBI_DEFAULT_READ_AHEAD_WHOLE_MAX,
615                               (data->ocd_brw_size >> PAGE_SHIFT));
616                 if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages >
617                     sbi->ll_ra_info.ra_max_pages_per_file)
618                         sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
619                                 sbi->ll_ra_info.ra_max_pages_per_file;
620         }
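        /*
         * Illustrative: with a 4 MiB ocd_brw_size and 4 KiB pages the block
         * above starts the whole-file readahead limit at
         * max(SBI_DEFAULT_READ_AHEAD_WHOLE_MAX, 1024 pages), then clamps it
         * to ra_max_pages_per_file.
         */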
621
622         err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
623                            LUSTRE_SEQ_METADATA);
624         if (err) {
625                 CERROR("%s: Can't init data layer FID infrastructure, "
626                        "rc = %d\n", sbi->ll_dt_exp->exp_obd->obd_name, err);
627                 GOTO(out_dt, err);
628         }
629
630         mutex_lock(&sbi->ll_lco.lco_lock);
631         sbi->ll_lco.lco_flags = data->ocd_connect_flags;
632         sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
633         sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
634         mutex_unlock(&sbi->ll_lco.lco_lock);
635
636         fid_zero(&sbi->ll_root_fid);
637         err = md_get_root(sbi->ll_md_exp, get_mount_fileset(sb),
638                            &sbi->ll_root_fid);
639         if (err) {
640                 CERROR("cannot mds_connect: rc = %d\n", err);
641                 GOTO(out_lock_cn_cb, err);
642         }
643         if (!fid_is_sane(&sbi->ll_root_fid)) {
644                 CERROR("%s: Invalid root fid "DFID" during mount\n",
645                        sbi->ll_md_exp->exp_obd->obd_name,
646                        PFID(&sbi->ll_root_fid));
647                 GOTO(out_lock_cn_cb, err = -EINVAL);
648         }
649         CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
650
651         sb->s_op = &lustre_super_operations;
652         sb->s_xattr = ll_xattr_handlers;
653 #if THREAD_SIZE >= 8192 /*b=17630*/
654         sb->s_export_op = &lustre_export_operations;
655 #endif
656 #ifdef HAVE_LUSTRE_CRYPTO
657         llcrypt_set_ops(sb, &lustre_cryptops);
658 #endif
659
660         /* make root inode
661          * XXX: move this to after cbd setup? */
662         valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE |
663                 OBD_MD_ENCCTX;
664         if (test_bit(LL_SBI_ACL, sbi->ll_flags))
665                 valid |= OBD_MD_FLACL;
666
667         OBD_ALLOC_PTR(op_data);
668         if (op_data == NULL)
669                 GOTO(out_lock_cn_cb, err = -ENOMEM);
670
671         op_data->op_fid1 = sbi->ll_root_fid;
672         op_data->op_mode = 0;
673         op_data->op_valid = valid;
674
675         err = md_getattr(sbi->ll_md_exp, op_data, &request);
676
677         /* We need enc ctx info, so reset it in op_data to
678          * prevent it from being freed.
679          */
680         encctx = op_data->op_file_encctx;
681         encctxlen = op_data->op_file_encctx_size;
682         op_data->op_file_encctx = NULL;
683         op_data->op_file_encctx_size = 0;
684         OBD_FREE_PTR(op_data);
685         if (err) {
686                 CERROR("%s: md_getattr failed for root: rc = %d\n",
687                        sbi->ll_md_exp->exp_obd->obd_name, err);
688                 GOTO(out_lock_cn_cb, err);
689         }
690
691         err = md_get_lustre_md(sbi->ll_md_exp, &request->rq_pill,
692                                sbi->ll_dt_exp, sbi->ll_md_exp, &lmd);
693         if (err) {
694                 CERROR("failed to understand root inode md: rc = %d\n", err);
695                 ptlrpc_req_finished(request);
696                 GOTO(out_lock_cn_cb, err);
697         }
698
699         LASSERT(fid_is_sane(&sbi->ll_root_fid));
700         api32 = test_bit(LL_SBI_32BIT_API, sbi->ll_flags);
701         root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid, api32), &lmd);
702         md_free_lustre_md(sbi->ll_md_exp, &lmd);
703
704         if (IS_ERR(root)) {
705                 lmd_clear_acl(&lmd);
706                 err = IS_ERR(root) ? PTR_ERR(root) : -EBADF;
707                 root = NULL;
708                 CERROR("%s: bad ll_iget() for root: rc = %d\n",
709                        sbi->ll_fsname, err);
710                 ptlrpc_req_finished(request);
711                 GOTO(out_root, err);
712         }
713
714         if (encctxlen) {
715                 CDEBUG(D_SEC,
716                        "server returned encryption ctx for root inode "DFID"\n",
717                        PFID(&sbi->ll_root_fid));
718                 err = ll_set_encflags(root, encctx, encctxlen, true);
719                 if (err)
720                         CWARN("%s: cannot set enc ctx for "DFID": rc = %d\n",
721                               sbi->ll_fsname,
722                               PFID(&sbi->ll_root_fid), err);
723         }
724         ptlrpc_req_finished(request);
725
726         checksum = test_bit(LL_SBI_CHECKSUM, sbi->ll_flags);
727         if (sbi->ll_checksum_set) {
728                 err = obd_set_info_async(NULL, sbi->ll_dt_exp,
729                                          sizeof(KEY_CHECKSUM), KEY_CHECKSUM,
730                                          sizeof(checksum), &checksum, NULL);
731                 if (err) {
732                         CERROR("%s: Set checksum failed: rc = %d\n",
733                                sbi->ll_dt_exp->exp_obd->obd_name, err);
734                         GOTO(out_root, err);
735                 }
736         }
737         cl_sb_init(sb);
738
739         sb->s_root = d_make_root(root);
740         if (sb->s_root == NULL) {
741                 err = -ENOMEM;
742                 CERROR("%s: can't make root dentry: rc = %d\n",
743                        sbi->ll_fsname, err);
744                 GOTO(out_root, err);
745         }
746
747         sbi->ll_sdev_orig = sb->s_dev;
748
749         /* We set sb->s_dev to the same value on all Lustre clients in order
750          * to support NFS export clustering.  NFSD requires that the FSID be
751          * the same on all clients. */
752         /* s_dev is also used in lt_compare() to compare two filesystems,
753          * but that is only a node-local comparison. */
754         uuid = obd_get_uuid(sbi->ll_md_exp);
755         if (uuid != NULL)
756                 sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
757
758         if (data != NULL)
759                 OBD_FREE_PTR(data);
760         if (osfs != NULL)
761                 OBD_FREE_PTR(osfs);
762
763         if (sbi->ll_dt_obd) {
764                 err = sysfs_create_link(&sbi->ll_kset.kobj,
765                                         &sbi->ll_dt_obd->obd_kset.kobj,
766                                         sbi->ll_dt_obd->obd_type->typ_name);
767                 if (err < 0) {
768                         CERROR("%s: could not register %s in llite: rc = %d\n",
769                                dt, sbi->ll_fsname, err);
770                         err = 0;
771                 }
772         }
773
774         if (sbi->ll_md_obd) {
775                 err = sysfs_create_link(&sbi->ll_kset.kobj,
776                                         &sbi->ll_md_obd->obd_kset.kobj,
777                                         sbi->ll_md_obd->obd_type->typ_name);
778                 if (err < 0) {
779                         CERROR("%s: could not register %s in llite: rc = %d\n",
780                                md, sbi->ll_fsname, err);
781                         err = 0;
782                 }
783         }
784
785         RETURN(err);
786 out_root:
787         iput(root);
788 out_lock_cn_cb:
789         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
790 out_dt:
791         obd_disconnect(sbi->ll_dt_exp);
792         sbi->ll_dt_exp = NULL;
793         sbi->ll_dt_obd = NULL;
794 out_md_fid:
795         obd_fid_fini(sbi->ll_md_exp->exp_obd);
796 out_md:
797         obd_disconnect(sbi->ll_md_exp);
798         sbi->ll_md_exp = NULL;
799         sbi->ll_md_obd = NULL;
800 out:
801         if (data != NULL)
802                 OBD_FREE_PTR(data);
803         if (osfs != NULL)
804                 OBD_FREE_PTR(osfs);
805         return err;
806 }
807
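/*
 * Query the maximum LOV EA size from the data export, then the maximum
 * LMV EA size from the metadata export; on success *lmmsize holds the
 * value reported by the metadata export, and both values are logged at
 * D_INFO.
 */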
808 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
809 {
810         int size, rc;
811
812         size = sizeof(*lmmsize);
813         rc = obd_get_info(NULL, sbi->ll_dt_exp, sizeof(KEY_MAX_EASIZE),
814                           KEY_MAX_EASIZE, &size, lmmsize);
815         if (rc != 0) {
816                 CERROR("%s: cannot get max LOV EA size: rc = %d\n",
817                        sbi->ll_dt_exp->exp_obd->obd_name, rc);
818                 RETURN(rc);
819         }
820
821         CDEBUG(D_INFO, "max LOV ea size: %d\n", *lmmsize);
822
823         size = sizeof(int);
824         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
825                           KEY_MAX_EASIZE, &size, lmmsize);
826         if (rc)
827                 CERROR("Get max mdsize error rc %d\n", rc);
828
829         CDEBUG(D_INFO, "max LMV ea size: %d\n", *lmmsize);
830
831         RETURN(rc);
832 }
833
834 /**
835  * Get the value of the default_easize parameter.
836  *
837  * \see client_obd::cl_default_mds_easize
838  *
839  * \param[in] sbi       superblock info for this filesystem
840  * \param[out] lmmsize  pointer to storage location for value
841  *
842  * \retval 0            on success
843  * \retval negative     negated errno on failure
844  */
845 int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
846 {
847         int size, rc;
848
849         size = sizeof(int);
850         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
851                          KEY_DEFAULT_EASIZE, &size, lmmsize);
852         if (rc)
853                 CERROR("Get default mdsize error rc %d\n", rc);
854
855         RETURN(rc);
856 }
857
858 /**
859  * Set the default_easize parameter to the given value.
860  *
861  * \see client_obd::cl_default_mds_easize
862  *
863  * \param[in] sbi       superblock info for this filesystem
864  * \param[in] lmmsize   the size to set
865  *
866  * \retval 0            on success
867  * \retval negative     negated errno on failure
868  */
869 int ll_set_default_mdsize(struct ll_sb_info *sbi, int lmmsize)
870 {
871         int rc;
872
873         if (lmmsize < sizeof(struct lov_mds_md) ||
874             lmmsize > OBD_MAX_DEFAULT_EA_SIZE)
875                 return -EINVAL;
876
877         rc = obd_set_info_async(NULL, sbi->ll_md_exp,
878                                 sizeof(KEY_DEFAULT_EASIZE), KEY_DEFAULT_EASIZE,
879                                 sizeof(int), &lmmsize, NULL);
880
881         RETURN(rc);
882 }
883
884 static void client_common_put_super(struct super_block *sb)
885 {
886         struct ll_sb_info *sbi = ll_s2sbi(sb);
887         ENTRY;
888
889         cl_sb_fini(sb);
890
891         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
892         obd_disconnect(sbi->ll_dt_exp);
893         sbi->ll_dt_exp = NULL;
894
895         ll_debugfs_unregister_super(sb);
896
897         obd_fid_fini(sbi->ll_md_exp->exp_obd);
898         obd_disconnect(sbi->ll_md_exp);
899         sbi->ll_md_exp = NULL;
900
901         EXIT;
902 }
903
904 void ll_kill_super(struct super_block *sb)
905 {
906         struct ll_sb_info *sbi;
907         ENTRY;
908
909         /* sb not initialized? */
910         if (!(sb->s_flags & SB_ACTIVE))
911                 return;
912
913         sbi = ll_s2sbi(sb);
914         /* we need to restore s_dev, which was changed for clustered NFS,
915          * before put_super, because newer kernels cache s_dev and changing
916          * sb->s_dev in put_super does not affect the real device removal */
917         if (sbi) {
918                 sb->s_dev = sbi->ll_sdev_orig;
919
920                 /* wait for running statahead threads to quit */
921                 while (atomic_read(&sbi->ll_sa_running) > 0)
922                         schedule_timeout_uninterruptible(
923                                 cfs_time_seconds(1) >> 3);
924         }
925
926         EXIT;
927 }
928
929 /* Since this table is used by ll_sbi_flags_seq_show(), make
930  * sure that the string you want displayed for a token that is
931  * listed more than once below appears first.  For example, we
932  * want "checksum" displayed, not "nochecksum", for the
933  * sbi_flags.
934  */
935 static const match_table_t ll_sbi_flags_name = {
936         {LL_SBI_NOLCK,                  "nolock"},
937         {LL_SBI_CHECKSUM,               "checksum"},
938         {LL_SBI_CHECKSUM,               "nochecksum"},
939         {LL_SBI_LOCALFLOCK,             "localflock"},
940         {LL_SBI_FLOCK,                  "flock"},
941         {LL_SBI_FLOCK,                  "noflock"},
942         {LL_SBI_USER_XATTR,             "user_xattr"},
943         {LL_SBI_USER_XATTR,             "nouser_xattr"},
944         {LL_SBI_LRU_RESIZE,             "lruresize"},
945         {LL_SBI_LRU_RESIZE,             "nolruresize"},
946         {LL_SBI_LAZYSTATFS,             "lazystatfs"},
947         {LL_SBI_LAZYSTATFS,             "nolazystatfs"},
948         {LL_SBI_32BIT_API,              "32bitapi"},
949         {LL_SBI_USER_FID2PATH,          "user_fid2path"},
950         {LL_SBI_USER_FID2PATH,          "nouser_fid2path"},
951         {LL_SBI_VERBOSE,                "verbose"},
952         {LL_SBI_VERBOSE,                "noverbose"},
953         {LL_SBI_ALWAYS_PING,            "always_ping"},
954         {LL_SBI_TEST_DUMMY_ENCRYPTION,  "test_dummy_encryption=%s"},
955         {LL_SBI_TEST_DUMMY_ENCRYPTION,  "test_dummy_encryption"},
956         {LL_SBI_ENCRYPT,                "encrypt"},
957         {LL_SBI_ENCRYPT,                "noencrypt"},
958         {LL_SBI_FOREIGN_SYMLINK,        "foreign_symlink=%s"},
959         {LL_SBI_NUM_MOUNT_OPT,          NULL},
960
961         {LL_SBI_ACL,                    "acl"},
962         {LL_SBI_AGL_ENABLED,            "agl"},
963         {LL_SBI_64BIT_HASH,             "64bit_hash"},
964         {LL_SBI_LAYOUT_LOCK,            "layout"},
965         {LL_SBI_XATTR_CACHE,            "xattr_cache"},
966         {LL_SBI_NOROOTSQUASH,           "norootsquash"},
967         {LL_SBI_FAST_READ,              "fast_read"},
968         {LL_SBI_FILE_SECCTX,            "file_secctx"},
969         {LL_SBI_TINY_WRITE,             "tiny_write"},
970         {LL_SBI_FILE_HEAT,              "file_heat"},
971         {LL_SBI_PARALLEL_DIO,           "parallel_dio"},
972         {LL_SBI_ENCRYPT_NAME,           "name_encrypt"},
973 };
974
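/*
 * Print the mount-option names for the bits currently set in ll_flags
 * as a space-separated list; only the first ll_sbi_flags_name entry
 * matching a token is printed (see the note above the table).
 */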
975 int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
976 {
977         struct super_block *sb = m->private;
978         int i;
979
980         for (i = 0; i < LL_SBI_NUM_FLAGS; i++) {
981                 int j;
982
983                 if (!test_bit(i, ll_s2sbi(sb)->ll_flags))
984                         continue;
985
986                 for (j = 0; j < ARRAY_SIZE(ll_sbi_flags_name); j++) {
987                         if (ll_sbi_flags_name[j].token == i &&
988                             ll_sbi_flags_name[j].pattern) {
989                                 seq_printf(m, "%s ",
990                                            ll_sbi_flags_name[j].pattern);
991                                 break;
992                         }
993                 }
994         }
995         seq_puts(m, "\b\n");
996         return 0;
997 }
998
999 /* non-client-specific mount options are parsed in lmd_parse */
1000 static int ll_options(char *options, struct super_block *sb)
1001 {
1002         struct ll_sb_info *sbi = ll_s2sbi(sb);
1003         char *s2, *s1, *opts;
1004         int err = 0;
1005
1006         ENTRY;
1007         if (!options)
1008                 RETURN(0);
1009
1010         /* Don't stomp on lmd_opts */
1011         opts = kstrdup(options, GFP_KERNEL);
1012         if (!opts)
1013                 RETURN(-ENOMEM);
1014         s1 = opts;
1015         s2 = opts;
1016
1017         CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
1018
1019         while ((s1 = strsep(&opts, ",")) != NULL) {
1020                 substring_t args[MAX_OPT_ARGS];
1021                 bool turn_off = false;
1022                 int token;
1023
1024                 if (!*s1)
1025                         continue;
1026
1027                 CDEBUG(D_SUPER, "next opt=%s\n", s1);
1028
1029                 if (strncmp(s1, "no", 2) == 0)
1030                         turn_off = true;
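                /* e.g. "nochecksum" and "checksum" map to the same token in
                 * ll_sbi_flags_name; turn_off decides set vs. clear below */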
1031
1032                 /*
1033                  * Initialize args struct so we know whether arg was
1034                  * found; some options take optional arguments.
1035                  */
1036                 args[0].to = NULL;
1037                 args[0].from = NULL;
1038                 token = match_token(s1, ll_sbi_flags_name, args);
1039                 if (token == LL_SBI_NUM_MOUNT_OPT) {
1040                         if (match_wildcard("context", s1) ||
1041                             match_wildcard("fscontext", s1) ||
1042                             match_wildcard("defcontext", s1) ||
1043                             match_wildcard("rootcontext", s1))
1044                                 continue;
1045
1046                         LCONSOLE_ERROR_MSG(0x152,
1047                                            "Unknown option '%s', won't mount.\n",
1048                                            s1);
1049                         RETURN(-EINVAL);
1050                 }
1051
1052                 switch (token) {
1053                 case LL_SBI_NOLCK:
1054                 case LL_SBI_32BIT_API:
1055                 case LL_SBI_64BIT_HASH:
1056                 case LL_SBI_ALWAYS_PING:
1057                         set_bit(token, sbi->ll_flags);
1058                         break;
1059
1060                 case LL_SBI_FLOCK:
1061                         clear_bit(LL_SBI_LOCALFLOCK, sbi->ll_flags);
1062                         if (turn_off)
1063                                 clear_bit(LL_SBI_FLOCK, sbi->ll_flags);
1064                         else
1065                                 set_bit(token, sbi->ll_flags);
1066                         break;
1067
1068                 case LL_SBI_LOCALFLOCK:
1069                         clear_bit(LL_SBI_FLOCK, sbi->ll_flags);
1070                         set_bit(token, sbi->ll_flags);
1071                         break;
1072
1073                 case LL_SBI_CHECKSUM:
1074                         sbi->ll_checksum_set = 1;
1075                         fallthrough;
1076                 case LL_SBI_USER_XATTR:
1077                 case LL_SBI_USER_FID2PATH:
1078                 case LL_SBI_LRU_RESIZE:
1079                 case LL_SBI_LAZYSTATFS:
1080                 case LL_SBI_VERBOSE:
1081                         if (turn_off)
1082                                 clear_bit(token, sbi->ll_flags);
1083                         else
1084                                 set_bit(token, sbi->ll_flags);
1085                         break;
1086                 case LL_SBI_TEST_DUMMY_ENCRYPTION: {
1087 #ifdef HAVE_LUSTRE_CRYPTO
1088 #ifdef HAVE_FSCRYPT_DUMMY_CONTEXT_ENABLED
1089                         set_bit(token, sbi->ll_flags);
1090 #else
1091                         struct lustre_sb_info *lsi = s2lsi(sb);
1092
1093                         err = llcrypt_set_test_dummy_encryption(sb, &args[0],
1094                                                                 &lsi->lsi_dummy_enc_ctx);
1095                         if (!err)
1096                                 break;
1097
1098                         if (err == -EEXIST)
1099                                 LCONSOLE_WARN(
1100                                          "Can't change test_dummy_encryption");
1101                         else if (err == -EINVAL)
1102                                 LCONSOLE_WARN(
1103                                         "Value of option \"%s\" unrecognized",
1104                                         options);
1105                         else
1106                                 LCONSOLE_WARN(
1107                                          "Error processing option \"%s\" [%d]",
1108                                          options, err);
1109                         err = -1;
1110 #endif
1111 #else
1112                         LCONSOLE_WARN("Test dummy encryption mount option ignored: encryption not supported\n");
1113 #endif
1114                         break;
1115                 }
1116                 case LL_SBI_ENCRYPT:
1117 #ifdef HAVE_LUSTRE_CRYPTO
1118                         if (turn_off)
1119                                 clear_bit(token, sbi->ll_flags);
1120                         else
1121                                 set_bit(token, sbi->ll_flags);
1122 #else
1123                         LCONSOLE_WARN("noencrypt or encrypt mount option ignored: encryption not supported\n");
1124 #endif
1125                         break;
1126                 case LL_SBI_FOREIGN_SYMLINK:
1127                         /* non-default prefix provided ? */
1128                         if (args->from) {
1129                                 size_t old_len;
1130                                 char *old;
1131
1132                                 /* path must be absolute */
1133                                 if (args->from[0] != '/') {
1134                                         LCONSOLE_ERROR_MSG(0x152,
1135                                                            "foreign prefix '%s' must be an absolute path\n",
1136                                                            args->from);
1137                                         RETURN(-EINVAL);
1138                                 }
1139
1140                                 old_len = sbi->ll_foreign_symlink_prefix_size;
1141                                 old = sbi->ll_foreign_symlink_prefix;
1142                                 /* alloc for path length and '\0' */
1143                                 sbi->ll_foreign_symlink_prefix = match_strdup(args);
1144                                 if (!sbi->ll_foreign_symlink_prefix) {
1145                                         /* restore previous */
1146                                         sbi->ll_foreign_symlink_prefix = old;
1147                                         sbi->ll_foreign_symlink_prefix_size =
1148                                                 old_len;
1149                                         RETURN(-ENOMEM);
1150                                 }
1151                                 sbi->ll_foreign_symlink_prefix_size =
1152                                         args->to - args->from + 1;
1153                                 OBD_ALLOC_POST(sbi->ll_foreign_symlink_prefix,
1154                                                sbi->ll_foreign_symlink_prefix_size,
1155                                                "kmalloced");
1156                                 if (old)
1157                                         OBD_FREE(old, old_len);
1158
1159                                 /* enable foreign symlink support */
1160                                 set_bit(token, sbi->ll_flags);
1161                         } else {
1162                                 LCONSOLE_ERROR_MSG(0x152,
1163                                                    "invalid %s option\n", s1);
1164                         }
1165                 fallthrough;
1166                 default:
1167                         break;
1168                 }
1169         }
1170         kfree(opts);
1171         RETURN(err);
1172 }
1173
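/*
 * Initialize the llite-private part of a freshly allocated inode;
 * directory-specific fields (statahead) and non-directory fields
 * (I/O, file heat, PCC) are set up in the two branches below.
 */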
1174 void ll_lli_init(struct ll_inode_info *lli)
1175 {
1176         lli->lli_inode_magic = LLI_INODE_MAGIC;
1177         lli->lli_flags = 0;
1178         rwlock_init(&lli->lli_lock);
1179         lli->lli_posix_acl = NULL;
1180         /* Do not set lli_fid, it has been initialized already. */
1181         fid_zero(&lli->lli_pfid);
1182         lli->lli_mds_read_och = NULL;
1183         lli->lli_mds_write_och = NULL;
1184         lli->lli_mds_exec_och = NULL;
1185         lli->lli_open_fd_read_count = 0;
1186         lli->lli_open_fd_write_count = 0;
1187         lli->lli_open_fd_exec_count = 0;
1188         mutex_init(&lli->lli_och_mutex);
1189         spin_lock_init(&lli->lli_agl_lock);
1190         spin_lock_init(&lli->lli_layout_lock);
1191         ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
1192         lli->lli_clob = NULL;
1193
1194         init_rwsem(&lli->lli_xattrs_list_rwsem);
1195         mutex_init(&lli->lli_xattrs_enq_lock);
1196
1197         LASSERT(lli->lli_vfs_inode.i_mode != 0);
1198         if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
1199                 lli->lli_opendir_key = NULL;
1200                 lli->lli_sai = NULL;
1201                 spin_lock_init(&lli->lli_sa_lock);
1202                 lli->lli_opendir_pid = 0;
1203                 lli->lli_sa_enabled = 0;
1204                 init_rwsem(&lli->lli_lsm_sem);
1205         } else {
1206                 mutex_init(&lli->lli_size_mutex);
1207                 mutex_init(&lli->lli_setattr_mutex);
1208                 lli->lli_symlink_name = NULL;
1209                 ll_trunc_sem_init(&lli->lli_trunc_sem);
1210                 range_lock_tree_init(&lli->lli_write_tree);
1211                 init_rwsem(&lli->lli_glimpse_sem);
1212                 lli->lli_glimpse_time = ktime_set(0, 0);
1213                 INIT_LIST_HEAD(&lli->lli_agl_list);
1214                 lli->lli_agl_index = 0;
1215                 lli->lli_async_rc = 0;
1216                 spin_lock_init(&lli->lli_heat_lock);
1217                 obd_heat_clear(lli->lli_heat_instances, OBD_HEAT_COUNT);
1218                 lli->lli_heat_flags = 0;
1219                 mutex_init(&lli->lli_pcc_lock);
1220                 lli->lli_pcc_state = PCC_STATE_FL_NONE;
1221                 lli->lli_pcc_inode = NULL;
1222                 lli->lli_pcc_dsflags = PCC_DATASET_INVALID;
1223                 lli->lli_pcc_generation = 0;
1224                 mutex_init(&lli->lli_group_mutex);
1225                 lli->lli_group_users = 0;
1226                 lli->lli_group_gid = 0;
1227         }
1228         mutex_init(&lli->lli_layout_mutex);
1229         memset(lli->lli_jobid, 0, sizeof(lli->lli_jobid));
1230         /* ll_cl_context initialize */
1231         INIT_LIST_HEAD(&lli->lli_lccs);
1232 }
1233
1234 #define MAX_STRING_SIZE 128
1235
1236 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1237 #ifndef HAVE_BDI_CAP_MAP_COPY
1238 # define BDI_CAP_MAP_COPY       0
1239 #endif
1240
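/*
 * Compatibility shim for kernels that do not provide
 * super_setup_bdi_name(): initialize and register a per-superblock
 * backing_dev_info by hand.
 */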
1241 static int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
1242 {
1243         struct  lustre_sb_info *lsi = s2lsi(sb);
1244         char buf[MAX_STRING_SIZE];
1245         va_list args;
1246         int err;
1247
1248         err = bdi_init(&lsi->lsi_bdi);
1249         if (err)
1250                 return err;
1251
1252         lsi->lsi_flags |= LSI_BDI_INITIALIZED;
1253         lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY;
1254         lsi->lsi_bdi.name = "lustre";
1255         va_start(args, fmt);
1256         vsnprintf(buf, MAX_STRING_SIZE, fmt, args);
1257         va_end(args);
1258         err = bdi_register(&lsi->lsi_bdi, NULL, "%s", buf);
1260         if (!err)
1261                 sb->s_bdi = &lsi->lsi_bdi;
1262
1263         return err;
1264 }
1265 #endif /* !HAVE_SUPER_SETUP_BDI_NAME */
1266
1267 int ll_fill_super(struct super_block *sb)
1268 {
1269         struct  lustre_profile *lprof = NULL;
1270         struct  lustre_sb_info *lsi = s2lsi(sb);
1271         struct  ll_sb_info *sbi = NULL;
1272         char    *dt = NULL, *md = NULL;
1273         char    *profilenm = get_profile_name(sb);
1274         struct config_llog_instance *cfg;
1275         /* %p for void* in printf needs 16+2 characters: 0xffffffffffffffff */
1276         const int instlen = LUSTRE_MAXINSTANCE + 2;
1277         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1278         char name[MAX_STRING_SIZE];
1279         int md_len = 0;
1280         int dt_len = 0;
1281         uuid_t uuid;
1282         char *ptr;
1283         int len;
1284         int err;
1285
1286         ENTRY;
1287         /* for ASLR, to map between cfg_instance and hashed ptr */
1288         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1289                profilenm, cfg_instance, sb);
1290
1291         OBD_RACE(OBD_FAIL_LLITE_RACE_MOUNT);
1292
1293         OBD_ALLOC_PTR(cfg);
1294         if (cfg == NULL)
1295                 GOTO(out_free_cfg, err = -ENOMEM);
1296
1297         /* client additional sb info */
1298         lsi->lsi_llsbi = sbi = ll_init_sbi();
1299         if (IS_ERR(sbi))
1300                 GOTO(out_free_cfg, err = PTR_ERR(sbi));
1301
1302         err = ll_options(lsi->lsi_lmd->lmd_opts, sb);
1303         if (err)
1304                 GOTO(out_free_cfg, err);
1305
1306         if (ll_sb_has_test_dummy_encryption(sb))
1307                 /* enable filename encryption by default for dummy enc mode */
1308                 lsi->lsi_flags |= LSI_FILENAME_ENC;
1309         else
1310                 /* filename encryption is disabled by default */
1311                 lsi->lsi_flags &= ~LSI_FILENAME_ENC;
1312
1313         /* kernels >= 2.6.38 store dentry operations in sb->s_d_op. */
1314         sb->s_d_op = &ll_d_ops;
1315
1316         /* UUID handling */
1317         generate_random_uuid(uuid.b);
1318         snprintf(sbi->ll_sb_uuid.uuid, sizeof(sbi->ll_sb_uuid), "%pU", uuid.b);
1319
1320         CDEBUG(D_CONFIG, "llite sb uuid: %s\n", sbi->ll_sb_uuid.uuid);
1321
1322         /* Get fsname */
1323         len = strlen(profilenm);
1324         ptr = strrchr(profilenm, '-');
1325         if (ptr && (strcmp(ptr, "-client") == 0))
1326                 len -= 7;
1327
1328         if (len > LUSTRE_MAXFSNAME) {
1329                 if (unlikely(len >= MAX_STRING_SIZE))
1330                         len = MAX_STRING_SIZE - 1;
1331                 strncpy(name, profilenm, len);
1332                 name[len] = '\0';
1333                 err = -ENAMETOOLONG;
1334                 CERROR("%s: fsname longer than %u characters: rc = %d\n",
1335                        name, LUSTRE_MAXFSNAME, err);
1336                 GOTO(out_free_cfg, err);
1337         }
1338         strncpy(sbi->ll_fsname, profilenm, len);
1339         sbi->ll_fsname[len] = '\0';
1340
1341         /* Mount info */
1342         snprintf(name, sizeof(name), "%.*s-%016lx", len,
1343                  profilenm, cfg_instance);
1344
1345         err = super_setup_bdi_name(sb, "%s", name);
1346         if (err)
1347                 GOTO(out_free_cfg, err);
1348
1349         /* disable kernel readahead */
1350         sb->s_bdi->ra_pages = 0;
1351 #ifdef HAVE_BDI_IO_PAGES
1352         sb->s_bdi->io_pages = 0;
1353 #endif
1354
1355         /* Call ll_debugfs_register_super() before lustre_process_log()
1356          * so that "llite.*.*" params can be processed correctly.
1357          */
1358         err = ll_debugfs_register_super(sb, name);
1359         if (err < 0) {
1360                 CERROR("%s: could not register mountpoint in llite: rc = %d\n",
1361                        sbi->ll_fsname, err);
1362                 err = 0;
1363         }
1364
1365         /* The cfg_instance is a value unique to this super, in case some
1366          * joker tries to mount the same fs at two mount points.
1367          */
1368         cfg->cfg_instance = cfg_instance;
1369         cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
1370         cfg->cfg_callback = class_config_llog_handler;
1371         cfg->cfg_sub_clds = CONFIG_SUB_CLIENT;
1372         /* set up client obds */
1373         err = lustre_process_log(sb, profilenm, cfg);
1374         if (err < 0)
1375                 GOTO(out_debugfs, err);
1376
1377         /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
1378         lprof = class_get_profile(profilenm);
1379         if (lprof == NULL) {
1380                 LCONSOLE_ERROR_MSG(0x156,
1381                                    "The client profile '%s' could not be read from the MGS.  Does that filesystem exist?\n",
1382                                    profilenm);
1383                 GOTO(out_debugfs, err = -EINVAL);
1384         }
1385         CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
1386                lprof->lp_md, lprof->lp_dt);
1387
1388         dt_len = strlen(lprof->lp_dt) + instlen + 2;
1389         OBD_ALLOC(dt, dt_len);
1390         if (!dt)
1391                 GOTO(out_profile, err = -ENOMEM);
1392         snprintf(dt, dt_len - 1, "%s-%016lx", lprof->lp_dt, cfg_instance);
1393
1394         md_len = strlen(lprof->lp_md) + instlen + 2;
1395         OBD_ALLOC(md, md_len);
1396         if (!md)
1397                 GOTO(out_free_dt, err = -ENOMEM);
1398         snprintf(md, md_len - 1, "%s-%016lx", lprof->lp_md, cfg_instance);
1399
1400         /* connections, registrations, sb setup */
1401         err = client_common_fill_super(sb, md, dt);
1402         if (err < 0)
1403                 GOTO(out_free_md, err);
1404
1405         sbi->ll_client_common_fill_super_succeeded = 1;
1406
1407 out_free_md:
1408         if (md)
1409                 OBD_FREE(md, md_len);
1410 out_free_dt:
1411         if (dt)
1412                 OBD_FREE(dt, dt_len);
1413 out_profile:
1414         if (lprof)
1415                 class_put_profile(lprof);
1416 out_debugfs:
1417         if (err < 0)
1418                 ll_debugfs_unregister_super(sb);
1419 out_free_cfg:
1420         if (cfg)
1421                 OBD_FREE_PTR(cfg);
1422
1423         if (err)
1424                 ll_put_super(sb);
1425         else if (test_bit(LL_SBI_VERBOSE, sbi->ll_flags))
1426                 LCONSOLE_WARN("Mounted %s\n", profilenm);
1427         RETURN(err);
1428 } /* ll_fill_super */
1429
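/*
 * ll_put_super(): tear down a client mount. Stops config log processing,
 * waits for unstable pages unless the unmount is forced, cleans up the
 * obd devices in this superblock's group, and frees the ll_sb_info.
 */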
1430 void ll_put_super(struct super_block *sb)
1431 {
1432         struct config_llog_instance cfg, params_cfg;
1433         struct obd_device *obd;
1434         struct lustre_sb_info *lsi = s2lsi(sb);
1435         struct ll_sb_info *sbi = ll_s2sbi(sb);
1436         char *profilenm = get_profile_name(sb);
1437         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1438         long ccc_count;
1439         int next, force = 1, rc = 0;
1440         ENTRY;
1441
1442         if (IS_ERR(sbi))
1443                 GOTO(out_no_sbi, 0);
1444
1445         /* Should replace instance_id with something better for ASLR */
1446         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1447                profilenm, cfg_instance, sb);
1448
1449         cfg.cfg_instance = cfg_instance;
1450         lustre_end_log(sb, profilenm, &cfg);
1451
1452         params_cfg.cfg_instance = cfg_instance;
1453         lustre_end_log(sb, PARAMS_FILENAME, &params_cfg);
1454
1455         if (sbi->ll_md_exp) {
1456                 obd = class_exp2obd(sbi->ll_md_exp);
1457                 if (obd)
1458                         force = obd->obd_force;
1459         }
1460
1461         /* Wait for unstable pages to be committed to stable storage */
1462         if (force == 0) {
1463                 rc = l_wait_event_abortable(
1464                         sbi->ll_cache->ccc_unstable_waitq,
1465                         atomic_long_read(&sbi->ll_cache->ccc_unstable_nr) == 0);
1466         }
1467
1468         ccc_count = atomic_long_read(&sbi->ll_cache->ccc_unstable_nr);
1469         if (force == 0 && rc != -ERESTARTSYS)
1470                 LASSERTF(ccc_count == 0, "count: %li\n", ccc_count);
1471
1472         /* We need to set force before the lov_disconnect in
1473          * lustre_common_put_super, since lov_disconnect cleans up OSCs too.
1474          */
1475         if (force) {
1476                 next = 0;
1477                 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
1478                                                      &next)) != NULL) {
1479                         obd->obd_force = force;
1480                 }
1481         }
1482
1483         if (sbi->ll_client_common_fill_super_succeeded) {
1484                 /* Only if client_common_fill_super succeeded */
1485                 client_common_put_super(sb);
1486         }
1487
1488         /* imitate failed cleanup */
1489         if (OBD_FAIL_CHECK(OBD_FAIL_OBD_CLEANUP))
1490                 goto skip_cleanup;
1491
1492         next = 0;
1493         while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)))
1494                 class_manual_cleanup(obd);
1495
1496 skip_cleanup:
1497         if (test_bit(LL_SBI_VERBOSE, sbi->ll_flags))
1498                 LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");
1499
1500         if (profilenm)
1501                 class_del_profile(profilenm);
1502
1503 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1504         if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
1505                 bdi_destroy(&lsi->lsi_bdi);
1506                 lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
1507         }
1508 #endif
1509
1510         llcrypt_free_dummy_context(&lsi->lsi_dummy_enc_ctx);
1511         ll_free_sbi(sb);
1512         lsi->lsi_llsbi = NULL;
1513 out_no_sbi:
1514         lustre_common_put_super(sb);
1515
1516         cl_env_cache_purge(~0);
1517
1518         EXIT;
1519 } /* ll_put_super */
1520
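/*
 * Return an igrab()'ed inode attached to the lock's resource LVB, or NULL
 * if there is none or its magic is invalid; the caller must iput() it.
 */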
1521 struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
1522 {
1523         struct inode *inode = NULL;
1524
1525         /* NOTE: we depend on atomic igrab() -bzzz */
1526         lock_res_and_lock(lock);
1527         if (lock->l_resource->lr_lvb_inode) {
1528                 struct ll_inode_info *lli;
1529                 lli = ll_i2info(lock->l_resource->lr_lvb_inode);
1530                 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1531                         inode = igrab(lock->l_resource->lr_lvb_inode);
1532                 } else {
1533                         inode = lock->l_resource->lr_lvb_inode;
1534                         LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ?  D_INFO :
1535                                          D_WARNING, lock, "lr_lvb_inode %p is "
1536                                          "bogus: magic %08x",
1537                                          lock->l_resource->lr_lvb_inode,
1538                                          lli->lli_inode_magic);
1539                         inode = NULL;
1540                 }
1541         }
1542         unlock_res_and_lock(lock);
1543         return inode;
1544 }
1545
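/* Free both the stripe layout and the default layout cached on a directory. */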
1546 void ll_dir_clear_lsm_md(struct inode *inode)
1547 {
1548         struct ll_inode_info *lli = ll_i2info(inode);
1549
1550         LASSERT(S_ISDIR(inode->i_mode));
1551
1552         if (lli->lli_lsm_md) {
1553                 lmv_free_memmd(lli->lli_lsm_md);
1554                 lli->lli_lsm_md = NULL;
1555         }
1556
1557         if (lli->lli_default_lsm_md) {
1558                 lmv_free_memmd(lli->lli_default_lsm_md);
1559                 lli->lli_default_lsm_md = NULL;
1560         }
1561 }
1562
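/*
 * Get or create the in-core inode for a slave stripe of a striped
 * directory. A new inode is initialized as a directory with its lli_pfid
 * set to the master object FID; for an existing inode only lli_pfid is
 * refreshed.
 */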
1563 static struct inode *ll_iget_anon_dir(struct super_block *sb,
1564                                       const struct lu_fid *fid,
1565                                       struct lustre_md *md)
1566 {
1567         struct ll_sb_info *sbi = ll_s2sbi(sb);
1568         struct ll_inode_info *lli;
1569         struct mdt_body *body = md->body;
1570         struct inode *inode;
1571         ino_t ino;
1572
1573         ENTRY;
1574
1575         LASSERT(md->lmv);
1576         ino = cl_fid_build_ino(fid, test_bit(LL_SBI_32BIT_API, sbi->ll_flags));
1577         inode = iget_locked(sb, ino);
1578         if (inode == NULL) {
1579                 CERROR("%s: failed get simple inode "DFID": rc = -ENOENT\n",
1580                        sbi->ll_fsname, PFID(fid));
1581                 RETURN(ERR_PTR(-ENOENT));
1582         }
1583
1584         lli = ll_i2info(inode);
1585         if (inode->i_state & I_NEW) {
1586                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
1587                                 (body->mbo_mode & S_IFMT);
1588                 LASSERTF(S_ISDIR(inode->i_mode), "Not slave inode "DFID"\n",
1589                          PFID(fid));
1590
1591                 inode->i_mtime.tv_sec = 0;
1592                 inode->i_atime.tv_sec = 0;
1593                 inode->i_ctime.tv_sec = 0;
1594                 inode->i_rdev = 0;
1595
1596 #ifdef HAVE_BACKING_DEV_INFO
1597                 /* initializing backing dev info. */
1598                 inode->i_mapping->backing_dev_info =
1599                                                 &s2lsi(inode->i_sb)->lsi_bdi;
1600 #endif
1601                 inode->i_op = &ll_dir_inode_operations;
1602                 inode->i_fop = &ll_dir_operations;
1603                 lli->lli_fid = *fid;
1604                 ll_lli_init(lli);
1605
1606                 /* master object FID */
1607                 lli->lli_pfid = body->mbo_fid1;
1608                 CDEBUG(D_INODE, "lli %p slave "DFID" master "DFID"\n",
1609                        lli, PFID(fid), PFID(&lli->lli_pfid));
1610                 unlock_new_inode(inode);
1611         } else {
1612                 /* During directory restripe/auto-split, a plain directory
1613                  * may be transformed into a stripe. Set its pfid here, else
1614                  * ll_lock_cancel_bits() can't find the master inode.
1615                  */
1616                 lli->lli_pfid = body->mbo_fid1;
1617         }
1618
1619         RETURN(inode);
1620 }
1621
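/*
 * Attach the LMV stripe layout from @md to the directory inode and, for a
 * striped directory, instantiate the slave stripe inodes.
 */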
1622 static int ll_init_lsm_md(struct inode *inode, struct lustre_md *md)
1623 {
1624         struct lu_fid *fid;
1625         struct lmv_stripe_md *lsm = md->lmv;
1626         struct ll_inode_info *lli = ll_i2info(inode);
1627         int i;
1628
1629         LASSERT(lsm != NULL);
1630
1631         CDEBUG(D_INODE, "%s: "DFID" set dir layout:\n",
1632                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1633         lsm_md_dump(D_INODE, lsm);
1634
1635         if (!lmv_dir_striped(lsm))
1636                 goto out;
1637
1638         /* XXX sigh, this lsm_root initialization should be in the
1639          * LMV layer, but it needs ll_iget, so we keep it
1640          * here for now. */
1641         for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
1642                 fid = &lsm->lsm_md_oinfo[i].lmo_fid;
1643                 LASSERT(lsm->lsm_md_oinfo[i].lmo_root == NULL);
1644
1645                 if (!fid_is_sane(fid))
1646                         continue;
1647
1648                 /* Unfortunately ll_iget will call ll_update_inode,
1649                  * where the initialization of a slave inode is slightly
1650                  * different, so it resets lsm_md to NULL to avoid
1651                  * initializing the lsm for the slave inode. */
1652                 lsm->lsm_md_oinfo[i].lmo_root =
1653                                 ll_iget_anon_dir(inode->i_sb, fid, md);
1654                 if (IS_ERR(lsm->lsm_md_oinfo[i].lmo_root)) {
1655                         int rc = PTR_ERR(lsm->lsm_md_oinfo[i].lmo_root);
1656
1657                         lsm->lsm_md_oinfo[i].lmo_root = NULL;
1658                         while (i-- > 0) {
1659                                 iput(lsm->lsm_md_oinfo[i].lmo_root);
1660                                 lsm->lsm_md_oinfo[i].lmo_root = NULL;
1661                         }
1662                         return rc;
1663                 }
1664         }
1665 out:
1666         lli->lli_lsm_md = lsm;
1667
1668         return 0;
1669 }
1670
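/*
 * Update (or clear) the default LMV layout cached on a directory inode,
 * taking ownership of md->default_lmv; lli_lsm_sem serializes against
 * concurrent updates.
 */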
1671 static void ll_update_default_lsm_md(struct inode *inode, struct lustre_md *md)
1672 {
1673         struct ll_inode_info *lli = ll_i2info(inode);
1674
1675         ENTRY;
1676
1677         if (!md->default_lmv) {
1678                 /* clear default lsm */
1679                 if (lli->lli_default_lsm_md) {
1680                         down_write(&lli->lli_lsm_sem);
1681                         if (lli->lli_default_lsm_md) {
1682                                 lmv_free_memmd(lli->lli_default_lsm_md);
1683                                 lli->lli_default_lsm_md = NULL;
1684                         }
1685                         lli->lli_inherit_depth = 0;
1686                         up_write(&lli->lli_lsm_sem);
1687                 }
1688                 RETURN_EXIT;
1689         }
1690
1691         if (lli->lli_default_lsm_md) {
1692                 /* do nothing if the default lsm isn't changed */
1693                 down_read(&lli->lli_lsm_sem);
1694                 if (lli->lli_default_lsm_md &&
1695                     lsm_md_eq(lli->lli_default_lsm_md, md->default_lmv)) {
1696                         up_read(&lli->lli_lsm_sem);
1697                         RETURN_EXIT;
1698                 }
1699                 up_read(&lli->lli_lsm_sem);
1700         }
1701
1702         down_write(&lli->lli_lsm_sem);
1703         if (lli->lli_default_lsm_md)
1704                 lmv_free_memmd(lli->lli_default_lsm_md);
1705         lli->lli_default_lsm_md = md->default_lmv;
1706         lsm_md_dump(D_INODE, md->default_lmv);
1707         md->default_lmv = NULL;
1708         up_write(&lli->lli_lsm_sem);
1709         RETURN_EXIT;
1710 }
1711
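/*
 * Refresh the directory stripe layout from @md: verify that a changed
 * layout has a newer version, re-initialize the slave stripes, and merge
 * the stripe attributes (size, nlink, times) back into the mdt_body.
 */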
1712 static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
1713 {
1714         struct ll_inode_info *lli = ll_i2info(inode);
1715         struct lmv_stripe_md *lsm = md->lmv;
1716         struct cl_attr  *attr;
1717         int rc = 0;
1718
1719         ENTRY;
1720
1721         LASSERT(S_ISDIR(inode->i_mode));
1722         CDEBUG(D_INODE, "update lsm %p of "DFID"\n", lli->lli_lsm_md,
1723                PFID(ll_inode2fid(inode)));
1724
1725         /* update default LMV */
1726         if (md->default_lmv)
1727                 ll_update_default_lsm_md(inode, md);
1728
1729         /* after dir migration/restripe, a stripe may be turned into a
1730          * directory; in this case, zero out its lli_pfid.
1731          */
1732         if (unlikely(fid_is_norm(&lli->lli_pfid)))
1733                 fid_zero(&lli->lli_pfid);
1734
1735         /*
1736          * no striping information in the request: the lustre_md from the
1737          * request does not include the stripe EA, see ll_md_setattr()
1738          */
1739         if (!lsm)
1740                 RETURN(0);
1741
1742         /*
1743          * normally the dir layout doesn't change, so only take the read lock
1744          * to check it, to avoid blocking other MD operations.
1745          */
1746         down_read(&lli->lli_lsm_sem);
1747
1748         /* a concurrent lookup initialized the lsm, and it is unchanged */
1749         if (lli->lli_lsm_md && lsm_md_eq(lli->lli_lsm_md, lsm))
1750                 GOTO(unlock, rc = 0);
1751
1752         /* if the dir layout doesn't match, check whether the layout version
1753          * has increased, which means the layout changed; this happens in dir
1754          * split/merge and lfsck.
1755          *
1756          * foreign LMV should not change.
1757          */
1758         if (lli->lli_lsm_md && lmv_dir_striped(lli->lli_lsm_md) &&
1759             lsm->lsm_md_layout_version <=
1760             lli->lli_lsm_md->lsm_md_layout_version) {
1761                 CERROR("%s: "DFID" dir layout mismatch:\n",
1762                        ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1763                 lsm_md_dump(D_ERROR, lli->lli_lsm_md);
1764                 lsm_md_dump(D_ERROR, lsm);
1765                 GOTO(unlock, rc = -EINVAL);
1766         }
1767
1768         up_read(&lli->lli_lsm_sem);
1769         down_write(&lli->lli_lsm_sem);
1770         /* clear existing lsm */
1771         if (lli->lli_lsm_md) {
1772                 lmv_free_memmd(lli->lli_lsm_md);
1773                 lli->lli_lsm_md = NULL;
1774         }
1775
1776         rc = ll_init_lsm_md(inode, md);
1777         if (rc) {
1778                 up_write(&lli->lli_lsm_sem);
1779                 RETURN(rc);
1780         }
1781
1782         /* md_merge_attr() may take a long time; since the lsm is already
1783          * set, switch to a read lock.
1784          */
1785         downgrade_write(&lli->lli_lsm_sem);
1786
1787         /* set md->lmv to NULL, so the later freeing of the lustre_md will
1788          * not free this lsm.
1789          */
1790         md->lmv = NULL;
1791
1792         if (!lmv_dir_striped(lli->lli_lsm_md))
1793                 GOTO(unlock, rc = 0);
1794
1795         OBD_ALLOC_PTR(attr);
1796         if (!attr)
1797                 GOTO(unlock, rc = -ENOMEM);
1798
1799         /* validate the lsm */
1800         rc = md_merge_attr(ll_i2mdexp(inode), lli->lli_lsm_md, attr,
1801                            ll_md_blocking_ast);
1802         if (!rc) {
1803                 if (md->body->mbo_valid & OBD_MD_FLNLINK)
1804                         md->body->mbo_nlink = attr->cat_nlink;
1805                 if (md->body->mbo_valid & OBD_MD_FLSIZE)
1806                         md->body->mbo_size = attr->cat_size;
1807                 if (md->body->mbo_valid & OBD_MD_FLATIME)
1808                         md->body->mbo_atime = attr->cat_atime;
1809                 if (md->body->mbo_valid & OBD_MD_FLCTIME)
1810                         md->body->mbo_ctime = attr->cat_ctime;
1811                 if (md->body->mbo_valid & OBD_MD_FLMTIME)
1812                         md->body->mbo_mtime = attr->cat_mtime;
1813         }
1814
1815         OBD_FREE_PTR(attr);
1816         GOTO(unlock, rc);
1817 unlock:
1818         up_read(&lli->lli_lsm_sem);
1819
1820         return rc;
1821 }
1822
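/*
 * Release llite state when an inode is dropped from the cache: close any
 * remaining MDS open handles, free cached ACLs, xattrs and symlink name,
 * clear the directory layout and finalize the cl_object.
 */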
1823 void ll_clear_inode(struct inode *inode)
1824 {
1825         struct ll_inode_info *lli = ll_i2info(inode);
1826         struct ll_sb_info *sbi = ll_i2sbi(inode);
1827
1828         ENTRY;
1829
1830         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
1831                PFID(ll_inode2fid(inode)), inode);
1832
1833         if (S_ISDIR(inode->i_mode)) {
1834                 /* these should have been cleared in ll_file_release */
1835                 LASSERT(lli->lli_opendir_key == NULL);
1836                 LASSERT(lli->lli_sai == NULL);
1837                 LASSERT(lli->lli_opendir_pid == 0);
1838         } else {
1839                 pcc_inode_free(inode);
1840         }
1841
1842         md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
1843
1844         LASSERT(!lli->lli_open_fd_write_count);
1845         LASSERT(!lli->lli_open_fd_read_count);
1846         LASSERT(!lli->lli_open_fd_exec_count);
1847
1848         if (lli->lli_mds_write_och)
1849                 ll_md_real_close(inode, FMODE_WRITE);
1850         if (lli->lli_mds_exec_och)
1851                 ll_md_real_close(inode, FMODE_EXEC);
1852         if (lli->lli_mds_read_och)
1853                 ll_md_real_close(inode, FMODE_READ);
1854
1855         if (S_ISLNK(inode->i_mode) && lli->lli_symlink_name) {
1856                 OBD_FREE(lli->lli_symlink_name,
1857                          strlen(lli->lli_symlink_name) + 1);
1858                 lli->lli_symlink_name = NULL;
1859         }
1860
1861         ll_xattr_cache_destroy(inode);
1862
1863         forget_all_cached_acls(inode);
1864         lli_clear_acl(lli);
1865         lli->lli_inode_magic = LLI_INODE_DEAD;
1866
1867         if (S_ISDIR(inode->i_mode))
1868                 ll_dir_clear_lsm_md(inode);
1869         else if (S_ISREG(inode->i_mode) && !is_bad_inode(inode))
1870                 LASSERT(list_empty(&lli->lli_agl_list));
1871
1872         /*
1873          * XXX This has to be done before lsm is freed below, because
1874          * cl_object still uses inode lsm.
1875          */
1876         cl_inode_fini(inode);
1877
1878         llcrypt_put_encryption_info(inode);
1879
1880         EXIT;
1881 }
1882
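/*
 * Send the setattr RPC to the MDS and apply the attributes it returns to
 * the local inode; size and OST-side time updates are handled by the caller.
 */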
1883 static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data)
1884 {
1885         struct lustre_md md;
1886         struct inode *inode = dentry->d_inode;
1887         struct ll_sb_info *sbi = ll_i2sbi(inode);
1888         struct ptlrpc_request *request = NULL;
1889         int rc, ia_valid;
1890
1891         ENTRY;
1892
1893         op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
1894                                      LUSTRE_OPC_ANY, NULL);
1895         if (IS_ERR(op_data))
1896                 RETURN(PTR_ERR(op_data));
1897
1898         /* If this is a chgrp of a regular file, we want to reserve enough
1899          * quota to cover the entire file size.
1900          */
1901         if (S_ISREG(inode->i_mode) && op_data->op_attr.ia_valid & ATTR_GID &&
1902             from_kgid(&init_user_ns, op_data->op_attr.ia_gid) !=
1903             from_kgid(&init_user_ns, inode->i_gid)) {
1904                 op_data->op_xvalid |= OP_XVALID_BLOCKS;
1905                 op_data->op_attr_blocks = inode->i_blocks;
1906         }
1907
1908
1909         rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &request);
1910         if (rc) {
1911                 ptlrpc_req_finished(request);
1912                 if (rc == -ENOENT) {
1913                         clear_nlink(inode);
1914                         /* Unlinked special device node? Or just a race?
1915                          * Pretend we did everything. */
1916                         if (!S_ISREG(inode->i_mode) &&
1917                             !S_ISDIR(inode->i_mode)) {
1918                                 ia_valid = op_data->op_attr.ia_valid;
1919                                 op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
1920                                 rc = simple_setattr(&init_user_ns, dentry,
1921                                                     &op_data->op_attr);
1922                                 op_data->op_attr.ia_valid = ia_valid;
1923                         }
1924                 } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
1925                         CERROR("md_setattr fails: rc = %d\n", rc);
1926                 }
1927                 RETURN(rc);
1928         }
1929
1930         rc = md_get_lustre_md(sbi->ll_md_exp, &request->rq_pill, sbi->ll_dt_exp,
1931                               sbi->ll_md_exp, &md);
1932         if (rc) {
1933                 ptlrpc_req_finished(request);
1934                 RETURN(rc);
1935         }
1936
1937         ia_valid = op_data->op_attr.ia_valid;
1938         /* the inode size will be set in ll_setattr_ost; we can't do it now
1939          * since the dirty cache is not cleared yet. */
1940         op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
1941         if (S_ISREG(inode->i_mode))
1942                 inode_lock(inode);
1943         rc = simple_setattr(&init_user_ns, dentry, &op_data->op_attr);
1944         if (S_ISREG(inode->i_mode))
1945                 inode_unlock(inode);
1946         op_data->op_attr.ia_valid = ia_valid;
1947
1948         rc = ll_update_inode(inode, &md);
1949         ptlrpc_req_finished(request);
1950
1951         RETURN(rc);
1952 }
1953
1954 /**
1955  * Zero a portion of the page that is part of @inode.
1956  * This implies, if necessary:
1957  * - taking cl_lock on range corresponding to concerned page
1958  * - grabbing vm page
1959  * - associating cl_page
1960  * - proceeding to clio read
1961  * - zeroing range in page
1962  * - proceeding to cl_page flush
1963  * - releasing cl_lock
1964  *
1965  * \param[in] inode     inode
1966  * \param[in] index     page index
1967  * \param[in] offset    offset in page to start zero from
1968  * \param[in] len       len to zero
1969  *
1970  * \retval 0            on success
1971  * \retval negative     errno on failure
1972  */
1973 int ll_io_zero_page(struct inode *inode, pgoff_t index, pgoff_t offset,
1974                     unsigned len)
1975 {
1976         struct ll_inode_info *lli = ll_i2info(inode);
1977         struct cl_object *clob = lli->lli_clob;
1978         __u16 refcheck;
1979         struct lu_env *env = NULL;
1980         struct cl_io *io = NULL;
1981         struct cl_page *clpage = NULL;
1982         struct page *vmpage = NULL;
1983         loff_t from = (loff_t)index << PAGE_SHIFT;
1984         struct cl_lock *lock = NULL;
1985         struct cl_lock_descr *descr = NULL;
1986         struct cl_2queue *queue = NULL;
1987         struct cl_sync_io *anchor = NULL;
1988         bool holdinglock = false;
1989         int rc;
1990
1991         ENTRY;
1992
1993         env = cl_env_get(&refcheck);
1994         if (IS_ERR(env))
1995                 RETURN(PTR_ERR(env));
1996
1997         io = vvp_env_thread_io(env);
1998         io->ci_obj = clob;
1999         rc = cl_io_rw_init(env, io, CIT_WRITE, from, PAGE_SIZE);
2000         if (rc)
2001                 GOTO(putenv, rc);
2002
2003         lock = vvp_env_lock(env);
2004         descr = &lock->cll_descr;
2005         descr->cld_obj   = io->ci_obj;
2006         descr->cld_start = cl_index(io->ci_obj, from);
2007         descr->cld_end   = cl_index(io->ci_obj, from + PAGE_SIZE - 1);
2008         descr->cld_mode  = CLM_WRITE;
2009         descr->cld_enq_flags = CEF_MUST | CEF_NONBLOCK;
2010
2011         /* request lock for page */
2012         rc = cl_lock_request(env, io, lock);
2013         /* -ECANCELED indicates a matching lock with a different extent
2014          * was already present, and -EEXIST indicates a matching lock
2015          * on exactly the same extent was already present.
2016          * In both cases it means we are covered.
2017          */
2018         if (rc == -ECANCELED || rc == -EEXIST)
2019                 rc = 0;
2020         else if (rc < 0)
2021                 GOTO(iofini, rc);
2022         else
2023                 holdinglock = true;
2024
2025         /* grab page */
2026         vmpage = grab_cache_page_nowait(inode->i_mapping, index);
2027         if (vmpage == NULL)
2028                 GOTO(rellock, rc = -EOPNOTSUPP);
2029
2030         if (!PageDirty(vmpage)) {
2031                 /* associate cl_page */
2032                 clpage = cl_page_find(env, clob, vmpage->index,
2033                                       vmpage, CPT_CACHEABLE);
2034                 if (IS_ERR(clpage))
2035                         GOTO(pagefini, rc = PTR_ERR(clpage));
2036
2037                 cl_page_assume(env, io, clpage);
2038         }
2039
2040         if (!PageUptodate(vmpage) && !PageDirty(vmpage) &&
2041             !PageWriteback(vmpage)) {
2042                 /* read page */
2043                 /* Set PagePrivate2 to detect special case of empty page
2044                  * in osc_brw_fini_request().
2045                  * It is also used to tell ll_io_read_page() that we do not
2046                  * want the vmpage to be unlocked.
2047                  */
2048                 SetPagePrivate2(vmpage);
2049                 rc = ll_io_read_page(env, io, clpage, NULL);
2050                 if (!PagePrivate2(vmpage)) {
2051                         /* PagePrivate2 was cleared in osc_brw_fini_request()
2052                          * meaning we read an empty page. In this case, in order
2053                          * to avoid allocating unnecessary block in truncated
2054                          * file, we must not zero and write as below. Subsequent
2055                          * server-side truncate will handle things correctly.
2056                          */
2057                         cl_page_unassume(env, io, clpage);
2058                         GOTO(clpfini, rc = 0);
2059                 }
2060                 ClearPagePrivate2(vmpage);
2061                 if (rc)
2062                         GOTO(clpfini, rc);
2063         }
2064
2065         /* Thanks to PagePrivate2 flag, ll_io_read_page() did not unlock
2066          * the vmpage, so we are good to proceed and zero range in page.
2067          */
2068         zero_user(vmpage, offset, len);
2069
2070         if (holdinglock && clpage) {
2071                 /* explicitly write newly modified page */
2072                 queue = &io->ci_queue;
2073                 cl_2queue_init(queue);
2074                 anchor = &vvp_env_info(env)->vti_anchor;
2075                 cl_sync_io_init(anchor, 1);
2076                 clpage->cp_sync_io = anchor;
2077                 cl_page_list_add(&queue->c2_qin, clpage, true);
2078                 rc = cl_io_submit_rw(env, io, CRT_WRITE, queue);
2079                 if (rc)
2080                         GOTO(queuefini1, rc);
2081                 rc = cl_sync_io_wait(env, anchor, 0);
2082                 if (rc)
2083                         GOTO(queuefini2, rc);
2084                 cl_page_assume(env, io, clpage);
2085
2086 queuefini2:
2087                 cl_2queue_discard(env, io, queue);
2088 queuefini1:
2089                 cl_2queue_disown(env, queue);
2090                 cl_2queue_fini(env, queue);
2091         }
2092
2093 clpfini:
2094         if (clpage)
2095                 cl_page_put(env, clpage);
2096 pagefini:
2097         unlock_page(vmpage);
2098         put_page(vmpage);
2099 rellock:
2100         if (holdinglock)
2101                 cl_lock_release(env, lock);
2102 iofini:
2103         cl_io_fini(env, io);
2104 putenv:
2105         if (env)
2106                 cl_env_put(env, &refcheck);
2107
2108         RETURN(rc);
2109 }
2110
2111 /**
2112  * Get reference file from volatile file name.
2113  * Volatile file name may look like:
2114  * <parent>/LUSTRE_VOLATILE_HDR:<mdt_index>:<random>:fd=<fd>
2115  * where fd is the open file descriptor of the reference file.
2116  *
2117  * \param[in] volatile_name     volatile file name
2118  * \param[in] volatile_len      volatile file name length
2119  * \param[out] ref_file         pointer to struct file of reference file
2120  *
2121  * \retval 0            on success
2122  * \retval negative     errno on failure
2123  */
2124 int volatile_ref_file(const char *volatile_name, int volatile_len,
2125                       struct file **ref_file)
2126 {
2127         char *p, *q, *fd_str;
2128         int fd, rc;
2129
2130         p = strnstr(volatile_name, ":fd=", volatile_len);
2131         if (!p || strlen(p + 4) == 0)
2132                 return -EINVAL;
2133
2134         q = strchrnul(p + 4, ':');
2135         fd_str = kstrndup(p + 4, q - p - 4, GFP_NOFS);
2136         if (!fd_str)
2137                 return -ENOMEM;
2138         rc = kstrtouint(fd_str, 10, &fd);
2139         kfree(fd_str);
2140         if (rc)
2141                 return -EINVAL;
2142
2143         *ref_file = fget(fd);
2144         if (!(*ref_file))
2145                 return -EINVAL;
2146         return 0;
2147 }
2148
2149 /* If this inode has objects allocated to it (lsm != NULL), then the OST
2150  * object(s) determine the file size and mtime.  Otherwise, the MDS will
2151  * keep these values until such a time that objects are allocated for it.
2152  * We do the MDS operations first, as it is checking permissions for us.
2153  * We don't do the MDS RPC if there is nothing that we want to store there,
2154  * otherwise there is no harm in updating mtime/atime on the MDS if we are
2155  * going to do an RPC anyways.
2156  *
2157  * If we are doing a truncate, we will send the mtime and ctime updates
2158  * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
2159  * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
2160  * at the same time.
2161  *
2162  * In case of HSM import, we only set the attributes on the MDS.
2163  */
2164 int ll_setattr_raw(struct dentry *dentry, struct iattr *attr,
2165                    enum op_xvalid xvalid, bool hsm_import)
2166 {
2167         struct inode *inode = dentry->d_inode;
2168         struct ll_inode_info *lli = ll_i2info(inode);
2169         struct md_op_data *op_data = NULL;
2170         ktime_t kstart = ktime_get();
2171         int rc = 0;
2172
2173         ENTRY;
2174
2175         CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, "
2176                "valid %x, hsm_import %d\n",
2177                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid),
2178                inode, i_size_read(inode), attr->ia_size, attr->ia_valid,
2179                hsm_import);
2180
2181         if (attr->ia_valid & ATTR_SIZE) {
2182                 /* Check new size against VFS/VM file size limit and rlimit */
2183                 rc = inode_newsize_ok(inode, attr->ia_size);
2184                 if (rc)
2185                         RETURN(rc);
2186
2187                 /* The maximum Lustre file size is variable, based on the
2188                  * OST maximum object size and number of stripes.  This
2189                  * needs another check in addition to the VFS check above. */
2190                 if (attr->ia_size > ll_file_maxbytes(inode)) {
2191                         CDEBUG(D_INODE,"file "DFID" too large %llu > %llu\n",
2192                                PFID(&lli->lli_fid), attr->ia_size,
2193                                ll_file_maxbytes(inode));
2194                         RETURN(-EFBIG);
2195                 }
2196
2197                 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
2198         }
2199
2200         /* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
2201         if (attr->ia_valid & TIMES_SET_FLAGS) {
2202                 if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
2203                     !capable(CAP_FOWNER))
2204                         RETURN(-EPERM);
2205         }
2206
2207         /* We mark all of the fields "set" so MDS/OST does not re-set them */
2208         if (!(xvalid & OP_XVALID_CTIME_SET) &&
2209              (attr->ia_valid & ATTR_CTIME)) {
2210                 attr->ia_ctime = current_time(inode);
2211                 xvalid |= OP_XVALID_CTIME_SET;
2212         }
2213         if (!(attr->ia_valid & ATTR_ATIME_SET) &&
2214             (attr->ia_valid & ATTR_ATIME)) {
2215                 attr->ia_atime = current_time(inode);
2216                 attr->ia_valid |= ATTR_ATIME_SET;
2217         }
2218         if (!(attr->ia_valid & ATTR_MTIME_SET) &&
2219             (attr->ia_valid & ATTR_MTIME)) {
2220                 attr->ia_mtime = current_time(inode);
2221                 attr->ia_valid |= ATTR_MTIME_SET;
2222         }
2223
2224         if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
2225                 CDEBUG(D_INODE, "setting mtime %lld, ctime %lld, now = %lld\n",
2226                        (s64)attr->ia_mtime.tv_sec, (s64)attr->ia_ctime.tv_sec,
2227                        ktime_get_real_seconds());
2228
2229         if (S_ISREG(inode->i_mode))
2230                 inode_unlock(inode);
2231
2232         /* We always do an MDS RPC, even if we're only changing the size;
2233          * only the MDS knows whether truncate() should fail with -ETXTBSY */
2234
2235         OBD_ALLOC_PTR(op_data);
2236         if (op_data == NULL)
2237                 GOTO(out, rc = -ENOMEM);
2238
2239         if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
2240                 /* If we are changing file size, file content is
2241                  * modified, flag it.
2242                  */
2243                 xvalid |= OP_XVALID_OWNEROVERRIDE;
2244                 op_data->op_bias |= MDS_DATA_MODIFIED;
2245                 clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
2246         }
2247
2248         if (attr->ia_valid & ATTR_FILE) {
2249                 struct ll_file_data *fd = attr->ia_file->private_data;
2250
2251                 if (fd->fd_lease_och)
2252                         op_data->op_bias |= MDS_TRUNC_KEEP_LEASE;
2253         }
2254
2255         op_data->op_attr = *attr;
2256         op_data->op_xvalid = xvalid;
2257
2258         rc = ll_md_setattr(dentry, op_data);
2259         if (rc)
2260                 GOTO(out, rc);
2261
2262         if (!S_ISREG(inode->i_mode) || hsm_import)
2263                 GOTO(out, rc = 0);
2264
2265         if (attr->ia_valid & (ATTR_SIZE | ATTR_ATIME | ATTR_ATIME_SET |
2266                               ATTR_MTIME | ATTR_MTIME_SET | ATTR_CTIME) ||
2267             xvalid & OP_XVALID_CTIME_SET) {
2268                 bool cached = false;
2269
2270                 rc = pcc_inode_setattr(inode, attr, &cached);
2271                 if (cached) {
2272                         if (rc) {
2273                                 CERROR("%s: PCC inode "DFID" setattr failed: "
2274                                        "rc = %d\n",
2275                                        ll_i2sbi(inode)->ll_fsname,
2276                                        PFID(&lli->lli_fid), rc);
2277                                 GOTO(out, rc);
2278                         }
2279                 } else {
2280                         unsigned int flags = 0;
2281
2282                         /* For truncate and utimes sending attributes to OSTs,
2283                          * setting mtime/atime to the past is performed
2284                          * under a PW [0:EOF] extent lock ([new_size:EOF] for
2285                          * truncate). It may seem excessive to send mtime/atime
2286                          * updates to OSTs when not setting times to the past,
2287                          * but it is necessary due to possible time
2288                          * de-synchronization between MDT inode and OST objects.
2289                          */
2290                         if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) {
2291                                 xvalid |= OP_XVALID_FLAGS;
2292                                 flags = LUSTRE_ENCRYPT_FL;
2293                                 /* Call to ll_io_zero_page is not necessary if
2294                                  * truncating on PAGE_SIZE boundary, because
2295                                  * whole pages will be wiped.
2296                                  * In case of Direct IO, all we need is to set
2297                                  * new size.
2298                                  */
2299                                 if (attr->ia_valid & ATTR_SIZE &&
2300                                     attr->ia_size & ~PAGE_MASK &&
2301                                     !(attr->ia_valid & ATTR_FILE &&
2302                                       attr->ia_file->f_flags & O_DIRECT)) {
2303                                         pgoff_t offset =
2304                                                 attr->ia_size & (PAGE_SIZE - 1);
2305
2306                                         rc = ll_io_zero_page(inode,
2307                                                     attr->ia_size >> PAGE_SHIFT,
2308                                                     offset, PAGE_SIZE - offset);
2309                                         if (rc)
2310                                                 GOTO(out, rc);
2311                                 }
2312                                 /* If encrypted volatile file without the key,
2313                                  * we need to fetch size from reference file,
2314                                  * and set it on OST objects. This happens when
2315                                  * migrating or extending an encrypted file
2316                                  * without the key.
2317                                  */
2318                                 if (filename_is_volatile(dentry->d_name.name,
2319                                                          dentry->d_name.len,
2320                                                          NULL) &&
2321                                     llcrypt_require_key(inode) == -ENOKEY) {
2322                                         struct file *ref_file;
2323                                         struct inode *ref_inode;
2324                                         struct ll_inode_info *ref_lli;
2325                                         struct cl_object *ref_obj;
2326                                         struct cl_attr ref_attr = { 0 };
2327                                         struct lu_env *env;
2328                                         __u16 refcheck;
2329
2330                                         rc = volatile_ref_file(
2331                                                 dentry->d_name.name,
2332                                                 dentry->d_name.len,
2333                                                 &ref_file);
2334                                         if (rc)
2335                                                 GOTO(out, rc);
2336
2337                                         ref_inode = file_inode(ref_file);
2338                                         if (!ref_inode) {
2339                                                 fput(ref_file);
2340                                                 GOTO(out, rc = -EINVAL);
2341                                         }
2342
2343                                         env = cl_env_get(&refcheck);
2344                                         if (IS_ERR(env))
2345                                                 GOTO(out, rc = PTR_ERR(env));
2346
2347                                         ref_lli = ll_i2info(ref_inode);
2348                                         ref_obj = ref_lli->lli_clob;
2349                                         cl_object_attr_lock(ref_obj);
2350                                         rc = cl_object_attr_get(env, ref_obj,
2351                                                                 &ref_attr);
2352                                         cl_object_attr_unlock(ref_obj);
2353                                         cl_env_put(env, &refcheck);
2354                                         fput(ref_file);
2355                                         if (rc)
2356                                                 GOTO(out, rc);
2357
2358                                         attr->ia_valid |= ATTR_SIZE;
2359                                         attr->ia_size = ref_attr.cat_size;
2360                                 }
2361                         }
2362                         rc = cl_setattr_ost(lli->lli_clob, attr, xvalid, flags);
2363                 }
2364         }
2365
2366         /* If the file was restored, it needs to set the dirty flag.
2367          *
2368          * We've already sent MDS_DATA_MODIFIED flag in
2369          * ll_md_setattr() for truncate. However, the MDT refuses to
2370          * set the HS_DIRTY flag on released files, so we have to set
2371          * it again if the file has been restored. Please check how
2372          * LLIF_DATA_MODIFIED is set in vvp_io_setattr_fini().
2373          *
2374          * Please notice that if the file is not released, the previous
2375          * MDS_DATA_MODIFIED has taken effect and usually
2376          * LLIF_DATA_MODIFIED is not set (see vvp_io_setattr_fini()).
2377          * This way we can save an RPC for common open + trunc
2378          * operation. */
2379         if (test_and_clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags)) {
2380                 struct hsm_state_set hss = {
2381                         .hss_valid = HSS_SETMASK,
2382                         .hss_setmask = HS_DIRTY,
2383                 };
2384                 int rc2;
2385
2386                 rc2 = ll_hsm_state_set(inode, &hss);
2387                 /* truncate and write can happen at the same time, so the
2388                  * file can be marked modified even though it was not
2389                  * restored from the released state; ll_hsm_state_set() is
2390                  * then not applicable for the file and rc2 < 0 is normal
2391                  * in this case. */
2392                 if (rc2 < 0)
2393                         CDEBUG(D_INFO, DFID "HSM set dirty failed: rc2 = %d\n",
2394                                PFID(ll_inode2fid(inode)), rc2);
2395         }
2396
2397         EXIT;
2398 out:
2399         if (op_data != NULL)
2400                 ll_finish_md_op_data(op_data);
2401
2402         if (S_ISREG(inode->i_mode)) {
2403                 inode_lock(inode);
2404                 if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
2405                         inode_dio_wait(inode);
2406                 /* Once we've got the i_mutex, it's safe to set the S_NOSEC
2407                  * flag.  ll_update_inode (called from ll_md_setattr) clears
2408                  * inode flags, so there is a gap where S_NOSEC is not set.
2409                  * This can cause a writer to take the i_mutex unnecessarily,
2410                  * but this is safe to do and should be rare. */
2411                 inode_has_no_xattr(inode);
2412         }
2413
2414         if (!rc)
2415                 ll_stats_ops_tally(ll_i2sbi(inode), attr->ia_valid & ATTR_SIZE ?
2416                                         LPROC_LL_TRUNC : LPROC_LL_SETATTR,
2417                                    ktime_us_delta(ktime_get(), kstart));
2418
2419         RETURN(rc);
2420 }
2421
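/*
 * VFS ->setattr entry point: prepare fscrypt state, translate the
 * ATTR_*/OP_XVALID_* flags (owner override, ATTR_FORCE, setuid/setgid
 * stripping) and call ll_setattr_raw().
 */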
2422 int ll_setattr(struct user_namespace *mnt_userns, struct dentry *de,
2423                struct iattr *attr)
2424 {
2425         int mode = de->d_inode->i_mode;
2426         enum op_xvalid xvalid = 0;
2427         int rc;
2428
2429         rc = llcrypt_prepare_setattr(de, attr);
2430         if (rc)
2431                 return rc;
2432
2433         if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
2434                               (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
2435                 xvalid |= OP_XVALID_OWNEROVERRIDE;
2436
2437         if (((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
2438                                (ATTR_SIZE|ATTR_MODE)) &&
2439             (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
2440              (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2441               !(attr->ia_mode & S_ISGID))))
2442                 attr->ia_valid |= ATTR_FORCE;
2443
2444         if ((attr->ia_valid & ATTR_MODE) &&
2445             (mode & S_ISUID) &&
2446             !(attr->ia_mode & S_ISUID) &&
2447             !(attr->ia_valid & ATTR_KILL_SUID))
2448                 attr->ia_valid |= ATTR_KILL_SUID;
2449
2450         if ((attr->ia_valid & ATTR_MODE) &&
2451             ((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2452             !(attr->ia_mode & S_ISGID) &&
2453             !(attr->ia_valid & ATTR_KILL_SGID))
2454                 attr->ia_valid |= ATTR_KILL_SGID;
2455
2456         return ll_setattr_raw(de, attr, xvalid, false);
2457 }
2458
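/*
 * Collect filesystem statistics from the MDT and, unless the MDT already
 * returned summed values (OS_STATFS_SUM), from the OSTs, adjusting the
 * inode counts so that "inodes in use" stays correct.
 */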
2459 int ll_statfs_internal(struct ll_sb_info *sbi, struct obd_statfs *osfs,
2460                        u32 flags)
2461 {
2462         struct obd_statfs obd_osfs = { 0 };
2463         time64_t max_age;
2464         int rc;
2465
2466         ENTRY;
2467         max_age = ktime_get_seconds() - sbi->ll_statfs_max_age;
2468
2469         if (test_bit(LL_SBI_LAZYSTATFS, sbi->ll_flags))
2470                 flags |= OBD_STATFS_NODELAY;
2471
2472         rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
2473         if (rc)
2474                 RETURN(rc);
2475
2476         osfs->os_type = LL_SUPER_MAGIC;
2477
2478         CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n",
2479               osfs->os_bavail, osfs->os_blocks, osfs->os_ffree, osfs->os_files);
2480
2481         if (osfs->os_state & OS_STATFS_SUM)
2482                 GOTO(out, rc);
2483
2484         rc = obd_statfs(NULL, sbi->ll_dt_exp, &obd_osfs, max_age, flags);
2485         if (rc) /* Possibly a filesystem with no OSTs.  Report MDT totals. */
2486                 GOTO(out, rc = 0);
2487
2488         CDEBUG(D_SUPER, "OSC blocks %llu/%llu objects %llu/%llu\n",
2489                obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
2490                obd_osfs.os_files);
2491
2492         osfs->os_bsize = obd_osfs.os_bsize;
2493         osfs->os_blocks = obd_osfs.os_blocks;
2494         osfs->os_bfree = obd_osfs.os_bfree;
2495         osfs->os_bavail = obd_osfs.os_bavail;
2496
2497         /* If we have _some_ OSTs, but don't have as many free objects on the
2498          * OSTs as inodes on the MDTs, reduce the reported number of inodes
2499          * to compensate, so that the "inodes in use" number is correct.
2500          * This should be kept in sync with lod_statfs() behaviour.
2501          */
2502         if (obd_osfs.os_files && obd_osfs.os_ffree < osfs->os_ffree) {
2503                 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
2504                                  obd_osfs.os_ffree;
2505                 osfs->os_ffree = obd_osfs.os_ffree;
2506         }
2507
2508 out:
2509         RETURN(rc);
2510 }
2511
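/*
 * If the inode's project ID has quota limits, clamp the block and inode
 * totals reported in @sfs to those limits.
 */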
2512 static int ll_statfs_project(struct inode *inode, struct kstatfs *sfs)
2513 {
2514         struct if_quotactl qctl = {
2515                 .qc_cmd = LUSTRE_Q_GETQUOTA,
2516                 .qc_type = PRJQUOTA,
2517                 .qc_valid = QC_GENERAL,
2518         };
2519         u64 limit, curblock;
2520         int ret;
2521
2522         qctl.qc_id = ll_i2info(inode)->lli_projid;
2523         ret = quotactl_ioctl(inode->i_sb, &qctl);
2524         if (ret) {
2525                 /* ignore errors if the project ID does not have
2526                  * a quota limit or the feature is unsupported.
2527                  */
2528                 if (ret == -ESRCH || ret == -EOPNOTSUPP)
2529                         ret = 0;
2530                 return ret;
2531         }
2532
2533         limit = ((qctl.qc_dqblk.dqb_bsoftlimit ?
2534                  qctl.qc_dqblk.dqb_bsoftlimit :
2535                  qctl.qc_dqblk.dqb_bhardlimit) * 1024) / sfs->f_bsize;
2536         if (limit && sfs->f_blocks > limit) {
2537                 curblock = (qctl.qc_dqblk.dqb_curspace +
2538                                 sfs->f_bsize - 1) / sfs->f_bsize;
2539                 sfs->f_blocks = limit;
2540                 sfs->f_bfree = sfs->f_bavail =
2541                         (sfs->f_blocks > curblock) ?
2542                         (sfs->f_blocks - curblock) : 0;
2543         }
2544
2545         limit = qctl.qc_dqblk.dqb_isoftlimit ?
2546                 qctl.qc_dqblk.dqb_isoftlimit :
2547                 qctl.qc_dqblk.dqb_ihardlimit;
2548         if (limit && sfs->f_files > limit) {
2549                 sfs->f_files = limit;
2550                 sfs->f_ffree = (sfs->f_files >
2551                         qctl.qc_dqblk.dqb_curinodes) ?
2552                         (sfs->f_files - qctl.qc_dqblk.dqb_curinodes) : 0;
2553         }
2554
2555         return 0;
2556 }
2557
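/*
 * VFS ->statfs entry point: fill @sfs from the aggregated cluster
 * statistics, scaling block counts on 32-bit kernels and applying project
 * quota limits when a project ID is set on the inode.
 */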
2558 int ll_statfs(struct dentry *de, struct kstatfs *sfs)
2559 {
2560         struct super_block *sb = de->d_sb;
2561         struct obd_statfs osfs;
2562         __u64 fsid = huge_encode_dev(sb->s_dev);
2563         ktime_t kstart = ktime_get();
2564         int rc;
2565
2566         CDEBUG(D_VFSTRACE, "VFS Op:sb=%s (%p)\n", sb->s_id, sb);
2567
2568         /* Some amount of caching on the client is allowed */
2569         rc = ll_statfs_internal(ll_s2sbi(sb), &osfs, OBD_STATFS_SUM);
2570         if (rc)
2571                 return rc;
2572
2573         statfs_unpack(sfs, &osfs);
2574
2575         /* We need to downshift for all 32-bit kernels, because we can't
2576          * tell if the kernel is being called via sys_statfs64() or not.
2577          * Stop before overflowing f_bsize - past that point it is better
2578          * to just risk EOVERFLOW if the caller uses the old sys_statfs(). */
2579         if (sizeof(long) < 8) {
2580                 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
2581                         sfs->f_bsize <<= 1;
2582
2583                         osfs.os_blocks >>= 1;
2584                         osfs.os_bfree >>= 1;
2585                         osfs.os_bavail >>= 1;
2586                 }
2587         }
2588
2589         sfs->f_blocks = osfs.os_blocks;
2590         sfs->f_bfree = osfs.os_bfree;
2591         sfs->f_bavail = osfs.os_bavail;
2592         sfs->f_fsid.val[0] = (__u32)fsid;
2593         sfs->f_fsid.val[1] = (__u32)(fsid >> 32);
2594         if (ll_i2info(de->d_inode)->lli_projid)
2595                 return ll_statfs_project(de->d_inode, sfs);
2596
2597         ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STATFS,
2598                            ktime_us_delta(ktime_get(), kstart));
2599
2600         return 0;
2601 }
2602
2603 void ll_inode_size_lock(struct inode *inode)
2604 {
2605         struct ll_inode_info *lli;
2606
2607         LASSERT(!S_ISDIR(inode->i_mode));
2608
2609         lli = ll_i2info(inode);
2610         mutex_lock(&lli->lli_size_mutex);
2611 }
2612
2613 void ll_inode_size_unlock(struct inode *inode)
2614 {
2615         struct ll_inode_info *lli;
2616
2617         lli = ll_i2info(inode);
2618         mutex_unlock(&lli->lli_size_mutex);
2619 }
2620
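/*
 * Apply the LUSTRE_*_FL attribute flags from the MDT to the VFS inode,
 * preserving the encryption flag and tracking project inheritance.
 */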
2621 void ll_update_inode_flags(struct inode *inode, unsigned int ext_flags)
2622 {
2623         /* do not clear encryption flag */
2624         ext_flags |= ll_inode_to_ext_flags(inode->i_flags) & LUSTRE_ENCRYPT_FL;
2625         inode->i_flags = ll_ext_to_inode_flags(ext_flags);
2626         if (ext_flags & LUSTRE_PROJINHERIT_FL)
2627                 set_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags);
2628         else
2629                 clear_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags);
2630 }
2631
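/*
 * Apply the metadata in @md returned by an MDS RPC to the VFS inode:
 * file layout and directory striping, ACLs, timestamps, ownership,
 * flags and FID.
 */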
2632 int ll_update_inode(struct inode *inode, struct lustre_md *md)
2633 {
2634         struct ll_inode_info *lli = ll_i2info(inode);
2635         struct mdt_body *body = md->body;
2636         struct ll_sb_info *sbi = ll_i2sbi(inode);
2637         bool api32;
2638         int rc = 0;
2639
2640         if (body->mbo_valid & OBD_MD_FLEASIZE) {
2641                 rc = cl_file_inode_init(inode, md);
2642                 if (rc)
2643                         return rc;
2644         }
2645
2646         if (S_ISDIR(inode->i_mode)) {
2647                 rc = ll_update_lsm_md(inode, md);
2648                 if (rc != 0)
2649                         return rc;
2650         }
2651
2652         if (body->mbo_valid & OBD_MD_FLACL)
2653                 lli_replace_acl(lli, md);
2654
2655         api32 = test_bit(LL_SBI_32BIT_API, sbi->ll_flags);
2656         inode->i_ino = cl_fid_build_ino(&body->mbo_fid1, api32);
2657         inode->i_generation = cl_fid_build_gen(&body->mbo_fid1);
2658
2659         if (body->mbo_valid & OBD_MD_FLATIME) {
2660                 if (body->mbo_atime > inode->i_atime.tv_sec)
2661                         inode->i_atime.tv_sec = body->mbo_atime;
2662                 lli->lli_atime = body->mbo_atime;
2663         }
2664
2665         if (body->mbo_valid & OBD_MD_FLMTIME) {
2666                 if (body->mbo_mtime > inode->i_mtime.tv_sec) {
2667                         CDEBUG(D_INODE,
2668                                "setting ino %lu mtime from %lld to %llu\n",
2669                                inode->i_ino, (s64)inode->i_mtime.tv_sec,
2670                                body->mbo_mtime);
2671                         inode->i_mtime.tv_sec = body->mbo_mtime;
2672                 }
2673                 lli->lli_mtime = body->mbo_mtime;
2674         }
2675
2676         if (body->mbo_valid & OBD_MD_FLCTIME) {
2677                 if (body->mbo_ctime > inode->i_ctime.tv_sec)
2678                         inode->i_ctime.tv_sec = body->mbo_ctime;
2679                 lli->lli_ctime = body->mbo_ctime;
2680         }
2681
2682         if (body->mbo_valid & OBD_MD_FLBTIME)
2683                 lli->lli_btime = body->mbo_btime;
2684
2685         /* Clear i_flags to remove S_NOSEC before permissions are updated */
2686         if (body->mbo_valid & OBD_MD_FLFLAGS)
2687                 ll_update_inode_flags(inode, body->mbo_flags);
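        /* i_mode is assembled from two server attributes: OBD_MD_FLMODE
         * carries the permission bits and OBD_MD_FLTYPE carries the S_IFMT
         * file type bits, so each update below preserves the other half. */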
2688         if (body->mbo_valid & OBD_MD_FLMODE)
2689                 inode->i_mode = (inode->i_mode & S_IFMT) |
2690                                 (body->mbo_mode & ~S_IFMT);
2691
2692         if (body->mbo_valid & OBD_MD_FLTYPE)
2693                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
2694                                 (body->mbo_mode & S_IFMT);
2695
2696         LASSERT(inode->i_mode != 0);
2697         if (body->mbo_valid & OBD_MD_FLUID)
2698                 inode->i_uid = make_kuid(&init_user_ns, body->mbo_uid);
2699         if (body->mbo_valid & OBD_MD_FLGID)
2700                 inode->i_gid = make_kgid(&init_user_ns, body->mbo_gid);
2701         if (body->mbo_valid & OBD_MD_FLPROJID)
2702                 lli->lli_projid = body->mbo_projid;
2703         if (body->mbo_valid & OBD_MD_FLNLINK) {
2704                 spin_lock(&inode->i_lock);
2705                 set_nlink(inode, body->mbo_nlink);
2706                 spin_unlock(&inode->i_lock);
2707         }
2708         if (body->mbo_valid & OBD_MD_FLRDEV)
2709                 inode->i_rdev = old_decode_dev(body->mbo_rdev);
2710
2711         if (body->mbo_valid & OBD_MD_FLID) {
2712                 /* FID shouldn't be changed! */
2713                 if (fid_is_sane(&lli->lli_fid)) {
2714                         LASSERTF(lu_fid_eq(&lli->lli_fid, &body->mbo_fid1),
2715                                  "Trying to change FID "DFID
2716                                  " to the "DFID", inode "DFID"(%p)\n",
2717                                  PFID(&lli->lli_fid), PFID(&body->mbo_fid1),
2718                                  PFID(ll_inode2fid(inode)), inode);
2719                 } else {
2720                         lli->lli_fid = body->mbo_fid1;
2721                 }
2722         }
2723
2724         LASSERT(fid_seq(&lli->lli_fid) != 0);
2725
2726         /* For an encrypted file without the key, do not lose the
2727          * cleartext size stored into lli_lazysize by ll_merge_attr();
2728          * we will need it in ll_prepare_close().
2729          */
2730         if (lli->lli_attr_valid & OBD_MD_FLLAZYSIZE && lli->lli_lazysize &&
2731             llcrypt_require_key(inode) == -ENOKEY)
2732                 lli->lli_attr_valid = body->mbo_valid | OBD_MD_FLLAZYSIZE;
2733         else
2734                 lli->lli_attr_valid = body->mbo_valid;
2735         if (body->mbo_valid & OBD_MD_FLSIZE) {
2736                 i_size_write(inode, body->mbo_size);
2737
2738                 CDEBUG(D_VFSTRACE, "inode="DFID", updating i_size %llu\n",
2739                        PFID(ll_inode2fid(inode)),
2740                        (unsigned long long)body->mbo_size);
2741
2742                 if (body->mbo_valid & OBD_MD_FLBLOCKS)
2743                         inode->i_blocks = body->mbo_blocks;
2744         } else {
2745                 if (body->mbo_valid & OBD_MD_FLLAZYSIZE)
2746                         lli->lli_lazysize = body->mbo_size;
2747                 if (body->mbo_valid & OBD_MD_FLLAZYBLOCKS)
2748                         lli->lli_lazyblocks = body->mbo_blocks;
2749         }
2750
2751         if (body->mbo_valid & OBD_MD_TSTATE) {
2752                 /* Set LLIF_FILE_RESTORING while a restore is ongoing and
2753                  * clear it when done, so that we start glimpsing the
2754                  * updated attributes again.
2755                  */
2756                 if (body->mbo_t_state & MS_RESTORE)
2757                         set_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
2758                 else
2759                         clear_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
2760         }
2761
2762         return 0;
2763 }
2764
2765 /* child default LMV is inherited from parent */
2766 static inline bool ll_default_lmv_inherited(struct lmv_stripe_md *pdmv,
2767                                             struct lmv_stripe_md *cdmv)
2768 {
2769         if (!pdmv || !cdmv)
2770                 return false;
2771
2772         if (pdmv->lsm_md_magic != cdmv->lsm_md_magic ||
2773             pdmv->lsm_md_stripe_count != cdmv->lsm_md_stripe_count ||
2774             pdmv->lsm_md_master_mdt_index != cdmv->lsm_md_master_mdt_index ||
2775             pdmv->lsm_md_hash_type != cdmv->lsm_md_hash_type)
2776                 return false;
2777
2778         if (cdmv->lsm_md_max_inherit !=
2779             lmv_inherit_next(pdmv->lsm_md_max_inherit))
2780                 return false;
2781
2782         if (cdmv->lsm_md_max_inherit_rr !=
2783             lmv_inherit_rr_next(pdmv->lsm_md_max_inherit_rr))
2784                 return false;
2785
2786         return true;
2787 }
2788
2789 /* update directory depth to ROOT, called after LOOKUP lock is fetched. */
2790 void ll_update_dir_depth(struct inode *dir, struct inode *inode)
2791 {
2792         struct ll_inode_info *plli;
2793         struct ll_inode_info *lli;
2794
2795         if (!S_ISDIR(inode->i_mode))
2796                 return;
2797
2798         if (inode == dir)
2799                 return;
2800
2801         plli = ll_i2info(dir);
2802         lli = ll_i2info(inode);
2803         lli->lli_dir_depth = plli->lli_dir_depth + 1;
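        /* lli_inherit_depth counts how many levels the parent's default LMV
         * has been inherited down this path; it resets to 0 as soon as the
         * child's default LMV stops matching what inheritance would yield. */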
2804         if (plli->lli_default_lsm_md && lli->lli_default_lsm_md) {
2805                 down_read(&plli->lli_lsm_sem);
2806                 down_read(&lli->lli_lsm_sem);
2807                 if (ll_default_lmv_inherited(plli->lli_default_lsm_md,
2808                                              lli->lli_default_lsm_md))
2809                         lli->lli_inherit_depth =
2810                                 plli->lli_inherit_depth + 1;
2811                 else
2812                         lli->lli_inherit_depth = 0;
2813                 up_read(&lli->lli_lsm_sem);
2814                 up_read(&plli->lli_lsm_sem);
2815         } else {
2816                 lli->lli_inherit_depth = 0;
2817         }
2818
2819         CDEBUG(D_INODE, DFID" depth %hu default LMV depth %hu\n",
2820                PFID(&lli->lli_fid), lli->lli_dir_depth, lli->lli_inherit_depth);
2821 }
2822
2823 void ll_truncate_inode_pages_final(struct inode *inode)
2824 {
2825         struct address_space *mapping = &inode->i_data;
2826         unsigned long nrpages;
2827         unsigned long flags;
2828
2829         truncate_inode_pages_final(mapping);
2830
2831         /* Workaround for LU-118: Note nrpages may not be totally updated when
2832          * truncate_inode_pages() returns, as there can be a page in the process
2833          * of deletion (inside __delete_from_page_cache()) in the specified
2834          * range. Thus mapping->nrpages can be non-zero when this function
2835          * returns even after truncation of the whole mapping.  Only do this if
2836          * nrpages isn't already zero.
2837          */
2838         nrpages = mapping->nrpages;
2839         if (nrpages) {
2840                 ll_xa_lock_irqsave(&mapping->i_pages, flags);
2841                 nrpages = mapping->nrpages;
2842                 ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
2843         } /* Workaround end */
2844
2845         LASSERTF(nrpages == 0, "%s: inode="DFID"(%p) nrpages=%lu, "
2846                  "see https://jira.whamcloud.com/browse/LU-118\n",
2847                  ll_i2sbi(inode)->ll_fsname,
2848                  PFID(ll_inode2fid(inode)), inode, nrpages);
2849 }
2850
2851 int ll_read_inode2(struct inode *inode, void *opaque)
2852 {
2853         struct lustre_md *md = opaque;
2854         struct ll_inode_info *lli = ll_i2info(inode);
2855         int     rc;
2856         ENTRY;
2857
2858         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
2859                PFID(&lli->lli_fid), inode);
2860
2861         /* Core attributes from the MDS first.  This is a new inode, and
2862          * the VFS doesn't zero times in the core inode so we have to do
2863          * it ourselves.  They will be overwritten by either MDS or OST
2864          * attributes - we just need to make sure they aren't newer.
2865          */
2866         inode->i_mtime.tv_sec = 0;
2867         inode->i_atime.tv_sec = 0;
2868         inode->i_ctime.tv_sec = 0;
2869         inode->i_rdev = 0;
2870         rc = ll_update_inode(inode, md);
2871         if (rc != 0)
2872                 RETURN(rc);
2873
2874         /* OIDEBUG(inode); */
2875
2876 #ifdef HAVE_BACKING_DEV_INFO
2877         /* initializing backing dev info. */
2878         inode->i_mapping->backing_dev_info = &s2lsi(inode->i_sb)->lsi_bdi;
2879 #endif
2880         if (S_ISREG(inode->i_mode)) {
2881                 struct ll_sb_info *sbi = ll_i2sbi(inode);
2882                 inode->i_op = &ll_file_inode_operations;
2883                 inode->i_fop = sbi->ll_fop;
2884                 inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
2885                 EXIT;
2886         } else if (S_ISDIR(inode->i_mode)) {
2887                 inode->i_op = &ll_dir_inode_operations;
2888                 inode->i_fop = &ll_dir_operations;
2889                 EXIT;
2890         } else if (S_ISLNK(inode->i_mode)) {
2891                 inode->i_op = &ll_fast_symlink_inode_operations;
2892                 EXIT;
2893         } else {
2894                 inode->i_op = &ll_special_inode_operations;
2895
2896                 init_special_inode(inode, inode->i_mode,
2897                                    inode->i_rdev);
2898
2899                 EXIT;
2900         }
2901
2902         return 0;
2903 }
2904
2905 void ll_delete_inode(struct inode *inode)
2906 {
2907         struct ll_inode_info *lli = ll_i2info(inode);
2908         ENTRY;
2909
2910         if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL) {
2911                 /* This is the last chance to write out dirty pages,
2912                  * otherwise we may lose data during umount.
2913                  *
2914                  * If i_nlink is 0 then just discard the data. This is safe
2915                  * because the local inode gets i_nlink 0 from the server only
2916                  * for the last unlink, so the file is not open anywhere else.
2917                  */
2918                 cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, inode->i_nlink ?
2919                                    CL_FSYNC_LOCAL : CL_FSYNC_DISCARD, 1);
2920         }
2921
2922         ll_truncate_inode_pages_final(inode);
2923         ll_clear_inode(inode);
2924         clear_inode(inode);
2925
2926         EXIT;
2927 }
2928
2929 int ll_iocontrol(struct inode *inode, struct file *file,
2930                  unsigned int cmd, unsigned long arg)
2931 {
2932         struct ll_sb_info *sbi = ll_i2sbi(inode);
2933         struct ptlrpc_request *req = NULL;
2934         int rc, flags = 0;
2935         ENTRY;
2936
2937         switch (cmd) {
2938         case FS_IOC_GETFLAGS: {
2939                 struct mdt_body *body;
2940                 struct md_op_data *op_data;
2941
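                /* Fetch the flags fresh from the MDT via getattr instead of
                 * relying on the locally cached inode flags. */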
2942                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
2943                                              0, 0, LUSTRE_OPC_ANY,
2944                                              NULL);
2945                 if (IS_ERR(op_data))
2946                         RETURN(PTR_ERR(op_data));
2947
2948                 op_data->op_valid = OBD_MD_FLFLAGS;
2949                 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
2950                 ll_finish_md_op_data(op_data);
2951                 if (rc) {
2952                         CERROR("%s: failure inode "DFID": rc = %d\n",
2953                                sbi->ll_md_exp->exp_obd->obd_name,
2954                                PFID(ll_inode2fid(inode)), rc);
2955                         RETURN(-abs(rc));
2956                 }
2957
2958                 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
2959
2960                 flags = body->mbo_flags;
2961
2962                 ptlrpc_req_finished(req);
2963
2964                 RETURN(put_user(flags, (int __user *)arg));
2965         }
2966         case FS_IOC_SETFLAGS: {
2967                 struct iattr *attr;
2968                 struct md_op_data *op_data;
2969                 struct cl_object *obj;
2970                 struct fsxattr fa = { 0 };
2971
2972                 if (get_user(flags, (int __user *)arg))
2973                         RETURN(-EFAULT);
2974
2975                 fa.fsx_projid = ll_i2info(inode)->lli_projid;
2976                 if (flags & LUSTRE_PROJINHERIT_FL)
2977                         fa.fsx_xflags = FS_XFLAG_PROJINHERIT;
2978
2979                 rc = ll_ioctl_check_project(inode, fa.fsx_xflags,
2980                                             fa.fsx_projid);
2981                 if (rc)
2982                         RETURN(rc);
2983
2984                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
2985                                              LUSTRE_OPC_ANY, NULL);
2986                 if (IS_ERR(op_data))
2987                         RETURN(PTR_ERR(op_data));
2988
2989                 op_data->op_attr_flags = flags;
2990                 op_data->op_xvalid |= OP_XVALID_FLAGS;
2991                 rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &req);
2992                 ll_finish_md_op_data(op_data);
2993                 ptlrpc_req_finished(req);
2994                 if (rc)
2995                         RETURN(rc);
2996
2997                 ll_update_inode_flags(inode, flags);
2998
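                /* Propagate the new flags to the OST objects as well; a file
                 * without a data object (obj == NULL) is done at this point. */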
2999                 obj = ll_i2info(inode)->lli_clob;
3000                 if (obj == NULL)
3001                         RETURN(0);
3002
3003                 OBD_ALLOC_PTR(attr);
3004                 if (attr == NULL)
3005                         RETURN(-ENOMEM);
3006
3007                 rc = cl_setattr_ost(obj, attr, OP_XVALID_FLAGS, flags);
3008
3009                 OBD_FREE_PTR(attr);
3010                 RETURN(rc);
3011         }
3012         default:
3013                 RETURN(-ENOSYS);
3014         }
3015
3016         RETURN(0);
3017 }
3018
3019 int ll_flush_ctx(struct inode *inode)
3020 {
3021         struct ll_sb_info  *sbi = ll_i2sbi(inode);
3022
3023         CDEBUG(D_SEC, "flush context for user %d\n",
3024                from_kuid(&init_user_ns, current_uid()));
3025
3026         obd_set_info_async(NULL, sbi->ll_md_exp,
3027                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
3028                            0, NULL, NULL);
3029         obd_set_info_async(NULL, sbi->ll_dt_exp,
3030                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
3031                            0, NULL, NULL);
3032         return 0;
3033 }
3034
3035 /* umount -f on the client means force down; don't save state */
3036 void ll_umount_begin(struct super_block *sb)
3037 {
3038         struct ll_sb_info *sbi = ll_s2sbi(sb);
3039         struct obd_device *obd;
3040         struct obd_ioctl_data *ioc_data;
3041         int cnt;
3042         ENTRY;
3043
3044         CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
3045                sb->s_count, atomic_read(&sb->s_active));
3046
3047         obd = class_exp2obd(sbi->ll_md_exp);
3048         if (obd == NULL) {
3049                 CERROR("Invalid MDC connection handle %#llx\n",
3050                        sbi->ll_md_exp->exp_handle.h_cookie);
3051                 EXIT;
3052                 return;
3053         }
3054         obd->obd_force = 1;
3055
3056         obd = class_exp2obd(sbi->ll_dt_exp);
3057         if (obd == NULL) {
3058                 CERROR("Invalid LOV connection handle %#llx\n",
3059                        sbi->ll_dt_exp->exp_handle.h_cookie);
3060                 EXIT;
3061                 return;
3062         }
3063         obd->obd_force = 1;
3064
3065         OBD_ALLOC_PTR(ioc_data);
3066         if (ioc_data) {
3067                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
3068                               sizeof *ioc_data, ioc_data, NULL);
3069
3070                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
3071                               sizeof *ioc_data, ioc_data, NULL);
3072
3073                 OBD_FREE_PTR(ioc_data);
3074         }
3075
3076         /* Really, we'd like to wait until there are no requests outstanding,
3077          * and then continue.  For now, we just periodically check for the VFS
3078          * to decrement mnt_cnt and hope to finish within 10 seconds.
3079          */
3080         cnt = 10;
3081         while (cnt > 0 &&
3082                !may_umount(sbi->ll_mnt.mnt)) {
3083                 ssleep(1);
3084                 cnt -= 1;
3085         }
3086
3087         EXIT;
3088 }
3089
3090 int ll_remount_fs(struct super_block *sb, int *flags, char *data)
3091 {
3092         struct ll_sb_info *sbi = ll_s2sbi(sb);
3093         char *profilenm = get_profile_name(sb);
3094         int err;
3095         __u32 read_only;
3096
3097         if ((*flags & MS_RDONLY) != (sb->s_flags & SB_RDONLY)) {
3098                 read_only = *flags & MS_RDONLY;
3099                 err = obd_set_info_async(NULL, sbi->ll_md_exp,
3100                                          sizeof(KEY_READ_ONLY),
3101                                          KEY_READ_ONLY, sizeof(read_only),
3102                                          &read_only, NULL);
3103                 if (err) {
3104                         LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
3105                                       profilenm, read_only ?
3106                                       "read-only" : "read-write", err);
3107                         return err;
3108                 }
3109
3110                 if (read_only)
3111                         sb->s_flags |= SB_RDONLY;
3112                 else
3113                         sb->s_flags &= ~SB_RDONLY;
3114
3115                 if (test_bit(LL_SBI_VERBOSE, sbi->ll_flags))
3116                         LCONSOLE_WARN("Remounted %s %s\n", profilenm,
3117                                       read_only ?  "read-only" : "read-write");
3118         }
3119         return 0;
3120 }
3121
3122 /**
3123  * Clean up the open handle that is cached on the MDT side.
3124  *
3125  * The client-side open handling thread may hit an error after the MDT
3126  * has granted the open. In that case the client should send a close RPC
3127  * to the MDT as cleanup; otherwise the open handle is leaked on the MDT
3128  * until the client unmounts or is evicted.
3129  *
3130  * Furthermore, if someone unlinks the file, the open handle still holds
3131  * a reference on that file/object, blocking subsequent threads that want
3132  * to locate the object via its FID.
3133  *
3134  * \param[in] sb    super block for this file-system
3135  * \param[in] pill  reply capsule of the original open request
3136  */
3137 void ll_open_cleanup(struct super_block *sb, struct req_capsule *pill)
3138 {
3139         struct mdt_body                 *body;
3140         struct md_op_data               *op_data;
3141         struct ptlrpc_request           *close_req = NULL;
3142         struct obd_export               *exp       = ll_s2sbi(sb)->ll_md_exp;
3143         ENTRY;
3144
3145         body = req_capsule_server_get(pill, &RMF_MDT_BODY);
3146         OBD_ALLOC_PTR(op_data);
3147         if (op_data == NULL) {
3148                 CWARN("%s: cannot allocate op_data to release open handle for "
3149                       DFID"\n", ll_s2sbi(sb)->ll_fsname, PFID(&body->mbo_fid1));
3150
3151                 RETURN_EXIT;
3152         }
3153
3154         op_data->op_fid1 = body->mbo_fid1;
3155         op_data->op_open_handle = body->mbo_open_handle;
3156         op_data->op_mod_time = ktime_get_real_seconds();
3157         md_close(exp, op_data, NULL, &close_req);
3158         ptlrpc_req_finished(close_req);
3159         ll_finish_md_op_data(op_data);
3160
3161         EXIT;
3162 }
3163
3164 /* set filesystem-wide default LMV for subdir mount if it's enabled on ROOT. */
3165 static int ll_fileset_default_lmv_fixup(struct inode *inode,
3166                                         struct lustre_md *md)
3167 {
3168         struct ll_sb_info *sbi = ll_i2sbi(inode);
3169         struct ptlrpc_request *req = NULL;
3170         union lmv_mds_md *lmm = NULL;
3171         int size = 0;
3172         int rc;
3173
3174         LASSERT(is_root_inode(inode));
3175         LASSERT(!fid_is_root(&sbi->ll_root_fid));
3176         LASSERT(!md->default_lmv);
3177
3178         rc = ll_dir_get_default_layout(inode, (void **)&lmm, &size, &req,
3179                                        OBD_MD_DEFAULT_MEA,
3180                                        GET_DEFAULT_LAYOUT_ROOT);
3181         if (rc && rc != -ENODATA)
3182                 GOTO(out, rc);
3183
3184         rc = 0;
3185         if (lmm && size) {
3186                 rc = md_unpackmd(sbi->ll_md_exp, &md->default_lmv, lmm, size);
3187                 if (rc < 0)
3188                         GOTO(out, rc);
3189
3190                 rc = 0;
3191         }
3192         EXIT;
3193 out:
3194         if (req)
3195                 ptlrpc_req_finished(req);
3196         return rc;
3197 }
3198
3199 int ll_prep_inode(struct inode **inode, struct req_capsule *pill,
3200                   struct super_block *sb, struct lookup_intent *it)
3201 {
3202         struct ll_sb_info *sbi = NULL;
3203         struct lustre_md md = { NULL };
3204         bool default_lmv_deleted = false;
3205         int rc;
3206
3207         ENTRY;
3208
3209         LASSERT(*inode || sb);
3210         sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
3211         rc = md_get_lustre_md(sbi->ll_md_exp, pill, sbi->ll_dt_exp,
3212                               sbi->ll_md_exp, &md);
3213         if (rc != 0)
3214                 GOTO(out, rc);
3215
3216         /*
3217          * Clear default_lmv only if the intent_getattr reply doesn't contain
3218          * it, but that has to be done after iget; check it early here because
3219          * ll_update_lsm_md() may change md.
3220          */
3221         if (it && (it->it_op & (IT_LOOKUP | IT_GETATTR)) &&
3222             S_ISDIR(md.body->mbo_mode) && !md.default_lmv) {
3223                 if (unlikely(*inode && is_root_inode(*inode) &&
3224                              !fid_is_root(&sbi->ll_root_fid))) {
3225                         rc = ll_fileset_default_lmv_fixup(*inode, &md);
3226                         if (rc)
3227                                 GOTO(out, rc);
3228                 }
3229
3230                 if (!md.default_lmv)
3231                         default_lmv_deleted = true;
3232         }
3233
3234         if (*inode) {
3235                 rc = ll_update_inode(*inode, &md);
3236                 if (rc != 0)
3237                         GOTO(out, rc);
3238         } else {
3239                 bool api32 = test_bit(LL_SBI_32BIT_API, sbi->ll_flags);
3240                 struct lu_fid *fid1 = &md.body->mbo_fid1;
3241
3242                 LASSERT(sb != NULL);
3243
3244                 /*
3245                  * At this point the server returns the same FID that the client
3246                  * generated for the create, so using ->fid1 is okay here.
3247                  */
3248                 if (!fid_is_sane(fid1)) {
3249                         CERROR("%s: Fid is insane "DFID"\n",
3250                                 sbi->ll_fsname, PFID(fid1));
3251                         GOTO(out, rc = -EINVAL);
3252                 }
3253
3254                 *inode = ll_iget(sb, cl_fid_build_ino(fid1, api32), &md);
3255                 if (IS_ERR(*inode)) {
3256                         lmd_clear_acl(&md);
3257                         rc = IS_ERR(*inode) ? PTR_ERR(*inode) : -ENOMEM;
3258                         *inode = NULL;
3259                         CERROR("new_inode -fatal: rc %d\n", rc);
3260                         GOTO(out, rc);
3261                 }
3262         }
3263
3264         /* Handle a piggybacked layout lock.
3265          * A layout lock can be piggybacked on getattr and open requests.
3266          * The lsm can be applied to the inode only if it comes with a layout
3267          * lock, otherwise the correct layout may be overwritten, for example:
3268          * 1. proc1: the MDT returns an lsm but does not grant the layout lock
3269          * 2. the layout is changed by another client
3270          * 3. proc2: refreshes the layout and is granted the layout lock
3271          * 4. proc1: applies the now-stale layout */
3272         if (it != NULL && it->it_lock_mode != 0) {
3273                 struct lustre_handle lockh;
3274                 struct ldlm_lock *lock;
3275
3276                 lockh.cookie = it->it_lock_handle;
3277                 lock = ldlm_handle2lock(&lockh);
3278                 LASSERT(lock != NULL);
3279                 if (ldlm_has_layout(lock)) {
3280                         struct cl_object_conf conf;
3281
3282                         memset(&conf, 0, sizeof(conf));
3283                         conf.coc_opc = OBJECT_CONF_SET;
3284                         conf.coc_inode = *inode;
3285                         conf.coc_lock = lock;
3286                         conf.u.coc_layout = md.layout;
3287                         (void)ll_layout_conf(*inode, &conf);
3288                 }
3289                 LDLM_LOCK_PUT(lock);
3290         }
3291
3292         if (default_lmv_deleted)
3293                 ll_update_default_lsm_md(*inode, &md);
3294
3295         /* we may want to apply some policy for foreign file/dir */
3296         if (ll_sbi_has_foreign_symlink(sbi)) {
3297                 rc = ll_manage_foreign(*inode, &md);
3298                 if (rc < 0)
3299                         GOTO(out, rc);
3300         }
3301
3302         GOTO(out, rc = 0);
3303
3304 out:
3305         /* cleanup will be done if necessary */
3306         md_free_lustre_md(sbi->ll_md_exp, &md);
3307
3308         if (rc != 0 && it != NULL && it->it_op & IT_OPEN) {
3309                 ll_intent_drop_lock(it);
3310                 ll_open_cleanup(sb != NULL ? sb : (*inode)->i_sb, pill);
3311         }
3312
3313         return rc;
3314 }
3315
3316 int ll_obd_statfs(struct inode *inode, void __user *arg)
3317 {
3318         struct ll_sb_info *sbi = NULL;
3319         struct obd_export *exp;
3320         struct obd_ioctl_data *data = NULL;
3321         __u32 type;
3322         int len = 0, rc;
3323
3324         if (inode)
3325                 sbi = ll_i2sbi(inode);
3326         if (!sbi)
3327                 GOTO(out_statfs, rc = -EINVAL);
3328
3329         rc = obd_ioctl_getdata(&data, &len, arg);
3330         if (rc)
3331                 GOTO(out_statfs, rc);
3332
3333         if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
3334             !data->ioc_pbuf1 || !data->ioc_pbuf2)
3335                 GOTO(out_statfs, rc = -EINVAL);
3336
3337         if (data->ioc_inllen1 != sizeof(__u32) ||
3338             data->ioc_inllen2 != sizeof(__u32) ||
3339             data->ioc_plen1 != sizeof(struct obd_statfs) ||
3340             data->ioc_plen2 != sizeof(struct obd_uuid))
3341                 GOTO(out_statfs, rc = -EINVAL);
3342
3343         memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
3344         if (type & LL_STATFS_LMV)
3345                 exp = sbi->ll_md_exp;
3346         else if (type & LL_STATFS_LOV)
3347                 exp = sbi->ll_dt_exp;
3348         else
3349                 GOTO(out_statfs, rc = -ENODEV);
3350
3351         rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, data, NULL);
3352         if (rc)
3353                 GOTO(out_statfs, rc);
3354 out_statfs:
3355         OBD_FREE_LARGE(data, len);
3356         return rc;
3357 }
3358
3359 /*
3360  * This is normally called in ll_finish_md_op_data(), but sometimes it needs
3361  * to be called early to avoid deadlock.
3362  */
3363 void ll_unlock_md_op_lsm(struct md_op_data *op_data)
3364 {
3365         if (op_data->op_mea2_sem) {
3366                 up_read_non_owner(op_data->op_mea2_sem);
3367                 op_data->op_mea2_sem = NULL;
3368         }
3369
3370         if (op_data->op_mea1_sem) {
3371                 up_read_non_owner(op_data->op_mea1_sem);
3372                 op_data->op_mea1_sem = NULL;
3373         }
3374 }
3375
3376 /* this function prepares md_op_data hint for passing it down to MD stack. */
3377 struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
3378                                       struct inode *i1, struct inode *i2,
3379                                       const char *name, size_t namelen,
3380                                       __u32 mode, enum md_op_code opc,
3381                                       void *data)
3382 {
3383         struct llcrypt_name fname = { 0 };
3384         int rc;
3385
3386         LASSERT(i1 != NULL);
3387
3388         if (name == NULL) {
3389                 /* Do not reuse namelen for something else. */
3390                 if (namelen != 0)
3391                         return ERR_PTR(-EINVAL);
3392         } else {
3393                 if ((!IS_ENCRYPTED(i1) ||
3394                      (opc != LUSTRE_OPC_LOOKUP && opc != LUSTRE_OPC_CREATE)) &&
3395                     namelen > ll_i2sbi(i1)->ll_namelen)
3396                         return ERR_PTR(-ENAMETOOLONG);
3397
3398                 /* "/" is not a valid name, but it's allowed */
3399                 if (!lu_name_is_valid_2(name, namelen) &&
3400                     strncmp("/", name, namelen) != 0)
3401                         return ERR_PTR(-EINVAL);
3402         }
3403
3404         if (op_data == NULL)
3405                 OBD_ALLOC_PTR(op_data);
3406
3407         if (op_data == NULL)
3408                 return ERR_PTR(-ENOMEM);
3409
3410         ll_i2gids(op_data->op_suppgids, i1, i2);
3411         /* If the client is using a subdir mount and looks at what it sees as
3412          * /.fscrypt, interpret it as the .fscrypt dir at the root of the fs.
3413          */
3414         if (unlikely(i1->i_sb && i1->i_sb->s_root && is_root_inode(i1) &&
3415                      !fid_is_root(ll_inode2fid(i1)) &&
3416                      name && namelen == strlen(dot_fscrypt_name) &&
3417                      strncmp(name, dot_fscrypt_name, namelen) == 0))
3418                 lu_root_fid(&op_data->op_fid1);
3419         else
3420                 op_data->op_fid1 = *ll_inode2fid(i1);
3421
3422         if (S_ISDIR(i1->i_mode)) {
3423                 down_read_non_owner(&ll_i2info(i1)->lli_lsm_sem);
3424                 op_data->op_mea1_sem = &ll_i2info(i1)->lli_lsm_sem;
3425                 op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
3426                 op_data->op_default_mea1 = ll_i2info(i1)->lli_default_lsm_md;
3427         }
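        /* The *_non_owner lock variants are used because the read lock taken
         * here is only released later, in ll_unlock_md_op_lsm(), which may
         * not happen in the same context, so lockdep ownership tracking is
         * intentionally bypassed. */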
3428
3429         if (i2) {
3430                 op_data->op_fid2 = *ll_inode2fid(i2);
3431                 if (S_ISDIR(i2->i_mode)) {
3432                         if (i2 != i1) {
3433                                 /* i2 is typically a child of i1, and MUST be
3434                                  * further from the root to avoid deadlocks.
3435                                  */
3436                                 down_read_non_owner(&ll_i2info(i2)->lli_lsm_sem);
3437                                 op_data->op_mea2_sem =
3438                                                 &ll_i2info(i2)->lli_lsm_sem;
3439                         }
3440                         op_data->op_mea2 = ll_i2info(i2)->lli_lsm_md;
3441                 }
3442         } else {
3443                 fid_zero(&op_data->op_fid2);
3444         }
3445
3446         if (test_bit(LL_SBI_64BIT_HASH, ll_i2sbi(i1)->ll_flags))
3447                 op_data->op_cli_flags |= CLI_HASH64;
3448
3449         if (ll_need_32bit_api(ll_i2sbi(i1)))
3450                 op_data->op_cli_flags |= CLI_API32;
3451
3452         if ((i2 && is_root_inode(i2)) ||
3453             opc == LUSTRE_OPC_LOOKUP || opc == LUSTRE_OPC_CREATE) {
3454                 /* In the lookup case, ll_setup_filename() has already been
3455                  * called in ll_lookup_it(), so just take the provided name.
3456                  * Also take the provided name when dealing with the root inode.
3457                  */
3458                 fname.disk_name.name = (unsigned char *)name;
3459                 fname.disk_name.len = namelen;
3460         } else if (name && namelen) {
3461                 struct qstr dname = QSTR_INIT(name, namelen);
3462                 struct inode *dir;
3463                 struct lu_fid *pfid = NULL;
3464                 struct lu_fid fid;
3465                 int lookup;
3466
3467                 if (!S_ISDIR(i1->i_mode) && i2 && S_ISDIR(i2->i_mode)) {
3468                         /* special case when called from ll_link() */
3469                         dir = i2;
3470                         lookup = 0;
3471                 } else {
3472                         dir = i1;
3473                         lookup = (int)(opc == LUSTRE_OPC_ANY);
3474                 }
3475                 if (opc == LUSTRE_OPC_ANY && lookup)
3476                         pfid = &fid;
3477                 rc = ll_setup_filename(dir, &dname, lookup, &fname, pfid);
3478                 if (rc) {
3479                         ll_finish_md_op_data(op_data);
3480                         return ERR_PTR(rc);
3481                 }
3482                 if (pfid && !fid_is_zero(pfid)) {
3483                         if (i2 == NULL)
3484                                 op_data->op_fid2 = fid;
3485                         op_data->op_bias = MDS_FID_OP;
3486                 }
3487                 if (fname.disk_name.name &&
3488                     fname.disk_name.name != (unsigned char *)name) {
3489                         /* op_data->op_name must be freed after use */
3490                         op_data->op_flags |= MF_OPNAME_KMALLOCED;
3491                 }
3492         }
3493
3494         /* In fact LUSTRE_OPC_LOOKUP and LUSTRE_OPC_OPEN are treated as
3495          * LUSTRE_OPC_ANY here.
3496          */
3497         if (opc == LUSTRE_OPC_LOOKUP || opc == LUSTRE_OPC_OPEN)
3498                 op_data->op_code = LUSTRE_OPC_ANY;
3499         else
3500                 op_data->op_code = opc;
3501         op_data->op_name = fname.disk_name.name;
3502         op_data->op_namelen = fname.disk_name.len;
3503         op_data->op_mode = mode;
3504         op_data->op_mod_time = ktime_get_real_seconds();
3505         op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
3506         op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
3507         op_data->op_cap = current_cap();
3508         op_data->op_mds = 0;
3509         if ((opc == LUSTRE_OPC_CREATE) && (name != NULL) &&
3510              filename_is_volatile(name, namelen, &op_data->op_mds)) {
3511                 op_data->op_bias |= MDS_CREATE_VOLATILE;
3512         }
3513         op_data->op_data = data;
3514
3515         return op_data;
3516 }
3517
3518 void ll_finish_md_op_data(struct md_op_data *op_data)
3519 {
3520         ll_unlock_md_op_lsm(op_data);
3521         ll_security_release_secctx(op_data->op_file_secctx,
3522                                    op_data->op_file_secctx_size);
3523         if (op_data->op_flags & MF_OPNAME_KMALLOCED)
3524                 /* allocated via ll_setup_filename called
3525                  * from ll_prep_md_op_data
3526                  */
3527                 kfree(op_data->op_name);
3528         llcrypt_free_ctx(op_data->op_file_encctx, op_data->op_file_encctx_size);
3529         OBD_FREE_PTR(op_data);
3530 }
3531
3532 int ll_show_options(struct seq_file *seq, struct dentry *dentry)
3533 {
3534         struct ll_sb_info *sbi;
3535         int i;
3536
3537         LASSERT(seq && dentry);
3538         sbi = ll_s2sbi(dentry->d_sb);
3539
3540         if (test_bit(LL_SBI_NOLCK, sbi->ll_flags))
3541                 seq_puts(seq, "nolock");
3542
3543         for (i = 1; ll_sbi_flags_name[i].token != LL_SBI_NUM_MOUNT_OPT; i++) {
3544                 /* match_table in some cases has patterns for both enabled and
3545                  * disabled cases. Ignore 'no'xxx versions if bit is set.
3546                  */
3547                 if (test_bit(ll_sbi_flags_name[i].token, sbi->ll_flags) &&
3548                     strncmp(ll_sbi_flags_name[i].pattern, "no", 2)) {
3549                         if (ll_sbi_flags_name[i].token ==
3550                             LL_SBI_FOREIGN_SYMLINK) {
3551                                 seq_show_option(seq, "foreign_symlink",
3552                                                 sbi->ll_foreign_symlink_prefix);
3553                         } else {
3554                                 seq_printf(seq, ",%s",
3555                                            ll_sbi_flags_name[i].pattern);
3556                         }
3557
3558                         /* You can have either localflock or flock but not
3559                          * both. If localflock is set don't print flock or
3560                          * noflock.
3561                          */
3562                         if (ll_sbi_flags_name[i].token == LL_SBI_LOCALFLOCK)
3563                                 i += 2;
3564                 } else if (!test_bit(ll_sbi_flags_name[i].token, sbi->ll_flags) &&
3565                            !strncmp(ll_sbi_flags_name[i].pattern, "no", 2)) {
3566                         seq_printf(seq, ",%s",
3567                                    ll_sbi_flags_name[i].pattern);
3568                 }
3569         }
3570
3571         llcrypt_show_test_dummy_encryption(seq, ',', dentry->d_sb);
3572
3573         RETURN(0);
3574 }
3575
3576 /**
3577  * Get the obd name selected by cmd and copy it out to user space.
3578  */
3579 int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
3580 {
3581         struct ll_sb_info *sbi = ll_i2sbi(inode);
3582         struct obd_device *obd;
3583         ENTRY;
3584
3585         if (cmd == OBD_IOC_GETNAME_OLD || cmd == OBD_IOC_GETDTNAME)
3586                 obd = class_exp2obd(sbi->ll_dt_exp);
3587         else if (cmd == OBD_IOC_GETMDNAME)
3588                 obd = class_exp2obd(sbi->ll_md_exp);
3589         else
3590                 RETURN(-EINVAL);
3591
3592         if (!obd)
3593                 RETURN(-ENOENT);
3594
3595         if (copy_to_user((void __user *)arg, obd->obd_name,
3596                          strlen(obd->obd_name) + 1))
3597                 RETURN(-EFAULT);
3598
3599         RETURN(0);
3600 }
3601
3602 struct dname_buf {
3603         struct work_struct db_work;
3604         struct dentry *db_dentry;
3605         /* Let's hope the path is not too long, 32 bytes for the work struct
3606          * on my kernel
3607          */
3608         char buf[PAGE_SIZE - sizeof(struct work_struct) - sizeof(void *)];
3609 };
3610
3611 static void ll_dput_later(struct work_struct *work)
3612 {
3613         struct dname_buf *db = container_of(work, struct dname_buf, db_work);
3614
3615         dput(db->db_dentry);
3616         free_page((unsigned long)db);
3617 }
3618
3619 static char* ll_d_path(struct dentry *dentry, char *buf, int bufsize)
3620 {
3621         char *path = NULL;
3622
3623         struct path p;
3624
3625         p.dentry = dentry;
3626         p.mnt = current->fs->root.mnt;
3627         path_get(&p);
3628         path = d_path(&p, buf, bufsize);
3629         path_put(&p);
3630         return path;
3631 }
3632
3633 void ll_dirty_page_discard_warn(struct inode *inode, int ioret)
3634 {
3635         struct dname_buf *db;
3636         char  *path = NULL;
3637         struct dentry *dentry = NULL;
3638
3639         /* This can be called inside a spin lock, so use GFP_ATOMIC. */
3640         db = (struct dname_buf *)__get_free_page(GFP_ATOMIC);
3641         if (db != NULL) {
3642
3643                 dentry = d_find_alias(inode);
3644                 if (dentry != NULL)
3645                         path = ll_d_path(dentry, db->buf, sizeof(db->buf));
3646         }
3647
3648         /* The below message is checked in recovery-small.sh test_24b */
3649         CDEBUG(D_WARNING,
3650                "%s: dirty page discard: %s/fid: "DFID"/%s may get corrupted "
3651                "(rc %d)\n", ll_i2sbi(inode)->ll_fsname,
3652                s2lsi(inode->i_sb)->lsi_lmd->lmd_dev,
3653                PFID(ll_inode2fid(inode)),
3654                (path && !IS_ERR(path)) ? path : "", ioret);
3655
3656         if (dentry != NULL) {
3657                 /* We cannot dput here since if we happen to be the last holder
3658                  * then we can end up waiting for page evictions that
3659                  * in turn wait for RPCs that need this instance of ptlrpcd
3660                  * (calling brw_interpret->*page_completion*->vmpage_error->here)
3661                  * LU-15340
3662                  */
3663                 INIT_WORK(&db->db_work, ll_dput_later);
3664                 db->db_dentry = dentry;
3665                 schedule_work(&db->db_work);
3666         } else {
3667                 if (db != NULL)
3668                         free_page((unsigned long)db);
3669         }
3670 }
3671
3672 ssize_t ll_copy_user_md(const struct lov_user_md __user *md,
3673                         struct lov_user_md **kbuf)
3674 {
3675         struct lov_user_md      lum;
3676         ssize_t                 lum_size;
3677         ENTRY;
3678
3679         if (copy_from_user(&lum, md, sizeof(lum)))
3680                 RETURN(-EFAULT);
3681
3682         lum_size = ll_lov_user_md_size(&lum);
3683         if (lum_size < 0)
3684                 RETURN(lum_size);
3685
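        /* The fixed-size header alone tells us the full layout size, which
         * may exceed sizeof(lum), so allocate that much and copy the whole
         * user buffer again. */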
3686         OBD_ALLOC_LARGE(*kbuf, lum_size);
3687         if (*kbuf == NULL)
3688                 RETURN(-ENOMEM);
3689
3690         if (copy_from_user(*kbuf, md, lum_size) != 0) {
3691                 OBD_FREE_LARGE(*kbuf, lum_size);
3692                 RETURN(-EFAULT);
3693         }
3694
3695         RETURN(lum_size);
3696 }
3697
3698 /*
3699  * Compute the llite root squash state after a change of the root squash
3700  * configuration setting or the addition/removal of an LNet NID.
3701  */
3702 void ll_compute_rootsquash_state(struct ll_sb_info *sbi)
3703 {
3704         struct root_squash_info *squash = &sbi->ll_squash;
3705         int i;
3706         bool matched;
3707         struct lnet_processid id;
3708
3709         /* Update norootsquash flag */
3710         spin_lock(&squash->rsi_lock);
3711         if (list_empty(&squash->rsi_nosquash_nids))
3712                 clear_bit(LL_SBI_NOROOTSQUASH, sbi->ll_flags);
3713         else {
3714                 /* Do not apply root squash as soon as one of our NIDs is
3715                  * in the nosquash_nids list */
3716                 matched = false;
3717                 i = 0;
3718                 while (LNetGetId(i++, &id) != -ENOENT) {
3719                         if (nid_is_lo0(&id.nid))
3720                                 continue;
3721                         if (cfs_match_nid(lnet_nid_to_nid4(&id.nid),
3722                                           &squash->rsi_nosquash_nids)) {
3723                                 matched = true;
3724                                 break;
3725                         }
3726                 }
3727                 if (matched)
3728                         set_bit(LL_SBI_NOROOTSQUASH, sbi->ll_flags);
3729                 else
3730                         clear_bit(LL_SBI_NOROOTSQUASH, sbi->ll_flags);
3731         }
3732         spin_unlock(&squash->rsi_lock);
3733 }
3734
3735 /**
3736  * Parse linkea content to extract information about a given hardlink
3737  *
3738  * \param[in]   ldata      - Initialized linkea data
3739  * \param[in]   linkno     - Link identifier
3740  * \param[out]  parent_fid - The entry's parent FID
3741  * \param[out]  ln         - Entry name destination buffer
3742  *
3743  * \retval 0 on success
3744  * \retval Appropriate negative error code on failure
3745  */
3746 static int ll_linkea_decode(struct linkea_data *ldata, unsigned int linkno,
3747                             struct lu_fid *parent_fid, struct lu_name *ln)
3748 {
3749         unsigned int    idx;
3750         int             rc;
3751         ENTRY;
3752
3753         rc = linkea_init_with_rec(ldata);
3754         if (rc < 0)
3755                 RETURN(rc);
3756
3757         if (linkno >= ldata->ld_leh->leh_reccount)
3758                 /* beyond last link */
3759                 RETURN(-ENODATA);
3760
3761         linkea_first_entry(ldata);
3762         for (idx = 0; ldata->ld_lee != NULL; idx++) {
3763                 linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen, ln,
3764                                     parent_fid);
3765                 if (idx == linkno)
3766                         break;
3767
3768                 linkea_next_entry(ldata);
3769         }
3770
3771         if (idx < linkno)
3772                 RETURN(-ENODATA);
3773
3774         RETURN(0);
3775 }
3776
3777 /**
3778  * Get parent FID and name of an identified link. Operation is performed for
3779  * a given link number, letting the caller iterate over linkno to list one or
3780  * all links of an entry.
3781  *
3782  * \param[in]     file - File descriptor against which to perform the operation
3783  * \param[in,out] arg  - User-filled structure containing the linkno to operate
3784  *                       on and the available size. It is eventually filled with
3785  *                       the requested information or left untouched on error
3786  *
3787  * \retval - 0 on success
3788  * \retval - Appropriate negative error code on failure
3789  */
3790 int ll_getparent(struct file *file, struct getparent __user *arg)
3791 {
3792         struct inode            *inode = file_inode(file);
3793         struct linkea_data      *ldata;
3794         struct lu_buf            buf = LU_BUF_NULL;
3795         struct lu_name           ln;
3796         struct lu_fid            parent_fid;
3797         __u32                    linkno;
3798         __u32                    name_size;
3799         int                      rc;
3800
3801         ENTRY;
3802
3803         if (!capable(CAP_DAC_READ_SEARCH) &&
3804             !test_bit(LL_SBI_USER_FID2PATH, ll_i2sbi(inode)->ll_flags))
3805                 RETURN(-EPERM);
3806
3807         if (get_user(name_size, &arg->gp_name_size))
3808                 RETURN(-EFAULT);
3809
3810         if (get_user(linkno, &arg->gp_linkno))
3811                 RETURN(-EFAULT);
3812
3813         if (name_size > PATH_MAX)
3814                 RETURN(-EINVAL);
3815
3816         OBD_ALLOC(ldata, sizeof(*ldata));
3817         if (ldata == NULL)
3818                 RETURN(-ENOMEM);
3819
3820         rc = linkea_data_new(ldata, &buf);
3821         if (rc < 0)
3822                 GOTO(ldata_free, rc);
3823
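        /* The link EA (XATTR_NAME_LINK) holds one record per hard link;
         * fetch it in full, then decode the requested entry below. */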
3824         rc = ll_xattr_list(inode, XATTR_NAME_LINK, XATTR_TRUSTED_T, buf.lb_buf,
3825                            buf.lb_len, OBD_MD_FLXATTR);
3826         if (rc < 0)
3827                 GOTO(lb_free, rc);
3828
3829         rc = ll_linkea_decode(ldata, linkno, &parent_fid, &ln);
3830         if (rc < 0)
3831                 GOTO(lb_free, rc);
3832
3833         if (ln.ln_namelen >= name_size)
3834                 GOTO(lb_free, rc = -EOVERFLOW);
3835
3836         if (copy_to_user(&arg->gp_fid, &parent_fid, sizeof(arg->gp_fid)))
3837                 GOTO(lb_free, rc = -EFAULT);
3838
3839         if (copy_to_user(&arg->gp_name, ln.ln_name, ln.ln_namelen))
3840                 GOTO(lb_free, rc = -EFAULT);
3841
3842         if (put_user('\0', arg->gp_name + ln.ln_namelen))
3843                 GOTO(lb_free, rc = -EFAULT);
3844
3845 lb_free:
3846         lu_buf_free(&buf);
3847 ldata_free:
3848         OBD_FREE(ldata, sizeof(*ldata));
3849
3850         RETURN(rc);
3851 }