LU-14541 llite: reenable fast_read by default
lustre/llite/llite_lib.c (fs/lustre-release.git)
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lustre/llite/llite_lib.c
32  *
33  * Lustre Light Super operations
34  */
35
36 #define DEBUG_SUBSYSTEM S_LLITE
37
38 #include <linux/cpu.h>
39 #include <linux/module.h>
40 #include <linux/random.h>
41 #include <linux/statfs.h>
42 #include <linux/time.h>
43 #include <linux/file.h>
44 #include <linux/types.h>
45 #include <libcfs/linux/linux-uuid.h>
46 #include <linux/version.h>
47 #include <linux/mm.h>
48 #include <linux/user_namespace.h>
49 #include <linux/delay.h>
50 #include <linux/uidgid.h>
51 #include <linux/fs_struct.h>
52
53 #ifndef HAVE_CPUS_READ_LOCK
54 #include <libcfs/linux/linux-cpu.h>
55 #endif
56 #include <libcfs/linux/linux-misc.h>
57 #include <uapi/linux/lustre/lustre_ioctl.h>
58 #ifdef HAVE_UAPI_LINUX_MOUNT_H
59 #include <uapi/linux/mount.h>
60 #endif
61
62 #include <lustre_ha.h>
63 #include <lustre_dlm.h>
64 #include <lprocfs_status.h>
65 #include <lustre_disk.h>
66 #include <uapi/linux/lustre/lustre_param.h>
67 #include <lustre_log.h>
68 #include <cl_object.h>
69 #include <obd_cksum.h>
70 #include "llite_internal.h"
71
72 struct kmem_cache *ll_file_data_slab;
73
74 #ifndef log2
75 #define log2(n) ffz(~(n))
76 #endif
77
78 /**
79  * If there is only one core visible to Lustre, async readahead
80  * will be disabled.  To avoid massive oversubscription, use 1/2
81  * of the active cores as the default maximum number of async
82  * readahead requests.
83  */
84 static inline unsigned int ll_get_ra_async_max_active(void)
85 {
86         return cfs_cpt_weight(cfs_cpt_tab, CFS_CPT_ANY) >> 1;
87 }
88
89 static struct ll_sb_info *ll_init_sbi(void)
90 {
91         struct ll_sb_info *sbi = NULL;
92         unsigned long pages;
93         unsigned long lru_page_max;
94         struct sysinfo si;
95         int rc;
96
97         ENTRY;
98
99         OBD_ALLOC_PTR(sbi);
100         if (sbi == NULL)
101                 RETURN(ERR_PTR(-ENOMEM));
102
103         rc = pcc_super_init(&sbi->ll_pcc_super);
104         if (rc < 0)
105                 GOTO(out_sbi, rc);
106
107         spin_lock_init(&sbi->ll_lock);
108         mutex_init(&sbi->ll_lco.lco_lock);
109         spin_lock_init(&sbi->ll_pp_extent_lock);
110         spin_lock_init(&sbi->ll_process_lock);
111         sbi->ll_rw_stats_on = 0;
112         sbi->ll_statfs_max_age = OBD_STATFS_CACHE_SECONDS;
113
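        /* Size the client LRU page cache from low (non-highmem) RAM:
         * by default allow it to grow to half of those pages. */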
114         si_meminfo(&si);
115         pages = si.totalram - si.totalhigh;
116         lru_page_max = pages / 2;
117
118         sbi->ll_ra_info.ra_async_max_active = ll_get_ra_async_max_active();
119         sbi->ll_ra_info.ll_readahead_wq =
120                 cfs_cpt_bind_workqueue("ll-readahead-wq", cfs_cpt_tab,
121                                        0, CFS_CPT_ANY,
122                                        sbi->ll_ra_info.ra_async_max_active);
123         if (IS_ERR(sbi->ll_ra_info.ll_readahead_wq))
124                 GOTO(out_pcc, rc = PTR_ERR(sbi->ll_ra_info.ll_readahead_wq));
125
126         /* initialize ll_cache data */
127         sbi->ll_cache = cl_cache_init(lru_page_max);
128         if (sbi->ll_cache == NULL)
129                 GOTO(out_destroy_ra, rc = -ENOMEM);
130
131         /* initialize foreign symlink prefix path */
132         OBD_ALLOC(sbi->ll_foreign_symlink_prefix, sizeof("/mnt/"));
133         if (sbi->ll_foreign_symlink_prefix == NULL)
134                 GOTO(out_destroy_ra, rc = -ENOMEM);
135         memcpy(sbi->ll_foreign_symlink_prefix, "/mnt/", sizeof("/mnt/"));
136         sbi->ll_foreign_symlink_prefix_size = sizeof("/mnt/");
137
138         /* initialize foreign symlink upcall path, none by default */
139         OBD_ALLOC(sbi->ll_foreign_symlink_upcall, sizeof("none"));
140         if (sbi->ll_foreign_symlink_upcall == NULL)
141                 GOTO(out_destroy_ra, rc = -ENOMEM);
142         memcpy(sbi->ll_foreign_symlink_upcall, "none", sizeof("none"));
143         sbi->ll_foreign_symlink_upcall_items = NULL;
144         sbi->ll_foreign_symlink_upcall_nb_items = 0;
145         init_rwsem(&sbi->ll_foreign_symlink_sem);
146         /* foreign symlink support (LL_SBI_FOREIGN_SYMLINK in ll_flags)
147          * not enabled by default
148          */
149
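        /* Default readahead limits: at most 1/32 of low RAM in total and
         * 1/4 of that per file, both capped by the compile-time maxima. */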
150         sbi->ll_ra_info.ra_max_pages =
151                 min(pages / 32, SBI_DEFAULT_READ_AHEAD_MAX);
152         sbi->ll_ra_info.ra_max_pages_per_file =
153                 min(sbi->ll_ra_info.ra_max_pages / 4,
154                     SBI_DEFAULT_READ_AHEAD_PER_FILE_MAX);
155         sbi->ll_ra_info.ra_async_pages_per_file_threshold =
156                                 sbi->ll_ra_info.ra_max_pages_per_file;
157         sbi->ll_ra_info.ra_range_pages = SBI_DEFAULT_RA_RANGE_PAGES;
158         sbi->ll_ra_info.ra_max_read_ahead_whole_pages = -1;
159         atomic_set(&sbi->ll_ra_info.ra_async_inflight, 0);
160
161         set_bit(LL_SBI_VERBOSE, sbi->ll_flags);
162 #ifdef ENABLE_CHECKSUM
163         set_bit(LL_SBI_CHECKSUM, sbi->ll_flags);
164 #endif
165 #ifdef ENABLE_FLOCK
166         set_bit(LL_SBI_FLOCK, sbi->ll_flags);
167 #endif
168
169 #ifdef HAVE_LRU_RESIZE_SUPPORT
170         set_bit(LL_SBI_LRU_RESIZE, sbi->ll_flags);
171 #endif
172         set_bit(LL_SBI_LAZYSTATFS, sbi->ll_flags);
173
174         /* metadata statahead is enabled by default */
175         sbi->ll_sa_running_max = LL_SA_RUNNING_DEF;
176         sbi->ll_sa_max = LL_SA_RPC_DEF;
177         atomic_set(&sbi->ll_sa_total, 0);
178         atomic_set(&sbi->ll_sa_wrong, 0);
179         atomic_set(&sbi->ll_sa_running, 0);
180         atomic_set(&sbi->ll_agl_total, 0);
181         set_bit(LL_SBI_AGL_ENABLED, sbi->ll_flags);
182         set_bit(LL_SBI_FAST_READ, sbi->ll_flags);
183         set_bit(LL_SBI_TINY_WRITE, sbi->ll_flags);
184         set_bit(LL_SBI_PARALLEL_DIO, sbi->ll_flags);
185         ll_sbi_set_encrypt(sbi, true);
186
187         /* root squash */
188         sbi->ll_squash.rsi_uid = 0;
189         sbi->ll_squash.rsi_gid = 0;
190         INIT_LIST_HEAD(&sbi->ll_squash.rsi_nosquash_nids);
191         spin_lock_init(&sbi->ll_squash.rsi_lock);
192
193         /* Per-filesystem file heat */
194         sbi->ll_heat_decay_weight = SBI_DEFAULT_HEAT_DECAY_WEIGHT;
195         sbi->ll_heat_period_second = SBI_DEFAULT_HEAT_PERIOD_SECOND;
196
197         /* Per-fs open heat level before requesting open lock */
198         sbi->ll_oc_thrsh_count = SBI_DEFAULT_OPENCACHE_THRESHOLD_COUNT;
199         sbi->ll_oc_max_ms = SBI_DEFAULT_OPENCACHE_THRESHOLD_MAX_MS;
200         sbi->ll_oc_thrsh_ms = SBI_DEFAULT_OPENCACHE_THRESHOLD_MS;
201         RETURN(sbi);
202 out_destroy_ra:
203         if (sbi->ll_foreign_symlink_prefix)
204                 OBD_FREE(sbi->ll_foreign_symlink_prefix, sizeof("/mnt/"));
205         if (sbi->ll_cache) {
206                 cl_cache_decref(sbi->ll_cache);
207                 sbi->ll_cache = NULL;
208         }
209         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
210 out_pcc:
211         pcc_super_fini(&sbi->ll_pcc_super);
212 out_sbi:
213         OBD_FREE_PTR(sbi);
214         RETURN(ERR_PTR(rc));
215 }
216
217 static void ll_free_sbi(struct super_block *sb)
218 {
219         struct ll_sb_info *sbi = ll_s2sbi(sb);
220         ENTRY;
221
222         if (sbi != NULL) {
223                 if (!list_empty(&sbi->ll_squash.rsi_nosquash_nids))
224                         cfs_free_nidlist(&sbi->ll_squash.rsi_nosquash_nids);
225                 if (sbi->ll_ra_info.ll_readahead_wq)
226                         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
227                 if (sbi->ll_cache != NULL) {
228                         cl_cache_decref(sbi->ll_cache);
229                         sbi->ll_cache = NULL;
230                 }
231                 if (sbi->ll_foreign_symlink_prefix) {
232                         OBD_FREE(sbi->ll_foreign_symlink_prefix,
233                                  sbi->ll_foreign_symlink_prefix_size);
234                         sbi->ll_foreign_symlink_prefix = NULL;
235                 }
236                 if (sbi->ll_foreign_symlink_upcall) {
237                         OBD_FREE(sbi->ll_foreign_symlink_upcall,
238                                  strlen(sbi->ll_foreign_symlink_upcall) +
239                                        1);
240                         sbi->ll_foreign_symlink_upcall = NULL;
241                 }
242                 if (sbi->ll_foreign_symlink_upcall_items) {
243                         int i;
244                         int nb_items = sbi->ll_foreign_symlink_upcall_nb_items;
245                         struct ll_foreign_symlink_upcall_item *items =
246                                 sbi->ll_foreign_symlink_upcall_items;
247
248                         for (i = 0 ; i < nb_items; i++)
249                                 if (items[i].type == STRING_TYPE)
250                                         OBD_FREE(items[i].string,
251                                                        items[i].size);
252
253                         OBD_FREE_LARGE(items, nb_items *
254                                 sizeof(struct ll_foreign_symlink_upcall_item));
255                         sbi->ll_foreign_symlink_upcall_items = NULL;
256                 }
257                 ll_free_rw_stats_info(sbi);
258                 pcc_super_fini(&sbi->ll_pcc_super);
259                 OBD_FREE(sbi, sizeof(*sbi));
260         }
261         EXIT;
262 }
263
264 static int client_common_fill_super(struct super_block *sb, char *md, char *dt)
265 {
266         struct inode *root = NULL;
267         struct ll_sb_info *sbi = ll_s2sbi(sb);
268         struct obd_statfs *osfs = NULL;
269         struct ptlrpc_request *request = NULL;
270         struct obd_connect_data *data = NULL;
271         struct obd_uuid *uuid;
272         struct md_op_data *op_data;
273         struct lustre_md lmd;
274         u64 valid;
275         int size, err, checksum;
276         bool api32;
277         void *encctx;
278         int encctxlen;
279
280         ENTRY;
281         sbi->ll_md_obd = class_name2obd(md);
282         if (!sbi->ll_md_obd) {
283                 CERROR("MD %s: not setup or attached\n", md);
284                 RETURN(-EINVAL);
285         }
286
287         OBD_ALLOC_PTR(data);
288         if (data == NULL)
289                 RETURN(-ENOMEM);
290
291         OBD_ALLOC_PTR(osfs);
292         if (osfs == NULL) {
293                 OBD_FREE_PTR(data);
294                 RETURN(-ENOMEM);
295         }
296
297         /* pass client page size via ocd_grant_blkbits; the server should report
298          * back its backend blocksize for grant calculation purposes */
299         data->ocd_grant_blkbits = PAGE_SHIFT;
300
301         /* indicate MDT features supported by this client */
302         data->ocd_connect_flags = OBD_CONNECT_IBITS    | OBD_CONNECT_NODEVOH  |
303                                   OBD_CONNECT_ATTRFID  | OBD_CONNECT_GRANT |
304                                   OBD_CONNECT_VERSION  | OBD_CONNECT_BRW_SIZE |
305                                   OBD_CONNECT_SRVLOCK  |
306                                   OBD_CONNECT_MDS_CAPA | OBD_CONNECT_OSS_CAPA |
307                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID     |
308                                   OBD_CONNECT_AT       | OBD_CONNECT_LOV_V3   |
309                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
310                                   OBD_CONNECT_64BITHASH |
311                                   OBD_CONNECT_EINPROGRESS |
312                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
313                                   OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS|
314                                   OBD_CONNECT_MAX_EASIZE |
315                                   OBD_CONNECT_FLOCK_DEAD |
316                                   OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK |
317                                   OBD_CONNECT_OPEN_BY_FID |
318                                   OBD_CONNECT_DIR_STRIPE |
319                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_CKSUM |
320                                   OBD_CONNECT_SUBTREE |
321                                   OBD_CONNECT_MULTIMODRPCS |
322                                   OBD_CONNECT_GRANT_PARAM |
323                                   OBD_CONNECT_GRANT_SHRINK |
324                                   OBD_CONNECT_SHORTIO | OBD_CONNECT_FLAGS2;
325
326         data->ocd_connect_flags2 = OBD_CONNECT2_DIR_MIGRATE |
327                                    OBD_CONNECT2_SUM_STATFS |
328                                    OBD_CONNECT2_OVERSTRIPING |
329                                    OBD_CONNECT2_FLR |
330                                    OBD_CONNECT2_LOCK_CONVERT |
331                                    OBD_CONNECT2_ARCHIVE_ID_ARRAY |
332                                    OBD_CONNECT2_INC_XID |
333                                    OBD_CONNECT2_LSOM |
334                                    OBD_CONNECT2_ASYNC_DISCARD |
335                                    OBD_CONNECT2_PCC |
336                                    OBD_CONNECT2_CRUSH | OBD_CONNECT2_LSEEK |
337                                    OBD_CONNECT2_GETATTR_PFID |
338                                    OBD_CONNECT2_DOM_LVB |
339                                    OBD_CONNECT2_REP_MBITS |
340                                    OBD_CONNECT2_ATOMIC_OPEN_LOCK;
341
342 #ifdef HAVE_LRU_RESIZE_SUPPORT
343         if (test_bit(LL_SBI_LRU_RESIZE, sbi->ll_flags))
344                 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
345 #endif
346         data->ocd_connect_flags |= OBD_CONNECT_ACL_FLAGS;
347
348         data->ocd_cksum_types = obd_cksum_types_supported_client();
349
350         if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
351                 /* flag mdc connection as lightweight, only used for test
352                  * purposes, use with care */
353                 data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
354
355         data->ocd_ibits_known = MDS_INODELOCK_FULL;
356         data->ocd_version = LUSTRE_VERSION_CODE;
357
358         if (sb->s_flags & SB_RDONLY)
359                 data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
360         if (test_bit(LL_SBI_USER_XATTR, sbi->ll_flags))
361                 data->ocd_connect_flags |= OBD_CONNECT_XATTR;
362
363 #ifdef SB_NOSEC
364         /* Setting this indicates we correctly support S_NOSEC (See kernel
365          * commit 9e1f1de02c2275d7172e18dc4e7c2065777611bf)
366          */
367         sb->s_flags |= SB_NOSEC;
368 #endif
369         sbi->ll_fop = ll_select_file_operations(sbi);
370
371         /* always ping even if server suppress_pings */
372         if (test_bit(LL_SBI_ALWAYS_PING, sbi->ll_flags))
373                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
374
375         obd_connect_set_secctx(data);
376         if (ll_sbi_has_encrypt(sbi))
377                 obd_connect_set_enc(data);
378
379 #if defined(CONFIG_SECURITY)
380         data->ocd_connect_flags2 |= OBD_CONNECT2_SELINUX_POLICY;
381 #endif
382
383         data->ocd_brw_size = MD_MAX_BRW_SIZE;
384
385         err = obd_connect(NULL, &sbi->ll_md_exp, sbi->ll_md_obd,
386                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
387         if (err == -EBUSY) {
388                 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
389                                    "recovery, of which this client is not a "
390                                    "part. Please wait for recovery to complete,"
391                                    " abort, or time out.\n", md);
392                 GOTO(out, err);
393         } else if (err) {
394                 CERROR("cannot connect to %s: rc = %d\n", md, err);
395                 GOTO(out, err);
396         }
397
398         sbi->ll_md_exp->exp_connect_data = *data;
399
400         err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
401                            LUSTRE_SEQ_METADATA);
402         if (err) {
403                 CERROR("%s: Can't init metadata layer FID infrastructure, "
404                        "rc = %d\n", sbi->ll_md_exp->exp_obd->obd_name, err);
405                 GOTO(out_md, err);
406         }
407
408         /* For mount, we only need fs info from MDT0; in DNE this also
409          * ensures the client can be mounted as long as MDT0 is
410          * available */
411         err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
412                         ktime_get_seconds() - sbi->ll_statfs_max_age,
413                         OBD_STATFS_FOR_MDT0);
414         if (err)
415                 GOTO(out_md_fid, err);
416
417         /* This needs to be after statfs to ensure connect has finished.
418          * Note that "data" does NOT contain the valid connect reply.
419          * If connecting to a 1.8 server there will be no LMV device, so
420          * we can access the MDC export directly and exp_connect_flags will
421          * be non-zero, but if accessing an upgraded 2.1 server it will
422          * have the correct flags filled in.
423          * XXX: fill in the LMV exp_connect_flags from MDC(s). */
424         valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
425         if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
426             valid != CLIENT_CONNECT_MDT_REQD) {
427                 char *buf;
428
429                 OBD_ALLOC_WAIT(buf, PAGE_SIZE);
430                 obd_connect_flags2str(buf, PAGE_SIZE,
431                                       valid ^ CLIENT_CONNECT_MDT_REQD, 0, ",");
432                 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support "
433                                    "feature(s) needed for correct operation "
434                                    "of this client (%s). Please upgrade "
435                                    "server or downgrade client.\n",
436                                    sbi->ll_md_exp->exp_obd->obd_name, buf);
437                 OBD_FREE(buf, PAGE_SIZE);
438                 GOTO(out_md_fid, err = -EPROTO);
439         }
440
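        /* Fetch the connect data actually granted by the MDT; so far the
         * local "data" buffer only holds what this client requested. */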
441         size = sizeof(*data);
442         err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
443                            KEY_CONN_DATA,  &size, data);
444         if (err) {
445                 CERROR("%s: Get connect data failed: rc = %d\n",
446                        sbi->ll_md_exp->exp_obd->obd_name, err);
447                 GOTO(out_md_fid, err);
448         }
449
450         LASSERT(osfs->os_bsize);
451         sb->s_blocksize = osfs->os_bsize;
452         sb->s_blocksize_bits = log2(osfs->os_bsize);
453         sb->s_magic = LL_SUPER_MAGIC;
454         sb->s_maxbytes = MAX_LFS_FILESIZE;
455         sbi->ll_namelen = osfs->os_namelen;
456         sbi->ll_mnt.mnt = current->fs->root.mnt;
457         sbi->ll_mnt_ns = current->nsproxy->mnt_ns;
458
459         if (test_bit(LL_SBI_USER_XATTR, sbi->ll_flags) &&
460             !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
461                 LCONSOLE_INFO("Disabling user_xattr feature because "
462                               "it is not supported on the server\n");
463                 clear_bit(LL_SBI_USER_XATTR, sbi->ll_flags);
464         }
465
466         if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
467 #ifdef SB_POSIXACL
468                 sb->s_flags |= SB_POSIXACL;
469 #endif
470                 set_bit(LL_SBI_ACL, sbi->ll_flags);
471         } else {
472                 LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
473 #ifdef SB_POSIXACL
474                 sb->s_flags &= ~SB_POSIXACL;
475 #endif
476                 clear_bit(LL_SBI_ACL, sbi->ll_flags);
477         }
478
479         if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
480                 set_bit(LL_SBI_64BIT_HASH, sbi->ll_flags);
481
482         if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK)
483                 set_bit(LL_SBI_LAYOUT_LOCK, sbi->ll_flags);
484
485         if (obd_connect_has_secctx(data))
486                 set_bit(LL_SBI_FILE_SECCTX, sbi->ll_flags);
487
488         if (ll_sbi_has_encrypt(sbi) && !obd_connect_has_enc(data)) {
489                 if (ll_sbi_has_test_dummy_encryption(sbi))
490                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
491                                       sbi->ll_fsname,
492                                       sbi->ll_md_exp->exp_obd->obd_name);
493                 ll_sbi_set_encrypt(sbi, false);
494         }
495
496         if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
497                 if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
498                         LCONSOLE_INFO("%s: disabling xattr cache due to "
499                                       "unknown maximum xattr size.\n", dt);
500                 } else if (!sbi->ll_xattr_cache_set) {
501                         /* If xattr_cache was already set (to either 0 or 1)
502                          * while processing the llog, don't enable it here. */
503                         set_bit(LL_SBI_XATTR_CACHE, sbi->ll_flags);
504                         sbi->ll_xattr_cache_enabled = 1;
505                 }
506         }
507
508         sbi->ll_dt_obd = class_name2obd(dt);
509         if (!sbi->ll_dt_obd) {
510                 CERROR("DT %s: not setup or attached\n", dt);
511                 GOTO(out_md_fid, err = -ENODEV);
512         }
513
514         /* pass client page size via ocd_grant_blkbits; the server should report
515          * back its backend blocksize for grant calculation purposes */
516         data->ocd_grant_blkbits = PAGE_SHIFT;
517
518         /* indicate OST features supported by this client */
519         data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
520                                   OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
521                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
522                                   OBD_CONNECT_SRVLOCK |
523                                   OBD_CONNECT_AT | OBD_CONNECT_OSS_CAPA |
524                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
525                                   OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
526                                   OBD_CONNECT_EINPROGRESS |
527                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
528                                   OBD_CONNECT_LAYOUTLOCK |
529                                   OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK |
530                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_SHORTIO |
531                                   OBD_CONNECT_FLAGS2 | OBD_CONNECT_GRANT_SHRINK;
532         data->ocd_connect_flags2 = OBD_CONNECT2_LOCKAHEAD |
533                                    OBD_CONNECT2_INC_XID | OBD_CONNECT2_LSEEK |
534                                    OBD_CONNECT2_REP_MBITS;
535
536         if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_GRANT_PARAM))
537                 data->ocd_connect_flags |= OBD_CONNECT_GRANT_PARAM;
538
539         /* OBD_CONNECT_CKSUM should always be set, even if checksums are
540          * disabled by default, because it can still be enabled on the
541          * fly via /sys. As a consequence, we still need to come to an
542          * agreement on the supported algorithms at connect time
543          */
544         data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
545
546         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
547                 data->ocd_cksum_types = OBD_CKSUM_ADLER;
548         else
549                 data->ocd_cksum_types = obd_cksum_types_supported_client();
550
551 #ifdef HAVE_LRU_RESIZE_SUPPORT
552         data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
553 #endif
554         /* always ping even if server suppress_pings */
555         if (test_bit(LL_SBI_ALWAYS_PING, sbi->ll_flags))
556                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
557
558         if (ll_sbi_has_encrypt(sbi))
559                 obd_connect_set_enc(data);
560
561         CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d "
562                "ocd_grant: %d\n", data->ocd_connect_flags,
563                data->ocd_version, data->ocd_grant);
564
565         sbi->ll_dt_obd->obd_upcall.onu_owner = &sbi->ll_lco;
566         sbi->ll_dt_obd->obd_upcall.onu_upcall = cl_ocd_update;
567
568         data->ocd_brw_size = DT_MAX_BRW_SIZE;
569
570         err = obd_connect(NULL, &sbi->ll_dt_exp, sbi->ll_dt_obd,
571                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
572         if (err == -EBUSY) {
573                 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
574                                    "recovery, of which this client is not a "
575                                    "part.  Please wait for recovery to "
576                                    "complete, abort, or time out.\n", dt);
577                 GOTO(out_md, err);
578         } else if (err) {
579                 CERROR("%s: Cannot connect to %s: rc = %d\n",
580                        sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
581                 GOTO(out_md, err);
582         }
583
584         if (ll_sbi_has_encrypt(sbi) &&
585             !obd_connect_has_enc(&sbi->ll_dt_obd->u.lov.lov_ocd)) {
586                 if (ll_sbi_has_test_dummy_encryption(sbi))
587                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
588                                       sbi->ll_fsname, dt);
589                 ll_sbi_set_encrypt(sbi, false);
590         } else if (ll_sbi_has_test_dummy_encryption(sbi)) {
591                 LCONSOLE_WARN("Test dummy encryption mode enabled\n");
592         }
593
594         sbi->ll_dt_exp->exp_connect_data = *data;
595
596         /* Don't change value if it was specified in the config log */
597         if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages == -1) {
598                 sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
599                         max_t(unsigned long, SBI_DEFAULT_READ_AHEAD_WHOLE_MAX,
600                               (data->ocd_brw_size >> PAGE_SHIFT));
601                 if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages >
602                     sbi->ll_ra_info.ra_max_pages_per_file)
603                         sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
604                                 sbi->ll_ra_info.ra_max_pages_per_file;
605         }
606
607         err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
608                            LUSTRE_SEQ_METADATA);
609         if (err) {
610                 CERROR("%s: Can't init data layer FID infrastructure, "
611                        "rc = %d\n", sbi->ll_dt_exp->exp_obd->obd_name, err);
612                 GOTO(out_dt, err);
613         }
614
615         mutex_lock(&sbi->ll_lco.lco_lock);
616         sbi->ll_lco.lco_flags = data->ocd_connect_flags;
617         sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
618         sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
619         mutex_unlock(&sbi->ll_lco.lco_lock);
620
621         fid_zero(&sbi->ll_root_fid);
622         err = md_get_root(sbi->ll_md_exp, get_mount_fileset(sb),
623                            &sbi->ll_root_fid);
624         if (err) {
625                 CERROR("cannot mds_connect: rc = %d\n", err);
626                 GOTO(out_lock_cn_cb, err);
627         }
628         if (!fid_is_sane(&sbi->ll_root_fid)) {
629                 CERROR("%s: Invalid root fid "DFID" during mount\n",
630                        sbi->ll_md_exp->exp_obd->obd_name,
631                        PFID(&sbi->ll_root_fid));
632                 GOTO(out_lock_cn_cb, err = -EINVAL);
633         }
634         CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
635
636         sb->s_op = &lustre_super_operations;
637         sb->s_xattr = ll_xattr_handlers;
638 #if THREAD_SIZE >= 8192 /*b=17630*/
639         sb->s_export_op = &lustre_export_operations;
640 #endif
641 #ifdef HAVE_LUSTRE_CRYPTO
642         llcrypt_set_ops(sb, &lustre_cryptops);
643 #endif
644
645         /* make root inode
646          * XXX: move this to after cbd setup? */
647         valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE |
648                 OBD_MD_ENCCTX;
649         if (test_bit(LL_SBI_ACL, sbi->ll_flags))
650                 valid |= OBD_MD_FLACL;
651
652         OBD_ALLOC_PTR(op_data);
653         if (op_data == NULL)
654                 GOTO(out_lock_cn_cb, err = -ENOMEM);
655
656         op_data->op_fid1 = sbi->ll_root_fid;
657         op_data->op_mode = 0;
658         op_data->op_valid = valid;
659
660         err = md_getattr(sbi->ll_md_exp, op_data, &request);
661
662         /* We need enc ctx info, so reset it in op_data to
663          * prevent it from being freed.
664          */
665         encctx = op_data->op_file_encctx;
666         encctxlen = op_data->op_file_encctx_size;
667         op_data->op_file_encctx = NULL;
668         op_data->op_file_encctx_size = 0;
669         OBD_FREE_PTR(op_data);
670         if (err) {
671                 CERROR("%s: md_getattr failed for root: rc = %d\n",
672                        sbi->ll_md_exp->exp_obd->obd_name, err);
673                 GOTO(out_lock_cn_cb, err);
674         }
675
676         err = md_get_lustre_md(sbi->ll_md_exp, &request->rq_pill,
677                                sbi->ll_dt_exp, sbi->ll_md_exp, &lmd);
678         if (err) {
679                 CERROR("failed to understand root inode md: rc = %d\n", err);
680                 ptlrpc_req_finished(request);
681                 GOTO(out_lock_cn_cb, err);
682         }
683
684         LASSERT(fid_is_sane(&sbi->ll_root_fid));
685         api32 = test_bit(LL_SBI_32BIT_API, sbi->ll_flags);
686         root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid, api32), &lmd);
687         md_free_lustre_md(sbi->ll_md_exp, &lmd);
688
689         if (IS_ERR(root)) {
690                 lmd_clear_acl(&lmd);
691                 err = IS_ERR(root) ? PTR_ERR(root) : -EBADF;
692                 root = NULL;
693                 CERROR("%s: bad ll_iget() for root: rc = %d\n",
694                        sbi->ll_fsname, err);
695                 ptlrpc_req_finished(request);
696                 GOTO(out_root, err);
697         }
698
699         if (encctxlen) {
700                 CDEBUG(D_SEC,
701                        "server returned encryption ctx for root inode "DFID"\n",
702                        PFID(&sbi->ll_root_fid));
703                 err = ll_set_encflags(root, encctx, encctxlen, true);
704                 if (err)
705                         CWARN("%s: cannot set enc ctx for "DFID": rc = %d\n",
706                               sbi->ll_fsname,
707                               PFID(&sbi->ll_root_fid), err);
708         }
709         ptlrpc_req_finished(request);
710
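        /* If checksums were explicitly enabled or disabled (mount option or
         * config log), push that setting down to the data export now. */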
711         checksum = test_bit(LL_SBI_CHECKSUM, sbi->ll_flags);
712         if (sbi->ll_checksum_set) {
713                 err = obd_set_info_async(NULL, sbi->ll_dt_exp,
714                                          sizeof(KEY_CHECKSUM), KEY_CHECKSUM,
715                                          sizeof(checksum), &checksum, NULL);
716                 if (err) {
717                         CERROR("%s: Set checksum failed: rc = %d\n",
718                                sbi->ll_dt_exp->exp_obd->obd_name, err);
719                         GOTO(out_root, err);
720                 }
721         }
722         cl_sb_init(sb);
723
724         sb->s_root = d_make_root(root);
725         if (sb->s_root == NULL) {
726                 err = -ENOMEM;
727                 CERROR("%s: can't make root dentry: rc = %d\n",
728                        sbi->ll_fsname, err);
729                 GOTO(out_root, err);
730         }
731
732         sbi->ll_sdev_orig = sb->s_dev;
733
734         /* We set sb->s_dev equal on all lustre clients in order to support
735          * NFS export clustering.  NFSD requires that the FSID be the same
736          * on all clients. */
737         /* s_dev is also used in lt_compare() to compare two fs, but that is
738          * only a node-local comparison. */
739         uuid = obd_get_uuid(sbi->ll_md_exp);
740         if (uuid != NULL)
741                 sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
742
743         if (data != NULL)
744                 OBD_FREE_PTR(data);
745         if (osfs != NULL)
746                 OBD_FREE_PTR(osfs);
747
748         if (sbi->ll_dt_obd) {
749                 err = sysfs_create_link(&sbi->ll_kset.kobj,
750                                         &sbi->ll_dt_obd->obd_kset.kobj,
751                                         sbi->ll_dt_obd->obd_type->typ_name);
752                 if (err < 0) {
753                         CERROR("%s: could not register %s in llite: rc = %d\n",
754                                dt, sbi->ll_fsname, err);
755                         err = 0;
756                 }
757         }
758
759         if (sbi->ll_md_obd) {
760                 err = sysfs_create_link(&sbi->ll_kset.kobj,
761                                         &sbi->ll_md_obd->obd_kset.kobj,
762                                         sbi->ll_md_obd->obd_type->typ_name);
763                 if (err < 0) {
764                         CERROR("%s: could not register %s in llite: rc = %d\n",
765                                md, sbi->ll_fsname, err);
766                         err = 0;
767                 }
768         }
769
770         RETURN(err);
771 out_root:
772         iput(root);
773 out_lock_cn_cb:
774         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
775 out_dt:
776         obd_disconnect(sbi->ll_dt_exp);
777         sbi->ll_dt_exp = NULL;
778         sbi->ll_dt_obd = NULL;
779 out_md_fid:
780         obd_fid_fini(sbi->ll_md_exp->exp_obd);
781 out_md:
782         obd_disconnect(sbi->ll_md_exp);
783         sbi->ll_md_exp = NULL;
784         sbi->ll_md_obd = NULL;
785 out:
786         if (data != NULL)
787                 OBD_FREE_PTR(data);
788         if (osfs != NULL)
789                 OBD_FREE_PTR(osfs);
790         return err;
791 }
792
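/**
 * Get the maximum EA sizes from the data and metadata exports.
 *
 * Queries KEY_MAX_EASIZE from both the data (ll_dt_exp) and metadata
 * (ll_md_exp) exports; *lmmsize is left holding the metadata-layer value.
 *
 * \param[in] sbi       superblock info for this filesystem
 * \param[out] lmmsize  pointer to storage location for value
 *
 * \retval 0            on success
 * \retval negative     negated errno on failure
 */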
793 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
794 {
795         int size, rc;
796
797         size = sizeof(*lmmsize);
798         rc = obd_get_info(NULL, sbi->ll_dt_exp, sizeof(KEY_MAX_EASIZE),
799                           KEY_MAX_EASIZE, &size, lmmsize);
800         if (rc != 0) {
801                 CERROR("%s: cannot get max LOV EA size: rc = %d\n",
802                        sbi->ll_dt_exp->exp_obd->obd_name, rc);
803                 RETURN(rc);
804         }
805
806         CDEBUG(D_INFO, "max LOV ea size: %d\n", *lmmsize);
807
808         size = sizeof(int);
809         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
810                           KEY_MAX_EASIZE, &size, lmmsize);
811         if (rc)
812                 CERROR("Get max mdsize error rc %d\n", rc);
813
814         CDEBUG(D_INFO, "max LMV ea size: %d\n", *lmmsize);
815
816         RETURN(rc);
817 }
818
819 /**
820  * Get the value of the default_easize parameter.
821  *
822  * \see client_obd::cl_default_mds_easize
823  *
824  * \param[in] sbi       superblock info for this filesystem
825  * \param[out] lmmsize  pointer to storage location for value
826  *
827  * \retval 0            on success
828  * \retval negative     negated errno on failure
829  */
830 int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
831 {
832         int size, rc;
833
834         size = sizeof(int);
835         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
836                          KEY_DEFAULT_EASIZE, &size, lmmsize);
837         if (rc)
838                 CERROR("Get default mdsize error rc %d\n", rc);
839
840         RETURN(rc);
841 }
842
843 /**
844  * Set the default_easize parameter to the given value.
845  *
846  * \see client_obd::cl_default_mds_easize
847  *
848  * \param[in] sbi       superblock info for this filesystem
849  * \param[in] lmmsize   the size to set
850  *
851  * \retval 0            on success
852  * \retval negative     negated errno on failure
853  */
854 int ll_set_default_mdsize(struct ll_sb_info *sbi, int lmmsize)
855 {
856         int rc;
857
858         if (lmmsize < sizeof(struct lov_mds_md) ||
859             lmmsize > OBD_MAX_DEFAULT_EA_SIZE)
860                 return -EINVAL;
861
862         rc = obd_set_info_async(NULL, sbi->ll_md_exp,
863                                 sizeof(KEY_DEFAULT_EASIZE), KEY_DEFAULT_EASIZE,
864                                 sizeof(int), &lmmsize, NULL);
865
866         RETURN(rc);
867 }
868
869 static void client_common_put_super(struct super_block *sb)
870 {
871         struct ll_sb_info *sbi = ll_s2sbi(sb);
872         ENTRY;
873
874         cl_sb_fini(sb);
875
876         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
877         obd_disconnect(sbi->ll_dt_exp);
878         sbi->ll_dt_exp = NULL;
879
880         ll_debugfs_unregister_super(sb);
881
882         obd_fid_fini(sbi->ll_md_exp->exp_obd);
883         obd_disconnect(sbi->ll_md_exp);
884         sbi->ll_md_exp = NULL;
885
886         EXIT;
887 }
888
889 void ll_kill_super(struct super_block *sb)
890 {
891         struct ll_sb_info *sbi;
892         ENTRY;
893
894         /* sb not initialized? */
895         if (!(sb->s_flags & SB_ACTIVE))
896                 return;
897
898         sbi = ll_s2sbi(sb);
899         /* we need to restore s_dev from its value changed for clustered NFS
900          * before put_super, because new kernels cache s_dev, and changing
901          * sb->s_dev in put_super does not affect the real device removal */
902         if (sbi) {
903                 sb->s_dev = sbi->ll_sdev_orig;
904
905                 /* wait running statahead threads to quit */
906                 while (atomic_read(&sbi->ll_sa_running) > 0)
907                         schedule_timeout_uninterruptible(
908                                 cfs_time_seconds(1) >> 3);
909         }
910
911         EXIT;
912 }
913
914 /* Since this table is also used for ll_sbi_flags_seq_show, make
915  * sure that, for any token listed more than once below, the name
916  * you want displayed comes first. For example we want
917  * "checksum" displayed, not "nochecksum", for the
918  * sbi_flags.
919  */
920 static const match_table_t ll_sbi_flags_name = {
921         {LL_SBI_NOLCK,                  "nolock"},
922         {LL_SBI_CHECKSUM,               "checksum"},
923         {LL_SBI_CHECKSUM,               "nochecksum"},
924         {LL_SBI_LOCALFLOCK,             "localflock"},
925         {LL_SBI_FLOCK,                  "flock"},
926         {LL_SBI_FLOCK,                  "noflock"},
927         {LL_SBI_USER_XATTR,             "user_xattr"},
928         {LL_SBI_USER_XATTR,             "nouser_xattr"},
929         {LL_SBI_LRU_RESIZE,             "lruresize"},
930         {LL_SBI_LRU_RESIZE,             "nolruresize"},
931         {LL_SBI_LAZYSTATFS,             "lazystatfs"},
932         {LL_SBI_LAZYSTATFS,             "nolazystatfs"},
933         {LL_SBI_32BIT_API,              "32bitapi"},
934         {LL_SBI_USER_FID2PATH,          "user_fid2path"},
935         {LL_SBI_USER_FID2PATH,          "nouser_fid2path"},
936         {LL_SBI_VERBOSE,                "verbose"},
937         {LL_SBI_VERBOSE,                "noverbose"},
938         {LL_SBI_ALWAYS_PING,            "always_ping"},
939         {LL_SBI_TEST_DUMMY_ENCRYPTION,  "test_dummy_encryption"},
940         {LL_SBI_ENCRYPT,                "encrypt"},
941         {LL_SBI_ENCRYPT,                "noencrypt"},
942         {LL_SBI_FOREIGN_SYMLINK,        "foreign_symlink=%s"},
943         {LL_SBI_NUM_MOUNT_OPT,          NULL},
944
945         {LL_SBI_ACL,                    "acl"},
946         {LL_SBI_AGL_ENABLED,            "agl"},
947         {LL_SBI_64BIT_HASH,             "64bit_hash"},
948         {LL_SBI_LAYOUT_LOCK,            "layout"},
949         {LL_SBI_XATTR_CACHE,            "xattr_cache"},
950         {LL_SBI_NOROOTSQUASH,           "norootsquash"},
951         {LL_SBI_FAST_READ,              "fast_read"},
952         {LL_SBI_FILE_SECCTX,            "file_secctx"},
953         {LL_SBI_TINY_WRITE,             "tiny_write"},
954         {LL_SBI_FILE_HEAT,              "file_heat"},
955         {LL_SBI_PARALLEL_DIO,           "parallel_dio"},
956 };
957
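/* Show the mount-option name for each ll_flags bit that is currently set,
 * using the first matching entry in ll_sbi_flags_name above. */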
958 int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
959 {
960         struct super_block *sb = m->private;
961         int i;
962
963         for (i = 0; i < LL_SBI_NUM_FLAGS; i++) {
964                 int j;
965
966                 if (!test_bit(i, ll_s2sbi(sb)->ll_flags))
967                         continue;
968
969                 for (j = 0; j < ARRAY_SIZE(ll_sbi_flags_name); j++) {
970                         if (ll_sbi_flags_name[j].token == i &&
971                             ll_sbi_flags_name[j].pattern) {
972                                 seq_printf(m, "%s ",
973                                            ll_sbi_flags_name[j].pattern);
974                                 break;
975                         }
976                 }
977         }
978         seq_puts(m, "\b\n");
979         return 0;
980 }
981
982 /* non-client-specific mount options are parsed in lmd_parse */
983 static int ll_options(char *options, struct super_block *sb)
984 {
985         struct ll_sb_info *sbi = ll_s2sbi(sb);
986         char *s2, *s1, *opts;
987
988         ENTRY;
989         if (!options)
990                 RETURN(0);
991
992         /* Don't stomp on lmd_opts */
993         opts = kstrdup(options, GFP_KERNEL);
994         if (!opts)
995                 RETURN(-ENOMEM);
996         s1 = opts;
997         s2 = opts;
998
999         CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
1000
1001         while ((s1 = strsep(&opts, ",")) != NULL) {
1002                 substring_t args[MAX_OPT_ARGS];
1003                 bool turn_off = false;
1004                 int token;
1005
1006                 if (!*s1)
1007                         continue;
1008
1009                 CDEBUG(D_SUPER, "next opt=%s\n", s1);
1010
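                /* Options listed twice in ll_sbi_flags_name (e.g.
                 * "checksum"/"nochecksum") map to the same token; a leading
                 * "no" selects clearing the flag below instead of setting it. */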
1011                 if (strncmp(s1, "no", 2) == 0)
1012                         turn_off = true;
1013
1014                 /*
1015                  * Initialize args struct so we know whether arg was
1016                  * found; some options take optional arguments.
1017                  */
1018                 args[0].to = NULL;
1019                 args[0].from = NULL;
1020                 token = match_token(s1, ll_sbi_flags_name, args);
1021                 if (token == LL_SBI_NUM_MOUNT_OPT) {
1022                         if (match_wildcard("context", s1) ||
1023                             match_wildcard("fscontext", s1) ||
1024                             match_wildcard("defcontext", s1) ||
1025                             match_wildcard("rootcontext", s1))
1026                                 continue;
1027
1028                         LCONSOLE_ERROR_MSG(0x152,
1029                                            "Unknown option '%s', won't mount.\n",
1030                                            s1);
1031                         RETURN(-EINVAL);
1032                 }
1033
1034                 switch (token) {
1035                 case LL_SBI_NOLCK:
1036                 case LL_SBI_32BIT_API:
1037                 case LL_SBI_64BIT_HASH:
1038                 case LL_SBI_ALWAYS_PING:
1039                         set_bit(token, sbi->ll_flags);
1040                         break;
1041
1042                 case LL_SBI_FLOCK:
1043                         clear_bit(LL_SBI_LOCALFLOCK, sbi->ll_flags);
1044                         if (turn_off)
1045                                 clear_bit(LL_SBI_FLOCK, sbi->ll_flags);
1046                         else
1047                                 set_bit(token, sbi->ll_flags);
1048                         break;
1049
1050                 case LL_SBI_LOCALFLOCK:
1051                         clear_bit(LL_SBI_FLOCK, sbi->ll_flags);
1052                         set_bit(token, sbi->ll_flags);
1053                         break;
1054
1055                 case LL_SBI_CHECKSUM:
1056                         sbi->ll_checksum_set = 1;
1057                         fallthrough;
1058                 case LL_SBI_USER_XATTR:
1059                 case LL_SBI_USER_FID2PATH:
1060                 case LL_SBI_LRU_RESIZE:
1061                 case LL_SBI_LAZYSTATFS:
1062                 case LL_SBI_VERBOSE:
1063                         if (turn_off)
1064                                 clear_bit(token, sbi->ll_flags);
1065                         else
1066                                 set_bit(token, sbi->ll_flags);
1067                         break;
1068                 case LL_SBI_TEST_DUMMY_ENCRYPTION: {
1069 #ifdef HAVE_LUSTRE_CRYPTO
1070                         set_bit(token, sbi->ll_flags);
1071 #else
1072                         LCONSOLE_WARN("Test dummy encryption mount option ignored: encryption not supported\n");
1073 #endif
1074                         break;
1075                 }
1076                 case LL_SBI_ENCRYPT:
1077 #ifdef HAVE_LUSTRE_CRYPTO
1078                         if (turn_off)
1079                                 clear_bit(token, sbi->ll_flags);
1080                         else
1081                                 set_bit(token, sbi->ll_flags);
1082 #else
1083                         LCONSOLE_WARN("noencrypt or encrypt mount option ignored: encryption not supported\n");
1084 #endif
1085                         break;
1086                 case LL_SBI_FOREIGN_SYMLINK:
1087                         /* non-default prefix provided ? */
1088                         if (args->from) {
1089                                 size_t old_len;
1090                                 char *old;
1091
1092                                 /* path must be absolute */
1093                                 if (args->from[0] != '/') {
1094                                         LCONSOLE_ERROR_MSG(0x152,
1095                                                            "foreign prefix '%s' must be an absolute path\n",
1096                                                            args->from);
1097                                         RETURN(-EINVAL);
1098                                 }
1099
1100                                 old_len = sbi->ll_foreign_symlink_prefix_size;
1101                                 old = sbi->ll_foreign_symlink_prefix;
1102                                 /* alloc for path length and '\0' */
1103                                 sbi->ll_foreign_symlink_prefix = match_strdup(args);
1104                                 if (!sbi->ll_foreign_symlink_prefix) {
1105                                         /* restore previous */
1106                                         sbi->ll_foreign_symlink_prefix = old;
1107                                         sbi->ll_foreign_symlink_prefix_size =
1108                                                 old_len;
1109                                         RETURN(-ENOMEM);
1110                                 }
1111                                 sbi->ll_foreign_symlink_prefix_size =
1112                                         args->to - args->from + 1;
1113                                 OBD_ALLOC_POST(sbi->ll_foreign_symlink_prefix,
1114                                                sbi->ll_foreign_symlink_prefix_size,
1115                                                "kmalloced");
1116                                 if (old)
1117                                         OBD_FREE(old, old_len);
1118
1119                                 /* enable foreign symlink support */
1120                                 set_bit(token, sbi->ll_flags);
1121                         } else {
1122                                 LCONSOLE_ERROR_MSG(0x152,
1123                                                    "invalid %s option\n", s1);
1124                         }
1125                 fallthrough;
1126                 default:
1127                         break;
1128                 }
1129         }
1130         kfree(opts);
1131         RETURN(0);
1132 }
1133
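/* Initialize the llite-private part of a freshly allocated inode.  The caller
 * must already have set i_mode, since directories (statahead state) and
 * regular files are set up differently. */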
1134 void ll_lli_init(struct ll_inode_info *lli)
1135 {
1136         lli->lli_inode_magic = LLI_INODE_MAGIC;
1137         lli->lli_flags = 0;
1138         rwlock_init(&lli->lli_lock);
1139         lli->lli_posix_acl = NULL;
1140         /* Do not set lli_fid, it has been initialized already. */
1141         fid_zero(&lli->lli_pfid);
1142         lli->lli_mds_read_och = NULL;
1143         lli->lli_mds_write_och = NULL;
1144         lli->lli_mds_exec_och = NULL;
1145         lli->lli_open_fd_read_count = 0;
1146         lli->lli_open_fd_write_count = 0;
1147         lli->lli_open_fd_exec_count = 0;
1148         mutex_init(&lli->lli_och_mutex);
1149         spin_lock_init(&lli->lli_agl_lock);
1150         spin_lock_init(&lli->lli_layout_lock);
1151         ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
1152         lli->lli_clob = NULL;
1153
1154         init_rwsem(&lli->lli_xattrs_list_rwsem);
1155         mutex_init(&lli->lli_xattrs_enq_lock);
1156
1157         LASSERT(lli->lli_vfs_inode.i_mode != 0);
1158         if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
1159                 lli->lli_opendir_key = NULL;
1160                 lli->lli_sai = NULL;
1161                 spin_lock_init(&lli->lli_sa_lock);
1162                 lli->lli_opendir_pid = 0;
1163                 lli->lli_sa_enabled = 0;
1164                 init_rwsem(&lli->lli_lsm_sem);
1165         } else {
1166                 mutex_init(&lli->lli_size_mutex);
1167                 mutex_init(&lli->lli_setattr_mutex);
1168                 lli->lli_symlink_name = NULL;
1169                 ll_trunc_sem_init(&lli->lli_trunc_sem);
1170                 range_lock_tree_init(&lli->lli_write_tree);
1171                 init_rwsem(&lli->lli_glimpse_sem);
1172                 lli->lli_glimpse_time = ktime_set(0, 0);
1173                 INIT_LIST_HEAD(&lli->lli_agl_list);
1174                 lli->lli_agl_index = 0;
1175                 lli->lli_async_rc = 0;
1176                 spin_lock_init(&lli->lli_heat_lock);
1177                 obd_heat_clear(lli->lli_heat_instances, OBD_HEAT_COUNT);
1178                 lli->lli_heat_flags = 0;
1179                 mutex_init(&lli->lli_pcc_lock);
1180                 lli->lli_pcc_state = PCC_STATE_FL_NONE;
1181                 lli->lli_pcc_inode = NULL;
1182                 lli->lli_pcc_dsflags = PCC_DATASET_INVALID;
1183                 lli->lli_pcc_generation = 0;
1184                 mutex_init(&lli->lli_group_mutex);
1185                 lli->lli_group_users = 0;
1186                 lli->lli_group_gid = 0;
1187         }
1188         mutex_init(&lli->lli_layout_mutex);
1189         memset(lli->lli_jobid, 0, sizeof(lli->lli_jobid));
1190         /* ll_cl_context initialize */
1191         INIT_LIST_HEAD(&lli->lli_lccs);
1192 }
1193
1194 #define MAX_STRING_SIZE 128
1195
1196 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1197
1198 #define LSI_BDI_INITIALIZED     0x00400000
1199
1200 #ifndef HAVE_BDI_CAP_MAP_COPY
1201 # define BDI_CAP_MAP_COPY       0
1202 #endif
1203
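/* Fallback for older kernels without super_setup_bdi_name(): initialize and
 * register a per-mount backing_dev_info by hand. */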
1204 static int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
1205 {
1206         struct  lustre_sb_info *lsi = s2lsi(sb);
1207         char buf[MAX_STRING_SIZE];
1208         va_list args;
1209         int err;
1210
1211         err = bdi_init(&lsi->lsi_bdi);
1212         if (err)
1213                 return err;
1214
1215         lsi->lsi_flags |= LSI_BDI_INITIALIZED;
1216         lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY;
1217         lsi->lsi_bdi.name = "lustre";
1218         va_start(args, fmt);
1219         vsnprintf(buf, MAX_STRING_SIZE, fmt, args);
1220         va_end(args);
1221         err = bdi_register(&lsi->lsi_bdi, NULL, "%s", buf);
1223         if (!err)
1224                 sb->s_bdi = &lsi->lsi_bdi;
1225
1226         return err;
1227 }
1228 #endif /* !HAVE_SUPER_SETUP_BDI_NAME */
1229
1230 int ll_fill_super(struct super_block *sb)
1231 {
1232         struct  lustre_profile *lprof = NULL;
1233         struct  lustre_sb_info *lsi = s2lsi(sb);
1234         struct  ll_sb_info *sbi = NULL;
1235         char    *dt = NULL, *md = NULL;
1236         char    *profilenm = get_profile_name(sb);
1237         struct config_llog_instance *cfg;
1238         /* %p for void* in printf needs 16+2 characters: 0xffffffffffffffff */
1239         const int instlen = LUSTRE_MAXINSTANCE + 2;
1240         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1241         char name[MAX_STRING_SIZE];
1242         int md_len = 0;
1243         int dt_len = 0;
1244         uuid_t uuid;
1245         char *ptr;
1246         int len;
1247         int err;
1248
1249         ENTRY;
1250         /* for ASLR, to map between cfg_instance and hashed ptr */
1251         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1252                profilenm, cfg_instance, sb);
1253
1254         OBD_RACE(OBD_FAIL_LLITE_RACE_MOUNT);
1255
1256         OBD_ALLOC_PTR(cfg);
1257         if (cfg == NULL)
1258                 GOTO(out_free_cfg, err = -ENOMEM);
1259
1260         /* client additional sb info */
1261         lsi->lsi_llsbi = sbi = ll_init_sbi();
1262         if (IS_ERR(sbi))
1263                 GOTO(out_free_cfg, err = PTR_ERR(sbi));
1264
1265         err = ll_options(lsi->lsi_lmd->lmd_opts, sb);
1266         if (err)
1267                 GOTO(out_free_cfg, err);
1268
1269         /* kernel >= 2.6.38 stores dentry operations in sb->s_d_op. */
1270         sb->s_d_op = &ll_d_ops;
1271
1272         /* UUID handling */
1273         generate_random_uuid(uuid.b);
1274         snprintf(sbi->ll_sb_uuid.uuid, sizeof(sbi->ll_sb_uuid), "%pU", uuid.b);
1275
1276         CDEBUG(D_CONFIG, "llite sb uuid: %s\n", sbi->ll_sb_uuid.uuid);
1277
1278         /* Get fsname */
1279         len = strlen(profilenm);
1280         ptr = strrchr(profilenm, '-');
1281         if (ptr && (strcmp(ptr, "-client") == 0))
1282                 len -= 7;
1283
1284         if (len > LUSTRE_MAXFSNAME) {
1285                 if (unlikely(len >= MAX_STRING_SIZE))
1286                         len = MAX_STRING_SIZE - 1;
1287                 strncpy(name, profilenm, len);
1288                 name[len] = '\0';
1289                 err = -ENAMETOOLONG;
1290                 CERROR("%s: fsname longer than %u characters: rc = %d\n",
1291                        name, LUSTRE_MAXFSNAME, err);
1292                 GOTO(out_free_cfg, err);
1293         }
1294         strncpy(sbi->ll_fsname, profilenm, len);
1295         sbi->ll_fsname[len] = '\0';
1296
1297         /* Mount info */
1298         snprintf(name, sizeof(name), "%.*s-%016lx", len,
1299                  profilenm, cfg_instance);
1300
1301         err = super_setup_bdi_name(sb, "%s", name);
1302         if (err)
1303                 GOTO(out_free_cfg, err);
1304
1305         /* disable kernel readahead */
1306         sb->s_bdi->ra_pages = 0;
1307
1308         /* Call ll_debugfs_register_super() before lustre_process_log()
1309          * so that "llite.*.*" params can be processed correctly.
1310          */
1311         err = ll_debugfs_register_super(sb, name);
1312         if (err < 0) {
1313                 CERROR("%s: could not register mountpoint in llite: rc = %d\n",
1314                        sbi->ll_fsname, err);
1315                 err = 0;
1316         }
1317
1318         /* The cfg_instance is a value unique to this super, in case some
1319          * joker tries to mount the same fs at two mount points.
1320          */
1321         cfg->cfg_instance = cfg_instance;
1322         cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
1323         cfg->cfg_callback = class_config_llog_handler;
1324         cfg->cfg_sub_clds = CONFIG_SUB_CLIENT;
1325         /* set up client obds */
1326         err = lustre_process_log(sb, profilenm, cfg);
1327         if (err < 0)
1328                 GOTO(out_debugfs, err);
1329
1330         /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
1331         lprof = class_get_profile(profilenm);
1332         if (lprof == NULL) {
1333                 LCONSOLE_ERROR_MSG(0x156,
1334                                    "The client profile '%s' could not be read from the MGS.  Does that filesystem exist?\n",
1335                                    profilenm);
1336                 GOTO(out_debugfs, err = -EINVAL);
1337         }
1338         CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
1339                lprof->lp_md, lprof->lp_dt);
1340
1341         dt_len = strlen(lprof->lp_dt) + instlen + 2;
1342         OBD_ALLOC(dt, dt_len);
1343         if (!dt)
1344                 GOTO(out_profile, err = -ENOMEM);
1345         snprintf(dt, dt_len - 1, "%s-%016lx", lprof->lp_dt, cfg_instance);
1346
1347         md_len = strlen(lprof->lp_md) + instlen + 2;
1348         OBD_ALLOC(md, md_len);
1349         if (!md)
1350                 GOTO(out_free_dt, err = -ENOMEM);
1351         snprintf(md, md_len - 1, "%s-%016lx", lprof->lp_md, cfg_instance);
1352
1353         /* connections, registrations, sb setup */
1354         err = client_common_fill_super(sb, md, dt);
1355         if (err < 0)
1356                 GOTO(out_free_md, err);
1357
1358         sbi->ll_client_common_fill_super_succeeded = 1;
1359
1360 out_free_md:
1361         if (md)
1362                 OBD_FREE(md, md_len);
1363 out_free_dt:
1364         if (dt)
1365                 OBD_FREE(dt, dt_len);
1366 out_profile:
1367         if (lprof)
1368                 class_put_profile(lprof);
1369 out_debugfs:
1370         if (err < 0)
1371                 ll_debugfs_unregister_super(sb);
1372 out_free_cfg:
1373         if (cfg)
1374                 OBD_FREE_PTR(cfg);
1375
1376         if (err)
1377                 ll_put_super(sb);
1378         else if (test_bit(LL_SBI_VERBOSE, sbi->ll_flags))
1379                 LCONSOLE_WARN("Mounted %s\n", profilenm);
1380         RETURN(err);
1381 } /* ll_fill_super */
1382
1383 void ll_put_super(struct super_block *sb)
1384 {
1385         struct config_llog_instance cfg, params_cfg;
1386         struct obd_device *obd;
1387         struct lustre_sb_info *lsi = s2lsi(sb);
1388         struct ll_sb_info *sbi = ll_s2sbi(sb);
1389         char *profilenm = get_profile_name(sb);
1390         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1391         long ccc_count;
1392         int next, force = 1, rc = 0;
1393         ENTRY;
1394
1395         if (IS_ERR(sbi))
1396                 GOTO(out_no_sbi, 0);
1397
1398         /* Should replace instance_id with something better for ASLR */
1399         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1400                profilenm, cfg_instance, sb);
1401
1402         cfg.cfg_instance = cfg_instance;
1403         lustre_end_log(sb, profilenm, &cfg);
1404
1405         params_cfg.cfg_instance = cfg_instance;
1406         lustre_end_log(sb, PARAMS_FILENAME, &params_cfg);
1407
1408         if (sbi->ll_md_exp) {
1409                 obd = class_exp2obd(sbi->ll_md_exp);
1410                 if (obd)
1411                         force = obd->obd_force;
1412         }
1413
1414         /* Wait for unstable pages to be committed to stable storage */
1415         if (force == 0) {
1416                 rc = l_wait_event_abortable(
1417                         sbi->ll_cache->ccc_unstable_waitq,
1418                         atomic_long_read(&sbi->ll_cache->ccc_unstable_nr) == 0);
1419         }
1420
1421         ccc_count = atomic_long_read(&sbi->ll_cache->ccc_unstable_nr);
1422         if (force == 0 && rc != -ERESTARTSYS)
1423                 LASSERTF(ccc_count == 0, "count: %li\n", ccc_count);
1424
1425         /* We need to set force before the lov_disconnect in
1426          * lustre_common_put_super, since lov_disconnect cleans up OSCs too.
1427          */
1428         if (force) {
1429                 next = 0;
1430                 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
1431                                                      &next)) != NULL) {
1432                         obd->obd_force = force;
1433                 }
1434         }
1435
1436         if (sbi->ll_client_common_fill_super_succeeded) {
1437                 /* Only if client_common_fill_super succeeded */
1438                 client_common_put_super(sb);
1439         }
1440
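        /* manually clean up every OBD device set up for this superblock */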
1441         next = 0;
1442         while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)))
1443                 class_manual_cleanup(obd);
1444
1445         if (test_bit(LL_SBI_VERBOSE, sbi->ll_flags))
1446                 LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");
1447
1448         if (profilenm)
1449                 class_del_profile(profilenm);
1450
1451 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1452         if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
1453                 bdi_destroy(&lsi->lsi_bdi);
1454                 lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
1455         }
1456 #endif
1457
1458         ll_free_sbi(sb);
1459         lsi->lsi_llsbi = NULL;
1460 out_no_sbi:
1461         lustre_common_put_super(sb);
1462
1463         cl_env_cache_purge(~0);
1464
1465         EXIT;
1466 } /* ll_put_super */
1467
1468 struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
1469 {
1470         struct inode *inode = NULL;
1471
1472         /* NOTE: we depend on atomic igrab() -bzzz */
1473         lock_res_and_lock(lock);
1474         if (lock->l_resource->lr_lvb_inode) {
1475                 struct ll_inode_info *lli;
1476                 lli = ll_i2info(lock->l_resource->lr_lvb_inode);
1477                 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1478                         inode = igrab(lock->l_resource->lr_lvb_inode);
1479                 } else {
1480                         inode = lock->l_resource->lr_lvb_inode;
1481                         LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ? D_INFO :
1482                                          D_WARNING, lock,
1483                                          "lr_lvb_inode %p is bogus: magic %08x",
1484                                          lock->l_resource->lr_lvb_inode,
1485                                          lli->lli_inode_magic);
1486                         inode = NULL;
1487                 }
1488         }
1489         unlock_res_and_lock(lock);
1490         return inode;
1491 }
1492
1493 void ll_dir_clear_lsm_md(struct inode *inode)
1494 {
1495         struct ll_inode_info *lli = ll_i2info(inode);
1496
1497         LASSERT(S_ISDIR(inode->i_mode));
1498
1499         if (lli->lli_lsm_md) {
1500                 lmv_free_memmd(lli->lli_lsm_md);
1501                 lli->lli_lsm_md = NULL;
1502         }
1503
1504         if (lli->lli_default_lsm_md) {
1505                 lmv_free_memmd(lli->lli_default_lsm_md);
1506                 lli->lli_default_lsm_md = NULL;
1507         }
1508 }
1509
1510 static struct inode *ll_iget_anon_dir(struct super_block *sb,
1511                                       const struct lu_fid *fid,
1512                                       struct lustre_md *md)
1513 {
1514         struct ll_sb_info *sbi = ll_s2sbi(sb);
1515         struct ll_inode_info *lli;
1516         struct mdt_body *body = md->body;
1517         struct inode *inode;
1518         ino_t ino;
1519
1520         ENTRY;
1521
1522         LASSERT(md->lmv);
1523         ino = cl_fid_build_ino(fid, test_bit(LL_SBI_32BIT_API, sbi->ll_flags));
1524         inode = iget_locked(sb, ino);
1525         if (inode == NULL) {
1526                 CERROR("%s: failed to get simple inode "DFID": rc = -ENOENT\n",
1527                        sbi->ll_fsname, PFID(fid));
1528                 RETURN(ERR_PTR(-ENOENT));
1529         }
1530
1531         lli = ll_i2info(inode);
1532         if (inode->i_state & I_NEW) {
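                /* a freshly allocated inode: set it up as a slave (stripe) directory */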
1533                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
1534                                 (body->mbo_mode & S_IFMT);
1535                 LASSERTF(S_ISDIR(inode->i_mode), "Not slave inode "DFID"\n",
1536                          PFID(fid));
1537
1538                 inode->i_mtime.tv_sec = 0;
1539                 inode->i_atime.tv_sec = 0;
1540                 inode->i_ctime.tv_sec = 0;
1541                 inode->i_rdev = 0;
1542
1543 #ifdef HAVE_BACKING_DEV_INFO
1544                 /* initializing backing dev info. */
1545                 inode->i_mapping->backing_dev_info =
1546                                                 &s2lsi(inode->i_sb)->lsi_bdi;
1547 #endif
1548                 inode->i_op = &ll_dir_inode_operations;
1549                 inode->i_fop = &ll_dir_operations;
1550                 lli->lli_fid = *fid;
1551                 ll_lli_init(lli);
1552
1553                 /* master object FID */
1554                 lli->lli_pfid = body->mbo_fid1;
1555                 CDEBUG(D_INODE, "lli %p slave "DFID" master "DFID"\n",
1556                        lli, PFID(fid), PFID(&lli->lli_pfid));
1557                 unlock_new_inode(inode);
1558         } else {
1559                 /* In directory restripe/auto-split, a plain directory will
1560                  * be transformed into a stripe; set its pfid here, otherwise
1561                  * ll_lock_cancel_bits() can't find the master inode.
1562                  */
1563                 lli->lli_pfid = body->mbo_fid1;
1564         }
1565
1566         RETURN(inode);
1567 }
1568
1569 static int ll_init_lsm_md(struct inode *inode, struct lustre_md *md)
1570 {
1571         struct lu_fid *fid;
1572         struct lmv_stripe_md *lsm = md->lmv;
1573         struct ll_inode_info *lli = ll_i2info(inode);
1574         int i;
1575
1576         LASSERT(lsm != NULL);
1577
1578         CDEBUG(D_INODE, "%s: "DFID" set dir layout:\n",
1579                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1580         lsm_md_dump(D_INODE, lsm);
1581
1582         if (!lmv_dir_striped(lsm))
1583                 goto out;
1584
1585         /* XXX this lsm_root initialization really belongs in the
1586          * LMV layer, but it needs ll_iget(), so it lives
1587          * here for now. */
1588         for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
1589                 fid = &lsm->lsm_md_oinfo[i].lmo_fid;
1590                 LASSERT(lsm->lsm_md_oinfo[i].lmo_root == NULL);
1591
1592                 if (!fid_is_sane(fid))
1593                         continue;
1594
1595                 /* Unfortunately ll_iget will call ll_update_inode,
1596                  * where the initialization of a slave inode is slightly
1597                  * different, so it resets lsm_md to NULL to avoid
1598                  * initializing the lsm for a slave inode. */
1599                 lsm->lsm_md_oinfo[i].lmo_root =
1600                                 ll_iget_anon_dir(inode->i_sb, fid, md);
1601                 if (IS_ERR(lsm->lsm_md_oinfo[i].lmo_root)) {
1602                         int rc = PTR_ERR(lsm->lsm_md_oinfo[i].lmo_root);
1603
1604                         lsm->lsm_md_oinfo[i].lmo_root = NULL;
1605                         while (i-- > 0) {
1606                                 iput(lsm->lsm_md_oinfo[i].lmo_root);
1607                                 lsm->lsm_md_oinfo[i].lmo_root = NULL;
1608                         }
1609                         return rc;
1610                 }
1611         }
1612 out:
1613         lli->lli_lsm_md = lsm;
1614
1615         return 0;
1616 }
1617
1618 static void ll_update_default_lsm_md(struct inode *inode, struct lustre_md *md)
1619 {
1620         struct ll_inode_info *lli = ll_i2info(inode);
1621
1622         ENTRY;
1623
1624         if (!md->default_lmv) {
1625                 /* clear default lsm */
1626                 if (lli->lli_default_lsm_md) {
1627                         down_write(&lli->lli_lsm_sem);
1628                         if (lli->lli_default_lsm_md) {
1629                                 lmv_free_memmd(lli->lli_default_lsm_md);
1630                                 lli->lli_default_lsm_md = NULL;
1631                         }
1632                         up_write(&lli->lli_lsm_sem);
1633                 }
1634                 RETURN_EXIT;
1635         }
1636
1637         if (lli->lli_default_lsm_md) {
1638                 /* do nothing if the default lsm isn't changed */
1639                 down_read(&lli->lli_lsm_sem);
1640                 if (lli->lli_default_lsm_md &&
1641                     lsm_md_eq(lli->lli_default_lsm_md, md->default_lmv)) {
1642                         up_read(&lli->lli_lsm_sem);
1643                         RETURN_EXIT;
1644                 }
1645                 up_read(&lli->lli_lsm_sem);
1646         }
1647
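        /* replace the cached default LMV with the one from the MD reply */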
1648         down_write(&lli->lli_lsm_sem);
1649         if (lli->lli_default_lsm_md)
1650                 lmv_free_memmd(lli->lli_default_lsm_md);
1651         lli->lli_default_lsm_md = md->default_lmv;
1652         lsm_md_dump(D_INODE, md->default_lmv);
1653         md->default_lmv = NULL;
1654         up_write(&lli->lli_lsm_sem);
1655         RETURN_EXIT;
1656 }
1657
1658 static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
1659 {
1660         struct ll_inode_info *lli = ll_i2info(inode);
1661         struct lmv_stripe_md *lsm = md->lmv;
1662         struct cl_attr  *attr;
1663         int rc = 0;
1664
1665         ENTRY;
1666
1667         LASSERT(S_ISDIR(inode->i_mode));
1668         CDEBUG(D_INODE, "update lsm %p of "DFID"\n", lli->lli_lsm_md,
1669                PFID(ll_inode2fid(inode)));
1670
1671         /* update default LMV */
1672         if (md->default_lmv)
1673                 ll_update_default_lsm_md(inode, md);
1674
1675         /* After dir migration/restripe, a stripe may be turned into a
1676          * directory; in this case, zero out its lli_pfid.
1677          */
1678         if (unlikely(fid_is_norm(&lli->lli_pfid)))
1679                 fid_zero(&lli->lli_pfid);
1680
1681         /*
1682          * no striped information from request, lustre_md from req does not
1683          * include stripeEA, see ll_md_setattr()
1684          */
1685         if (!lsm)
1686                 RETURN(0);
1687
1688         /*
1689          * Normally the dir layout doesn't change, so only take the read
1690          * lock to check it, to avoid blocking other MD operations.
1691          */
1692         down_read(&lli->lli_lsm_sem);
1693
1694         /* the lsm was already initialized by a previous lookup and is unchanged */
1695         if (lli->lli_lsm_md && lsm_md_eq(lli->lli_lsm_md, lsm))
1696                 GOTO(unlock, rc = 0);
1697
1698         /* If the dir layout doesn't match, check whether the version has
1699          * increased, which means the layout has changed; this happens in
1700          * dir split/merge and lfsck.
1701          *
1702          * A foreign LMV should not change.
1703          */
1704         if (lli->lli_lsm_md && lmv_dir_striped(lli->lli_lsm_md) &&
1705             lsm->lsm_md_layout_version <=
1706             lli->lli_lsm_md->lsm_md_layout_version) {
1707                 CERROR("%s: "DFID" dir layout mismatch:\n",
1708                        ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1709                 lsm_md_dump(D_ERROR, lli->lli_lsm_md);
1710                 lsm_md_dump(D_ERROR, lsm);
1711                 GOTO(unlock, rc = -EINVAL);
1712         }
1713
1714         up_read(&lli->lli_lsm_sem);
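        /* layout has changed: retake the semaphore for write to replace it */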
1715         down_write(&lli->lli_lsm_sem);
1716         /* clear existing lsm */
1717         if (lli->lli_lsm_md) {
1718                 lmv_free_memmd(lli->lli_lsm_md);
1719                 lli->lli_lsm_md = NULL;
1720         }
1721
1722         rc = ll_init_lsm_md(inode, md);
1723         up_write(&lli->lli_lsm_sem);
1724
1725         if (rc)
1726                 RETURN(rc);
1727
1728         /* set md->lmv to NULL, so the following free lustre_md will not free
1729          * this lsm.
1730          */
1731         md->lmv = NULL;
1732
1733         /* md_merge_attr() may take a long time; since the lsm is already
1734          * set, switch to the read lock.
1735          */
1736         down_read(&lli->lli_lsm_sem);
1737
1738         if (!lmv_dir_striped(lli->lli_lsm_md))
1739                 GOTO(unlock, rc = 0);
1740
1741         OBD_ALLOC_PTR(attr);
1742         if (!attr)
1743                 GOTO(unlock, rc = -ENOMEM);
1744
1745         /* validate the lsm */
1746         rc = md_merge_attr(ll_i2mdexp(inode), lli->lli_lsm_md, attr,
1747                            ll_md_blocking_ast);
1748         if (!rc) {
1749                 if (md->body->mbo_valid & OBD_MD_FLNLINK)
1750                         md->body->mbo_nlink = attr->cat_nlink;
1751                 if (md->body->mbo_valid & OBD_MD_FLSIZE)
1752                         md->body->mbo_size = attr->cat_size;
1753                 if (md->body->mbo_valid & OBD_MD_FLATIME)
1754                         md->body->mbo_atime = attr->cat_atime;
1755                 if (md->body->mbo_valid & OBD_MD_FLCTIME)
1756                         md->body->mbo_ctime = attr->cat_ctime;
1757                 if (md->body->mbo_valid & OBD_MD_FLMTIME)
1758                         md->body->mbo_mtime = attr->cat_mtime;
1759         }
1760
1761         OBD_FREE_PTR(attr);
1762         GOTO(unlock, rc);
1763 unlock:
1764         up_read(&lli->lli_lsm_sem);
1765
1766         return rc;
1767 }
1768
1769 void ll_clear_inode(struct inode *inode)
1770 {
1771         struct ll_inode_info *lli = ll_i2info(inode);
1772         struct ll_sb_info *sbi = ll_i2sbi(inode);
1773
1774         ENTRY;
1775
1776         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
1777                PFID(ll_inode2fid(inode)), inode);
1778
1779         if (S_ISDIR(inode->i_mode)) {
1780                 /* these should have been cleared in ll_file_release */
1781                 LASSERT(lli->lli_opendir_key == NULL);
1782                 LASSERT(lli->lli_sai == NULL);
1783                 LASSERT(lli->lli_opendir_pid == 0);
1784         } else {
1785                 pcc_inode_free(inode);
1786         }
1787
1788         md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
1789
1790         LASSERT(!lli->lli_open_fd_write_count);
1791         LASSERT(!lli->lli_open_fd_read_count);
1792         LASSERT(!lli->lli_open_fd_exec_count);
1793
1794         if (lli->lli_mds_write_och)
1795                 ll_md_real_close(inode, FMODE_WRITE);
1796         if (lli->lli_mds_exec_och)
1797                 ll_md_real_close(inode, FMODE_EXEC);
1798         if (lli->lli_mds_read_och)
1799                 ll_md_real_close(inode, FMODE_READ);
1800
1801         if (S_ISLNK(inode->i_mode) && lli->lli_symlink_name) {
1802                 OBD_FREE(lli->lli_symlink_name,
1803                          strlen(lli->lli_symlink_name) + 1);
1804                 lli->lli_symlink_name = NULL;
1805         }
1806
1807         ll_xattr_cache_destroy(inode);
1808
1809         forget_all_cached_acls(inode);
1810         lli_clear_acl(lli);
1811         lli->lli_inode_magic = LLI_INODE_DEAD;
1812
1813         if (S_ISDIR(inode->i_mode))
1814                 ll_dir_clear_lsm_md(inode);
1815         else if (S_ISREG(inode->i_mode) && !is_bad_inode(inode))
1816                 LASSERT(list_empty(&lli->lli_agl_list));
1817
1818         /*
1819          * XXX This has to be done before lsm is freed below, because
1820          * cl_object still uses inode lsm.
1821          */
1822         cl_inode_fini(inode);
1823
1824         llcrypt_put_encryption_info(inode);
1825
1826         EXIT;
1827 }
1828
1829 static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data)
1830 {
1831         struct lustre_md md;
1832         struct inode *inode = dentry->d_inode;
1833         struct ll_sb_info *sbi = ll_i2sbi(inode);
1834         struct ptlrpc_request *request = NULL;
1835         int rc, ia_valid;
1836
1837         ENTRY;
1838
1839         op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
1840                                      LUSTRE_OPC_ANY, NULL);
1841         if (IS_ERR(op_data))
1842                 RETURN(PTR_ERR(op_data));
1843
1844         /* If this is a chgrp of a regular file, we want to reserve enough
1845          * quota to cover the entire file size.
1846          */
1847         if (S_ISREG(inode->i_mode) && op_data->op_attr.ia_valid & ATTR_GID &&
1848             from_kgid(&init_user_ns, op_data->op_attr.ia_gid) !=
1849             from_kgid(&init_user_ns, inode->i_gid)) {
1850                 op_data->op_xvalid |= OP_XVALID_BLOCKS;
1851                 op_data->op_attr_blocks = inode->i_blocks;
1852         }
1853 
1855         rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &request);
1856         if (rc) {
1857                 ptlrpc_req_finished(request);
1858                 if (rc == -ENOENT) {
1859                         clear_nlink(inode);
1860                         /* Unlinked special device node? Or just a race?
1861                          * Pretend we have done everything. */
1862                         if (!S_ISREG(inode->i_mode) &&
1863                             !S_ISDIR(inode->i_mode)) {
1864                                 ia_valid = op_data->op_attr.ia_valid;
1865                                 op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
1866                                 rc = simple_setattr(&init_user_ns, dentry,
1867                                                     &op_data->op_attr);
1868                                 op_data->op_attr.ia_valid = ia_valid;
1869                         }
1870                 } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
1871                         CERROR("md_setattr fails: rc = %d\n", rc);
1872                 }
1873                 RETURN(rc);
1874         }
1875
1876         rc = md_get_lustre_md(sbi->ll_md_exp, &request->rq_pill, sbi->ll_dt_exp,
1877                               sbi->ll_md_exp, &md);
1878         if (rc) {
1879                 ptlrpc_req_finished(request);
1880                 RETURN(rc);
1881         }
1882
1883         ia_valid = op_data->op_attr.ia_valid;
1884         /* The inode size will be set in ll_setattr_ost; it can't be done
1885          * now since the dirty cache is not cleared yet. */
1886         op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
1887         if (S_ISREG(inode->i_mode))
1888                 inode_lock(inode);
1889         rc = simple_setattr(&init_user_ns, dentry, &op_data->op_attr);
1890         if (S_ISREG(inode->i_mode))
1891                 inode_unlock(inode);
1892         op_data->op_attr.ia_valid = ia_valid;
1893
1894         rc = ll_update_inode(inode, &md);
1895         ptlrpc_req_finished(request);
1896
1897         RETURN(rc);
1898 }
1899
1900 /**
1901  * Zero portion of page that is part of @inode.
1902  * This implies, if necessary:
1903  * - taking cl_lock on range corresponding to concerned page
1904  * - grabbing vm page
1905  * - associating cl_page
1906  * - proceeding to clio read
1907  * - zeroing range in page
1908  * - proceeding to cl_page flush
1909  * - releasing cl_lock
1910  *
1911  * \param[in] inode     inode
1912  * \param[in] index     page index
1913  * \param[in] offset    offset in page to start zero from
1914  * \param[in] len       len to zero
1915  *
1916  * \retval 0            on success
1917  * \retval negative     errno on failure
1918  */
1919 int ll_io_zero_page(struct inode *inode, pgoff_t index, pgoff_t offset,
1920                     unsigned len)
1921 {
1922         struct ll_inode_info *lli = ll_i2info(inode);
1923         struct cl_object *clob = lli->lli_clob;
1924         __u16 refcheck;
1925         struct lu_env *env = NULL;
1926         struct cl_io *io = NULL;
1927         struct cl_page *clpage = NULL;
1928         struct page *vmpage = NULL;
1929         unsigned from = index << PAGE_SHIFT;
1930         struct cl_lock *lock = NULL;
1931         struct cl_lock_descr *descr = NULL;
1932         struct cl_2queue *queue = NULL;
1933         struct cl_sync_io *anchor = NULL;
1934         bool holdinglock = false;
1935         int rc;
1936
1937         ENTRY;
1938
1939         env = cl_env_get(&refcheck);
1940         if (IS_ERR(env))
1941                 RETURN(PTR_ERR(env));
1942
1943         io = vvp_env_thread_io(env);
1944         io->ci_obj = clob;
1945         rc = cl_io_rw_init(env, io, CIT_WRITE, from, PAGE_SIZE);
1946         if (rc)
1947                 GOTO(putenv, rc);
1948
1949         lock = vvp_env_lock(env);
1950         descr = &lock->cll_descr;
1951         descr->cld_obj   = io->ci_obj;
1952         descr->cld_start = cl_index(io->ci_obj, from);
1953         descr->cld_end   = cl_index(io->ci_obj, from + PAGE_SIZE - 1);
1954         descr->cld_mode  = CLM_WRITE;
1955         descr->cld_enq_flags = CEF_MUST | CEF_NONBLOCK;
1956
1957         /* request lock for page */
1958         rc = cl_lock_request(env, io, lock);
1959         /* -ECANCELED indicates a matching lock with a different extent
1960          * was already present, and -EEXIST indicates a matching lock
1961          * on exactly the same extent was already present.
1962          * In both cases it means we are covered.
1963          */
1964         if (rc == -ECANCELED || rc == -EEXIST)
1965                 rc = 0;
1966         else if (rc < 0)
1967                 GOTO(iofini, rc);
1968         else
1969                 holdinglock = true;
1970
1971         /* grab page */
1972         vmpage = grab_cache_page_nowait(inode->i_mapping, index);
1973         if (vmpage == NULL)
1974                 GOTO(rellock, rc = -EOPNOTSUPP);
1975
1976         if (!PageDirty(vmpage)) {
1977                 /* associate cl_page */
1978                 clpage = cl_page_find(env, clob, vmpage->index,
1979                                       vmpage, CPT_CACHEABLE);
1980                 if (IS_ERR(clpage))
1981                         GOTO(pagefini, rc = PTR_ERR(clpage));
1982
1983                 cl_page_assume(env, io, clpage);
1984         }
1985
1986         if (!PageUptodate(vmpage) && !PageDirty(vmpage) &&
1987             !PageWriteback(vmpage)) {
1988                 /* read page */
1989                 /* Set PagePrivate2 to detect special case of empty page
1990                  * in osc_brw_fini_request().
1991                  * It is also used to tell ll_io_read_page() that we do not
1992                  * want the vmpage to be unlocked.
1993                  */
1994                 SetPagePrivate2(vmpage);
1995                 rc = ll_io_read_page(env, io, clpage, NULL);
1996                 if (!PagePrivate2(vmpage)) {
1997                         /* PagePrivate2 was cleared in osc_brw_fini_request()
1998                          * meaning we read an empty page. In this case, in order
1999                          * to avoid allocating unnecessary block in truncated
2000                          * file, we must not zero and write as below. Subsequent
2001                          * server-side truncate will handle things correctly.
2002                          */
2003                         cl_page_unassume(env, io, clpage);
2004                         GOTO(clpfini, rc = 0);
2005                 }
2006                 ClearPagePrivate2(vmpage);
2007                 if (rc)
2008                         GOTO(clpfini, rc);
2009         }
2010
2011         /* Thanks to PagePrivate2 flag, ll_io_read_page() did not unlock
2012          * the vmpage, so we are good to proceed and zero range in page.
2013          */
2014         zero_user(vmpage, offset, len);
2015
2016         if (holdinglock && clpage) {
2017                 /* explicitly write newly modified page */
2018                 queue = &io->ci_queue;
2019                 cl_2queue_init(queue);
2020                 anchor = &vvp_env_info(env)->vti_anchor;
2021                 cl_sync_io_init(anchor, 1);
2022                 clpage->cp_sync_io = anchor;
2023                 cl_2queue_add(queue, clpage, true);
2024                 rc = cl_io_submit_rw(env, io, CRT_WRITE, queue);
2025                 if (rc)
2026                         GOTO(queuefini1, rc);
2027                 rc = cl_sync_io_wait(env, anchor, 0);
2028                 if (rc)
2029                         GOTO(queuefini2, rc);
2030                 cl_page_assume(env, io, clpage);
2031
2032 queuefini2:
2033                 cl_2queue_discard(env, io, queue);
2034 queuefini1:
2035                 cl_2queue_disown(env, io, queue);
2036                 cl_2queue_fini(env, queue);
2037         }
2038
2039 clpfini:
2040         if (clpage)
2041                 cl_page_put(env, clpage);
2042 pagefini:
2043         unlock_page(vmpage);
2044         put_page(vmpage);
2045 rellock:
2046         if (holdinglock)
2047                 cl_lock_release(env, lock);
2048 iofini:
2049         cl_io_fini(env, io);
2050 putenv:
2051         if (env)
2052                 cl_env_put(env, &refcheck);
2053
2054         RETURN(rc);
2055 }
2056
2057 /**
2058  * Get reference file from volatile file name.
2059  * Volatile file name may look like:
2060  * <parent>/LUSTRE_VOLATILE_HDR:<mdt_index>:<random>:fd=<fd>
2061  * where fd is the open descriptor of the reference file.
2062  *
2063  * \param[in] volatile_name     volatile file name
2064  * \param[in] volatile_len      volatile file name length
2065  * \param[out] ref_file         pointer to struct file of reference file
2066  *
2067  * \retval 0            on success
2068  * \retval negative     errno on failure
2069  */
2070 int volatile_ref_file(const char *volatile_name, int volatile_len,
2071                       struct file **ref_file)
2072 {
2073         char *p, *q, *fd_str;
2074         int fd, rc;
2075
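        /* locate the ":fd=" token and parse the decimal descriptor after it */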
2076         p = strnstr(volatile_name, ":fd=", volatile_len);
2077         if (!p || strlen(p + 4) == 0)
2078                 return -EINVAL;
2079
2080         q = strchrnul(p + 4, ':');
2081         fd_str = kstrndup(p + 4, q - p - 4, GFP_NOFS);
2082         if (!fd_str)
2083                 return -ENOMEM;
2084         rc = kstrtouint(fd_str, 10, &fd);
2085         kfree(fd_str);
2086         if (rc)
2087                 return -EINVAL;
2088
2089         *ref_file = fget(fd);
2090         if (!(*ref_file))
2091                 return -EINVAL;
2092         return 0;
2093 }
2094
2095 /* If this inode has objects allocated to it (lsm != NULL), then the OST
2096  * object(s) determine the file size and mtime.  Otherwise, the MDS will
2097  * keep these values until such a time that objects are allocated for it.
2098  * We do the MDS operations first, as it is checking permissions for us.
2099  * We don't do the MDS RPC if there is nothing that we want to store there;
2100  * otherwise there is no harm in updating mtime/atime on the MDS if we are
2101  * going to do an RPC anyway.
2102  *
2103  * If we are doing a truncate, we will send the mtime and ctime updates
2104  * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
2105  * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
2106  * at the same time.
2107  *
2108  * In case of HSM import, we only set the attributes on the MDS.
2109  */
2110 int ll_setattr_raw(struct dentry *dentry, struct iattr *attr,
2111                    enum op_xvalid xvalid, bool hsm_import)
2112 {
2113         struct inode *inode = dentry->d_inode;
2114         struct ll_inode_info *lli = ll_i2info(inode);
2115         struct md_op_data *op_data = NULL;
2116         ktime_t kstart = ktime_get();
2117         int rc = 0;
2118
2119         ENTRY;
2120
2121         CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, "
2122                "valid %x, hsm_import %d\n",
2123                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid),
2124                inode, i_size_read(inode), attr->ia_size, attr->ia_valid,
2125                hsm_import);
2126
2127         if (attr->ia_valid & ATTR_SIZE) {
2128                 /* Check new size against VFS/VM file size limit and rlimit */
2129                 rc = inode_newsize_ok(inode, attr->ia_size);
2130                 if (rc)
2131                         RETURN(rc);
2132
2133                 /* The maximum Lustre file size is variable, based on the
2134                  * OST maximum object size and number of stripes.  This
2135                  * needs another check in addition to the VFS check above. */
2136                 if (attr->ia_size > ll_file_maxbytes(inode)) {
2137                         CDEBUG(D_INODE, "file "DFID" too large %llu > %llu\n",
2138                                PFID(&lli->lli_fid), attr->ia_size,
2139                                ll_file_maxbytes(inode));
2140                         RETURN(-EFBIG);
2141                 }
2142
2143                 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
2144         }
2145
2146         /* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
2147         if (attr->ia_valid & TIMES_SET_FLAGS) {
2148                 if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
2149                     !capable(CAP_FOWNER))
2150                         RETURN(-EPERM);
2151         }
2152
2153         /* We mark all of the fields "set" so MDS/OST does not re-set them */
2154         if (!(xvalid & OP_XVALID_CTIME_SET) &&
2155              (attr->ia_valid & ATTR_CTIME)) {
2156                 attr->ia_ctime = current_time(inode);
2157                 xvalid |= OP_XVALID_CTIME_SET;
2158         }
2159         if (!(attr->ia_valid & ATTR_ATIME_SET) &&
2160             (attr->ia_valid & ATTR_ATIME)) {
2161                 attr->ia_atime = current_time(inode);
2162                 attr->ia_valid |= ATTR_ATIME_SET;
2163         }
2164         if (!(attr->ia_valid & ATTR_MTIME_SET) &&
2165             (attr->ia_valid & ATTR_MTIME)) {
2166                 attr->ia_mtime = current_time(inode);
2167                 attr->ia_valid |= ATTR_MTIME_SET;
2168         }
2169
2170         if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
2171                 CDEBUG(D_INODE, "setting mtime %lld, ctime %lld, now = %lld\n",
2172                        (s64)attr->ia_mtime.tv_sec, (s64)attr->ia_ctime.tv_sec,
2173                        ktime_get_real_seconds());
2174
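        /* callers hold the inode lock for regular files; drop it across the
         * MDS/OST updates below and re-acquire it before returning (see the
         * "out" label)
         */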
2175         if (S_ISREG(inode->i_mode))
2176                 inode_unlock(inode);
2177
2178         /* We always do an MDS RPC, even if we're only changing the size;
2179          * only the MDS knows whether truncate() should fail with -ETXTBSY */
2180
2181         OBD_ALLOC_PTR(op_data);
2182         if (op_data == NULL)
2183                 GOTO(out, rc = -ENOMEM);
2184
2185         if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
2186                 /* If we are changing file size, file content is
2187                  * modified, flag it.
2188                  */
2189                 xvalid |= OP_XVALID_OWNEROVERRIDE;
2190                 op_data->op_bias |= MDS_DATA_MODIFIED;
2191                 clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
2192         }
2193
2194         if (attr->ia_valid & ATTR_FILE) {
2195                 struct ll_file_data *fd = attr->ia_file->private_data;
2196
2197                 if (fd->fd_lease_och)
2198                         op_data->op_bias |= MDS_TRUNC_KEEP_LEASE;
2199         }
2200
2201         op_data->op_attr = *attr;
2202         op_data->op_xvalid = xvalid;
2203
2204         rc = ll_md_setattr(dentry, op_data);
2205         if (rc)
2206                 GOTO(out, rc);
2207
2208         if (!S_ISREG(inode->i_mode) || hsm_import)
2209                 GOTO(out, rc = 0);
2210
2211         if (attr->ia_valid & (ATTR_SIZE | ATTR_ATIME | ATTR_ATIME_SET |
2212                               ATTR_MTIME | ATTR_MTIME_SET | ATTR_CTIME) ||
2213             xvalid & OP_XVALID_CTIME_SET) {
2214                 bool cached = false;
2215
2216                 rc = pcc_inode_setattr(inode, attr, &cached);
2217                 if (cached) {
2218                         if (rc) {
2219                                 CERROR("%s: PCC inode "DFID" setattr failed: "
2220                                        "rc = %d\n",
2221                                        ll_i2sbi(inode)->ll_fsname,
2222                                        PFID(&lli->lli_fid), rc);
2223                                 GOTO(out, rc);
2224                         }
2225                 } else {
2226                         unsigned int flags = 0;
2227
2228                         /* For truncate and utimes sending attributes to OSTs,
2229                          * setting mtime/atime to the past will be performed
2230                          * under PW [0:EOF] extent lock (new_size:EOF for
2231                          * truncate). It may seem excessive to send mtime/atime
2232                          * updates to OSTs when not setting times to past, but
2233                          * it is necessary due to possible time
2234                          * de-synchronization between MDT inode and OST objects
2235                          */
2236                         if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) {
2237                                 xvalid |= OP_XVALID_FLAGS;
2238                                 flags = LUSTRE_ENCRYPT_FL;
2239                                 /* Call to ll_io_zero_page is not necessary if
2240                                  * truncating on PAGE_SIZE boundary, because
2241                                  * whole pages will be wiped.
2242                                  * In case of Direct IO, all we need is to set
2243                                  * new size.
2244                                  */
2245                                 if (attr->ia_valid & ATTR_SIZE &&
2246                                     attr->ia_size & ~PAGE_MASK &&
2247                                     !(attr->ia_valid & ATTR_FILE &&
2248                                       attr->ia_file->f_flags & O_DIRECT)) {
2249                                         pgoff_t offset =
2250                                                 attr->ia_size & (PAGE_SIZE - 1);
2251
2252                                         rc = ll_io_zero_page(inode,
2253                                                     attr->ia_size >> PAGE_SHIFT,
2254                                                     offset, PAGE_SIZE - offset);
2255                                         if (rc)
2256                                                 GOTO(out, rc);
2257                                 }
2258                                 /* If encrypted volatile file without the key,
2259                                  * we need to fetch size from reference file,
2260                                  * and set it on OST objects. This happens when
2261                                  * migrating or extending an encrypted file
2262                                  * without the key.
2263                                  */
2264                                 if (filename_is_volatile(dentry->d_name.name,
2265                                                          dentry->d_name.len,
2266                                                          NULL) &&
2267                                     llcrypt_require_key(inode) == -ENOKEY) {
2268                                         struct file *ref_file;
2269                                         struct inode *ref_inode;
2270                                         struct ll_inode_info *ref_lli;
2271                                         struct cl_object *ref_obj;
2272                                         struct cl_attr ref_attr = { 0 };
2273                                         struct lu_env *env;
2274                                         __u16 refcheck;
2275
2276                                         rc = volatile_ref_file(
2277                                                 dentry->d_name.name,
2278                                                 dentry->d_name.len,
2279                                                 &ref_file);
2280                                         if (rc)
2281                                                 GOTO(out, rc);
2282
2283                                         ref_inode = file_inode(ref_file);
2284                                         if (!ref_inode) {
2285                                                 fput(ref_file);
2286                                                 GOTO(out, rc = -EINVAL);
2287                                         }
2288
2289                                         env = cl_env_get(&refcheck);
2290                                         if (IS_ERR(env))
2291                                                 GOTO(out, rc = PTR_ERR(env));
2292
2293                                         ref_lli = ll_i2info(ref_inode);
2294                                         ref_obj = ref_lli->lli_clob;
2295                                         cl_object_attr_lock(ref_obj);
2296                                         rc = cl_object_attr_get(env, ref_obj,
2297                                                                 &ref_attr);
2298                                         cl_object_attr_unlock(ref_obj);
2299                                         cl_env_put(env, &refcheck);
2300                                         fput(ref_file);
2301                                         if (rc)
2302                                                 GOTO(out, rc);
2303
2304                                         attr->ia_valid |= ATTR_SIZE;
2305                                         attr->ia_size = ref_attr.cat_size;
2306                                 }
2307                         }
2308                         rc = cl_setattr_ost(lli->lli_clob, attr, xvalid, flags);
2309                 }
2310         }
2311
2312         /* If the file was restored, we need to set its dirty flag.
2313          *
2314          * We've already sent MDS_DATA_MODIFIED flag in
2315          * ll_md_setattr() for truncate. However, the MDT refuses to
2316          * set the HS_DIRTY flag on released files, so we have to set
2317          * it again if the file has been restored. Please check how
2318          * LLIF_DATA_MODIFIED is set in vvp_io_setattr_fini().
2319          *
2320          * Please notice that if the file is not released, the previous
2321          * MDS_DATA_MODIFIED has taken effect and usually
2322          * LLIF_DATA_MODIFIED is not set (see vvp_io_setattr_fini()).
2323          * This way we can save an RPC for common open + trunc
2324          * operation. */
2325         if (test_and_clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags)) {
2326                 struct hsm_state_set hss = {
2327                         .hss_valid = HSS_SETMASK,
2328                         .hss_setmask = HS_DIRTY,
2329                 };
2330                 int rc2;
2331
2332                 rc2 = ll_hsm_state_set(inode, &hss);
2333                 /* truncate and write can happen at the same time, so the
2334                  * file can be marked modified even though it was not
2335                  * restored from the released state; ll_hsm_state_set() is
2336                  * then not applicable for the file, and rc2 < 0 is normal
2337                  * in this case. */
2338                 if (rc2 < 0)
2339                         CDEBUG(D_INFO, DFID "HSM set dirty failed: rc2 = %d\n",
2340                                PFID(ll_inode2fid(inode)), rc2);
2341         }
2342
2343         EXIT;
2344 out:
2345         if (op_data != NULL)
2346                 ll_finish_md_op_data(op_data);
2347
2348         if (S_ISREG(inode->i_mode)) {
2349                 inode_lock(inode);
2350                 if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
2351                         inode_dio_wait(inode);
2352                 /* Once we've got the i_mutex, it's safe to set the S_NOSEC
2353          * flag.  ll_update_inode (called from ll_md_setattr) clears
2354                  * inode flags, so there is a gap where S_NOSEC is not set.
2355                  * This can cause a writer to take the i_mutex unnecessarily,
2356                  * but this is safe to do and should be rare. */
2357                 inode_has_no_xattr(inode);
2358         }
2359
2360         if (!rc)
2361                 ll_stats_ops_tally(ll_i2sbi(inode), attr->ia_valid & ATTR_SIZE ?
2362                                         LPROC_LL_TRUNC : LPROC_LL_SETATTR,
2363                                    ktime_us_delta(ktime_get(), kstart));
2364
2365         RETURN(rc);
2366 }
2367
2368 int ll_setattr(struct user_namespace *mnt_userns, struct dentry *de,
2369                struct iattr *attr)
2370 {
2371         int mode = de->d_inode->i_mode;
2372         enum op_xvalid xvalid = 0;
2373         int rc;
2374
2375         rc = llcrypt_prepare_setattr(de, attr);
2376         if (rc)
2377                 return rc;
2378
2379         if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
2380                               (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
2381                 xvalid |= OP_XVALID_OWNEROVERRIDE;
2382
2383         if (((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
2384                                (ATTR_SIZE|ATTR_MODE)) &&
2385             (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
2386              (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2387               !(attr->ia_mode & S_ISGID))))
2388                 attr->ia_valid |= ATTR_FORCE;
2389
2390         if ((attr->ia_valid & ATTR_MODE) &&
2391             (mode & S_ISUID) &&
2392             !(attr->ia_mode & S_ISUID) &&
2393             !(attr->ia_valid & ATTR_KILL_SUID))
2394                 attr->ia_valid |= ATTR_KILL_SUID;
2395
2396         if ((attr->ia_valid & ATTR_MODE) &&
2397             ((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2398             !(attr->ia_mode & S_ISGID) &&
2399             !(attr->ia_valid & ATTR_KILL_SGID))
2400                 attr->ia_valid |= ATTR_KILL_SGID;
2401
2402         return ll_setattr_raw(de, attr, xvalid, false);
2403 }
2404
2405 int ll_statfs_internal(struct ll_sb_info *sbi, struct obd_statfs *osfs,
2406                        u32 flags)
2407 {
2408         struct obd_statfs obd_osfs = { 0 };
2409         time64_t max_age;
2410         int rc;
2411
2412         ENTRY;
2413         max_age = ktime_get_seconds() - sbi->ll_statfs_max_age;
2414
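        /* with lazystatfs, do not block waiting for unresponsive targets */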
2415         if (test_bit(LL_SBI_LAZYSTATFS, sbi->ll_flags))
2416                 flags |= OBD_STATFS_NODELAY;
2417
2418         rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
2419         if (rc)
2420                 RETURN(rc);
2421
2422         osfs->os_type = LL_SUPER_MAGIC;
2423
2424         CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n",
2425               osfs->os_bavail, osfs->os_blocks, osfs->os_ffree, osfs->os_files);
2426
2427         if (osfs->os_state & OS_STATFS_SUM)
2428                 GOTO(out, rc);
2429
2430         rc = obd_statfs(NULL, sbi->ll_dt_exp, &obd_osfs, max_age, flags);
2431         if (rc) /* Possibly a filesystem with no OSTs.  Report MDT totals. */
2432                 GOTO(out, rc = 0);
2433
2434         CDEBUG(D_SUPER, "OSC blocks %llu/%llu objects %llu/%llu\n",
2435                obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
2436                obd_osfs.os_files);
2437
2438         osfs->os_bsize = obd_osfs.os_bsize;
2439         osfs->os_blocks = obd_osfs.os_blocks;
2440         osfs->os_bfree = obd_osfs.os_bfree;
2441         osfs->os_bavail = obd_osfs.os_bavail;
2442
2443         /* If we have _some_ OSTs, but don't have as many free objects on the
2444          * OSTs as inodes on the MDTs, reduce the reported number of inodes
2445          * to compensate, so that the "inodes in use" number is correct.
2446          * This should be kept in sync with lod_statfs() behaviour.
2447          */
2448         if (obd_osfs.os_files && obd_osfs.os_ffree < osfs->os_ffree) {
2449                 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
2450                                  obd_osfs.os_ffree;
2451                 osfs->os_ffree = obd_osfs.os_ffree;
2452         }
2453
2454 out:
2455         RETURN(rc);
2456 }
2457
2458 static int ll_statfs_project(struct inode *inode, struct kstatfs *sfs)
2459 {
2460         struct if_quotactl qctl = {
2461                 .qc_cmd = LUSTRE_Q_GETQUOTA,
2462                 .qc_type = PRJQUOTA,
2463                 .qc_valid = QC_GENERAL,
2464         };
2465         u64 limit, curblock;
2466         int ret;
2467
2468         qctl.qc_id = ll_i2info(inode)->lli_projid;
2469         ret = quotactl_ioctl(inode->i_sb, &qctl);
2470         if (ret) {
2471                 /* ignore errors if the project ID does not have a quota
2472                  * limit or the quota feature is unsupported.
2473                  */
2474                 if (ret == -ESRCH || ret == -EOPNOTSUPP)
2475                         ret = 0;
2476                 return ret;
2477         }
2478
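        /* block quota limits are in 1KB units; convert to f_bsize blocks */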
2479         limit = ((qctl.qc_dqblk.dqb_bsoftlimit ?
2480                  qctl.qc_dqblk.dqb_bsoftlimit :
2481                  qctl.qc_dqblk.dqb_bhardlimit) * 1024) / sfs->f_bsize;
2482         if (limit && sfs->f_blocks > limit) {
2483                 curblock = (qctl.qc_dqblk.dqb_curspace +
2484                                 sfs->f_bsize - 1) / sfs->f_bsize;
2485                 sfs->f_blocks = limit;
2486                 sfs->f_bfree = sfs->f_bavail =
2487                         (sfs->f_blocks > curblock) ?
2488                         (sfs->f_blocks - curblock) : 0;
2489         }
2490
2491         limit = qctl.qc_dqblk.dqb_isoftlimit ?
2492                 qctl.qc_dqblk.dqb_isoftlimit :
2493                 qctl.qc_dqblk.dqb_ihardlimit;
2494         if (limit && sfs->f_files > limit) {
2495                 sfs->f_files = limit;
2496                 sfs->f_ffree = (sfs->f_files >
2497                         qctl.qc_dqblk.dqb_curinodes) ?
2498                         (sfs->f_files - qctl.qc_dqblk.dqb_curinodes) : 0;
2499         }
2500
2501         return 0;
2502 }
2503
2504 int ll_statfs(struct dentry *de, struct kstatfs *sfs)
2505 {
2506         struct super_block *sb = de->d_sb;
2507         struct obd_statfs osfs;
2508         __u64 fsid = huge_encode_dev(sb->s_dev);
2509         ktime_t kstart = ktime_get();
2510         int rc;
2511
2512         CDEBUG(D_VFSTRACE, "VFS Op:sb=%s (%p)\n", sb->s_id, sb);
2513
2514         /* Some amount of caching on the client is allowed */
2515         rc = ll_statfs_internal(ll_s2sbi(sb), &osfs, OBD_STATFS_SUM);
2516         if (rc)
2517                 return rc;
2518
2519         statfs_unpack(sfs, &osfs);
2520
2521         /* We need to downshift for all 32-bit kernels, because we can't
2522          * tell if the kernel is being called via sys_statfs64() or not.
2523          * Stop before overflowing f_bsize - in which case it is better
2524          * to just risk EOVERFLOW if caller is using old sys_statfs(). */
2525         if (sizeof(long) < 8) {
2526                 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
2527                         sfs->f_bsize <<= 1;
2528
2529                         osfs.os_blocks >>= 1;
2530                         osfs.os_bfree >>= 1;
2531                         osfs.os_bavail >>= 1;
2532                 }
2533         }
2534
2535         sfs->f_blocks = osfs.os_blocks;
2536         sfs->f_bfree = osfs.os_bfree;
2537         sfs->f_bavail = osfs.os_bavail;
2538         sfs->f_fsid.val[0] = (__u32)fsid;
2539         sfs->f_fsid.val[1] = (__u32)(fsid >> 32);
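        /* reflect project quota limits in the reported totals, if any */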
2540         if (ll_i2info(de->d_inode)->lli_projid)
2541                 return ll_statfs_project(de->d_inode, sfs);
2542
2543         ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STATFS,
2544                            ktime_us_delta(ktime_get(), kstart));
2545
2546         return 0;
2547 }
2548
2549 void ll_inode_size_lock(struct inode *inode)
2550 {
2551         struct ll_inode_info *lli;
2552
2553         LASSERT(!S_ISDIR(inode->i_mode));
2554
2555         lli = ll_i2info(inode);
2556         mutex_lock(&lli->lli_size_mutex);
2557 }
2558
2559 void ll_inode_size_unlock(struct inode *inode)
2560 {
2561         struct ll_inode_info *lli;
2562
2563         lli = ll_i2info(inode);
2564         mutex_unlock(&lli->lli_size_mutex);
2565 }
2566
2567 void ll_update_inode_flags(struct inode *inode, unsigned int ext_flags)
2568 {
2569         /* do not clear encryption flag */
2570         ext_flags |= ll_inode_to_ext_flags(inode->i_flags) & LUSTRE_ENCRYPT_FL;
2571         inode->i_flags = ll_ext_to_inode_flags(ext_flags);
2572         if (ext_flags & LUSTRE_PROJINHERIT_FL)
2573                 set_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags);
2574         else
2575                 clear_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags);
2576 }
2577
2578 int ll_update_inode(struct inode *inode, struct lustre_md *md)
2579 {
2580         struct ll_inode_info *lli = ll_i2info(inode);
2581         struct mdt_body *body = md->body;
2582         struct ll_sb_info *sbi = ll_i2sbi(inode);
2583         bool api32;
2584         int rc = 0;
2585
2586         if (body->mbo_valid & OBD_MD_FLEASIZE) {
2587                 rc = cl_file_inode_init(inode, md);
2588                 if (rc)
2589                         return rc;
2590         }
2591
2592         if (S_ISDIR(inode->i_mode)) {
2593                 rc = ll_update_lsm_md(inode, md);
2594                 if (rc != 0)
2595                         return rc;
2596         }
2597
2598         if (body->mbo_valid & OBD_MD_FLACL)
2599                 lli_replace_acl(lli, md);
2600
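        /* build the VFS inode number and generation from the Lustre FID */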
2601         api32 = test_bit(LL_SBI_32BIT_API, sbi->ll_flags);
2602         inode->i_ino = cl_fid_build_ino(&body->mbo_fid1, api32);
2603         inode->i_generation = cl_fid_build_gen(&body->mbo_fid1);
2604
2605         if (body->mbo_valid & OBD_MD_FLATIME) {
2606                 if (body->mbo_atime > inode->i_atime.tv_sec)
2607                         inode->i_atime.tv_sec = body->mbo_atime;
2608                 lli->lli_atime = body->mbo_atime;
2609         }
2610
2611         if (body->mbo_valid & OBD_MD_FLMTIME) {
2612                 if (body->mbo_mtime > inode->i_mtime.tv_sec) {
2613                         CDEBUG(D_INODE,
2614                                "setting ino %lu mtime from %lld to %llu\n",
2615                                inode->i_ino, (s64)inode->i_mtime.tv_sec,
2616                                body->mbo_mtime);
2617                         inode->i_mtime.tv_sec = body->mbo_mtime;
2618                 }
2619                 lli->lli_mtime = body->mbo_mtime;
2620         }
2621
2622         if (body->mbo_valid & OBD_MD_FLCTIME) {
2623                 if (body->mbo_ctime > inode->i_ctime.tv_sec)
2624                         inode->i_ctime.tv_sec = body->mbo_ctime;
2625                 lli->lli_ctime = body->mbo_ctime;
2626         }
2627
2628         if (body->mbo_valid & OBD_MD_FLBTIME)
2629                 lli->lli_btime = body->mbo_btime;
2630
2631         /* Clear i_flags to remove S_NOSEC before permissions are updated */
2632         if (body->mbo_valid & OBD_MD_FLFLAGS)
2633                 ll_update_inode_flags(inode, body->mbo_flags);
2634         if (body->mbo_valid & OBD_MD_FLMODE)
2635                 inode->i_mode = (inode->i_mode & S_IFMT) |
2636                                 (body->mbo_mode & ~S_IFMT);
2637
2638         if (body->mbo_valid & OBD_MD_FLTYPE)
2639                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
2640                                 (body->mbo_mode & S_IFMT);
2641
2642         LASSERT(inode->i_mode != 0);
2643         if (body->mbo_valid & OBD_MD_FLUID)
2644                 inode->i_uid = make_kuid(&init_user_ns, body->mbo_uid);
2645         if (body->mbo_valid & OBD_MD_FLGID)
2646                 inode->i_gid = make_kgid(&init_user_ns, body->mbo_gid);
2647         if (body->mbo_valid & OBD_MD_FLPROJID)
2648                 lli->lli_projid = body->mbo_projid;
2649         if (body->mbo_valid & OBD_MD_FLNLINK) {
2650                 spin_lock(&inode->i_lock);
2651                 set_nlink(inode, body->mbo_nlink);
2652                 spin_unlock(&inode->i_lock);
2653         }
2654         if (body->mbo_valid & OBD_MD_FLRDEV)
2655                 inode->i_rdev = old_decode_dev(body->mbo_rdev);
2656
2657         if (body->mbo_valid & OBD_MD_FLID) {
2658                 /* FID shouldn't be changed! */
2659                 if (fid_is_sane(&lli->lli_fid)) {
2660                         LASSERTF(lu_fid_eq(&lli->lli_fid, &body->mbo_fid1),
2661                                  "Trying to change FID "DFID
2662                                  " to the "DFID", inode "DFID"(%p)\n",
2663                                  PFID(&lli->lli_fid), PFID(&body->mbo_fid1),
2664                                  PFID(ll_inode2fid(inode)), inode);
2665                 } else {
2666                         lli->lli_fid = body->mbo_fid1;
2667                 }
2668         }
2669
2670         LASSERT(fid_seq(&lli->lli_fid) != 0);
2671
2672         /* For an encrypted file without the key, do not lose the clear
2673          * text size stored into lli_lazysize by ll_merge_attr();
2674          * it will be needed in ll_prepare_close().
2675          */
2676         if (lli->lli_attr_valid & OBD_MD_FLLAZYSIZE && lli->lli_lazysize &&
2677             llcrypt_require_key(inode) == -ENOKEY)
2678                 lli->lli_attr_valid = body->mbo_valid | OBD_MD_FLLAZYSIZE;
2679         else
2680                 lli->lli_attr_valid = body->mbo_valid;
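        /* An authoritative size from the MDT is applied to the inode directly;
         * otherwise only the lazy size/blocks hints are cached in lli.
         */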
2681         if (body->mbo_valid & OBD_MD_FLSIZE) {
2682                 i_size_write(inode, body->mbo_size);
2683
2684                 CDEBUG(D_VFSTRACE, "inode="DFID", updating i_size %llu\n",
2685                        PFID(ll_inode2fid(inode)),
2686                        (unsigned long long)body->mbo_size);
2687
2688                 if (body->mbo_valid & OBD_MD_FLBLOCKS)
2689                         inode->i_blocks = body->mbo_blocks;
2690         } else {
2691                 if (body->mbo_valid & OBD_MD_FLLAZYSIZE)
2692                         lli->lli_lazysize = body->mbo_size;
2693                 if (body->mbo_valid & OBD_MD_FLLAZYBLOCKS)
2694                         lli->lli_lazyblocks = body->mbo_blocks;
2695         }
2696
2697         if (body->mbo_valid & OBD_MD_TSTATE) {
2698                 /* Set LLIF_FILE_RESTORING while a restore is ongoing and
2699                  * clear it when done, so that glimpsing of updated
2700                  * attributes starts again.
2701                  */
2702                 if (body->mbo_t_state & MS_RESTORE)
2703                         set_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
2704                 else
2705                         clear_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
2706         }
2707
2708         return 0;
2709 }
2710
2711 /* update the directory depth relative to ROOT, called after the LOOKUP lock is fetched. */
2712 void ll_update_dir_depth(struct inode *dir, struct inode *inode)
2713 {
2714         struct ll_inode_info *lli;
2715
2716         if (!S_ISDIR(inode->i_mode))
2717                 return;
2718
2719         if (inode == dir)
2720                 return;
2721
2722         lli = ll_i2info(inode);
2723         lli->lli_dir_depth = ll_i2info(dir)->lli_dir_depth + 1;
2724         CDEBUG(D_INODE, DFID" depth %hu\n",
2725                PFID(&lli->lli_fid), lli->lli_dir_depth);
2726 }
2727
2728 void ll_truncate_inode_pages_final(struct inode *inode)
2729 {
2730         struct address_space *mapping = &inode->i_data;
2731         unsigned long nrpages;
2732         unsigned long flags;
2733
2734         truncate_inode_pages_final(mapping);
2735
2736         /* Workaround for LU-118: Note nrpages may not be totally updated when
2737          * truncate_inode_pages() returns, as there can be a page in the process
2738          * of deletion (inside __delete_from_page_cache()) in the specified
2739          * range. Thus mapping->nrpages can be non-zero when this function
2740          * returns even after truncation of the whole mapping.  Only do this if
2741          * nrpages isn't already zero.
2742          */
2743         nrpages = mapping->nrpages;
2744         if (nrpages) {
2745                 ll_xa_lock_irqsave(&mapping->i_pages, flags);
2746                 nrpages = mapping->nrpages;
2747                 ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
2748         } /* Workaround end */
2749
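        /* By now the mapping must be empty; leftover pages indicate a
         * page reference leak (see the LU-118 note above).
         */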
2750         LASSERTF(nrpages == 0, "%s: inode="DFID"(%p) nrpages=%lu, "
2751                  "see https://jira.whamcloud.com/browse/LU-118\n",
2752                  ll_i2sbi(inode)->ll_fsname,
2753                  PFID(ll_inode2fid(inode)), inode, nrpages);
2754 }
2755
2756 int ll_read_inode2(struct inode *inode, void *opaque)
2757 {
2758         struct lustre_md *md = opaque;
2759         struct ll_inode_info *lli = ll_i2info(inode);
2760         int     rc;
2761         ENTRY;
2762
2763         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
2764                PFID(&lli->lli_fid), inode);
2765
2766         /* Core attributes from the MDS first.  This is a new inode, and
2767          * the VFS doesn't zero times in the core inode so we have to do
2768          * it ourselves.  They will be overwritten by either MDS or OST
2769          * attributes - we just need to make sure they aren't newer.
2770          */
2771         inode->i_mtime.tv_sec = 0;
2772         inode->i_atime.tv_sec = 0;
2773         inode->i_ctime.tv_sec = 0;
2774         inode->i_rdev = 0;
2775         rc = ll_update_inode(inode, md);
2776         if (rc != 0)
2777                 RETURN(rc);
2778
2779         /* OIDEBUG(inode); */
2780
2781 #ifdef HAVE_BACKING_DEV_INFO
2782         /* initializing backing dev info. */
2783         inode->i_mapping->backing_dev_info = &s2lsi(inode->i_sb)->lsi_bdi;
2784 #endif
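        /* Install the llite inode/file/address-space operations that match
         * the file type.
         */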
2785         if (S_ISREG(inode->i_mode)) {
2786                 struct ll_sb_info *sbi = ll_i2sbi(inode);
2787                 inode->i_op = &ll_file_inode_operations;
2788                 inode->i_fop = sbi->ll_fop;
2789                 inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
2790                 EXIT;
2791         } else if (S_ISDIR(inode->i_mode)) {
2792                 inode->i_op = &ll_dir_inode_operations;
2793                 inode->i_fop = &ll_dir_operations;
2794                 EXIT;
2795         } else if (S_ISLNK(inode->i_mode)) {
2796                 inode->i_op = &ll_fast_symlink_inode_operations;
2797                 EXIT;
2798         } else {
2799                 inode->i_op = &ll_special_inode_operations;
2800
2801                 init_special_inode(inode, inode->i_mode,
2802                                    inode->i_rdev);
2803
2804                 EXIT;
2805         }
2806
2807         return 0;
2808 }
2809
2810 void ll_delete_inode(struct inode *inode)
2811 {
2812         struct ll_inode_info *lli = ll_i2info(inode);
2813         ENTRY;
2814
2815         if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL) {
2816                 /* This is the last chance to write out dirty pages,
2817                  * otherwise we may lose data during umount.
2818                  *
2819                  * If i_nlink is 0 then just discard the data. This is safe
2820                  * because the local inode only gets i_nlink 0 from the server
2821                  * for the last unlink, so the file cannot be open elsewhere.
2822                  */
2823                 cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, inode->i_nlink ?
2824                                    CL_FSYNC_LOCAL : CL_FSYNC_DISCARD, 1);
2825         }
2826
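        /* Drop any remaining cached pages and clean up llite/cl state
         * before the VFS frees the inode.
         */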
2827         ll_truncate_inode_pages_final(inode);
2828         ll_clear_inode(inode);
2829         clear_inode(inode);
2830
2831         EXIT;
2832 }
2833
2834 int ll_iocontrol(struct inode *inode, struct file *file,
2835                  unsigned int cmd, unsigned long arg)
2836 {
2837         struct ll_sb_info *sbi = ll_i2sbi(inode);
2838         struct ptlrpc_request *req = NULL;
2839         int rc, flags = 0;
2840         ENTRY;
2841
2842         switch (cmd) {
2843         case FS_IOC_GETFLAGS: {
2844                 struct mdt_body *body;
2845                 struct md_op_data *op_data;
2846
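                /* The flags live on the MDT, so fetch them with a getattr
                 * RPC and copy the result back to user space.
                 */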
2847                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
2848                                              0, 0, LUSTRE_OPC_ANY,
2849                                              NULL);
2850                 if (IS_ERR(op_data))
2851                         RETURN(PTR_ERR(op_data));
2852
2853                 op_data->op_valid = OBD_MD_FLFLAGS;
2854                 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
2855                 ll_finish_md_op_data(op_data);
2856                 if (rc) {
2857                         CERROR("%s: failure inode "DFID": rc = %d\n",
2858                                sbi->ll_md_exp->exp_obd->obd_name,
2859                                PFID(ll_inode2fid(inode)), rc);
2860                         RETURN(-abs(rc));
2861                 }
2862
2863                 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
2864
2865                 flags = body->mbo_flags;
2866
2867                 ptlrpc_req_finished(req);
2868
2869                 RETURN(put_user(flags, (int __user *)arg));
2870         }
2871         case FS_IOC_SETFLAGS: {
2872                 struct iattr *attr;
2873                 struct md_op_data *op_data;
2874                 struct cl_object *obj;
2875                 struct fsxattr fa = { 0 };
2876
2877                 if (get_user(flags, (int __user *)arg))
2878                         RETURN(-EFAULT);
2879
2880                 fa.fsx_projid = ll_i2info(inode)->lli_projid;
2881                 if (flags & LUSTRE_PROJINHERIT_FL)
2882                         fa.fsx_xflags = FS_XFLAG_PROJINHERIT;
2883
2884                 rc = ll_ioctl_check_project(inode, fa.fsx_xflags,
2885                                             fa.fsx_projid);
2886                 if (rc)
2887                         RETURN(rc);
2888
2889                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
2890                                              LUSTRE_OPC_ANY, NULL);
2891                 if (IS_ERR(op_data))
2892                         RETURN(PTR_ERR(op_data));
2893
2894                 op_data->op_attr_flags = flags;
2895                 op_data->op_xvalid |= OP_XVALID_FLAGS;
2896                 rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &req);
2897                 ll_finish_md_op_data(op_data);
2898                 ptlrpc_req_finished(req);
2899                 if (rc)
2900                         RETURN(rc);
2901
2902                 ll_update_inode_flags(inode, flags);
2903
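                /* Propagate the new flags to the OST objects as well,
                 * if the file has a data object.
                 */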
2904                 obj = ll_i2info(inode)->lli_clob;
2905                 if (obj == NULL)
2906                         RETURN(0);
2907
2908                 OBD_ALLOC_PTR(attr);
2909                 if (attr == NULL)
2910                         RETURN(-ENOMEM);
2911
2912                 rc = cl_setattr_ost(obj, attr, OP_XVALID_FLAGS, flags);
2913
2914                 OBD_FREE_PTR(attr);
2915                 RETURN(rc);
2916         }
2917         default:
2918                 RETURN(-ENOSYS);
2919         }
2920
2921         RETURN(0);
2922 }
2923
2924 int ll_flush_ctx(struct inode *inode)
2925 {
2926         struct ll_sb_info  *sbi = ll_i2sbi(inode);
2927
2928         CDEBUG(D_SEC, "flush context for user %d\n",
2929                from_kuid(&init_user_ns, current_uid()));
2930
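        /* Ask both the metadata and data exports to flush this user's
         * security contexts.
         */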
2931         obd_set_info_async(NULL, sbi->ll_md_exp,
2932                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2933                            0, NULL, NULL);
2934         obd_set_info_async(NULL, sbi->ll_dt_exp,
2935                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2936                            0, NULL, NULL);
2937         return 0;
2938 }
2939
2940 /* umount -f client means force down, don't save state */
2941 void ll_umount_begin(struct super_block *sb)
2942 {
2943         struct ll_sb_info *sbi = ll_s2sbi(sb);
2944         struct obd_device *obd;
2945         struct obd_ioctl_data *ioc_data;
2946         int cnt;
2947         ENTRY;
2948
2949         CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
2950                sb->s_count, atomic_read(&sb->s_active));
2951
2952         obd = class_exp2obd(sbi->ll_md_exp);
2953         if (obd == NULL) {
2954                 CERROR("Invalid MDC connection handle %#llx\n",
2955                        sbi->ll_md_exp->exp_handle.h_cookie);
2956                 EXIT;
2957                 return;
2958         }
2959         obd->obd_force = 1;
2960
2961         obd = class_exp2obd(sbi->ll_dt_exp);
2962         if (obd == NULL) {
2963                 CERROR("Invalid LOV connection handle %#llx\n",
2964                        sbi->ll_dt_exp->exp_handle.h_cookie);
2965                 EXIT;
2966                 return;
2967         }
2968         obd->obd_force = 1;
2969
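        /* Mark the MDC and OSC devices inactive so that outstanding requests
         * fail quickly instead of waiting for recovery.
         */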
2970         OBD_ALLOC_PTR(ioc_data);
2971         if (ioc_data) {
2972                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
2973                               sizeof *ioc_data, ioc_data, NULL);
2974
2975                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
2976                               sizeof *ioc_data, ioc_data, NULL);
2977
2978                 OBD_FREE_PTR(ioc_data);
2979         }
2980
2981         /* Really, we'd like to wait until there are no requests outstanding,
2982          * and then continue.  For now, we just periodically check for the
2983          * VFS to decrement mnt_cnt and hope to finish within 10 seconds.
2984          */
2985         cnt = 10;
2986         while (cnt > 0 &&
2987                !may_umount(sbi->ll_mnt.mnt)) {
2988                 ssleep(1);
2989                 cnt -= 1;
2990         }
2991
2992         EXIT;
2993 }
2994
2995 int ll_remount_fs(struct super_block *sb, int *flags, char *data)
2996 {
2997         struct ll_sb_info *sbi = ll_s2sbi(sb);
2998         char *profilenm = get_profile_name(sb);
2999         int err;
3000         __u32 read_only;
3001
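        /* Only a change of the read-only flag is handled here; the new state
         * is sent to the MDC via KEY_READ_ONLY before updating s_flags.
         */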
3002         if ((*flags & MS_RDONLY) != (sb->s_flags & SB_RDONLY)) {
3003                 read_only = *flags & MS_RDONLY;
3004                 err = obd_set_info_async(NULL, sbi->ll_md_exp,
3005                                          sizeof(KEY_READ_ONLY),
3006                                          KEY_READ_ONLY, sizeof(read_only),
3007                                          &read_only, NULL);
3008                 if (err) {
3009                         LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
3010                                       profilenm, read_only ?
3011                                       "read-only" : "read-write", err);
3012                         return err;
3013                 }
3014
3015                 if (read_only)
3016                         sb->s_flags |= SB_RDONLY;
3017                 else
3018                         sb->s_flags &= ~SB_RDONLY;
3019
3020                 if (test_bit(LL_SBI_VERBOSE, sbi->ll_flags))
3021                         LCONSOLE_WARN("Remounted %s %s\n", profilenm,
3022                                       read_only ?  "read-only" : "read-write");
3023         }
3024         return 0;
3025 }
3026
3027 /**
3028  * Clean up an open handle that is cached on the MDT side.
3029  *
3030  * For the open case, the client-side open handling thread may hit an
3031  * error after the MDT has granted the open. In that case the client
3032  * should send a close RPC to the MDT as cleanup; otherwise the open
3033  * handle is leaked on the MDT until the client unmounts or is evicted.
3034  *
3035  * Furthermore, if someone unlinks the file, the open handle holds a
3036  * reference on that file/object and blocks subsequent threads that
3037  * want to locate the object via FID.
3038  *
3039  * \param[in] sb    super block for this file system
3040  * \param[in] pill  request capsule holding the original open reply
3041  */
3042 void ll_open_cleanup(struct super_block *sb, struct req_capsule *pill)
3043 {
3044         struct mdt_body                 *body;
3045         struct md_op_data               *op_data;
3046         struct ptlrpc_request           *close_req = NULL;
3047         struct obd_export               *exp       = ll_s2sbi(sb)->ll_md_exp;
3048         ENTRY;
3049
3050         body = req_capsule_server_get(pill, &RMF_MDT_BODY);
3051         OBD_ALLOC_PTR(op_data);
3052         if (op_data == NULL) {
3053                 CWARN("%s: cannot allocate op_data to release open handle for "
3054                       DFID"\n", ll_s2sbi(sb)->ll_fsname, PFID(&body->mbo_fid1));
3055
3056                 RETURN_EXIT;
3057         }
3058
3059         op_data->op_fid1 = body->mbo_fid1;
3060         op_data->op_open_handle = body->mbo_open_handle;
3061         op_data->op_mod_time = ktime_get_real_seconds();
3062         md_close(exp, op_data, NULL, &close_req);
3063         ptlrpc_req_finished(close_req);
3064         ll_finish_md_op_data(op_data);
3065
3066         EXIT;
3067 }
3068
3069 int ll_prep_inode(struct inode **inode, struct req_capsule *pill,
3070                   struct super_block *sb, struct lookup_intent *it)
3071 {
3072         struct ll_sb_info *sbi = NULL;
3073         struct lustre_md md = { NULL };
3074         bool default_lmv_deleted = false;
3075         int rc;
3076
3077         ENTRY;
3078
3079         LASSERT(*inode || sb);
3080         sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
3081         rc = md_get_lustre_md(sbi->ll_md_exp, pill, sbi->ll_dt_exp,
3082                               sbi->ll_md_exp, &md);
3083         if (rc != 0)
3084                 GOTO(out, rc);
3085
3086         /*
3087          * Clear default_lmv only if the intent_getattr reply doesn't contain
3088          * it, but that must be done after iget. Check it early because
3089          * ll_update_lsm_md() may change md.
3090          */
3091         if (it && (it->it_op & (IT_LOOKUP | IT_GETATTR)) &&
3092             S_ISDIR(md.body->mbo_mode) && !md.default_lmv)
3093                 default_lmv_deleted = true;
3094
3095         if (*inode) {
3096                 rc = ll_update_inode(*inode, &md);
3097                 if (rc != 0)
3098                         GOTO(out, rc);
3099         } else {
3100                 bool api32 = test_bit(LL_SBI_32BIT_API, sbi->ll_flags);
3101                 struct lu_fid *fid1 = &md.body->mbo_fid1;
3102
3103                 LASSERT(sb != NULL);
3104
3105                 /*
3106                  * At this point the server returns the same FID the client
3107                  * generated for the create, so using ->fid1 is okay here.
3108                  */
3109                 if (!fid_is_sane(fid1)) {
3110                         CERROR("%s: Fid is insane "DFID"\n",
3111                                 sbi->ll_fsname, PFID(fid1));
3112                         GOTO(out, rc = -EINVAL);
3113                 }
3114
3115                 *inode = ll_iget(sb, cl_fid_build_ino(fid1, api32), &md);
3116                 if (IS_ERR(*inode)) {
3117                         lmd_clear_acl(&md);
3118                         rc = IS_ERR(*inode) ? PTR_ERR(*inode) : -ENOMEM;
3119                         *inode = NULL;
3120                         CERROR("new_inode -fatal: rc %d\n", rc);
3121                         GOTO(out, rc);
3122                 }
3123         }
3124
3125         /* Handle a piggybacked layout lock.
3126          * A layout lock can be piggybacked on getattr and open requests.
3127          * The lsm can be applied to the inode only if it comes with a layout
3128          * lock, otherwise the correct layout may be overwritten, for example:
3129          * 1. proc1: MDT returns an lsm but does not grant the layout lock
3130          * 2. the layout is changed by another client
3131          * 3. proc2: refreshes the layout and the layout lock is granted
3132          * 4. proc1: applies the now-stale layout */
3133         if (it != NULL && it->it_lock_mode != 0) {
3134                 struct lustre_handle lockh;
3135                 struct ldlm_lock *lock;
3136
3137                 lockh.cookie = it->it_lock_handle;
3138                 lock = ldlm_handle2lock(&lockh);
3139                 LASSERT(lock != NULL);
3140                 if (ldlm_has_layout(lock)) {
3141                         struct cl_object_conf conf;
3142
3143                         memset(&conf, 0, sizeof(conf));
3144                         conf.coc_opc = OBJECT_CONF_SET;
3145                         conf.coc_inode = *inode;
3146                         conf.coc_lock = lock;
3147                         conf.u.coc_layout = md.layout;
3148                         (void)ll_layout_conf(*inode, &conf);
3149                 }
3150                 LDLM_LOCK_PUT(lock);
3151         }
3152
3153         if (default_lmv_deleted)
3154                 ll_update_default_lsm_md(*inode, &md);
3155
3156         /* we may want to apply some policy for foreign file/dir */
3157         if (ll_sbi_has_foreign_symlink(sbi)) {
3158                 rc = ll_manage_foreign(*inode, &md);
3159                 if (rc < 0)
3160                         GOTO(out, rc);
3161         }
3162
3163         GOTO(out, rc = 0);
3164
3165 out:
3166         /* cleanup will be done if necessary */
3167         md_free_lustre_md(sbi->ll_md_exp, &md);
3168
3169         if (rc != 0 && it != NULL && it->it_op & IT_OPEN) {
3170                 ll_intent_drop_lock(it);
3171                 ll_open_cleanup(sb != NULL ? sb : (*inode)->i_sb, pill);
3172         }
3173
3174         return rc;
3175 }
3176
3177 int ll_obd_statfs(struct inode *inode, void __user *arg)
3178 {
3179         struct ll_sb_info *sbi = NULL;
3180         struct obd_export *exp;
3181         struct obd_ioctl_data *data = NULL;
3182         __u32 type;
3183         int len = 0, rc;
3184
3185         if (inode)
3186                 sbi = ll_i2sbi(inode);
3187         if (!sbi)
3188                 GOTO(out_statfs, rc = -EINVAL);
3189
3190         rc = obd_ioctl_getdata(&data, &len, arg);
3191         if (rc)
3192                 GOTO(out_statfs, rc);
3193
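        /* The inline buffers carry the statfs target selector; the statfs
         * result and target UUID come back in the peer buffers.
         */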
3194         if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
3195             !data->ioc_pbuf1 || !data->ioc_pbuf2)
3196                 GOTO(out_statfs, rc = -EINVAL);
3197
3198         if (data->ioc_inllen1 != sizeof(__u32) ||
3199             data->ioc_inllen2 != sizeof(__u32) ||
3200             data->ioc_plen1 != sizeof(struct obd_statfs) ||
3201             data->ioc_plen2 != sizeof(struct obd_uuid))
3202                 GOTO(out_statfs, rc = -EINVAL);
3203
3204         memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
3205         if (type & LL_STATFS_LMV)
3206                 exp = sbi->ll_md_exp;
3207         else if (type & LL_STATFS_LOV)
3208                 exp = sbi->ll_dt_exp;
3209         else
3210                 GOTO(out_statfs, rc = -ENODEV);
3211
3212         rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, data, NULL);
3213         if (rc)
3214                 GOTO(out_statfs, rc);
3215 out_statfs:
3216         OBD_FREE_LARGE(data, len);
3217         return rc;
3218 }
3219
3220 /*
3221  * This is normally called in ll_finish_md_op_data(), but sometimes it needs to
3222  * be called early to avoid deadlock.
3223  */
3224 void ll_unlock_md_op_lsm(struct md_op_data *op_data)
3225 {
3226         if (op_data->op_mea2_sem) {
3227                 up_read_non_owner(op_data->op_mea2_sem);
3228                 op_data->op_mea2_sem = NULL;
3229         }
3230
3231         if (op_data->op_mea1_sem) {
3232                 up_read_non_owner(op_data->op_mea1_sem);
3233                 op_data->op_mea1_sem = NULL;
3234         }
3235 }
3236
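/*
 * A typical caller pattern looks roughly like the sketch below (illustration
 * only; the md_* call and its arguments depend on the operation being done):
 *
 *	op_data = ll_prep_md_op_data(NULL, dir, NULL, name, namelen, 0,
 *				     LUSTRE_OPC_ANY, NULL);
 *	if (IS_ERR(op_data))
 *		RETURN(PTR_ERR(op_data));
 *	rc = md_getattr_name(ll_i2sbi(dir)->ll_md_exp, op_data, &req);
 *	ll_finish_md_op_data(op_data);
 */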
3237 /* this function prepares md_op_data hint for passing it down to MD stack. */
3238 struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
3239                                       struct inode *i1, struct inode *i2,
3240                                       const char *name, size_t namelen,
3241                                       __u32 mode, enum md_op_code opc,
3242                                       void *data)
3243 {
3244         struct llcrypt_name fname = { 0 };
3245         int rc;
3246
3247         LASSERT(i1 != NULL);
3248
3249         if (name == NULL) {
3250                 /* Do not reuse namelen for something else. */
3251                 if (namelen != 0)
3252                         return ERR_PTR(-EINVAL);
3253         } else {
3254                 if ((!IS_ENCRYPTED(i1) ||
3255                      (opc != LUSTRE_OPC_LOOKUP && opc != LUSTRE_OPC_CREATE)) &&
3256                     namelen > ll_i2sbi(i1)->ll_namelen)
3257                         return ERR_PTR(-ENAMETOOLONG);
3258
3259                 /* "/" is not a valid name, but it is allowed */
3260                 if (!lu_name_is_valid_2(name, namelen) &&
3261                     strncmp("/", name, namelen) != 0)
3262                         return ERR_PTR(-EINVAL);
3263         }
3264
3265         if (op_data == NULL)
3266                 OBD_ALLOC_PTR(op_data);
3267
3268         if (op_data == NULL)
3269                 return ERR_PTR(-ENOMEM);
3270
3271         ll_i2gids(op_data->op_suppgids, i1, i2);
3272         /* If the client is using a subdir mount and looks at what it sees as
3273          * /.fscrypt, interpret it as the .fscrypt dir at the root of the fs.
3274          */
3275         if (unlikely(i1->i_sb && i1->i_sb->s_root && is_root_inode(i1) &&
3276                      !fid_is_root(ll_inode2fid(i1)) &&
3277                      name && namelen == strlen(dot_fscrypt_name) &&
3278                      strncmp(name, dot_fscrypt_name, namelen) == 0))
3279                 lu_root_fid(&op_data->op_fid1);
3280         else
3281                 op_data->op_fid1 = *ll_inode2fid(i1);
3282
3283         if (S_ISDIR(i1->i_mode)) {
3284                 down_read_non_owner(&ll_i2info(i1)->lli_lsm_sem);
3285                 op_data->op_mea1_sem = &ll_i2info(i1)->lli_lsm_sem;
3286                 op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
3287                 op_data->op_default_mea1 = ll_i2info(i1)->lli_default_lsm_md;
3288         }
3289
3290         if (i2) {
3291                 op_data->op_fid2 = *ll_inode2fid(i2);
3292                 if (S_ISDIR(i2->i_mode)) {
3293                         if (i2 != i1) {
3294                                 /* i2 is typically a child of i1, and MUST be
3295                                  * further from the root to avoid deadlocks.
3296                                  */
3297                                 down_read_non_owner(&ll_i2info(i2)->lli_lsm_sem);
3298                                 op_data->op_mea2_sem =
3299                                                 &ll_i2info(i2)->lli_lsm_sem;
3300                         }
3301                         op_data->op_mea2 = ll_i2info(i2)->lli_lsm_md;
3302                 }
3303         } else {
3304                 fid_zero(&op_data->op_fid2);
3305         }
3306
3307         if (test_bit(LL_SBI_64BIT_HASH, ll_i2sbi(i1)->ll_flags))
3308                 op_data->op_cli_flags |= CLI_HASH64;
3309
3310         if (ll_need_32bit_api(ll_i2sbi(i1)))
3311                 op_data->op_cli_flags |= CLI_API32;
3312
3313         if ((i2 && is_root_inode(i2)) ||
3314             opc == LUSTRE_OPC_LOOKUP || opc == LUSTRE_OPC_CREATE) {
3315                 /* In case of lookup, ll_setup_filename() has already been
3316                  * called in ll_lookup_it(), so just take the provided name.
3317                  * Also take the provided name when dealing with the root inode.
3318                  */
3319                 fname.disk_name.name = (unsigned char *)name;
3320                 fname.disk_name.len = namelen;
3321         } else if (name && namelen) {
3322                 struct qstr dname = QSTR_INIT(name, namelen);
3323                 struct inode *dir;
3324                 struct lu_fid *pfid = NULL;
3325                 struct lu_fid fid;
3326                 int lookup;
3327
3328                 if (!S_ISDIR(i1->i_mode) && i2 && S_ISDIR(i2->i_mode)) {
3329                         /* special case when called from ll_link() */
3330                         dir = i2;
3331                         lookup = 0;
3332                 } else {
3333                         dir = i1;
3334                         lookup = (int)(opc == LUSTRE_OPC_ANY);
3335                 }
3336                 if (opc == LUSTRE_OPC_ANY && lookup)
3337                         pfid = &fid;
3338                 rc = ll_setup_filename(dir, &dname, lookup, &fname, pfid);
3339                 if (rc) {
3340                         ll_finish_md_op_data(op_data);
3341                         return ERR_PTR(rc);
3342                 }
3343                 if (pfid && !fid_is_zero(pfid)) {
3344                         if (i2 == NULL)
3345                                 op_data->op_fid2 = fid;
3346                         op_data->op_bias = MDS_FID_OP;
3347                 }
3348                 if (fname.disk_name.name &&
3349                     fname.disk_name.name != (unsigned char *)name)
3350                         /* op_data->op_name must be freed after use */
3351                         op_data->op_flags |= MF_OPNAME_KMALLOCED;
3352         }
3353
3354         /* In fact LUSTRE_OPC_LOOKUP and LUSTRE_OPC_OPEN
3355          * are treated as LUSTRE_OPC_ANY
3356          */
3357         if (opc == LUSTRE_OPC_LOOKUP || opc == LUSTRE_OPC_OPEN)
3358                 op_data->op_code = LUSTRE_OPC_ANY;
3359         else
3360                 op_data->op_code = opc;
3361         op_data->op_name = fname.disk_name.name;
3362         op_data->op_namelen = fname.disk_name.len;
3363         op_data->op_mode = mode;
3364         op_data->op_mod_time = ktime_get_real_seconds();
3365         op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
3366         op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
3367         op_data->op_cap = current_cap();
3368         op_data->op_mds = 0;
3369         if ((opc == LUSTRE_OPC_CREATE) && (name != NULL) &&
3370              filename_is_volatile(name, namelen, &op_data->op_mds)) {
3371                 op_data->op_bias |= MDS_CREATE_VOLATILE;
3372         }
3373         op_data->op_data = data;
3374
3375         return op_data;
3376 }
3377
3378 void ll_finish_md_op_data(struct md_op_data *op_data)
3379 {
3380         ll_unlock_md_op_lsm(op_data);
3381         ll_security_release_secctx(op_data->op_file_secctx,
3382                                    op_data->op_file_secctx_size);
3383         if (op_data->op_flags & MF_OPNAME_KMALLOCED)
3384                 /* allocated via ll_setup_filename called
3385                  * from ll_prep_md_op_data
3386                  */
3387                 kfree(op_data->op_name);
3388         llcrypt_free_ctx(op_data->op_file_encctx, op_data->op_file_encctx_size);
3389         OBD_FREE_PTR(op_data);
3390 }
3391
3392 int ll_show_options(struct seq_file *seq, struct dentry *dentry)
3393 {
3394         struct ll_sb_info *sbi;
3395         int i;
3396
3397         LASSERT(seq && dentry);
3398         sbi = ll_s2sbi(dentry->d_sb);
3399
3400         if (test_bit(LL_SBI_NOLCK, sbi->ll_flags))
3401                 seq_puts(seq, "nolock");
3402
3403         for (i = 1; ll_sbi_flags_name[i].token != LL_SBI_NUM_MOUNT_OPT; i++) {
3404                 /* match_table in some cases has patterns for both enabled and
3405                  * disabled cases. Ignore the 'no'xxx versions if the bit is set.
3406                  */
3407                 if (test_bit(ll_sbi_flags_name[i].token, sbi->ll_flags) &&
3408                     strncmp(ll_sbi_flags_name[i].pattern, "no", 2)) {
3409                         if (ll_sbi_flags_name[i].token ==
3410                             LL_SBI_FOREIGN_SYMLINK) {
3411                                 seq_show_option(seq, "foreign_symlink",
3412                                                 sbi->ll_foreign_symlink_prefix);
3413                         } else {
3414                                 seq_printf(seq, ",%s",
3415                                            ll_sbi_flags_name[i].pattern);
3416                         }
3417
3418                         /* You can have either localflock or flock but not
3419                          * both. If localflock is set don't print flock or
3420                          * noflock.
3421                          */
3422                         if (ll_sbi_flags_name[i].token == LL_SBI_LOCALFLOCK)
3423                                 i += 2;
3424                 } else if (!test_bit(ll_sbi_flags_name[i].token, sbi->ll_flags) &&
3425                            !strncmp(ll_sbi_flags_name[i].pattern, "no", 2)) {
3426                         seq_printf(seq, ",%s",
3427                                    ll_sbi_flags_name[i].pattern);
3428                 }
3429         }
3430
3431         RETURN(0);
3432 }
3433
3434 /**
3435  * Get the obd name for the given cmd and copy it out to user space
3436  */
3437 int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
3438 {
3439         struct ll_sb_info *sbi = ll_i2sbi(inode);
3440         struct obd_device *obd;
3441         ENTRY;
3442
3443         if (cmd == OBD_IOC_GETNAME_OLD || cmd == OBD_IOC_GETDTNAME)
3444                 obd = class_exp2obd(sbi->ll_dt_exp);
3445         else if (cmd == OBD_IOC_GETMDNAME)
3446                 obd = class_exp2obd(sbi->ll_md_exp);
3447         else
3448                 RETURN(-EINVAL);
3449
3450         if (!obd)
3451                 RETURN(-ENOENT);
3452
3453         if (copy_to_user((void __user *)arg, obd->obd_name,
3454                          strlen(obd->obd_name) + 1))
3455                 RETURN(-EFAULT);
3456
3457         RETURN(0);
3458 }
3459
3460 struct dname_buf {
3461         struct work_struct db_work;
3462         struct dentry *db_dentry;
3463         /* Let's hope the path is not too long, 32 bytes for the work struct
3464          * on my kernel
3465          */
3466         char buf[PAGE_SIZE - sizeof(struct work_struct) - sizeof(void *)];
3467 };
3468
3469 static void ll_dput_later(struct work_struct *work)
3470 {
3471         struct dname_buf *db = container_of(work, struct dname_buf, db_work);
3472
3473         dput(db->db_dentry);
3474         free_page((unsigned long)db);
3475 }
3476
3477 static char* ll_d_path(struct dentry *dentry, char *buf, int bufsize)
3478 {
3479         char *path = NULL;
3480
3481         struct path p;
3482
3483         p.dentry = dentry;
3484         p.mnt = current->fs->root.mnt;
3485         path_get(&p);
3486         path = d_path(&p, buf, bufsize);
3487         path_put(&p);
3488         return path;
3489 }
3490
3491 void ll_dirty_page_discard_warn(struct inode *inode, int ioret)
3492 {
3493         struct dname_buf *db;
3494         char  *path = NULL;
3495         struct dentry *dentry = NULL;
3496
3497         /* This can be called inside a spinlock, so use GFP_ATOMIC. */
3498         db = (struct dname_buf *)__get_free_page(GFP_ATOMIC);
3499         if (db != NULL) {
3500
3501                 dentry = d_find_alias(inode);
3502                 if (dentry != NULL)
3503                         path = ll_d_path(dentry, db->buf, sizeof(db->buf));
3504         }
3505
3506         /* The below message is checked in recovery-small.sh test_24b */
3507         CDEBUG(D_WARNING,
3508                "%s: dirty page discard: %s/fid: "DFID"/%s may get corrupted "
3509                "(rc %d)\n", ll_i2sbi(inode)->ll_fsname,
3510                s2lsi(inode->i_sb)->lsi_lmd->lmd_dev,
3511                PFID(ll_inode2fid(inode)),
3512                (path && !IS_ERR(path)) ? path : "", ioret);
3513
3514         if (dentry != NULL) {
3515                 /* We cannot dput here: if we happen to be the last holder
3516                  * we may end up waiting for page evictions that
3517                  * in turn wait for RPCs that need this instance of ptlrpcd
3518                  * (calling brw_interpret->*page_completion*->vmpage_error->here),
3519                  * see LU-15340.
3520                  */
3521                 INIT_WORK(&db->db_work, ll_dput_later);
3522                 db->db_dentry = dentry;
3523                 schedule_work(&db->db_work);
3524         } else {
3525                 if (db != NULL)
3526                         free_page((unsigned long)db);
3527         }
3528 }
3529
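/*
 * Copy a lov_user_md from user space into a correctly sized kernel buffer.
 * A rough usage sketch (ulum stands for the caller's __user pointer; error
 * handling is abbreviated):
 *
 *	struct lov_user_md *klum = NULL;
 *	ssize_t lum_size = ll_copy_user_md(ulum, &klum);
 *
 *	if (lum_size < 0)
 *		return lum_size;
 *	... use klum ...
 *	OBD_FREE_LARGE(klum, lum_size);
 */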
3530 ssize_t ll_copy_user_md(const struct lov_user_md __user *md,
3531                         struct lov_user_md **kbuf)
3532 {
3533         struct lov_user_md      lum;
3534         ssize_t                 lum_size;
3535         ENTRY;
3536
3537         if (copy_from_user(&lum, md, sizeof(lum)))
3538                 RETURN(-EFAULT);
3539
3540         lum_size = ll_lov_user_md_size(&lum);
3541         if (lum_size < 0)
3542                 RETURN(lum_size);
3543
3544         OBD_ALLOC_LARGE(*kbuf, lum_size);
3545         if (*kbuf == NULL)
3546                 RETURN(-ENOMEM);
3547
3548         if (copy_from_user(*kbuf, md, lum_size) != 0) {
3549                 OBD_FREE_LARGE(*kbuf, lum_size);
3550                 RETURN(-EFAULT);
3551         }
3552
3553         RETURN(lum_size);
3554 }
3555
3556 /*
3557  * Compute the llite root squash state after a change of the root squash
3558  * configuration or after an LNet NID is added or removed.
3559  */
3560 void ll_compute_rootsquash_state(struct ll_sb_info *sbi)
3561 {
3562         struct root_squash_info *squash = &sbi->ll_squash;
3563         int i;
3564         bool matched;
3565         struct lnet_processid id;
3566
3567         /* Update norootsquash flag */
3568         spin_lock(&squash->rsi_lock);
3569         if (list_empty(&squash->rsi_nosquash_nids))
3570                 clear_bit(LL_SBI_NOROOTSQUASH, sbi->ll_flags);
3571         else {
3572                 /* Do not apply root squash if one of our NIDs is
3573                  * in the nosquash_nids list */
3574                 matched = false;
3575                 i = 0;
3576                 while (LNetGetId(i++, &id) != -ENOENT) {
3577                         if (nid_is_lo0(&id.nid))
3578                                 continue;
3579                         if (cfs_match_nid(lnet_nid_to_nid4(&id.nid),
3580                                           &squash->rsi_nosquash_nids)) {
3581                                 matched = true;
3582                                 break;
3583                         }
3584                 }
3585                 if (matched)
3586                         set_bit(LL_SBI_NOROOTSQUASH, sbi->ll_flags);
3587                 else
3588                         clear_bit(LL_SBI_NOROOTSQUASH, sbi->ll_flags);
3589         }
3590         spin_unlock(&squash->rsi_lock);
3591 }
3592
3593 /**
3594  * Parse linkea content to extract information about a given hardlink
3595  *
3596  * \param[in]   ldata      - Initialized linkea data
3597  * \param[in]   linkno     - Link identifier
3598  * \param[out]  parent_fid - The entry's parent FID
3599  * \param[out]  ln         - Entry name destination buffer
3600  *
3601  * \retval 0 on success
3602  * \retval Appropriate negative error code on failure
3603  */
3604 static int ll_linkea_decode(struct linkea_data *ldata, unsigned int linkno,
3605                             struct lu_fid *parent_fid, struct lu_name *ln)
3606 {
3607         unsigned int    idx;
3608         int             rc;
3609         ENTRY;
3610
3611         rc = linkea_init_with_rec(ldata);
3612         if (rc < 0)
3613                 RETURN(rc);
3614
3615         if (linkno >= ldata->ld_leh->leh_reccount)
3616                 /* beyond last link */
3617                 RETURN(-ENODATA);
3618
3619         linkea_first_entry(ldata);
3620         for (idx = 0; ldata->ld_lee != NULL; idx++) {
3621                 linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen, ln,
3622                                     parent_fid);
3623                 if (idx == linkno)
3624                         break;
3625
3626                 linkea_next_entry(ldata);
3627         }
3628
3629         if (idx < linkno)
3630                 RETURN(-ENODATA);
3631
3632         RETURN(0);
3633 }
3634
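/*
 * From user space this is normally reached through the LL_IOC_GETPARENT
 * ioctl. A rough sketch of a caller (buffer sizing and error handling are
 * simplified):
 *
 *	struct getparent *gp = malloc(sizeof(*gp) + NAME_MAX + 1);
 *
 *	gp->gp_linkno = 0;
 *	gp->gp_name_size = NAME_MAX + 1;
 *	if (ioctl(fd, LL_IOC_GETPARENT, gp) == 0)
 *		printf("parent: %s\n", gp->gp_name);
 */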
3635 /**
3636  * Get parent FID and name of an identified link. Operation is performed for
3637  * a given link number, letting the caller iterate over linkno to list one or
3638  * all links of an entry.
3639  *
3640  * \param[in]     file - File descriptor against which to perform the operation
3641  * \param[in,out] arg  - User-filled structure containing the linkno to operate
3642  *                       on and the available size. It is eventually filled with
3643  *                       the requested information or left untouched on error
3644  *
3645  * \retval - 0 on success
3646  * \retval - Appropriate negative error code on failure
3647  */
3648 int ll_getparent(struct file *file, struct getparent __user *arg)
3649 {
3650         struct inode            *inode = file_inode(file);
3651         struct linkea_data      *ldata;
3652         struct lu_buf            buf = LU_BUF_NULL;
3653         struct lu_name           ln;
3654         struct lu_fid            parent_fid;
3655         __u32                    linkno;
3656         __u32                    name_size;
3657         int                      rc;
3658
3659         ENTRY;
3660
3661         if (!capable(CAP_DAC_READ_SEARCH) &&
3662             !test_bit(LL_SBI_USER_FID2PATH, ll_i2sbi(inode)->ll_flags))
3663                 RETURN(-EPERM);
3664
3665         if (get_user(name_size, &arg->gp_name_size))
3666                 RETURN(-EFAULT);
3667
3668         if (get_user(linkno, &arg->gp_linkno))
3669                 RETURN(-EFAULT);
3670
3671         if (name_size > PATH_MAX)
3672                 RETURN(-EINVAL);
3673
3674         OBD_ALLOC(ldata, sizeof(*ldata));
3675         if (ldata == NULL)
3676                 RETURN(-ENOMEM);
3677
3678         rc = linkea_data_new(ldata, &buf);
3679         if (rc < 0)
3680                 GOTO(ldata_free, rc);
3681
3682         rc = ll_xattr_list(inode, XATTR_NAME_LINK, XATTR_TRUSTED_T, buf.lb_buf,
3683                            buf.lb_len, OBD_MD_FLXATTR);
3684         if (rc < 0)
3685                 GOTO(lb_free, rc);
3686
3687         rc = ll_linkea_decode(ldata, linkno, &parent_fid, &ln);
3688         if (rc < 0)
3689                 GOTO(lb_free, rc);
3690
3691         if (ln.ln_namelen >= name_size)
3692                 GOTO(lb_free, rc = -EOVERFLOW);
3693
3694         if (copy_to_user(&arg->gp_fid, &parent_fid, sizeof(arg->gp_fid)))
3695                 GOTO(lb_free, rc = -EFAULT);
3696
3697         if (copy_to_user(&arg->gp_name, ln.ln_name, ln.ln_namelen))
3698                 GOTO(lb_free, rc = -EFAULT);
3699
3700         if (put_user('\0', arg->gp_name + ln.ln_namelen))
3701                 GOTO(lb_free, rc = -EFAULT);
3702
3703 lb_free:
3704         lu_buf_free(&buf);
3705 ldata_free:
3706         OBD_FREE(ldata, sizeof(*ldata));
3707
3708         RETURN(rc);
3709 }