1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lustre/llite/llite_lib.c
32  *
33  * Lustre Light Super operations
34  */
35
36 #define DEBUG_SUBSYSTEM S_LLITE
37
38 #include <linux/cpu.h>
39 #include <linux/module.h>
40 #include <linux/random.h>
41 #include <linux/statfs.h>
42 #include <linux/time.h>
43 #include <linux/file.h>
44 #include <linux/types.h>
45 #include <libcfs/linux/linux-uuid.h>
46 #include <linux/version.h>
47 #include <linux/mm.h>
48 #include <linux/user_namespace.h>
49 #include <linux/delay.h>
50 #include <linux/uidgid.h>
51 #include <linux/fs_struct.h>
52
53 #ifndef HAVE_CPUS_READ_LOCK
54 #include <libcfs/linux/linux-cpu.h>
55 #endif
56 #include <libcfs/linux/linux-misc.h>
57 #include <uapi/linux/lustre/lustre_ioctl.h>
58 #ifdef HAVE_UAPI_LINUX_MOUNT_H
59 #include <uapi/linux/mount.h>
60 #endif
61
62 #include <lustre_ha.h>
63 #include <lustre_dlm.h>
64 #include <lprocfs_status.h>
65 #include <lustre_disk.h>
66 #include <uapi/linux/lustre/lustre_param.h>
67 #include <lustre_log.h>
68 #include <cl_object.h>
69 #include <obd_cksum.h>
70 #include "llite_internal.h"
71
72 struct kmem_cache *ll_file_data_slab;
73
74 #ifndef log2
75 #define log2(n) ffz(~(n))
76 #endif
77
78 /**
79  * If only one core is visible to Lustre, async readahead
80  * is disabled.  To avoid massive oversubscription, we use
81  * 1/2 of the active cores as the default maximum number of
82  * async readahead requests.
83  */
84 static inline unsigned int ll_get_ra_async_max_active(void)
85 {
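        /* cfs_cpt_weight() returns the number of online CPUs covered by the
         * CPT table; half of them is used as the default cap on concurrent
         * async readahead work items. */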
86         return cfs_cpt_weight(cfs_cpt_tab, CFS_CPT_ANY) >> 1;
87 }
88
89 static struct ll_sb_info *ll_init_sbi(void)
90 {
91         struct ll_sb_info *sbi = NULL;
92         unsigned long pages;
93         unsigned long lru_page_max;
94         struct sysinfo si;
95         int rc;
96
97         ENTRY;
98
99         OBD_ALLOC_PTR(sbi);
100         if (sbi == NULL)
101                 RETURN(ERR_PTR(-ENOMEM));
102
103         rc = pcc_super_init(&sbi->ll_pcc_super);
104         if (rc < 0)
105                 GOTO(out_sbi, rc);
106
107         spin_lock_init(&sbi->ll_lock);
108         mutex_init(&sbi->ll_lco.lco_lock);
109         spin_lock_init(&sbi->ll_pp_extent_lock);
110         spin_lock_init(&sbi->ll_process_lock);
111         sbi->ll_rw_stats_on = 0;
112         sbi->ll_statfs_max_age = OBD_STATFS_CACHE_SECONDS;
113
114         si_meminfo(&si);
115         pages = si.totalram - si.totalhigh;
116         lru_page_max = pages / 2;
117
118         sbi->ll_ra_info.ra_async_max_active = ll_get_ra_async_max_active();
119         sbi->ll_ra_info.ll_readahead_wq =
120                 cfs_cpt_bind_workqueue("ll-readahead-wq", cfs_cpt_tab,
121                                        0, CFS_CPT_ANY,
122                                        sbi->ll_ra_info.ra_async_max_active);
123         if (IS_ERR(sbi->ll_ra_info.ll_readahead_wq))
124                 GOTO(out_pcc, rc = PTR_ERR(sbi->ll_ra_info.ll_readahead_wq));
125
126         /* initialize ll_cache data */
127         sbi->ll_cache = cl_cache_init(lru_page_max);
128         if (sbi->ll_cache == NULL)
129                 GOTO(out_destroy_ra, rc = -ENOMEM);
130
131         /* initialize foreign symlink prefix path */
132         OBD_ALLOC(sbi->ll_foreign_symlink_prefix, sizeof("/mnt/"));
133         if (sbi->ll_foreign_symlink_prefix == NULL)
134                 GOTO(out_destroy_ra, rc = -ENOMEM);
135         memcpy(sbi->ll_foreign_symlink_prefix, "/mnt/", sizeof("/mnt/"));
136         sbi->ll_foreign_symlink_prefix_size = sizeof("/mnt/");
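        /* note: sizeof("/mnt/") counts the trailing NUL, so the stored size
         * can later be passed unchanged to OBD_FREE() on cleanup */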
137
138         /* initialize foreign symlink upcall path, none by default */
139         OBD_ALLOC(sbi->ll_foreign_symlink_upcall, sizeof("none"));
140         if (sbi->ll_foreign_symlink_upcall == NULL)
141                 GOTO(out_destroy_ra, rc = -ENOMEM);
142         memcpy(sbi->ll_foreign_symlink_upcall, "none", sizeof("none"));
143         sbi->ll_foreign_symlink_upcall_items = NULL;
144         sbi->ll_foreign_symlink_upcall_nb_items = 0;
145         init_rwsem(&sbi->ll_foreign_symlink_sem);
146         /* foreign symlink support (LL_SBI_FOREIGN_SYMLINK in ll_flags)
147          * not enabled by default
148          */
149
150         sbi->ll_ra_info.ra_max_pages =
151                 min(pages / 32, SBI_DEFAULT_READ_AHEAD_MAX);
152         sbi->ll_ra_info.ra_max_pages_per_file =
153                 min(sbi->ll_ra_info.ra_max_pages / 4,
154                     SBI_DEFAULT_READ_AHEAD_PER_FILE_MAX);
155         sbi->ll_ra_info.ra_async_pages_per_file_threshold =
156                                 sbi->ll_ra_info.ra_max_pages_per_file;
157         sbi->ll_ra_info.ra_range_pages = SBI_DEFAULT_RA_RANGE_PAGES;
158         sbi->ll_ra_info.ra_max_read_ahead_whole_pages = -1;
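        /* -1 means "not set via the config log"; client_common_fill_super()
         * derives the value from the negotiated ocd_brw_size at connect time */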
159         atomic_set(&sbi->ll_ra_info.ra_async_inflight, 0);
160
161         set_bit(LL_SBI_VERBOSE, sbi->ll_flags);
162 #ifdef ENABLE_CHECKSUM
163         set_bit(LL_SBI_CHECKSUM, sbi->ll_flags);
164 #endif
165 #ifdef ENABLE_FLOCK
166         set_bit(LL_SBI_FLOCK, sbi->ll_flags);
167 #endif
168
169 #ifdef HAVE_LRU_RESIZE_SUPPORT
170         set_bit(LL_SBI_LRU_RESIZE, sbi->ll_flags);
171 #endif
172         set_bit(LL_SBI_LAZYSTATFS, sbi->ll_flags);
173
174         /* metadata statahead is enabled by default */
175         sbi->ll_sa_running_max = LL_SA_RUNNING_DEF;
176         sbi->ll_sa_max = LL_SA_RPC_DEF;
177         atomic_set(&sbi->ll_sa_total, 0);
178         atomic_set(&sbi->ll_sa_wrong, 0);
179         atomic_set(&sbi->ll_sa_running, 0);
180         atomic_set(&sbi->ll_agl_total, 0);
181         atomic_set(&sbi->ll_sa_hit_total, 0);
182         atomic_set(&sbi->ll_sa_miss_total, 0);
183         set_bit(LL_SBI_AGL_ENABLED, sbi->ll_flags);
184         set_bit(LL_SBI_FAST_READ, sbi->ll_flags);
185         set_bit(LL_SBI_TINY_WRITE, sbi->ll_flags);
186         set_bit(LL_SBI_PARALLEL_DIO, sbi->ll_flags);
187         ll_sbi_set_encrypt(sbi, true);
188         ll_sbi_set_name_encrypt(sbi, true);
189
190         /* root squash */
191         sbi->ll_squash.rsi_uid = 0;
192         sbi->ll_squash.rsi_gid = 0;
193         INIT_LIST_HEAD(&sbi->ll_squash.rsi_nosquash_nids);
194         spin_lock_init(&sbi->ll_squash.rsi_lock);
195
196         /* Per-filesystem file heat */
197         sbi->ll_heat_decay_weight = SBI_DEFAULT_HEAT_DECAY_WEIGHT;
198         sbi->ll_heat_period_second = SBI_DEFAULT_HEAT_PERIOD_SECOND;
199
200         /* Per-fs open heat level before requesting open lock */
201         sbi->ll_oc_thrsh_count = SBI_DEFAULT_OPENCACHE_THRESHOLD_COUNT;
202         sbi->ll_oc_max_ms = SBI_DEFAULT_OPENCACHE_THRESHOLD_MAX_MS;
203         sbi->ll_oc_thrsh_ms = SBI_DEFAULT_OPENCACHE_THRESHOLD_MS;
204         RETURN(sbi);
205 out_destroy_ra:
206         if (sbi->ll_foreign_symlink_prefix)
207                 OBD_FREE(sbi->ll_foreign_symlink_prefix, sizeof("/mnt/"));
208         if (sbi->ll_cache) {
209                 cl_cache_decref(sbi->ll_cache);
210                 sbi->ll_cache = NULL;
211         }
212         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
213 out_pcc:
214         pcc_super_fini(&sbi->ll_pcc_super);
215 out_sbi:
216         OBD_FREE_PTR(sbi);
217         RETURN(ERR_PTR(rc));
218 }
219
220 static void ll_free_sbi(struct super_block *sb)
221 {
222         struct ll_sb_info *sbi = ll_s2sbi(sb);
223         ENTRY;
224
225         if (sbi != NULL) {
226                 if (!list_empty(&sbi->ll_squash.rsi_nosquash_nids))
227                         cfs_free_nidlist(&sbi->ll_squash.rsi_nosquash_nids);
228                 if (sbi->ll_ra_info.ll_readahead_wq)
229                         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
230                 if (sbi->ll_cache != NULL) {
231                         cl_cache_decref(sbi->ll_cache);
232                         sbi->ll_cache = NULL;
233                 }
234                 if (sbi->ll_foreign_symlink_prefix) {
235                         OBD_FREE(sbi->ll_foreign_symlink_prefix,
236                                  sbi->ll_foreign_symlink_prefix_size);
237                         sbi->ll_foreign_symlink_prefix = NULL;
238                 }
239                 if (sbi->ll_foreign_symlink_upcall) {
240                         OBD_FREE(sbi->ll_foreign_symlink_upcall,
241                                  strlen(sbi->ll_foreign_symlink_upcall) +
242                                        1);
243                         sbi->ll_foreign_symlink_upcall = NULL;
244                 }
245                 if (sbi->ll_foreign_symlink_upcall_items) {
246                         int i;
247                         int nb_items = sbi->ll_foreign_symlink_upcall_nb_items;
248                         struct ll_foreign_symlink_upcall_item *items =
249                                 sbi->ll_foreign_symlink_upcall_items;
250
251                         for (i = 0; i < nb_items; i++)
252                                 if (items[i].type == STRING_TYPE)
253                                         OBD_FREE(items[i].string,
254                                                        items[i].size);
255
256                         OBD_FREE_LARGE(items, nb_items *
257                                 sizeof(struct ll_foreign_symlink_upcall_item));
258                         sbi->ll_foreign_symlink_upcall_items = NULL;
259                 }
260                 ll_free_rw_stats_info(sbi);
261                 pcc_super_fini(&sbi->ll_pcc_super);
262                 OBD_FREE(sbi, sizeof(*sbi));
263         }
264         EXIT;
265 }
266
267 static int client_common_fill_super(struct super_block *sb, char *md, char *dt)
268 {
269         struct inode *root = NULL;
270         struct ll_sb_info *sbi = ll_s2sbi(sb);
271         struct obd_statfs *osfs = NULL;
272         struct ptlrpc_request *request = NULL;
273         struct obd_connect_data *data = NULL;
274         struct obd_uuid *uuid;
275         struct md_op_data *op_data;
276         struct lustre_md lmd;
277         u64 valid;
278         int size, err, checksum;
279         bool api32;
280         void *encctx;
281         int encctxlen;
282
283         ENTRY;
284         sbi->ll_md_obd = class_name2obd(md);
285         if (!sbi->ll_md_obd) {
286                 CERROR("MD %s: not setup or attached\n", md);
287                 RETURN(-EINVAL);
288         }
289
290         OBD_ALLOC_PTR(data);
291         if (data == NULL)
292                 RETURN(-ENOMEM);
293
294         OBD_ALLOC_PTR(osfs);
295         if (osfs == NULL) {
296                 OBD_FREE_PTR(data);
297                 RETURN(-ENOMEM);
298         }
299
300         /* pass client page size via ocd_grant_blkbits; the server should
301          * report back its backend blocksize for grant calculation purposes */
302         data->ocd_grant_blkbits = PAGE_SHIFT;
303
304         /* indicate MDT features supported by this client */
305         data->ocd_connect_flags = OBD_CONNECT_IBITS    | OBD_CONNECT_NODEVOH  |
306                                   OBD_CONNECT_ATTRFID  | OBD_CONNECT_GRANT |
307                                   OBD_CONNECT_VERSION  | OBD_CONNECT_BRW_SIZE |
308                                   OBD_CONNECT_SRVLOCK  |
309                                   OBD_CONNECT_MDS_CAPA | OBD_CONNECT_OSS_CAPA |
310                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID     |
311                                   OBD_CONNECT_AT       | OBD_CONNECT_LOV_V3   |
312                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
313                                   OBD_CONNECT_64BITHASH |
314                                   OBD_CONNECT_EINPROGRESS |
315                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
316                                   OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS|
317                                   OBD_CONNECT_MAX_EASIZE |
318                                   OBD_CONNECT_FLOCK_DEAD |
319                                   OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK |
320                                   OBD_CONNECT_OPEN_BY_FID |
321                                   OBD_CONNECT_DIR_STRIPE |
322                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_CKSUM |
323                                   OBD_CONNECT_SUBTREE |
324                                   OBD_CONNECT_MULTIMODRPCS |
325                                   OBD_CONNECT_GRANT_PARAM |
326                                   OBD_CONNECT_GRANT_SHRINK |
327                                   OBD_CONNECT_SHORTIO | OBD_CONNECT_FLAGS2;
328
329         data->ocd_connect_flags2 = OBD_CONNECT2_DIR_MIGRATE |
330                                    OBD_CONNECT2_SUM_STATFS |
331                                    OBD_CONNECT2_OVERSTRIPING |
332                                    OBD_CONNECT2_FLR |
333                                    OBD_CONNECT2_LOCK_CONVERT |
334                                    OBD_CONNECT2_ARCHIVE_ID_ARRAY |
335                                    OBD_CONNECT2_INC_XID |
336                                    OBD_CONNECT2_LSOM |
337                                    OBD_CONNECT2_ASYNC_DISCARD |
338                                    OBD_CONNECT2_PCC |
339                                    OBD_CONNECT2_CRUSH | OBD_CONNECT2_LSEEK |
340                                    OBD_CONNECT2_GETATTR_PFID |
341                                    OBD_CONNECT2_DOM_LVB |
342                                    OBD_CONNECT2_REP_MBITS |
343                                    OBD_CONNECT2_ATOMIC_OPEN_LOCK;
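        /* The server masks off any flags it does not support in the connect
         * reply, so exp_connect_flags() checked below reflects the negotiated
         * set rather than everything requested here. */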
344
345 #ifdef HAVE_LRU_RESIZE_SUPPORT
346         if (test_bit(LL_SBI_LRU_RESIZE, sbi->ll_flags))
347                 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
348 #endif
349         data->ocd_connect_flags |= OBD_CONNECT_ACL_FLAGS;
350
351         data->ocd_cksum_types = obd_cksum_types_supported_client();
352
353         if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
354                 /* flag mdc connection as lightweight; only used for testing
355                  * purposes, use with care */
356                 data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
357
358         data->ocd_ibits_known = MDS_INODELOCK_FULL;
359         data->ocd_version = LUSTRE_VERSION_CODE;
360
361         if (test_bit(LL_SBI_USER_XATTR, sbi->ll_flags))
362                 data->ocd_connect_flags |= OBD_CONNECT_XATTR;
363
364 #ifdef SB_NOSEC
365         /* Setting this indicates we correctly support S_NOSEC (See kernel
366          * commit 9e1f1de02c2275d7172e18dc4e7c2065777611bf)
367          */
368         sb->s_flags |= SB_NOSEC;
369 #endif
370         sbi->ll_fop = ll_select_file_operations(sbi);
371
372         /* always ping even if server suppress_pings */
373         if (test_bit(LL_SBI_ALWAYS_PING, sbi->ll_flags))
374                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
375
376         obd_connect_set_secctx(data);
377         if (ll_sbi_has_encrypt(sbi)) {
378                 obd_connect_set_name_enc(data);
379                 obd_connect_set_enc(data);
380         }
381
382 #if defined(CONFIG_SECURITY)
383         data->ocd_connect_flags2 |= OBD_CONNECT2_SELINUX_POLICY;
384 #endif
385
386         data->ocd_brw_size = MD_MAX_BRW_SIZE;
387
388 retry_connect:
389         if (sb->s_flags & SB_RDONLY)
390                 data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
391         err = obd_connect(NULL, &sbi->ll_md_exp, sbi->ll_md_obd,
392                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
393         if (err == -EBUSY) {
394                 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
395                                    "recovery, of which this client is not a "
396                                    "part. Please wait for recovery to complete,"
397                                    " abort, or time out.\n", md);
398                 GOTO(out, err);
399         } else if (err) {
400                 CERROR("cannot connect to %s: rc = %d\n", md, err);
401                 GOTO(out, err);
402         }
403
404         sbi->ll_md_exp->exp_connect_data = *data;
405
406         err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
407                            LUSTRE_SEQ_METADATA);
408         if (err) {
409                 CERROR("%s: Can't init metadata layer FID infrastructure, "
410                        "rc = %d\n", sbi->ll_md_exp->exp_obd->obd_name, err);
411                 GOTO(out_md, err);
412         }
413
414         /* For mount, we only need fs info from MDT0.  In DNE this also
415          * ensures the client can be mounted as long as MDT0 is
416          * available. */
417         err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
418                         ktime_get_seconds() - sbi->ll_statfs_max_age,
419                         OBD_STATFS_FOR_MDT0);
420         if (err == -EROFS && !(sb->s_flags & SB_RDONLY)) {
421                 /* We got -EROFS from the server, which may mean it is
422                  * enforcing a read-only mount, so retry the connect read-only.
423                  */
424                 cfs_tty_write_msg("Forcing read-only mount.\n\r");
425                 CERROR("%s: mount failed with %d, forcing read-only mount.\n",
426                        sbi->ll_md_exp->exp_obd->obd_name, err);
427                 sb->s_flags |= SB_RDONLY;
428                 obd_fid_fini(sbi->ll_md_exp->exp_obd);
429                 obd_disconnect(sbi->ll_md_exp);
430                 GOTO(retry_connect, err);
431         } else if (err) {
432                 GOTO(out_md_fid, err);
433         }
434
435         /* This needs to be after statfs to ensure connect has finished.
436          * Note that "data" does NOT contain the valid connect reply.
437          * If connecting to a 1.8 server there will be no LMV device, so
438          * we can access the MDC export directly and exp_connect_flags will
439          * be non-zero, but if accessing an upgraded 2.1 server it will
440          * have the correct flags filled in.
441          * XXX: fill in the LMV exp_connect_flags from MDC(s). */
442         valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
443         if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
444             valid != CLIENT_CONNECT_MDT_REQD) {
445                 char *buf;
446
447                 OBD_ALLOC_WAIT(buf, PAGE_SIZE);
448                 obd_connect_flags2str(buf, PAGE_SIZE,
449                                       valid ^ CLIENT_CONNECT_MDT_REQD, 0, ",");
450                 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support "
451                                    "feature(s) needed for correct operation "
452                                    "of this client (%s). Please upgrade "
453                                    "server or downgrade client.\n",
454                                    sbi->ll_md_exp->exp_obd->obd_name, buf);
455                 OBD_FREE(buf, PAGE_SIZE);
456                 GOTO(out_md_fid, err = -EPROTO);
457         }
458
459         size = sizeof(*data);
460         err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
461                            KEY_CONN_DATA,  &size, data);
462         if (err) {
463                 CERROR("%s: Get connect data failed: rc = %d\n",
464                        sbi->ll_md_exp->exp_obd->obd_name, err);
465                 GOTO(out_md_fid, err);
466         }
467
468         LASSERT(osfs->os_bsize);
469         sb->s_blocksize = osfs->os_bsize;
470         sb->s_blocksize_bits = log2(osfs->os_bsize);
471         sb->s_magic = LL_SUPER_MAGIC;
472         sb->s_maxbytes = MAX_LFS_FILESIZE;
473         sbi->ll_inode_cache_enabled = 1;
474         sbi->ll_namelen = osfs->os_namelen;
475         sbi->ll_mnt.mnt = current->fs->root.mnt;
476         sbi->ll_mnt_ns = current->nsproxy->mnt_ns;
477
478         if (test_bit(LL_SBI_USER_XATTR, sbi->ll_flags) &&
479             !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
480                 LCONSOLE_INFO("Disabling user_xattr feature because "
481                               "it is not supported on the server\n");
482                 clear_bit(LL_SBI_USER_XATTR, sbi->ll_flags);
483         }
484
485         if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
486 #ifdef SB_POSIXACL
487                 sb->s_flags |= SB_POSIXACL;
488 #endif
489                 set_bit(LL_SBI_ACL, sbi->ll_flags);
490         } else {
491                 LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
492 #ifdef SB_POSIXACL
493                 sb->s_flags &= ~SB_POSIXACL;
494 #endif
495                 clear_bit(LL_SBI_ACL, sbi->ll_flags);
496         }
497
498         if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
499                 set_bit(LL_SBI_64BIT_HASH, sbi->ll_flags);
500
501         if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK)
502                 set_bit(LL_SBI_LAYOUT_LOCK, sbi->ll_flags);
503
504         if (obd_connect_has_secctx(data))
505                 set_bit(LL_SBI_FILE_SECCTX, sbi->ll_flags);
506
507         if (ll_sbi_has_encrypt(sbi) && !obd_connect_has_enc(data)) {
508                 if (ll_sb_has_test_dummy_encryption(sb))
509                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
510                                       sbi->ll_fsname,
511                                       sbi->ll_md_exp->exp_obd->obd_name);
512                 ll_sbi_set_encrypt(sbi, false);
513         }
514
515         if (ll_sbi_has_name_encrypt(sbi) && !obd_connect_has_name_enc(data)) {
516                 struct  lustre_sb_info *lsi = s2lsi(sb);
517
518                 if (ll_sb_has_test_dummy_encryption(sb))
519                         LCONSOLE_WARN("%s: server %s does not support name encryption, not using it.\n",
520                                       sbi->ll_fsname,
521                                       sbi->ll_md_exp->exp_obd->obd_name);
522                 lsi->lsi_flags &= ~LSI_FILENAME_ENC;
523                 ll_sbi_set_name_encrypt(sbi, false);
524         }
525
526         if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
527                 if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
528                         LCONSOLE_INFO("%s: disabling xattr cache due to "
529                                       "unknown maximum xattr size.\n", dt);
530                 } else if (!sbi->ll_xattr_cache_set) {
531                         /* If xattr_cache was already set (to either 0 or 1)
532                          * while processing the llog, don't override it here. */
533                         set_bit(LL_SBI_XATTR_CACHE, sbi->ll_flags);
534                         sbi->ll_xattr_cache_enabled = 1;
535                 }
536         }
537
538         sbi->ll_dt_obd = class_name2obd(dt);
539         if (!sbi->ll_dt_obd) {
540                 CERROR("DT %s: not setup or attached\n", dt);
541                 GOTO(out_md_fid, err = -ENODEV);
542         }
543
544         /* pass client page size via ocd_grant_blkbits; the server should
545          * report back its backend blocksize for grant calculation purposes */
546         data->ocd_grant_blkbits = PAGE_SHIFT;
547
548         /* indicate OST features supported by this client */
549         data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
550                                   OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
551                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
552                                   OBD_CONNECT_SRVLOCK |
553                                   OBD_CONNECT_AT | OBD_CONNECT_OSS_CAPA |
554                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
555                                   OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
556                                   OBD_CONNECT_EINPROGRESS |
557                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
558                                   OBD_CONNECT_LAYOUTLOCK |
559                                   OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK |
560                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_SHORTIO |
561                                   OBD_CONNECT_FLAGS2 | OBD_CONNECT_GRANT_SHRINK;
562         data->ocd_connect_flags2 = OBD_CONNECT2_LOCKAHEAD |
563                                    OBD_CONNECT2_INC_XID | OBD_CONNECT2_LSEEK |
564                                    OBD_CONNECT2_REP_MBITS;
565
566         if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_GRANT_PARAM))
567                 data->ocd_connect_flags |= OBD_CONNECT_GRANT_PARAM;
568
569         /* OBD_CONNECT_CKSUM should always be set, even if checksums are
570          * disabled by default, because it can still be enabled on the
571          * fly via /sys. As a consequence, we still need to come to an
572          * agreement on the supported algorithms at connect time
573          */
574         data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
575
576         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
577                 data->ocd_cksum_types = OBD_CKSUM_ADLER;
578         else
579                 data->ocd_cksum_types = obd_cksum_types_supported_client();
580
581 #ifdef HAVE_LRU_RESIZE_SUPPORT
582         data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
583 #endif
584         /* always ping even if server suppress_pings */
585         if (test_bit(LL_SBI_ALWAYS_PING, sbi->ll_flags))
586                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
587
588         if (ll_sbi_has_encrypt(sbi))
589                 obd_connect_set_enc(data);
590
591         CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d "
592                "ocd_grant: %d\n", data->ocd_connect_flags,
593                data->ocd_version, data->ocd_grant);
594
595         sbi->ll_dt_obd->obd_upcall.onu_owner = &sbi->ll_lco;
596         sbi->ll_dt_obd->obd_upcall.onu_upcall = cl_ocd_update;
597
598         data->ocd_brw_size = DT_MAX_BRW_SIZE;
599
600         err = obd_connect(NULL, &sbi->ll_dt_exp, sbi->ll_dt_obd,
601                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
602         if (err == -EBUSY) {
603                 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
604                                    "recovery, of which this client is not a "
605                                    "part.  Please wait for recovery to "
606                                    "complete, abort, or time out.\n", dt);
607                 GOTO(out_md, err);
608         } else if (err) {
609                 CERROR("%s: Cannot connect to %s: rc = %d\n",
610                        sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
611                 GOTO(out_md, err);
612         }
613
614         if (ll_sbi_has_encrypt(sbi) &&
615             !obd_connect_has_enc(&sbi->ll_dt_obd->u.lov.lov_ocd)) {
616                 if (ll_sb_has_test_dummy_encryption(sb))
617                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
618                                       sbi->ll_fsname, dt);
619                 ll_sbi_set_encrypt(sbi, false);
620         } else if (ll_sb_has_test_dummy_encryption(sb)) {
621                 LCONSOLE_WARN("Test dummy encryption mode enabled\n");
622         }
623
624         sbi->ll_dt_exp->exp_connect_data = *data;
625
626         /* Don't change value if it was specified in the config log */
627         if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages == -1) {
628                 sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
629                         max_t(unsigned long, SBI_DEFAULT_READ_AHEAD_WHOLE_MAX,
630                               (data->ocd_brw_size >> PAGE_SHIFT));
631                 if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages >
632                     sbi->ll_ra_info.ra_max_pages_per_file)
633                         sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
634                                 sbi->ll_ra_info.ra_max_pages_per_file;
635         }
636
637         err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
638                            LUSTRE_SEQ_METADATA);
639         if (err) {
640                 CERROR("%s: Can't init data layer FID infrastructure, "
641                        "rc = %d\n", sbi->ll_dt_exp->exp_obd->obd_name, err);
642                 GOTO(out_dt, err);
643         }
644
645         mutex_lock(&sbi->ll_lco.lco_lock);
646         sbi->ll_lco.lco_flags = data->ocd_connect_flags;
647         sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
648         sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
649         mutex_unlock(&sbi->ll_lco.lco_lock);
650
651         fid_zero(&sbi->ll_root_fid);
652         err = md_get_root(sbi->ll_md_exp, get_mount_fileset(sb),
653                            &sbi->ll_root_fid);
654         if (err) {
655                 CERROR("cannot mds_connect: rc = %d\n", err);
656                 GOTO(out_lock_cn_cb, err);
657         }
658         if (!fid_is_sane(&sbi->ll_root_fid)) {
659                 CERROR("%s: Invalid root fid "DFID" during mount\n",
660                        sbi->ll_md_exp->exp_obd->obd_name,
661                        PFID(&sbi->ll_root_fid));
662                 GOTO(out_lock_cn_cb, err = -EINVAL);
663         }
664         CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
665
666         sb->s_op = &lustre_super_operations;
667         sb->s_xattr = ll_xattr_handlers;
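        /* export (NFS re-export) operations are only registered on kernels
         * with stacks of at least 8K, presumably to avoid overflowing
         * smaller stacks (see b=17630 below) */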
668 #if THREAD_SIZE >= 8192 /*b=17630*/
669         sb->s_export_op = &lustre_export_operations;
670 #endif
671 #ifdef HAVE_LUSTRE_CRYPTO
672         llcrypt_set_ops(sb, &lustre_cryptops);
673 #endif
674
675         /* make root inode
676          * XXX: move this to after cbd setup? */
677         valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE |
678                 OBD_MD_ENCCTX;
679         if (test_bit(LL_SBI_ACL, sbi->ll_flags))
680                 valid |= OBD_MD_FLACL;
681
682         OBD_ALLOC_PTR(op_data);
683         if (op_data == NULL)
684                 GOTO(out_lock_cn_cb, err = -ENOMEM);
685
686         op_data->op_fid1 = sbi->ll_root_fid;
687         op_data->op_mode = 0;
688         op_data->op_valid = valid;
689
690         err = md_getattr(sbi->ll_md_exp, op_data, &request);
691
692         /* We need enc ctx info, so reset it in op_data to
693          * prevent it from being freed.
694          */
695         encctx = op_data->op_file_encctx;
696         encctxlen = op_data->op_file_encctx_size;
697         op_data->op_file_encctx = NULL;
698         op_data->op_file_encctx_size = 0;
699         OBD_FREE_PTR(op_data);
700         if (err) {
701                 CERROR("%s: md_getattr failed for root: rc = %d\n",
702                        sbi->ll_md_exp->exp_obd->obd_name, err);
703                 GOTO(out_lock_cn_cb, err);
704         }
705
706         err = md_get_lustre_md(sbi->ll_md_exp, &request->rq_pill,
707                                sbi->ll_dt_exp, sbi->ll_md_exp, &lmd);
708         if (err) {
709                 CERROR("failed to understand root inode md: rc = %d\n", err);
710                 ptlrpc_req_finished(request);
711                 GOTO(out_lock_cn_cb, err);
712         }
713
714         LASSERT(fid_is_sane(&sbi->ll_root_fid));
715         api32 = test_bit(LL_SBI_32BIT_API, sbi->ll_flags);
716         root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid, api32), &lmd);
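        /* cl_fid_build_ino() maps the 128-bit FID to an inode number; with
         * the 32bitapi mount option it is compressed into 32 bits for
         * legacy userspace */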
717         md_free_lustre_md(sbi->ll_md_exp, &lmd);
718
719         if (IS_ERR(root)) {
720                 lmd_clear_acl(&lmd);
721                 err = IS_ERR(root) ? PTR_ERR(root) : -EBADF;
722                 root = NULL;
723                 CERROR("%s: bad ll_iget() for root: rc = %d\n",
724                        sbi->ll_fsname, err);
725                 ptlrpc_req_finished(request);
726                 GOTO(out_root, err);
727         }
728
729         if (encctxlen) {
730                 CDEBUG(D_SEC,
731                        "server returned encryption ctx for root inode "DFID"\n",
732                        PFID(&sbi->ll_root_fid));
733                 err = ll_set_encflags(root, encctx, encctxlen, true);
734                 if (err)
735                         CWARN("%s: cannot set enc ctx for "DFID": rc = %d\n",
736                               sbi->ll_fsname,
737                               PFID(&sbi->ll_root_fid), err);
738         }
739         ptlrpc_req_finished(request);
740
741         checksum = test_bit(LL_SBI_CHECKSUM, sbi->ll_flags);
742         if (sbi->ll_checksum_set) {
743                 err = obd_set_info_async(NULL, sbi->ll_dt_exp,
744                                          sizeof(KEY_CHECKSUM), KEY_CHECKSUM,
745                                          sizeof(checksum), &checksum, NULL);
746                 if (err) {
747                         CERROR("%s: Set checksum failed: rc = %d\n",
748                                sbi->ll_dt_exp->exp_obd->obd_name, err);
749                         GOTO(out_root, err);
750                 }
751         }
752         cl_sb_init(sb);
753
754         sb->s_root = d_make_root(root);
755         if (sb->s_root == NULL) {
756                 err = -ENOMEM;
757                 CERROR("%s: can't make root dentry: rc = %d\n",
758                        sbi->ll_fsname, err);
759                 GOTO(out_root, err);
760         }
761
762         sbi->ll_sdev_orig = sb->s_dev;
763
764         /* We set sb->s_dev equal on all lustre clients in order to support
765          * NFS export clustering.  NFSD requires that the FSID be the same
766          * on all clients. */
767         /* s_dev is also used in lt_compare() to compare two fs, but that is
768          * only a node-local comparison. */
769         uuid = obd_get_uuid(sbi->ll_md_exp);
770         if (uuid != NULL)
771                 sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
772
773         if (data != NULL)
774                 OBD_FREE_PTR(data);
775         if (osfs != NULL)
776                 OBD_FREE_PTR(osfs);
777
778         if (sbi->ll_dt_obd) {
779                 err = sysfs_create_link(&sbi->ll_kset.kobj,
780                                         &sbi->ll_dt_obd->obd_kset.kobj,
781                                         sbi->ll_dt_obd->obd_type->typ_name);
782                 if (err < 0) {
783                         CERROR("%s: could not register %s in llite: rc = %d\n",
784                                dt, sbi->ll_fsname, err);
785                         err = 0;
786                 }
787         }
788
789         if (sbi->ll_md_obd) {
790                 err = sysfs_create_link(&sbi->ll_kset.kobj,
791                                         &sbi->ll_md_obd->obd_kset.kobj,
792                                         sbi->ll_md_obd->obd_type->typ_name);
793                 if (err < 0) {
794                         CERROR("%s: could not register %s in llite: rc = %d\n",
795                                md, sbi->ll_fsname, err);
796                         err = 0;
797                 }
798         }
799
800         RETURN(err);
801 out_root:
802         iput(root);
803 out_lock_cn_cb:
804         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
805 out_dt:
806         obd_disconnect(sbi->ll_dt_exp);
807         sbi->ll_dt_exp = NULL;
808         sbi->ll_dt_obd = NULL;
809 out_md_fid:
810         obd_fid_fini(sbi->ll_md_exp->exp_obd);
811 out_md:
812         obd_disconnect(sbi->ll_md_exp);
813         sbi->ll_md_exp = NULL;
814         sbi->ll_md_obd = NULL;
815 out:
816         if (data != NULL)
817                 OBD_FREE_PTR(data);
818         if (osfs != NULL)
819                 OBD_FREE_PTR(osfs);
820         return err;
821 }
822
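/**
 * Get the maximum supported EA size.
 *
 * Queries KEY_MAX_EASIZE first from the data (LOV) export and then from the
 * metadata (LMV) export; *lmmsize is left holding the metadata-side value,
 * and both values are printed via CDEBUG.
 *
 * \param[in] sbi       superblock info for this filesystem
 * \param[out] lmmsize  pointer to storage location for value
 *
 * \retval 0            on success
 * \retval negative     negated errno on failure
 */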
823 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
824 {
825         int size, rc;
826
827         size = sizeof(*lmmsize);
828         rc = obd_get_info(NULL, sbi->ll_dt_exp, sizeof(KEY_MAX_EASIZE),
829                           KEY_MAX_EASIZE, &size, lmmsize);
830         if (rc != 0) {
831                 CERROR("%s: cannot get max LOV EA size: rc = %d\n",
832                        sbi->ll_dt_exp->exp_obd->obd_name, rc);
833                 RETURN(rc);
834         }
835
836         CDEBUG(D_INFO, "max LOV ea size: %d\n", *lmmsize);
837
838         size = sizeof(int);
839         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
840                           KEY_MAX_EASIZE, &size, lmmsize);
841         if (rc)
842                 CERROR("Get max mdsize error rc %d\n", rc);
843
844         CDEBUG(D_INFO, "max LMV ea size: %d\n", *lmmsize);
845
846         RETURN(rc);
847 }
848
849 /**
850  * Get the value of the default_easize parameter.
851  *
852  * \see client_obd::cl_default_mds_easize
853  *
854  * \param[in] sbi       superblock info for this filesystem
855  * \param[out] lmmsize  pointer to storage location for value
856  *
857  * \retval 0            on success
858  * \retval negative     negated errno on failure
859  */
860 int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
861 {
862         int size, rc;
863
864         size = sizeof(int);
865         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
866                          KEY_DEFAULT_EASIZE, &size, lmmsize);
867         if (rc)
868                 CERROR("Get default mdsize error rc %d\n", rc);
869
870         RETURN(rc);
871 }
872
873 /**
874  * Set the default_easize parameter to the given value.
875  *
876  * \see client_obd::cl_default_mds_easize
877  *
878  * \param[in] sbi       superblock info for this filesystem
879  * \param[in] lmmsize   the size to set
880  *
881  * \retval 0            on success
882  * \retval negative     negated errno on failure
883  */
884 int ll_set_default_mdsize(struct ll_sb_info *sbi, int lmmsize)
885 {
886         int rc;
887
888         if (lmmsize < sizeof(struct lov_mds_md) ||
889             lmmsize > OBD_MAX_DEFAULT_EA_SIZE)
890                 return -EINVAL;
891
892         rc = obd_set_info_async(NULL, sbi->ll_md_exp,
893                                 sizeof(KEY_DEFAULT_EASIZE), KEY_DEFAULT_EASIZE,
894                                 sizeof(int), &lmmsize, NULL);
895
896         RETURN(rc);
897 }
898
899 static void client_common_put_super(struct super_block *sb)
900 {
901         struct ll_sb_info *sbi = ll_s2sbi(sb);
902         ENTRY;
903
904         cl_sb_fini(sb);
905
906         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
907         obd_disconnect(sbi->ll_dt_exp);
908         sbi->ll_dt_exp = NULL;
909
910         ll_debugfs_unregister_super(sb);
911
912         obd_fid_fini(sbi->ll_md_exp->exp_obd);
913         obd_disconnect(sbi->ll_md_exp);
914         sbi->ll_md_exp = NULL;
915
916         EXIT;
917 }
918
919 void ll_kill_super(struct super_block *sb)
920 {
921         struct ll_sb_info *sbi;
922         ENTRY;
923
924         /* sb not initialized? */
925         if (!(sb->s_flags & SB_ACTIVE))
926                 return;
927
928         sbi = ll_s2sbi(sb);
929         /* restore the original s_dev (changed for clustered NFS) before
930          * put_super, because newer kernels cache s_dev and changing sb->s_dev
931          * in put_super does not affect the real device removal */
932         if (sbi) {
933                 sb->s_dev = sbi->ll_sdev_orig;
934
935                 /* wait for running statahead threads to quit */
936                 while (atomic_read(&sbi->ll_sa_running) > 0)
937                         schedule_timeout_uninterruptible(
938                                 cfs_time_seconds(1) >> 3);
939         }
940
941         EXIT;
942 }
943
944 /* Since this table is used by ll_sbi_flags_seq_show, make sure
945  * that, for any token listed more than once below, the string
946  * you want displayed is listed first.  For example we want
947  * "checksum" displayed, not "nochecksum", for the
948  * sbi_flags.
949  */
950 static const match_table_t ll_sbi_flags_name = {
951         {LL_SBI_NOLCK,                  "nolock"},
952         {LL_SBI_CHECKSUM,               "checksum"},
953         {LL_SBI_CHECKSUM,               "nochecksum"},
954         {LL_SBI_LOCALFLOCK,             "localflock"},
955         {LL_SBI_FLOCK,                  "flock"},
956         {LL_SBI_FLOCK,                  "noflock"},
957         {LL_SBI_USER_XATTR,             "user_xattr"},
958         {LL_SBI_USER_XATTR,             "nouser_xattr"},
959         {LL_SBI_LRU_RESIZE,             "lruresize"},
960         {LL_SBI_LRU_RESIZE,             "nolruresize"},
961         {LL_SBI_LAZYSTATFS,             "lazystatfs"},
962         {LL_SBI_LAZYSTATFS,             "nolazystatfs"},
963         {LL_SBI_32BIT_API,              "32bitapi"},
964         {LL_SBI_USER_FID2PATH,          "user_fid2path"},
965         {LL_SBI_USER_FID2PATH,          "nouser_fid2path"},
966         {LL_SBI_VERBOSE,                "verbose"},
967         {LL_SBI_VERBOSE,                "noverbose"},
968         {LL_SBI_ALWAYS_PING,            "always_ping"},
969         {LL_SBI_TEST_DUMMY_ENCRYPTION,  "test_dummy_encryption=%s"},
970         {LL_SBI_TEST_DUMMY_ENCRYPTION,  "test_dummy_encryption"},
971         {LL_SBI_ENCRYPT,                "encrypt"},
972         {LL_SBI_ENCRYPT,                "noencrypt"},
973         {LL_SBI_FOREIGN_SYMLINK,        "foreign_symlink=%s"},
974         {LL_SBI_NUM_MOUNT_OPT,          NULL},
975
976         {LL_SBI_ACL,                    "acl"},
977         {LL_SBI_AGL_ENABLED,            "agl"},
978         {LL_SBI_64BIT_HASH,             "64bit_hash"},
979         {LL_SBI_LAYOUT_LOCK,            "layout"},
980         {LL_SBI_XATTR_CACHE,            "xattr_cache"},
981         {LL_SBI_NOROOTSQUASH,           "norootsquash"},
982         {LL_SBI_FAST_READ,              "fast_read"},
983         {LL_SBI_FILE_SECCTX,            "file_secctx"},
984         {LL_SBI_TINY_WRITE,             "tiny_write"},
985         {LL_SBI_FILE_HEAT,              "file_heat"},
986         {LL_SBI_PARALLEL_DIO,           "parallel_dio"},
987         {LL_SBI_ENCRYPT_NAME,           "name_encrypt"},
988 };
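/* In ll_sbi_flags_name above, the entries before the
 * {LL_SBI_NUM_MOUNT_OPT, NULL} terminator are the mount options recognized
 * by ll_options(); match_token() stops at the NULL pattern, so the entries
 * after it are never parsed as options and exist only so that
 * ll_sbi_flags_seq_show() can display internally managed flags.
 */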
989
990 int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
991 {
992         struct super_block *sb = m->private;
993         int i;
994
995         for (i = 0; i < LL_SBI_NUM_FLAGS; i++) {
996                 int j;
997
998                 if (!test_bit(i, ll_s2sbi(sb)->ll_flags))
999                         continue;
1000
1001                 for (j = 0; j < ARRAY_SIZE(ll_sbi_flags_name); j++) {
1002                         if (ll_sbi_flags_name[j].token == i &&
1003                             ll_sbi_flags_name[j].pattern) {
1004                                 seq_printf(m, "%s ",
1005                                            ll_sbi_flags_name[j].pattern);
1006                                 break;
1007                         }
1008                 }
1009         }
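        /* the "\b" backs over the trailing space left by the last
         * "%s " above before terminating the line */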
1010         seq_puts(m, "\b\n");
1011         return 0;
1012 }
1013
1014 /* non-client-specific mount options are parsed in lmd_parse */
1015 static int ll_options(char *options, struct super_block *sb)
1016 {
1017         struct ll_sb_info *sbi = ll_s2sbi(sb);
1018         char *s2, *s1, *opts;
1019         int err = 0;
1020
1021         ENTRY;
1022         if (!options)
1023                 RETURN(0);
1024
1025         /* Don't stomp on lmd_opts */
1026         opts = kstrdup(options, GFP_KERNEL);
1027         if (!opts)
1028                 RETURN(-ENOMEM);
1029         s1 = opts;
1030         s2 = opts;
1031
1032         CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
1033
1034         while ((s1 = strsep(&opts, ",")) != NULL) {
1035                 substring_t args[MAX_OPT_ARGS];
1036                 bool turn_off = false;
1037                 int token;
1038
1039                 if (!*s1)
1040                         continue;
1041
1042                 CDEBUG(D_SUPER, "next opt=%s\n", s1);
1043
1044                 if (strncmp(s1, "no", 2) == 0)
1045                         turn_off = true;
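                /* the "no" prefix only has an effect for tokens whose
                 * "no<name>" spelling maps to the same token in
                 * ll_sbi_flags_name (e.g. "nochecksum", "noflock") */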
1046
1047                 /*
1048                  * Initialize args struct so we know whether arg was
1049                  * found; some options take optional arguments.
1050                  */
1051                 args[0].to = NULL;
1052                 args[0].from = NULL;
1053                 token = match_token(s1, ll_sbi_flags_name, args);
1054                 if (token == LL_SBI_NUM_MOUNT_OPT) {
1055                         if (match_wildcard("context", s1) ||
1056                             match_wildcard("fscontext", s1) ||
1057                             match_wildcard("defcontext", s1) ||
1058                             match_wildcard("rootcontext", s1))
1059                                 continue;
1060
1061                         LCONSOLE_ERROR_MSG(0x152,
1062                                            "Unknown option '%s', won't mount.\n",
1063                                            s1);
1064                         RETURN(-EINVAL);
1065                 }
1066
1067                 switch (token) {
1068                 case LL_SBI_NOLCK:
1069                 case LL_SBI_32BIT_API:
1070                 case LL_SBI_64BIT_HASH:
1071                 case LL_SBI_ALWAYS_PING:
1072                         set_bit(token, sbi->ll_flags);
1073                         break;
1074
1075                 case LL_SBI_FLOCK:
1076                         clear_bit(LL_SBI_LOCALFLOCK, sbi->ll_flags);
1077                         if (turn_off)
1078                                 clear_bit(LL_SBI_FLOCK, sbi->ll_flags);
1079                         else
1080                                 set_bit(token, sbi->ll_flags);
1081                         break;
1082
1083                 case LL_SBI_LOCALFLOCK:
1084                         clear_bit(LL_SBI_FLOCK, sbi->ll_flags);
1085                         set_bit(token, sbi->ll_flags);
1086                         break;
1087
1088                 case LL_SBI_CHECKSUM:
1089                         sbi->ll_checksum_set = 1;
1090                         fallthrough;
1091                 case LL_SBI_USER_XATTR:
1092                 case LL_SBI_USER_FID2PATH:
1093                 case LL_SBI_LRU_RESIZE:
1094                 case LL_SBI_LAZYSTATFS:
1095                 case LL_SBI_VERBOSE:
1096                         if (turn_off)
1097                                 clear_bit(token, sbi->ll_flags);
1098                         else
1099                                 set_bit(token, sbi->ll_flags);
1100                         break;
1101                 case LL_SBI_TEST_DUMMY_ENCRYPTION: {
1102 #ifdef HAVE_LUSTRE_CRYPTO
1103 #ifdef HAVE_FSCRYPT_DUMMY_CONTEXT_ENABLED
1104                         set_bit(token, sbi->ll_flags);
1105 #else
1106                         struct lustre_sb_info *lsi = s2lsi(sb);
1107
1108                         err = llcrypt_set_test_dummy_encryption(sb, &args[0],
1109                                                                 &lsi->lsi_dummy_enc_ctx);
1110                         if (!err)
1111                                 break;
1112
1113                         if (err == -EEXIST)
1114                                 LCONSOLE_WARN(
1115                                          "Can't change test_dummy_encryption");
1116                         else if (err == -EINVAL)
1117                                 LCONSOLE_WARN(
1118                                         "Value of option \"%s\" unrecognized",
1119                                         options);
1120                         else
1121                                 LCONSOLE_WARN(
1122                                          "Error processing option \"%s\" [%d]",
1123                                          options, err);
1124                         err = -1;
1125 #endif
1126 #else
1127                         LCONSOLE_WARN("Test dummy encryption mount option ignored: encryption not supported\n");
1128 #endif
1129                         break;
1130                 }
1131                 case LL_SBI_ENCRYPT:
1132 #ifdef HAVE_LUSTRE_CRYPTO
1133                         if (turn_off)
1134                                 clear_bit(token, sbi->ll_flags);
1135                         else
1136                                 set_bit(token, sbi->ll_flags);
1137 #else
1138                         LCONSOLE_WARN("noencrypt or encrypt mount option ignored: encryption not supported\n");
1139 #endif
1140                         break;
1141                 case LL_SBI_FOREIGN_SYMLINK:
1142                         /* was a non-default prefix provided? */
1143                         if (args->from) {
1144                                 size_t old_len;
1145                                 char *old;
1146
1147                                 /* path must be absolute */
1148                                 if (args->from[0] != '/') {
1149                                         LCONSOLE_ERROR_MSG(0x152,
1150                                                            "foreign prefix '%s' must be an absolute path\n",
1151                                                            args->from);
1152                                         RETURN(-EINVAL);
1153                                 }
1154
1155                                 old_len = sbi->ll_foreign_symlink_prefix_size;
1156                                 old = sbi->ll_foreign_symlink_prefix;
1157                                 /* alloc for path length and '\0' */
1158                                 sbi->ll_foreign_symlink_prefix = match_strdup(args);
1159                                 if (!sbi->ll_foreign_symlink_prefix) {
1160                                         /* restore previous */
1161                                         sbi->ll_foreign_symlink_prefix = old;
1162                                         sbi->ll_foreign_symlink_prefix_size =
1163                                                 old_len;
1164                                         RETURN(-ENOMEM);
1165                                 }
1166                                 sbi->ll_foreign_symlink_prefix_size =
1167                                         args->to - args->from + 1;
1168                                 OBD_ALLOC_POST(sbi->ll_foreign_symlink_prefix,
1169                                                sbi->ll_foreign_symlink_prefix_size,
1170                                                "kmalloced");
1171                                 if (old)
1172                                         OBD_FREE(old, old_len);
1173
1174                                 /* enable foreign symlink support */
1175                                 set_bit(token, sbi->ll_flags);
1176                         } else {
1177                                 LCONSOLE_ERROR_MSG(0x152,
1178                                                    "invalid %s option\n", s1);
1179                         }
1180                 fallthrough;
1181                 default:
1182                         break;
1183                 }
1184         }
1185         kfree(opts);
1186         RETURN(err);
1187 }
1188
1189 void ll_lli_init(struct ll_inode_info *lli)
1190 {
1191         lli->lli_inode_magic = LLI_INODE_MAGIC;
1192         lli->lli_flags = 0;
1193         rwlock_init(&lli->lli_lock);
1194         lli->lli_posix_acl = NULL;
1195         /* Do not set lli_fid, it has been initialized already. */
1196         fid_zero(&lli->lli_pfid);
1197         lli->lli_mds_read_och = NULL;
1198         lli->lli_mds_write_och = NULL;
1199         lli->lli_mds_exec_och = NULL;
1200         lli->lli_open_fd_read_count = 0;
1201         lli->lli_open_fd_write_count = 0;
1202         lli->lli_open_fd_exec_count = 0;
1203         mutex_init(&lli->lli_och_mutex);
1204         spin_lock_init(&lli->lli_agl_lock);
1205         spin_lock_init(&lli->lli_layout_lock);
1206         ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
1207         lli->lli_clob = NULL;
1208
1209         init_rwsem(&lli->lli_xattrs_list_rwsem);
1210         mutex_init(&lli->lli_xattrs_enq_lock);
1211
1212         LASSERT(lli->lli_vfs_inode.i_mode != 0);
1213         if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
1214                 lli->lli_opendir_key = NULL;
1215                 lli->lli_sai = NULL;
1216                 spin_lock_init(&lli->lli_sa_lock);
1217                 lli->lli_opendir_pid = 0;
1218                 lli->lli_sa_enabled = 0;
1219                 init_rwsem(&lli->lli_lsm_sem);
1220         } else {
1221                 mutex_init(&lli->lli_size_mutex);
1222                 mutex_init(&lli->lli_setattr_mutex);
1223                 lli->lli_symlink_name = NULL;
1224                 ll_trunc_sem_init(&lli->lli_trunc_sem);
1225                 range_lock_tree_init(&lli->lli_write_tree);
1226                 init_rwsem(&lli->lli_glimpse_sem);
1227                 lli->lli_glimpse_time = ktime_set(0, 0);
1228                 INIT_LIST_HEAD(&lli->lli_agl_list);
1229                 lli->lli_agl_index = 0;
1230                 lli->lli_async_rc = 0;
1231                 spin_lock_init(&lli->lli_heat_lock);
1232                 obd_heat_clear(lli->lli_heat_instances, OBD_HEAT_COUNT);
1233                 lli->lli_heat_flags = 0;
1234                 mutex_init(&lli->lli_pcc_lock);
1235                 lli->lli_pcc_state = PCC_STATE_FL_NONE;
1236                 lli->lli_pcc_inode = NULL;
1237                 lli->lli_pcc_dsflags = PCC_DATASET_INVALID;
1238                 lli->lli_pcc_generation = 0;
1239                 mutex_init(&lli->lli_group_mutex);
1240                 lli->lli_group_users = 0;
1241                 lli->lli_group_gid = 0;
1242         }
1243         mutex_init(&lli->lli_layout_mutex);
1244         memset(lli->lli_jobid, 0, sizeof(lli->lli_jobid));
1245         /* ll_cl_context initialize */
1246         INIT_LIST_HEAD(&lli->lli_lccs);
1247 }
1248
1249 #define MAX_STRING_SIZE 128
1250
1251 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1252 #ifndef HAVE_BDI_CAP_MAP_COPY
1253 # define BDI_CAP_MAP_COPY       0
1254 #endif
1255
1256 static int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
1257 {
1258         struct  lustre_sb_info *lsi = s2lsi(sb);
1259         char buf[MAX_STRING_SIZE];
1260         va_list args;
1261         int err;
1262
1263         err = bdi_init(&lsi->lsi_bdi);
1264         if (err)
1265                 return err;
1266
1267         lsi->lsi_flags |= LSI_BDI_INITIALIZED;
1268         lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY;
1269         lsi->lsi_bdi.name = "lustre";
1270         va_start(args, fmt);
1271         vsnprintf(buf, MAX_STRING_SIZE, fmt, args);
1272         va_end(args);
1273         err = bdi_register(&lsi->lsi_bdi, NULL, "%s", buf);
1275         if (!err)
1276                 sb->s_bdi = &lsi->lsi_bdi;
1277
1278         return err;
1279 }
1280 #endif /* !HAVE_SUPER_SETUP_BDI_NAME */
1281
1282 int ll_fill_super(struct super_block *sb)
1283 {
1284         struct  lustre_profile *lprof = NULL;
1285         struct  lustre_sb_info *lsi = s2lsi(sb);
1286         struct  ll_sb_info *sbi = NULL;
1287         char    *dt = NULL, *md = NULL;
1288         char    *profilenm = get_profile_name(sb);
1289         struct config_llog_instance *cfg;
1290         /* %p for void* in printf needs 16+2 characters: 0xffffffffffffffff */
1291         const int instlen = LUSTRE_MAXINSTANCE + 2;
1292         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1293         char name[MAX_STRING_SIZE];
1294         int md_len = 0;
1295         int dt_len = 0;
1296         uuid_t uuid;
1297         char *ptr;
1298         int len;
1299         int err;
1300
1301         ENTRY;
1302         /* for ASLR, to map between cfg_instance and hashed ptr */
1303         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1304                profilenm, cfg_instance, sb);
1305
1306         OBD_RACE(OBD_FAIL_LLITE_RACE_MOUNT);
1307
1308         OBD_ALLOC_PTR(cfg);
1309         if (cfg == NULL)
1310                 GOTO(out_free_cfg, err = -ENOMEM);
1311
1312         /* client additional sb info */
1313         lsi->lsi_llsbi = sbi = ll_init_sbi();
1314         if (IS_ERR(sbi))
1315                 GOTO(out_free_cfg, err = PTR_ERR(sbi));
1316
1317         err = ll_options(lsi->lsi_lmd->lmd_opts, sb);
1318         if (err)
1319                 GOTO(out_free_cfg, err);
1320
1321         /* LSI_FILENAME_ENC is only used by embedded llcrypt */
1322 #ifdef CONFIG_LL_ENCRYPTION
1323         if (ll_sb_has_test_dummy_encryption(sb))
1324                 /* enable filename encryption by default for dummy enc mode */
1325                 lsi->lsi_flags |= LSI_FILENAME_ENC;
1326         else
1327                 /* filename encryption is disabled by default */
1328                 lsi->lsi_flags &= ~LSI_FILENAME_ENC;
1329 #endif
1330
1331         /* kernel >= 2.6.38 stores dentry operations in sb->s_d_op. */
1332         sb->s_d_op = &ll_d_ops;
1333
1334         /* UUID handling */
1335         generate_random_uuid(uuid.b);
1336         snprintf(sbi->ll_sb_uuid.uuid, sizeof(sbi->ll_sb_uuid), "%pU", uuid.b);
1337
1338         CDEBUG(D_CONFIG, "llite sb uuid: %s\n", sbi->ll_sb_uuid.uuid);
1339
1340         /* Get fsname */
1341         len = strlen(profilenm);
1342         ptr = strrchr(profilenm, '-');
1343         if (ptr && (strcmp(ptr, "-client") == 0))
1344                 len -= 7;
1345
1346         if (len > LUSTRE_MAXFSNAME) {
1347                 if (unlikely(len >= MAX_STRING_SIZE))
1348                         len = MAX_STRING_SIZE - 1;
1349                 strncpy(name, profilenm, len);
1350                 name[len] = '\0';
1351                 err = -ENAMETOOLONG;
1352                 CERROR("%s: fsname longer than %u characters: rc = %d\n",
1353                        name, LUSTRE_MAXFSNAME, err);
1354                 GOTO(out_free_cfg, err);
1355         }
1356         strncpy(sbi->ll_fsname, profilenm, len);
1357         sbi->ll_fsname[len] = '\0';
1358
1359         /* Mount info */
1360         snprintf(name, sizeof(name), "%.*s-%016lx", len,
1361                  profilenm, cfg_instance);
1362
1363         err = super_setup_bdi_name(sb, "%s", name);
1364         if (err)
1365                 GOTO(out_free_cfg, err);
1366
1367         /* disable kernel readahead */
1368         sb->s_bdi->ra_pages = 0;
1369 #ifdef HAVE_BDI_IO_PAGES
1370         sb->s_bdi->io_pages = 0;
1371 #endif
1372
1373         /* Call ll_debugfs_register_super() before lustre_process_log()
1374          * so that "llite.*.*" params can be processed correctly.
1375          */
1376         err = ll_debugfs_register_super(sb, name);
1377         if (err < 0) {
1378                 CERROR("%s: could not register mountpoint in llite: rc = %d\n",
1379                        sbi->ll_fsname, err);
1380                 err = 0;
1381         }
1382
1383         /* The cfg_instance is a value unique to this super, in case some
1384          * joker tries to mount the same fs at two mount points.
1385          */
1386         cfg->cfg_instance = cfg_instance;
1387         cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
1388         cfg->cfg_callback = class_config_llog_handler;
1389         cfg->cfg_sub_clds = CONFIG_SUB_CLIENT;
1390         /* set up client obds */
1391         err = lustre_process_log(sb, profilenm, cfg);
1392         if (err < 0)
1393                 GOTO(out_debugfs, err);
1394
1395         /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
1396         lprof = class_get_profile(profilenm);
1397         if (lprof == NULL) {
1398                 LCONSOLE_ERROR_MSG(0x156,
1399                                    "The client profile '%s' could not be read from the MGS.  Does that filesystem exist?\n",
1400                                    profilenm);
1401                 GOTO(out_debugfs, err = -EINVAL);
1402         }
1403         CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
1404                lprof->lp_md, lprof->lp_dt);
1405
1406         dt_len = strlen(lprof->lp_dt) + instlen + 2;
1407         OBD_ALLOC(dt, dt_len);
1408         if (!dt)
1409                 GOTO(out_profile, err = -ENOMEM);
1410         snprintf(dt, dt_len - 1, "%s-%016lx", lprof->lp_dt, cfg_instance);
1411
1412         md_len = strlen(lprof->lp_md) + instlen + 2;
1413         OBD_ALLOC(md, md_len);
1414         if (!md)
1415                 GOTO(out_free_dt, err = -ENOMEM);
1416         snprintf(md, md_len - 1, "%s-%016lx", lprof->lp_md, cfg_instance);
1417
1418         /* connections, registrations, sb setup */
1419         err = client_common_fill_super(sb, md, dt);
1420         if (err < 0)
1421                 GOTO(out_free_md, err);
1422
1423         sbi->ll_client_common_fill_super_succeeded = 1;
1424
1425 out_free_md:
1426         if (md)
1427                 OBD_FREE(md, md_len);
1428 out_free_dt:
1429         if (dt)
1430                 OBD_FREE(dt, dt_len);
1431 out_profile:
1432         if (lprof)
1433                 class_put_profile(lprof);
1434 out_debugfs:
1435         if (err < 0)
1436                 ll_debugfs_unregister_super(sb);
1437 out_free_cfg:
1438         if (cfg)
1439                 OBD_FREE_PTR(cfg);
1440
1441         if (err)
1442                 ll_put_super(sb);
1443         else if (test_bit(LL_SBI_VERBOSE, sbi->ll_flags))
1444                 LCONSOLE_WARN("Mounted %s%s\n", profilenm,
1445                               sb->s_flags & SB_RDONLY ? " read-only" : "");
1446         RETURN(err);
1447 } /* ll_fill_super */
1448
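/**
 * Tear down the client superblock at umount time.
 *
 * Ends the config and params llogs, waits for unstable pages to be committed
 * (unless the OBD is being force-cleaned), undoes client_common_fill_super(),
 * and manually cleans up all OBD devices that were set up for this superblock
 * before freeing the ll_sb_info.
 */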
1449 void ll_put_super(struct super_block *sb)
1450 {
1451         struct config_llog_instance cfg, params_cfg;
1452         struct obd_device *obd;
1453         struct lustre_sb_info *lsi = s2lsi(sb);
1454         struct ll_sb_info *sbi = ll_s2sbi(sb);
1455         char *profilenm = get_profile_name(sb);
1456         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1457         long ccc_count;
1458         int next, force = 1, rc = 0;
1459         ENTRY;
1460
1461         if (IS_ERR(sbi))
1462                 GOTO(out_no_sbi, 0);
1463
1464         /* Should replace instance_id with something better for ASLR */
1465         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1466                profilenm, cfg_instance, sb);
1467
1468         cfg.cfg_instance = cfg_instance;
1469         lustre_end_log(sb, profilenm, &cfg);
1470
1471         params_cfg.cfg_instance = cfg_instance;
1472         lustre_end_log(sb, PARAMS_FILENAME, &params_cfg);
1473
1474         if (sbi->ll_md_exp) {
1475                 obd = class_exp2obd(sbi->ll_md_exp);
1476                 if (obd)
1477                         force = obd->obd_force;
1478         }
1479
1480         /* Wait for unstable pages to be committed to stable storage */
1481         if (force == 0) {
1482                 rc = l_wait_event_abortable(
1483                         sbi->ll_cache->ccc_unstable_waitq,
1484                         atomic_long_read(&sbi->ll_cache->ccc_unstable_nr) == 0);
1485         }
1486
1487         ccc_count = atomic_long_read(&sbi->ll_cache->ccc_unstable_nr);
1488         if (force == 0 && rc != -ERESTARTSYS)
1489                 LASSERTF(ccc_count == 0, "count: %li\n", ccc_count);
1490
1491         /* We need to set force before the lov_disconnect in
1492          * lustre_common_put_super, since lov_disconnect cleans up the OSCs as well.
1493          */
1494         if (force) {
1495                 next = 0;
1496                 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
1497                                                      &next)) != NULL) {
1498                         obd->obd_force = force;
1499                 }
1500         }
1501
1502         if (sbi->ll_client_common_fill_super_succeeded) {
1503                 /* Only if client_common_fill_super succeeded */
1504                 client_common_put_super(sb);
1505         }
1506
1507         /* imitate failed cleanup */
1508         if (OBD_FAIL_CHECK(OBD_FAIL_OBD_CLEANUP))
1509                 goto skip_cleanup;
1510
1511         next = 0;
1512         while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)))
1513                 class_manual_cleanup(obd);
1514
1515 skip_cleanup:
1516         if (test_bit(LL_SBI_VERBOSE, sbi->ll_flags))
1517                 LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");
1518
1519         if (profilenm)
1520                 class_del_profile(profilenm);
1521
1522 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1523         if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
1524                 bdi_destroy(&lsi->lsi_bdi);
1525                 lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
1526         }
1527 #endif
1528
1529         llcrypt_free_dummy_context(&lsi->lsi_dummy_enc_ctx);
1530         ll_free_sbi(sb);
1531         lsi->lsi_llsbi = NULL;
1532 out_no_sbi:
1533         lustre_common_put_super(sb);
1534
1535         cl_env_cache_purge(~0);
1536
1537         EXIT;
1538 } /* ll_put_super */
1539
1540 struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
1541 {
1542         struct inode *inode = NULL;
1543
1544         /* NOTE: we depend on atomic igrab() -bzzz */
1545         lock_res_and_lock(lock);
1546         if (lock->l_resource->lr_lvb_inode) {
1547                 struct ll_inode_info *lli;
1548                 lli = ll_i2info(lock->l_resource->lr_lvb_inode);
1549                 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1550                         inode = igrab(lock->l_resource->lr_lvb_inode);
1551                 } else {
1552                         inode = lock->l_resource->lr_lvb_inode;
1553                         LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ?  D_INFO :
1554                                          D_WARNING, lock, "lr_lvb_inode %p is "
1555                                          "bogus: magic %08x",
1556                                          lock->l_resource->lr_lvb_inode,
1557                                          lli->lli_inode_magic);
1558                         inode = NULL;
1559                 }
1560         }
1561         unlock_res_and_lock(lock);
1562         return inode;
1563 }
1564
1565 void ll_dir_clear_lsm_md(struct inode *inode)
1566 {
1567         struct ll_inode_info *lli = ll_i2info(inode);
1568
1569         LASSERT(S_ISDIR(inode->i_mode));
1570
1571         if (lli->lli_lsm_md) {
1572                 lmv_free_memmd(lli->lli_lsm_md);
1573                 lli->lli_lsm_md = NULL;
1574         }
1575
1576         if (lli->lli_default_lsm_md) {
1577                 lmv_free_memmd(lli->lli_default_lsm_md);
1578                 lli->lli_default_lsm_md = NULL;
1579         }
1580 }
1581
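/**
 * Get (or create) the anonymous inode backing one stripe of a striped
 * directory.  The slave inode is looked up by the stripe FID; when newly
 * created it is set up as a directory and its lli_pfid records the FID of
 * the master (striped) directory object.
 */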
1582 static struct inode *ll_iget_anon_dir(struct super_block *sb,
1583                                       const struct lu_fid *fid,
1584                                       struct lustre_md *md)
1585 {
1586         struct ll_sb_info *sbi = ll_s2sbi(sb);
1587         struct ll_inode_info *lli;
1588         struct mdt_body *body = md->body;
1589         struct inode *inode;
1590         ino_t ino;
1591
1592         ENTRY;
1593
1594         LASSERT(md->lmv);
1595         ino = cl_fid_build_ino(fid, test_bit(LL_SBI_32BIT_API, sbi->ll_flags));
1596         inode = iget_locked(sb, ino);
1597         if (inode == NULL) {
1598                 CERROR("%s: failed to get simple inode "DFID": rc = -ENOENT\n",
1599                        sbi->ll_fsname, PFID(fid));
1600                 RETURN(ERR_PTR(-ENOENT));
1601         }
1602
1603         lli = ll_i2info(inode);
1604         if (inode->i_state & I_NEW) {
1605                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
1606                                 (body->mbo_mode & S_IFMT);
1607                 LASSERTF(S_ISDIR(inode->i_mode), "Not slave inode "DFID"\n",
1608                          PFID(fid));
1609
1610                 inode->i_mtime.tv_sec = 0;
1611                 inode->i_atime.tv_sec = 0;
1612                 inode->i_ctime.tv_sec = 0;
1613                 inode->i_rdev = 0;
1614
1615 #ifdef HAVE_BACKING_DEV_INFO
1616                 /* initialize the backing dev info. */
1617                 inode->i_mapping->backing_dev_info =
1618                                                 &s2lsi(inode->i_sb)->lsi_bdi;
1619 #endif
1620                 inode->i_op = &ll_dir_inode_operations;
1621                 inode->i_fop = &ll_dir_operations;
1622                 lli->lli_fid = *fid;
1623                 ll_lli_init(lli);
1624
1625                 /* master object FID */
1626                 lli->lli_pfid = body->mbo_fid1;
1627                 CDEBUG(D_INODE, "lli %p slave "DFID" master "DFID"\n",
1628                        lli, PFID(fid), PFID(&lli->lli_pfid));
1629                 unlock_new_inode(inode);
1630         } else {
1631                 /* in directory restripe/auto-split, a plain directory may be
1632                  * transformed into a stripe; set its pfid here, otherwise
1633                  * ll_lock_cancel_bits() can't find the master inode.
1634                  */
1635                 lli->lli_pfid = body->mbo_fid1;
1636         }
1637
1638         RETURN(inode);
1639 }
1640
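/**
 * Attach the directory stripe layout (lsm) from \a md to \a inode.  For a
 * striped directory each valid stripe FID gets a slave inode instantiated
 * via ll_iget_anon_dir(); on failure the already-instantiated slave inodes
 * are released again.
 */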
1641 static int ll_init_lsm_md(struct inode *inode, struct lustre_md *md)
1642 {
1643         struct lu_fid *fid;
1644         struct lmv_stripe_md *lsm = md->lmv;
1645         struct ll_inode_info *lli = ll_i2info(inode);
1646         int i;
1647
1648         LASSERT(lsm != NULL);
1649
1650         CDEBUG(D_INODE, "%s: "DFID" set dir layout:\n",
1651                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1652         lsm_md_dump(D_INODE, lsm);
1653
1654         if (!lmv_dir_striped(lsm))
1655                 goto out;
1656
1657         /* XXX sigh, this lsm_root initialization should be in the
1658          * LMV layer, but it needs ll_iget(), so keep it here
1659          * for now. */
1660         for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
1661                 fid = &lsm->lsm_md_oinfo[i].lmo_fid;
1662                 LASSERT(lsm->lsm_md_oinfo[i].lmo_root == NULL);
1663
1664                 if (!fid_is_sane(fid))
1665                         continue;
1666
1667                 /* Unfortunately ll_iget() will call ll_update_inode(),
1668                  * where the initialization of a slave inode is slightly
1669                  * different, so it resets lsm_md to NULL to avoid
1670                  * initializing the lsm for the slave inode. */
1671                 lsm->lsm_md_oinfo[i].lmo_root =
1672                                 ll_iget_anon_dir(inode->i_sb, fid, md);
1673                 if (IS_ERR(lsm->lsm_md_oinfo[i].lmo_root)) {
1674                         int rc = PTR_ERR(lsm->lsm_md_oinfo[i].lmo_root);
1675
1676                         lsm->lsm_md_oinfo[i].lmo_root = NULL;
1677                         while (i-- > 0) {
1678                                 iput(lsm->lsm_md_oinfo[i].lmo_root);
1679                                 lsm->lsm_md_oinfo[i].lmo_root = NULL;
1680                         }
1681                         return rc;
1682                 }
1683         }
1684 out:
1685         lli->lli_lsm_md = lsm;
1686
1687         return 0;
1688 }
1689
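/**
 * Update the cached default (inherited) LMV layout of a directory.  If \a md
 * carries no default LMV, the cached one is dropped; otherwise the layout is
 * first compared under the read lock and only swapped in under the write
 * lock when it actually changed (ownership of md->default_lmv is transferred
 * to the inode).
 */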
1690 static void ll_update_default_lsm_md(struct inode *inode, struct lustre_md *md)
1691 {
1692         struct ll_inode_info *lli = ll_i2info(inode);
1693
1694         ENTRY;
1695
1696         if (!md->default_lmv) {
1697                 /* clear default lsm */
1698                 if (lli->lli_default_lsm_md) {
1699                         down_write(&lli->lli_lsm_sem);
1700                         if (lli->lli_default_lsm_md) {
1701                                 lmv_free_memmd(lli->lli_default_lsm_md);
1702                                 lli->lli_default_lsm_md = NULL;
1703                         }
1704                         lli->lli_inherit_depth = 0;
1705                         up_write(&lli->lli_lsm_sem);
1706                 }
1707                 RETURN_EXIT;
1708         }
1709
1710         if (lli->lli_default_lsm_md) {
1711                 /* do nothing if the default lsm hasn't changed */
1712                 down_read(&lli->lli_lsm_sem);
1713                 if (lli->lli_default_lsm_md &&
1714                     lsm_md_eq(lli->lli_default_lsm_md, md->default_lmv)) {
1715                         up_read(&lli->lli_lsm_sem);
1716                         RETURN_EXIT;
1717                 }
1718                 up_read(&lli->lli_lsm_sem);
1719         }
1720
1721         down_write(&lli->lli_lsm_sem);
1722         if (lli->lli_default_lsm_md)
1723                 lmv_free_memmd(lli->lli_default_lsm_md);
1724         lli->lli_default_lsm_md = md->default_lmv;
1725         lsm_md_dump(D_INODE, md->default_lmv);
1726         md->default_lmv = NULL;
1727         up_write(&lli->lli_lsm_sem);
1728         RETURN_EXIT;
1729 }
1730
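/**
 * Update the stripe layout of directory \a inode from \a md.
 *
 * The common case (layout unchanged) is handled under the read lock only.
 * For striped directories a layout that differs must carry a higher
 * lsm_md_layout_version (dir split/merge, lfsck); otherwise -EINVAL is
 * returned.  After installing a new layout the directory attributes in
 * md->body are refreshed from the stripes via md_merge_attr().
 */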
1731 static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
1732 {
1733         struct ll_inode_info *lli = ll_i2info(inode);
1734         struct lmv_stripe_md *lsm = md->lmv;
1735         struct cl_attr  *attr;
1736         int rc = 0;
1737
1738         ENTRY;
1739
1740         LASSERT(S_ISDIR(inode->i_mode));
1741         CDEBUG(D_INODE, "update lsm %p of "DFID"\n", lli->lli_lsm_md,
1742                PFID(ll_inode2fid(inode)));
1743
1744         /* update default LMV */
1745         if (md->default_lmv)
1746                 ll_update_default_lsm_md(inode, md);
1747
1748         /* after dir migration/restripe, a stripe may be turned into a
1749          * directory; in this case, zero out its lli_pfid.
1750          */
1751         if (unlikely(fid_is_norm(&lli->lli_pfid)))
1752                 fid_zero(&lli->lli_pfid);
1753
1754         /*
1755          * no stripe information from the request: the lustre_md from the
1756          * request does not include the stripe EA, see ll_md_setattr()
1757          */
1758         if (!lsm)
1759                 RETURN(0);
1760
1761         /*
1762          * normally the dir layout doesn't change, so only take the read lock
1763          * to check that, to avoid blocking other MD operations.
1764          */
1765         down_read(&lli->lli_lsm_sem);
1766
1767         /* a concurrent lookup already initialized the lsm, and it is unchanged */
1768         if (lli->lli_lsm_md && lsm_md_eq(lli->lli_lsm_md, lsm))
1769                 GOTO(unlock, rc = 0);
1770
1771         /* if the dir layout doesn't match, check whether the layout version
1772          * has increased, which means the layout changed; this happens in dir
1773          * split/merge and lfsck.
1774          *
1775          * A foreign LMV should not change.
1776          */
1777         if (lli->lli_lsm_md && lmv_dir_striped(lli->lli_lsm_md) &&
1778             lsm->lsm_md_layout_version <=
1779             lli->lli_lsm_md->lsm_md_layout_version) {
1780                 CERROR("%s: "DFID" dir layout mismatch:\n",
1781                        ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1782                 lsm_md_dump(D_ERROR, lli->lli_lsm_md);
1783                 lsm_md_dump(D_ERROR, lsm);
1784                 GOTO(unlock, rc = -EINVAL);
1785         }
1786
1787         up_read(&lli->lli_lsm_sem);
1788         down_write(&lli->lli_lsm_sem);
1789         /* clear existing lsm */
1790         if (lli->lli_lsm_md) {
1791                 lmv_free_memmd(lli->lli_lsm_md);
1792                 lli->lli_lsm_md = NULL;
1793         }
1794
1795         rc = ll_init_lsm_md(inode, md);
1796         if (rc) {
1797                 up_write(&lli->lli_lsm_sem);
1798                 RETURN(rc);
1799         }
1800
1801         /* md_merge_attr() may take a long time; since the lsm is already set,
1802          * switch to a read lock.
1803          */
1804         downgrade_write(&lli->lli_lsm_sem);
1805
1806         /* set md->lmv to NULL so that the following lustre_md free will not free
1807          * this lsm.
1808          */
1809         md->lmv = NULL;
1810
1811         if (!lmv_dir_striped(lli->lli_lsm_md))
1812                 GOTO(unlock, rc = 0);
1813
1814         OBD_ALLOC_PTR(attr);
1815         if (!attr)
1816                 GOTO(unlock, rc = -ENOMEM);
1817
1818         /* validate the lsm */
1819         rc = md_merge_attr(ll_i2mdexp(inode), lli->lli_lsm_md, attr,
1820                            ll_md_blocking_ast);
1821         if (!rc) {
1822                 if (md->body->mbo_valid & OBD_MD_FLNLINK)
1823                         md->body->mbo_nlink = attr->cat_nlink;
1824                 if (md->body->mbo_valid & OBD_MD_FLSIZE)
1825                         md->body->mbo_size = attr->cat_size;
1826                 if (md->body->mbo_valid & OBD_MD_FLATIME)
1827                         md->body->mbo_atime = attr->cat_atime;
1828                 if (md->body->mbo_valid & OBD_MD_FLCTIME)
1829                         md->body->mbo_ctime = attr->cat_ctime;
1830                 if (md->body->mbo_valid & OBD_MD_FLMTIME)
1831                         md->body->mbo_mtime = attr->cat_mtime;
1832         }
1833
1834         OBD_FREE_PTR(attr);
1835         GOTO(unlock, rc);
1836 unlock:
1837         up_read(&lli->lli_lsm_sem);
1838
1839         return rc;
1840 }
1841
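/**
 * Final per-inode cleanup when the VFS evicts an inode: close any cached MDS
 * open handles, free the symlink name, xattr cache and ACLs, drop the
 * directory stripe layout, and finish the cl_object state.
 */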
1842 void ll_clear_inode(struct inode *inode)
1843 {
1844         struct ll_inode_info *lli = ll_i2info(inode);
1845         struct ll_sb_info *sbi = ll_i2sbi(inode);
1846
1847         ENTRY;
1848
1849         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
1850                PFID(ll_inode2fid(inode)), inode);
1851
1852         if (S_ISDIR(inode->i_mode)) {
1853                 /* these should have been cleared in ll_file_release */
1854                 LASSERT(lli->lli_opendir_key == NULL);
1855                 LASSERT(lli->lli_sai == NULL);
1856                 LASSERT(lli->lli_opendir_pid == 0);
1857         } else {
1858                 pcc_inode_free(inode);
1859         }
1860
1861         md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
1862
1863         LASSERT(!lli->lli_open_fd_write_count);
1864         LASSERT(!lli->lli_open_fd_read_count);
1865         LASSERT(!lli->lli_open_fd_exec_count);
1866
1867         if (lli->lli_mds_write_och)
1868                 ll_md_real_close(inode, FMODE_WRITE);
1869         if (lli->lli_mds_exec_och)
1870                 ll_md_real_close(inode, FMODE_EXEC);
1871         if (lli->lli_mds_read_och)
1872                 ll_md_real_close(inode, FMODE_READ);
1873
1874         if (S_ISLNK(inode->i_mode) && lli->lli_symlink_name) {
1875                 OBD_FREE(lli->lli_symlink_name,
1876                          strlen(lli->lli_symlink_name) + 1);
1877                 lli->lli_symlink_name = NULL;
1878         }
1879
1880         ll_xattr_cache_destroy(inode);
1881
1882         forget_all_cached_acls(inode);
1883         lli_clear_acl(lli);
1884         lli->lli_inode_magic = LLI_INODE_DEAD;
1885
1886         if (S_ISDIR(inode->i_mode))
1887                 ll_dir_clear_lsm_md(inode);
1888         else if (S_ISREG(inode->i_mode) && !is_bad_inode(inode))
1889                 LASSERT(list_empty(&lli->lli_agl_list));
1890
1891         /*
1892          * XXX This has to be done before lsm is freed below, because
1893          * cl_object still uses inode lsm.
1894          */
1895         cl_inode_fini(inode);
1896
1897         llcrypt_put_encryption_info(inode);
1898
1899         EXIT;
1900 }
1901
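/**
 * Send the attribute update to the MDS and apply the reply to the local
 * inode.  A chgrp on a regular file also passes the current block count so
 * the MDS can reserve quota for the whole file.  -ENOENT from the MDS is
 * tolerated for unlinked special files, where the change is applied locally
 * instead.
 */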
1902 static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data)
1903 {
1904         struct lustre_md md;
1905         struct inode *inode = dentry->d_inode;
1906         struct ll_sb_info *sbi = ll_i2sbi(inode);
1907         struct ptlrpc_request *request = NULL;
1908         int rc, ia_valid;
1909
1910         ENTRY;
1911
1912         op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
1913                                      LUSTRE_OPC_ANY, NULL);
1914         if (IS_ERR(op_data))
1915                 RETURN(PTR_ERR(op_data));
1916
1917         /* If this is a chgrp of a regular file, we want to reserve enough
1918          * quota to cover the entire file size.
1919          */
1920         if (S_ISREG(inode->i_mode) && op_data->op_attr.ia_valid & ATTR_GID &&
1921             from_kgid(&init_user_ns, op_data->op_attr.ia_gid) !=
1922             from_kgid(&init_user_ns, inode->i_gid)) {
1923                 op_data->op_xvalid |= OP_XVALID_BLOCKS;
1924                 op_data->op_attr_blocks = inode->i_blocks;
1925         }
1926
1927
1928         rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &request);
1929         if (rc) {
1930                 ptlrpc_req_finished(request);
1931                 if (rc == -ENOENT) {
1932                         clear_nlink(inode);
1933                         /* Unlinked special device node? Or just a race?
1934                          * Pretend we have done everything. */
1935                         if (!S_ISREG(inode->i_mode) &&
1936                             !S_ISDIR(inode->i_mode)) {
1937                                 ia_valid = op_data->op_attr.ia_valid;
1938                                 op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
1939                                 rc = simple_setattr(&init_user_ns, dentry,
1940                                                     &op_data->op_attr);
1941                                 op_data->op_attr.ia_valid = ia_valid;
1942                         }
1943                 } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
1944                         CERROR("md_setattr fails: rc = %d\n", rc);
1945                 }
1946                 RETURN(rc);
1947         }
1948
1949         rc = md_get_lustre_md(sbi->ll_md_exp, &request->rq_pill, sbi->ll_dt_exp,
1950                               sbi->ll_md_exp, &md);
1951         if (rc) {
1952                 ptlrpc_req_finished(request);
1953                 RETURN(rc);
1954         }
1955
1956         ia_valid = op_data->op_attr.ia_valid;
1957         /* the inode size will be set in ll_setattr_ost(); we can't do it now
1958          * since the dirty cache is not cleared yet. */
1959         op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
1960         if (S_ISREG(inode->i_mode))
1961                 inode_lock(inode);
1962         rc = simple_setattr(&init_user_ns, dentry, &op_data->op_attr);
1963         if (S_ISREG(inode->i_mode))
1964                 inode_unlock(inode);
1965         op_data->op_attr.ia_valid = ia_valid;
1966
1967         rc = ll_update_inode(inode, &md);
1968         ptlrpc_req_finished(request);
1969
1970         RETURN(rc);
1971 }
1972
1973 /**
1974  * Zero portion of page that is part of @inode.
1975  * This implies, if necessary:
1976  * - taking cl_lock on range corresponding to concerned page
1977  * - grabbing vm page
1978  * - associating cl_page
1979  * - proceeding to clio read
1980  * - zeroing range in page
1981  * - proceeding to cl_page flush
1982  * - releasing cl_lock
1983  *
1984  * \param[in] inode     inode
1985  * \param[in] index     page index
1986  * \param[in] offset    offset in page to start zero from
1987  * \param[in] len       len to zero
1988  *
1989  * \retval 0            on success
1990  * \retval negative     errno on failure
1991  */
1992 int ll_io_zero_page(struct inode *inode, pgoff_t index, pgoff_t offset,
1993                     unsigned len)
1994 {
1995         struct ll_inode_info *lli = ll_i2info(inode);
1996         struct cl_object *clob = lli->lli_clob;
1997         __u16 refcheck;
1998         struct lu_env *env = NULL;
1999         struct cl_io *io = NULL;
2000         struct cl_page *clpage = NULL;
2001         struct page *vmpage = NULL;
2002         unsigned from = index << PAGE_SHIFT;
2003         struct cl_lock *lock = NULL;
2004         struct cl_lock_descr *descr = NULL;
2005         struct cl_2queue *queue = NULL;
2006         struct cl_sync_io *anchor = NULL;
2007         bool holdinglock = false;
2008         int rc;
2009
2010         ENTRY;
2011
2012         env = cl_env_get(&refcheck);
2013         if (IS_ERR(env))
2014                 RETURN(PTR_ERR(env));
2015
2016         io = vvp_env_thread_io(env);
2017         io->ci_obj = clob;
2018         rc = cl_io_rw_init(env, io, CIT_WRITE, from, PAGE_SIZE);
2019         if (rc)
2020                 GOTO(putenv, rc);
2021
2022         lock = vvp_env_lock(env);
2023         descr = &lock->cll_descr;
2024         descr->cld_obj   = io->ci_obj;
2025         descr->cld_start = cl_index(io->ci_obj, from);
2026         descr->cld_end   = cl_index(io->ci_obj, from + PAGE_SIZE - 1);
2027         descr->cld_mode  = CLM_WRITE;
2028         descr->cld_enq_flags = CEF_MUST | CEF_NONBLOCK;
2029
2030         /* request lock for page */
2031         rc = cl_lock_request(env, io, lock);
2032         /* -ECANCELED indicates a matching lock with a different extent
2033          * was already present, and -EEXIST indicates a matching lock
2034          * on exactly the same extent was already present.
2035          * In both cases it means we are covered.
2036          */
2037         if (rc == -ECANCELED || rc == -EEXIST)
2038                 rc = 0;
2039         else if (rc < 0)
2040                 GOTO(iofini, rc);
2041         else
2042                 holdinglock = true;
2043
2044         /* grab page */
2045         vmpage = grab_cache_page_nowait(inode->i_mapping, index);
2046         if (vmpage == NULL)
2047                 GOTO(rellock, rc = -EOPNOTSUPP);
2048
2049         if (!PageDirty(vmpage)) {
2050                 /* associate cl_page */
2051                 clpage = cl_page_find(env, clob, vmpage->index,
2052                                       vmpage, CPT_CACHEABLE);
2053                 if (IS_ERR(clpage))
2054                         GOTO(pagefini, rc = PTR_ERR(clpage));
2055
2056                 cl_page_assume(env, io, clpage);
2057         }
2058
2059         if (!PageUptodate(vmpage) && !PageDirty(vmpage) &&
2060             !PageWriteback(vmpage)) {
2061                 /* read page */
2062                 /* Set PagePrivate2 to detect special case of empty page
2063                  * in osc_brw_fini_request().
2064                  * It is also used to tell ll_io_read_page() that we do not
2065                  * want the vmpage to be unlocked.
2066                  */
2067                 SetPagePrivate2(vmpage);
2068                 rc = ll_io_read_page(env, io, clpage, NULL);
2069                 if (!PagePrivate2(vmpage)) {
2070                         /* PagePrivate2 was cleared in osc_brw_fini_request()
2071                          * meaning we read an empty page. In this case, in order
2072                          * to avoid allocating unnecessary block in truncated
2073                          * file, we must not zero and write as below. Subsequent
2074                          * server-side truncate will handle things correctly.
2075                          */
2076                         cl_page_unassume(env, io, clpage);
2077                         GOTO(clpfini, rc = 0);
2078                 }
2079                 ClearPagePrivate2(vmpage);
2080                 if (rc)
2081                         GOTO(clpfini, rc);
2082         }
2083
2084         /* Thanks to PagePrivate2 flag, ll_io_read_page() did not unlock
2085          * the vmpage, so we are good to proceed and zero range in page.
2086          */
2087         zero_user(vmpage, offset, len);
2088
2089         if (holdinglock && clpage) {
2090                 /* explicitly write newly modified page */
2091                 queue = &io->ci_queue;
2092                 cl_2queue_init(queue);
2093                 anchor = &vvp_env_info(env)->vti_anchor;
2094                 cl_sync_io_init(anchor, 1);
2095                 clpage->cp_sync_io = anchor;
2096                 cl_page_list_add(&queue->c2_qin, clpage, true);
2097                 rc = cl_io_submit_rw(env, io, CRT_WRITE, queue);
2098                 if (rc)
2099                         GOTO(queuefini1, rc);
2100                 rc = cl_sync_io_wait(env, anchor, 0);
2101                 if (rc)
2102                         GOTO(queuefini2, rc);
2103                 cl_page_assume(env, io, clpage);
2104
2105 queuefini2:
2106                 cl_2queue_discard(env, io, queue);
2107 queuefini1:
2108                 cl_2queue_disown(env, queue);
2109                 cl_2queue_fini(env, queue);
2110         }
2111
2112 clpfini:
2113         if (clpage)
2114                 cl_page_put(env, clpage);
2115 pagefini:
2116         unlock_page(vmpage);
2117         put_page(vmpage);
2118 rellock:
2119         if (holdinglock)
2120                 cl_lock_release(env, lock);
2121 iofini:
2122         cl_io_fini(env, io);
2123 putenv:
2124         if (env)
2125                 cl_env_put(env, &refcheck);
2126
2127         RETURN(rc);
2128 }
2129
2130 /**
2131  * Get reference file from volatile file name.
2132  * Volatile file name may look like:
2133  * <parent>/LUSTRE_VOLATILE_HDR:<mdt_index>:<random>:fd=<fd>
2134  * where fd is an open descriptor of the reference file.
2135  *
2136  * \param[in] volatile_name     volatile file name
2137  * \param[in] volatile_len      volatile file name length
2138  * \param[out] ref_file         pointer to struct file of reference file
2139  *
2140  * \retval 0            on success
2141  * \retval negative     errno on failure
2142  */
2143 int volatile_ref_file(const char *volatile_name, int volatile_len,
2144                       struct file **ref_file)
2145 {
2146         char *p, *q, *fd_str;
2147         int fd, rc;
2148
2149         p = strnstr(volatile_name, ":fd=", volatile_len);
2150         if (!p || strlen(p + 4) == 0)
2151                 return -EINVAL;
2152
2153         q = strchrnul(p + 4, ':');
2154         fd_str = kstrndup(p + 4, q - p - 4, GFP_NOFS);
2155         if (!fd_str)
2156                 return -ENOMEM;
2157         rc = kstrtoint(fd_str, 10, &fd);
2158         kfree(fd_str);
2159         if (rc)
2160                 return -EINVAL;
2161
2162         *ref_file = fget(fd);
2163         if (!(*ref_file))
2164                 return -EINVAL;
2165         return 0;
2166 }
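/*
 * Illustrative example (values are made up): for a volatile file named
 * "<parent>/LUSTRE_VOLATILE_HDR:0001:89abcdef:fd=42" the substring after
 * ":fd=" is parsed as the decimal descriptor 42, and *ref_file is set to the
 * struct file returned by fget(42).
 */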
2167
2168 /* If this inode has objects allocated to it (lsm != NULL), then the OST
2169  * object(s) determine the file size and mtime.  Otherwise, the MDS will
2170  * keep these values until such a time that objects are allocated for it.
2171  * We do the MDS operations first, as it checks permissions for us.
2172  * We don't do the MDS RPC if there is nothing that we want to store there,
2173  * but otherwise there is no harm in updating mtime/atime on the MDS if we
2174  * are going to do an RPC anyway.
2175  *
2176  * If we are doing a truncate, we will send the mtime and ctime updates
2177  * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
2178  * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
2179  * at the same time.
2180  *
2181  * In case of HSM import, we only set attr on the MDS.
2182  */
2183 int ll_setattr_raw(struct dentry *dentry, struct iattr *attr,
2184                    enum op_xvalid xvalid, bool hsm_import)
2185 {
2186         struct inode *inode = dentry->d_inode;
2187         struct ll_inode_info *lli = ll_i2info(inode);
2188         struct md_op_data *op_data = NULL;
2189         ktime_t kstart = ktime_get();
2190         int rc = 0;
2191
2192         ENTRY;
2193
2194         CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, "
2195                "valid %x, hsm_import %d\n",
2196                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid),
2197                inode, i_size_read(inode), attr->ia_size, attr->ia_valid,
2198                hsm_import);
2199
2200         if (attr->ia_valid & ATTR_SIZE) {
2201                 /* Check new size against VFS/VM file size limit and rlimit */
2202                 rc = inode_newsize_ok(inode, attr->ia_size);
2203                 if (rc)
2204                         RETURN(rc);
2205
2206                 /* The maximum Lustre file size is variable, based on the
2207                  * OST maximum object size and number of stripes.  This
2208                  * needs another check in addition to the VFS check above. */
2209                 if (attr->ia_size > ll_file_maxbytes(inode)) {
2210                         CDEBUG(D_INODE, "file "DFID" too large %llu > %llu\n",
2211                                PFID(&lli->lli_fid), attr->ia_size,
2212                                ll_file_maxbytes(inode));
2213                         RETURN(-EFBIG);
2214                 }
2215
2216                 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
2217         }
2218
2219         /* POSIX: permission check before ATTR_*TIME_SET is honoured (from inode_change_ok) */
2220         if (attr->ia_valid & TIMES_SET_FLAGS) {
2221                 if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
2222                     !capable(CAP_FOWNER))
2223                         RETURN(-EPERM);
2224         }
2225
2226         /* We mark all of the fields "set" so MDS/OST does not re-set them */
2227         if (!(xvalid & OP_XVALID_CTIME_SET) &&
2228              (attr->ia_valid & ATTR_CTIME)) {
2229                 attr->ia_ctime = current_time(inode);
2230                 xvalid |= OP_XVALID_CTIME_SET;
2231         }
2232         if (!(attr->ia_valid & ATTR_ATIME_SET) &&
2233             (attr->ia_valid & ATTR_ATIME)) {
2234                 attr->ia_atime = current_time(inode);
2235                 attr->ia_valid |= ATTR_ATIME_SET;
2236         }
2237         if (!(attr->ia_valid & ATTR_MTIME_SET) &&
2238             (attr->ia_valid & ATTR_MTIME)) {
2239                 attr->ia_mtime = current_time(inode);
2240                 attr->ia_valid |= ATTR_MTIME_SET;
2241         }
2242
2243         if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
2244                 CDEBUG(D_INODE, "setting mtime %lld, ctime %lld, now = %lld\n",
2245                        (s64)attr->ia_mtime.tv_sec, (s64)attr->ia_ctime.tv_sec,
2246                        ktime_get_real_seconds());
2247
2248         if (S_ISREG(inode->i_mode))
2249                 inode_unlock(inode);
2250
2251         /* We always do an MDS RPC, even if we're only changing the size;
2252          * only the MDS knows whether truncate() should fail with -ETXTBSY */
2253
2254         OBD_ALLOC_PTR(op_data);
2255         if (op_data == NULL)
2256                 GOTO(out, rc = -ENOMEM);
2257
2258         if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
2259                 /* If we are changing file size, file content is
2260                  * modified, flag it.
2261                  */
2262                 xvalid |= OP_XVALID_OWNEROVERRIDE;
2263                 op_data->op_bias |= MDS_DATA_MODIFIED;
2264                 clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
2265         }
2266
2267         if (attr->ia_valid & ATTR_FILE) {
2268                 struct ll_file_data *fd = attr->ia_file->private_data;
2269
2270                 if (fd->fd_lease_och)
2271                         op_data->op_bias |= MDS_TRUNC_KEEP_LEASE;
2272         }
2273
2274         op_data->op_attr = *attr;
2275         op_data->op_xvalid = xvalid;
2276
2277         rc = ll_md_setattr(dentry, op_data);
2278         if (rc)
2279                 GOTO(out, rc);
2280
2281         if (!S_ISREG(inode->i_mode) || hsm_import)
2282                 GOTO(out, rc = 0);
2283
2284         if (attr->ia_valid & (ATTR_SIZE | ATTR_ATIME | ATTR_ATIME_SET |
2285                               ATTR_MTIME | ATTR_MTIME_SET | ATTR_CTIME) ||
2286             xvalid & OP_XVALID_CTIME_SET) {
2287                 bool cached = false;
2288
2289                 rc = pcc_inode_setattr(inode, attr, &cached);
2290                 if (cached) {
2291                         if (rc) {
2292                                 CERROR("%s: PCC inode "DFID" setattr failed: "
2293                                        "rc = %d\n",
2294                                        ll_i2sbi(inode)->ll_fsname,
2295                                        PFID(&lli->lli_fid), rc);
2296                                 GOTO(out, rc);
2297                         }
2298                 } else {
2299                         unsigned int flags = 0;
2300
2301                         /* For truncate and utimes that send attributes to OSTs,
2302                          * setting mtime/atime to the past will be performed
2303                          * under a PW [0:EOF] extent lock (new_size:EOF for
2304                          * truncate). It may seem excessive to send mtime/atime
2305                          * updates to OSTs when not setting times to the past,
2306                          * but it is necessary due to possible time
2307                          * de-synchronization between the MDT inode and OST objects.
2308                          */
2309                         if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) {
2310                                 xvalid |= OP_XVALID_FLAGS;
2311                                 flags = LUSTRE_ENCRYPT_FL;
2312                                 /* Call to ll_io_zero_page is not necessary if
2313                                  * truncating on PAGE_SIZE boundary, because
2314                                  * whole pages will be wiped.
2315                                  * In case of Direct IO, all we need is to set
2316                                  * new size.
2317                                  */
2318                                 if (attr->ia_valid & ATTR_SIZE &&
2319                                     attr->ia_size & ~PAGE_MASK &&
2320                                     !(attr->ia_valid & ATTR_FILE &&
2321                                       attr->ia_file->f_flags & O_DIRECT)) {
2322                                         pgoff_t offset =
2323                                                 attr->ia_size & (PAGE_SIZE - 1);
2324
2325                                         rc = ll_io_zero_page(inode,
2326                                                     attr->ia_size >> PAGE_SHIFT,
2327                                                     offset, PAGE_SIZE - offset);
2328                                         if (rc)
2329                                                 GOTO(out, rc);
2330                                 }
2331                                 /* If encrypted volatile file without the key,
2332                                  * we need to fetch size from reference file,
2333                                  * and set it on OST objects. This happens when
2334                                  * migrating or extending an encrypted file
2335                                  * without the key.
2336                                  */
2337                                 if (filename_is_volatile(dentry->d_name.name,
2338                                                          dentry->d_name.len,
2339                                                          NULL) &&
2340                                     llcrypt_require_key(inode) == -ENOKEY) {
2341                                         struct file *ref_file;
2342                                         struct inode *ref_inode;
2343                                         struct ll_inode_info *ref_lli;
2344                                         struct cl_object *ref_obj;
2345                                         struct cl_attr ref_attr = { 0 };
2346                                         struct lu_env *env;
2347                                         __u16 refcheck;
2348
2349                                         rc = volatile_ref_file(
2350                                                 dentry->d_name.name,
2351                                                 dentry->d_name.len,
2352                                                 &ref_file);
2353                                         if (rc)
2354                                                 GOTO(out, rc);
2355
2356                                         ref_inode = file_inode(ref_file);
2357                                         if (!ref_inode) {
2358                                                 fput(ref_file);
2359                                                 GOTO(out, rc = -EINVAL);
2360                                         }
2361
2362                                         env = cl_env_get(&refcheck);
2363                                         if (IS_ERR(env))
2364                                                 GOTO(out, rc = PTR_ERR(env));
2365
2366                                         ref_lli = ll_i2info(ref_inode);
2367                                         ref_obj = ref_lli->lli_clob;
2368                                         cl_object_attr_lock(ref_obj);
2369                                         rc = cl_object_attr_get(env, ref_obj,
2370                                                                 &ref_attr);
2371                                         cl_object_attr_unlock(ref_obj);
2372                                         cl_env_put(env, &refcheck);
2373                                         fput(ref_file);
2374                                         if (rc)
2375                                                 GOTO(out, rc);
2376
2377                                         attr->ia_valid |= ATTR_SIZE;
2378                                         attr->ia_size = ref_attr.cat_size;
2379                                 }
2380                         }
2381                         rc = cl_setattr_ost(lli->lli_clob, attr, xvalid, flags);
2382                 }
2383         }
2384
2385         /* If the file was restored, we need to set the dirty flag.
2386          *
2387          * We've already sent MDS_DATA_MODIFIED flag in
2388          * ll_md_setattr() for truncate. However, the MDT refuses to
2389          * set the HS_DIRTY flag on released files, so we have to set
2390          * it again if the file has been restored. Please check how
2391          * LLIF_DATA_MODIFIED is set in vvp_io_setattr_fini().
2392          *
2393          * Please notice that if the file is not released, the previous
2394          * MDS_DATA_MODIFIED has taken effect and usually
2395          * LLIF_DATA_MODIFIED is not set (see vvp_io_setattr_fini()).
2396          * This way we can save an RPC for common open + trunc
2397          * operation. */
2398         if (test_and_clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags)) {
2399                 struct hsm_state_set hss = {
2400                         .hss_valid = HSS_SETMASK,
2401                         .hss_setmask = HS_DIRTY,
2402                 };
2403                 int rc2;
2404
2405                 rc2 = ll_hsm_state_set(inode, &hss);
2406                 /* truncate and write can happen at the same time, so the
2407                  * file can be marked modified even though it was not
2408                  * restored from the released state; in that case
2409                  * ll_hsm_state_set() is not applicable for the file and
2410                  * rc2 < 0 is normal. */
2411                 if (rc2 < 0)
2412                         CDEBUG(D_INFO, DFID "HSM set dirty failed: rc2 = %d\n",
2413                                PFID(ll_inode2fid(inode)), rc2);
2414         }
2415
2416         EXIT;
2417 out:
2418         if (op_data != NULL)
2419                 ll_finish_md_op_data(op_data);
2420
2421         if (S_ISREG(inode->i_mode)) {
2422                 inode_lock(inode);
2423                 if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
2424                         inode_dio_wait(inode);
2425                 /* Once we've got the i_mutex, it's safe to set the S_NOSEC
2426          * flag.  ll_update_inode() (called from ll_md_setattr) clears
2427                  * inode flags, so there is a gap where S_NOSEC is not set.
2428                  * This can cause a writer to take the i_mutex unnecessarily,
2429                  * but this is safe to do and should be rare. */
2430                 inode_has_no_xattr(inode);
2431         }
2432
2433         if (!rc)
2434                 ll_stats_ops_tally(ll_i2sbi(inode), attr->ia_valid & ATTR_SIZE ?
2435                                         LPROC_LL_TRUNC : LPROC_LL_SETATTR,
2436                                    ktime_us_delta(ktime_get(), kstart));
2437
2438         RETURN(rc);
2439 }
2440
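/**
 * VFS ->setattr entry point: let llcrypt veto the change if needed, derive
 * the OP_XVALID_OWNEROVERRIDE / ATTR_FORCE / ATTR_KILL_S[UG]ID flags that
 * the kernel would normally compute, and hand off to ll_setattr_raw().
 */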
2441 int ll_setattr(struct user_namespace *mnt_userns, struct dentry *de,
2442                struct iattr *attr)
2443 {
2444         int mode = de->d_inode->i_mode;
2445         enum op_xvalid xvalid = 0;
2446         int rc;
2447
2448         rc = llcrypt_prepare_setattr(de, attr);
2449         if (rc)
2450                 return rc;
2451
2452         if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
2453                               (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
2454                 xvalid |= OP_XVALID_OWNEROVERRIDE;
2455
2456         if (((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
2457                                (ATTR_SIZE|ATTR_MODE)) &&
2458             (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
2459              (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2460               !(attr->ia_mode & S_ISGID))))
2461                 attr->ia_valid |= ATTR_FORCE;
2462
2463         if ((attr->ia_valid & ATTR_MODE) &&
2464             (mode & S_ISUID) &&
2465             !(attr->ia_mode & S_ISUID) &&
2466             !(attr->ia_valid & ATTR_KILL_SUID))
2467                 attr->ia_valid |= ATTR_KILL_SUID;
2468
2469         if ((attr->ia_valid & ATTR_MODE) &&
2470             ((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2471             !(attr->ia_mode & S_ISGID) &&
2472             !(attr->ia_valid & ATTR_KILL_SGID))
2473                 attr->ia_valid |= ATTR_KILL_SGID;
2474
2475         return ll_setattr_raw(de, attr, xvalid, false);
2476 }
2477
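/**
 * Gather filesystem statistics: ask the MDC first and, unless the MDT
 * already returned summed values (OS_STATFS_SUM), overlay the block counts
 * from the OSTs and clamp the inode counts so that "inodes in use" stays
 * consistent with lod_statfs() behaviour.
 */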
2478 int ll_statfs_internal(struct ll_sb_info *sbi, struct obd_statfs *osfs,
2479                        u32 flags)
2480 {
2481         struct obd_statfs obd_osfs = { 0 };
2482         time64_t max_age;
2483         int rc;
2484
2485         ENTRY;
2486         max_age = ktime_get_seconds() - sbi->ll_statfs_max_age;
2487
2488         if (test_bit(LL_SBI_LAZYSTATFS, sbi->ll_flags))
2489                 flags |= OBD_STATFS_NODELAY;
2490
2491         rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
2492         if (rc)
2493                 RETURN(rc);
2494
2495         osfs->os_type = LL_SUPER_MAGIC;
2496
2497         CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n",
2498               osfs->os_bavail, osfs->os_blocks, osfs->os_ffree, osfs->os_files);
2499
2500         if (osfs->os_state & OS_STATFS_SUM)
2501                 GOTO(out, rc);
2502
2503         rc = obd_statfs(NULL, sbi->ll_dt_exp, &obd_osfs, max_age, flags);
2504         if (rc) /* Possibly a filesystem with no OSTs.  Report MDT totals. */
2505                 GOTO(out, rc = 0);
2506
2507         CDEBUG(D_SUPER, "OSC blocks %llu/%llu objects %llu/%llu\n",
2508                obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
2509                obd_osfs.os_files);
2510
2511         osfs->os_bsize = obd_osfs.os_bsize;
2512         osfs->os_blocks = obd_osfs.os_blocks;
2513         osfs->os_bfree = obd_osfs.os_bfree;
2514         osfs->os_bavail = obd_osfs.os_bavail;
2515
2516         /* If we have _some_ OSTs, but don't have as many free objects on the
2517          * OSTs as inodes on the MDTs, reduce the reported number of inodes
2518          * to compensate, so that the "inodes in use" number is correct.
2519          * This should be kept in sync with lod_statfs() behaviour.
2520          */
2521         if (obd_osfs.os_files && obd_osfs.os_ffree < osfs->os_ffree) {
2522                 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
2523                                  obd_osfs.os_ffree;
2524                 osfs->os_ffree = obd_osfs.os_ffree;
2525         }
2526
2527 out:
2528         RETURN(rc);
2529 }
2530
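/**
 * If the mountpoint inode inherits a project ID, clamp the statfs numbers to
 * the project quota limits so that "df" reflects the project allotment.
 * Block limits from the quota code are in KiB (hence the "* 1024" below);
 * as an illustration with made-up numbers, a 1 GiB soft limit with
 * f_bsize = 4096 gives limit = 1048576 * 1024 / 4096 = 262144 blocks, and
 * f_bfree/f_bavail become that limit minus the blocks currently charged to
 * the project.
 */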
2531 static int ll_statfs_project(struct inode *inode, struct kstatfs *sfs)
2532 {
2533         struct if_quotactl qctl = {
2534                 .qc_cmd = LUSTRE_Q_GETQUOTA,
2535                 .qc_type = PRJQUOTA,
2536                 .qc_valid = QC_GENERAL,
2537         };
2538         u64 limit, curblock;
2539         int ret;
2540
2541         qctl.qc_id = ll_i2info(inode)->lli_projid;
2542         ret = quotactl_ioctl(inode->i_sb, &qctl);
2543         if (ret) {
2544                 /* ignore errors if the project ID does not have
2545                  * a quota limit or the feature is unsupported.
2546                  */
2547                 if (ret == -ESRCH || ret == -EOPNOTSUPP)
2548                         ret = 0;
2549                 return ret;
2550         }
2551
2552         limit = ((qctl.qc_dqblk.dqb_bsoftlimit ?
2553                  qctl.qc_dqblk.dqb_bsoftlimit :
2554                  qctl.qc_dqblk.dqb_bhardlimit) * 1024) / sfs->f_bsize;
2555         if (limit && sfs->f_blocks > limit) {
2556                 curblock = (qctl.qc_dqblk.dqb_curspace +
2557                                 sfs->f_bsize - 1) / sfs->f_bsize;
2558                 sfs->f_blocks = limit;
2559                 sfs->f_bfree = sfs->f_bavail =
2560                         (sfs->f_blocks > curblock) ?
2561                         (sfs->f_blocks - curblock) : 0;
2562         }
2563
2564         limit = qctl.qc_dqblk.dqb_isoftlimit ?
2565                 qctl.qc_dqblk.dqb_isoftlimit :
2566                 qctl.qc_dqblk.dqb_ihardlimit;
2567         if (limit && sfs->f_files > limit) {
2568                 sfs->f_files = limit;
2569                 sfs->f_ffree = (sfs->f_files >
2570                         qctl.qc_dqblk.dqb_curinodes) ?
2571                         (sfs->f_files - qctl.qc_dqblk.dqb_curinodes) : 0;
2572         }
2573
2574         return 0;
2575 }
2576
2577 int ll_statfs(struct dentry *de, struct kstatfs *sfs)
2578 {
2579         struct super_block *sb = de->d_sb;
2580         struct obd_statfs osfs;
2581         __u64 fsid = huge_encode_dev(sb->s_dev);
2582         ktime_t kstart = ktime_get();
2583         int rc;
2584
2585         CDEBUG(D_VFSTRACE, "VFS Op:sb=%s (%p)\n", sb->s_id, sb);
2586
2587         /* Some amount of caching on the client is allowed */
2588         rc = ll_statfs_internal(ll_s2sbi(sb), &osfs, OBD_STATFS_SUM);
2589         if (rc)
2590                 return rc;
2591
2592         statfs_unpack(sfs, &osfs);
2593
2594         /* We need to downshift for all 32-bit kernels, because we can't
2595          * tell if the kernel is being called via sys_statfs64() or not.
2596          * Stop before overflowing f_bsize - in which case it is better
2597          * to just risk EOVERFLOW if caller is using old sys_statfs(). */
2598         if (sizeof(long) < 8) {
2599                 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
2600                         sfs->f_bsize <<= 1;
2601
2602                         osfs.os_blocks >>= 1;
2603                         osfs.os_bfree >>= 1;
2604                         osfs.os_bavail >>= 1;
2605                 }
2606         }
2607
2608         sfs->f_blocks = osfs.os_blocks;
2609         sfs->f_bfree = osfs.os_bfree;
2610         sfs->f_bavail = osfs.os_bavail;
2611         sfs->f_fsid.val[0] = (__u32)fsid;
2612         sfs->f_fsid.val[1] = (__u32)(fsid >> 32);
2613         if (ll_i2info(de->d_inode)->lli_projid &&
2614             test_bit(LLIF_PROJECT_INHERIT, &ll_i2info(de->d_inode)->lli_flags))
2615                 return ll_statfs_project(de->d_inode, sfs);
2616
2617         ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STATFS,
2618                            ktime_us_delta(ktime_get(), kstart));
2619
2620         return 0;
2621 }
2622
2623 void ll_inode_size_lock(struct inode *inode)
2624 {
2625         struct ll_inode_info *lli;
2626
2627         LASSERT(!S_ISDIR(inode->i_mode));
2628
2629         lli = ll_i2info(inode);
2630         mutex_lock(&lli->lli_size_mutex);
2631 }
2632
2633 void ll_inode_size_unlock(struct inode *inode)
2634 {
2635         struct ll_inode_info *lli;
2636
2637         lli = ll_i2info(inode);
2638         mutex_unlock(&lli->lli_size_mutex);
2639 }
2640
2641 void ll_update_inode_flags(struct inode *inode, unsigned int ext_flags)
2642 {
2643         /* do not clear encryption flag */
2644         ext_flags |= ll_inode_to_ext_flags(inode->i_flags) & LUSTRE_ENCRYPT_FL;
2645         inode->i_flags = ll_ext_to_inode_flags(ext_flags);
2646         if (ext_flags & LUSTRE_PROJINHERIT_FL)
2647                 set_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags);
2648         else
2649                 clear_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags);
2650 }
2651
2652 int ll_update_inode(struct inode *inode, struct lustre_md *md)
2653 {
2654         struct ll_inode_info *lli = ll_i2info(inode);
2655         struct mdt_body *body = md->body;
2656         struct ll_sb_info *sbi = ll_i2sbi(inode);
2657         bool api32;
2658         int rc = 0;
2659
2660         if (body->mbo_valid & OBD_MD_FLEASIZE) {
2661                 rc = cl_file_inode_init(inode, md);
2662                 if (rc)
2663                         return rc;
2664         }
2665
2666         if (S_ISDIR(inode->i_mode)) {
2667                 rc = ll_update_lsm_md(inode, md);
2668                 if (rc != 0)
2669                         return rc;
2670         }
2671
2672         if (body->mbo_valid & OBD_MD_FLACL)
2673                 lli_replace_acl(lli, md);
2674
2675         api32 = test_bit(LL_SBI_32BIT_API, sbi->ll_flags);
2676         inode->i_ino = cl_fid_build_ino(&body->mbo_fid1, api32);
2677         inode->i_generation = cl_fid_build_gen(&body->mbo_fid1);
2678
2679         if (body->mbo_valid & OBD_MD_FLATIME) {
2680                 if (body->mbo_atime > inode->i_atime.tv_sec)
2681                         inode->i_atime.tv_sec = body->mbo_atime;
2682                 lli->lli_atime = body->mbo_atime;
2683         }
2684
2685         if (body->mbo_valid & OBD_MD_FLMTIME) {
2686                 if (body->mbo_mtime > inode->i_mtime.tv_sec) {
2687                         CDEBUG(D_INODE,
2688                                "setting ino %lu mtime from %lld to %llu\n",
2689                                inode->i_ino, (s64)inode->i_mtime.tv_sec,
2690                                body->mbo_mtime);
2691                         inode->i_mtime.tv_sec = body->mbo_mtime;
2692                 }
2693                 lli->lli_mtime = body->mbo_mtime;
2694         }
2695
2696         if (body->mbo_valid & OBD_MD_FLCTIME) {
2697                 if (body->mbo_ctime > inode->i_ctime.tv_sec)
2698                         inode->i_ctime.tv_sec = body->mbo_ctime;
2699                 lli->lli_ctime = body->mbo_ctime;
2700         }
2701
2702         if (body->mbo_valid & OBD_MD_FLBTIME)
2703                 lli->lli_btime = body->mbo_btime;
2704
2705         /* Clear i_flags to remove S_NOSEC before permissions are updated */
2706         if (body->mbo_valid & OBD_MD_FLFLAGS)
2707                 ll_update_inode_flags(inode, body->mbo_flags);
2708         if (body->mbo_valid & OBD_MD_FLMODE)
2709                 inode->i_mode = (inode->i_mode & S_IFMT) |
2710                                 (body->mbo_mode & ~S_IFMT);
2711
2712         if (body->mbo_valid & OBD_MD_FLTYPE)
2713                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
2714                                 (body->mbo_mode & S_IFMT);
2715
2716         LASSERT(inode->i_mode != 0);
2717         if (body->mbo_valid & OBD_MD_FLUID)
2718                 inode->i_uid = make_kuid(&init_user_ns, body->mbo_uid);
2719         if (body->mbo_valid & OBD_MD_FLGID)
2720                 inode->i_gid = make_kgid(&init_user_ns, body->mbo_gid);
2721         if (body->mbo_valid & OBD_MD_FLPROJID)
2722                 lli->lli_projid = body->mbo_projid;
2723         if (body->mbo_valid & OBD_MD_FLNLINK) {
2724                 spin_lock(&inode->i_lock);
2725                 set_nlink(inode, body->mbo_nlink);
2726                 spin_unlock(&inode->i_lock);
2727         }
2728         if (body->mbo_valid & OBD_MD_FLRDEV)
2729                 inode->i_rdev = old_decode_dev(body->mbo_rdev);
2730
2731         if (body->mbo_valid & OBD_MD_FLID) {
2732                 /* FID shouldn't be changed! */
2733                 if (fid_is_sane(&lli->lli_fid)) {
2734                         LASSERTF(lu_fid_eq(&lli->lli_fid, &body->mbo_fid1),
2735                                  "Trying to change FID "DFID
2736                                  " to the "DFID", inode "DFID"(%p)\n",
2737                                  PFID(&lli->lli_fid), PFID(&body->mbo_fid1),
2738                                  PFID(ll_inode2fid(inode)), inode);
2739                 } else {
2740                         lli->lli_fid = body->mbo_fid1;
2741                 }
2742         }
2743
2744         LASSERT(fid_seq(&lli->lli_fid) != 0);
2745
2746         /* For an encrypted file without the key, do not lose the
2747          * cleartext size stored in lli_lazysize by ll_merge_attr();
2748          * it will be needed in ll_prepare_close().
2749          */
2750         if (lli->lli_attr_valid & OBD_MD_FLLAZYSIZE && lli->lli_lazysize &&
2751             llcrypt_require_key(inode) == -ENOKEY)
2752                 lli->lli_attr_valid = body->mbo_valid | OBD_MD_FLLAZYSIZE;
2753         else
2754                 lli->lli_attr_valid = body->mbo_valid;
2755         if (body->mbo_valid & OBD_MD_FLSIZE) {
2756                 i_size_write(inode, body->mbo_size);
2757
2758                 CDEBUG(D_VFSTRACE, "inode="DFID", updating i_size %llu\n",
2759                        PFID(ll_inode2fid(inode)),
2760                        (unsigned long long)body->mbo_size);
2761
2762                 if (body->mbo_valid & OBD_MD_FLBLOCKS)
2763                         inode->i_blocks = body->mbo_blocks;
2764         } else {
2765                 if (body->mbo_valid & OBD_MD_FLLAZYSIZE)
2766                         lli->lli_lazysize = body->mbo_size;
2767                 if (body->mbo_valid & OBD_MD_FLLAZYBLOCKS)
2768                         lli->lli_lazyblocks = body->mbo_blocks;
2769         }
2770
2771         if (body->mbo_valid & OBD_MD_TSTATE) {
2772                 /* Set LLIF_FILE_RESTORING while a restore is ongoing and
2773                  * clear it when done, so that glimpsing of updated
2774                  * attributes starts again
2775                  */
2776                 if (body->mbo_t_state & MS_RESTORE)
2777                         set_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
2778                 else
2779                         clear_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
2780         }
2781
2782         return 0;
2783 }
2784
2785 /* check whether the child's default LMV is inherited from the parent */
2786 static inline bool ll_default_lmv_inherited(struct lmv_stripe_md *pdmv,
2787                                             struct lmv_stripe_md *cdmv)
2788 {
2789         if (!pdmv || !cdmv)
2790                 return false;
2791
2792         if (pdmv->lsm_md_magic != cdmv->lsm_md_magic ||
2793             pdmv->lsm_md_stripe_count != cdmv->lsm_md_stripe_count ||
2794             pdmv->lsm_md_master_mdt_index != cdmv->lsm_md_master_mdt_index ||
2795             pdmv->lsm_md_hash_type != cdmv->lsm_md_hash_type)
2796                 return false;
2797
2798         if (cdmv->lsm_md_max_inherit !=
2799             lmv_inherit_next(pdmv->lsm_md_max_inherit))
2800                 return false;
2801
2802         if (cdmv->lsm_md_max_inherit_rr !=
2803             lmv_inherit_rr_next(pdmv->lsm_md_max_inherit_rr))
2804                 return false;
2805
2806         return true;
2807 }
2808
2809 /* update directory depth relative to ROOT; called after LOOKUP lock is fetched. */
2810 void ll_update_dir_depth(struct inode *dir, struct inode *inode)
2811 {
2812         struct ll_inode_info *plli;
2813         struct ll_inode_info *lli;
2814
2815         if (!S_ISDIR(inode->i_mode))
2816                 return;
2817
2818         if (inode == dir)
2819                 return;
2820
2821         plli = ll_i2info(dir);
2822         lli = ll_i2info(inode);
2823         lli->lli_dir_depth = plli->lli_dir_depth + 1;
2824         if (plli->lli_default_lsm_md && lli->lli_default_lsm_md) {
2825                 down_read(&plli->lli_lsm_sem);
2826                 down_read(&lli->lli_lsm_sem);
2827                 if (ll_default_lmv_inherited(plli->lli_default_lsm_md,
2828                                              lli->lli_default_lsm_md))
2829                         lli->lli_inherit_depth =
2830                                 plli->lli_inherit_depth + 1;
2831                 else
2832                         lli->lli_inherit_depth = 0;
2833                 up_read(&lli->lli_lsm_sem);
2834                 up_read(&plli->lli_lsm_sem);
2835         } else {
2836                 lli->lli_inherit_depth = 0;
2837         }
2838
2839         CDEBUG(D_INODE, DFID" depth %hu default LMV depth %hu\n",
2840                PFID(&lli->lli_fid), lli->lli_dir_depth, lli->lli_inherit_depth);
2841 }
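
     /*
      * Illustrative example (hypothetical layout): for directories <root>/a/b,
      * ROOT has lli_dir_depth 0, "a" gets depth 1 and "b" depth 2.  If ROOT
      * carries a default LMV and the defaults on "a" and "b" pass
      * ll_default_lmv_inherited() against their respective parents, their
      * lli_inherit_depth becomes 1 and 2; any directory whose default LMV
      * does not match the inherited one resets its lli_inherit_depth to 0.
      */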
2842
2843 void ll_truncate_inode_pages_final(struct inode *inode)
2844 {
2845         struct address_space *mapping = &inode->i_data;
2846         unsigned long nrpages;
2847         unsigned long flags;
2848
2849         truncate_inode_pages_final(mapping);
2850
2851         /* Workaround for LU-118: nrpages may not be fully updated when
2852          * truncate_inode_pages() returns, as there can be a page in the
2853          * process of deletion (inside __delete_from_page_cache()) in the
2854          * specified range. Thus mapping->nrpages can be non-zero when this
2855          * function returns even after truncation of the whole mapping.
2856          * Only re-read nrpages under the lock if it isn't already zero.
2857          */
2858         nrpages = mapping->nrpages;
2859         if (nrpages) {
2860                 ll_xa_lock_irqsave(&mapping->i_pages, flags);
2861                 nrpages = mapping->nrpages;
2862                 ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
2863         } /* Workaround end */
2864
2865         LASSERTF(nrpages == 0, "%s: inode="DFID"(%p) nrpages=%lu, "
2866                  "see https://jira.whamcloud.com/browse/LU-118\n",
2867                  ll_i2sbi(inode)->ll_fsname,
2868                  PFID(ll_inode2fid(inode)), inode, nrpages);
2869 }
2870
2871 int ll_read_inode2(struct inode *inode, void *opaque)
2872 {
2873         struct lustre_md *md = opaque;
2874         struct ll_inode_info *lli = ll_i2info(inode);
2875         int     rc;
2876         ENTRY;
2877
2878         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
2879                PFID(&lli->lli_fid), inode);
2880
2881         /* Core attributes from the MDS first.  This is a new inode, and
2882          * the VFS doesn't zero times in the core inode so we have to do
2883          * it ourselves.  They will be overwritten by either MDS or OST
2884          * attributes - we just need to make sure they aren't newer.
2885          */
2886         inode->i_mtime.tv_sec = 0;
2887         inode->i_atime.tv_sec = 0;
2888         inode->i_ctime.tv_sec = 0;
2889         inode->i_rdev = 0;
2890         rc = ll_update_inode(inode, md);
2891         if (rc != 0)
2892                 RETURN(rc);
2893
2894         /* OIDEBUG(inode); */
2895
2896 #ifdef HAVE_BACKING_DEV_INFO
2897         /* initialize backing dev info */
2898         inode->i_mapping->backing_dev_info = &s2lsi(inode->i_sb)->lsi_bdi;
2899 #endif
2900         if (S_ISREG(inode->i_mode)) {
2901                 struct ll_sb_info *sbi = ll_i2sbi(inode);
2902                 inode->i_op = &ll_file_inode_operations;
2903                 inode->i_fop = sbi->ll_fop;
2904                 inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
2905                 EXIT;
2906         } else if (S_ISDIR(inode->i_mode)) {
2907                 inode->i_op = &ll_dir_inode_operations;
2908                 inode->i_fop = &ll_dir_operations;
2909                 EXIT;
2910         } else if (S_ISLNK(inode->i_mode)) {
2911                 inode->i_op = &ll_fast_symlink_inode_operations;
2912                 EXIT;
2913         } else {
2914                 inode->i_op = &ll_special_inode_operations;
2915
2916                 init_special_inode(inode, inode->i_mode,
2917                                    inode->i_rdev);
2918
2919                 EXIT;
2920         }
2921
2922         return 0;
2923 }
2924
2925 void ll_delete_inode(struct inode *inode)
2926 {
2927         struct ll_inode_info *lli = ll_i2info(inode);
2928         ENTRY;
2929
2930         if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL) {
2931                 /* This is the last chance to write out dirty pages,
2932                  * otherwise we may lose data during umount.
2933                  *
2934                  * If i_nlink is 0 then just discard the data. This is safe
2935                  * because the local inode gets i_nlink 0 from the server only
2936                  * on the last unlink, so the file is not open anywhere else
2937                  */
2938                 cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, inode->i_nlink ?
2939                                    CL_FSYNC_LOCAL : CL_FSYNC_DISCARD, 1);
2940         }
2941
2942         ll_truncate_inode_pages_final(inode);
2943         ll_clear_inode(inode);
2944         clear_inode(inode);
2945
2946         EXIT;
2947 }
2948
2949 int ll_iocontrol(struct inode *inode, struct file *file,
2950                  unsigned int cmd, unsigned long arg)
2951 {
2952         struct ll_sb_info *sbi = ll_i2sbi(inode);
2953         struct ptlrpc_request *req = NULL;
2954         int rc, flags = 0;
2955         ENTRY;
2956
2957         switch (cmd) {
2958         case FS_IOC_GETFLAGS: {
2959                 struct mdt_body *body;
2960                 struct md_op_data *op_data;
2961
2962                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
2963                                              0, 0, LUSTRE_OPC_ANY,
2964                                              NULL);
2965                 if (IS_ERR(op_data))
2966                         RETURN(PTR_ERR(op_data));
2967
2968                 op_data->op_valid = OBD_MD_FLFLAGS;
2969                 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
2970                 ll_finish_md_op_data(op_data);
2971                 if (rc) {
2972                         CERROR("%s: failure inode "DFID": rc = %d\n",
2973                                sbi->ll_md_exp->exp_obd->obd_name,
2974                                PFID(ll_inode2fid(inode)), rc);
2975                         RETURN(-abs(rc));
2976                 }
2977
2978                 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
2979
2980                 flags = body->mbo_flags;
2981
2982                 ptlrpc_req_finished(req);
2983
2984                 RETURN(put_user(flags, (int __user *)arg));
2985         }
2986         case FS_IOC_SETFLAGS: {
2987                 struct iattr *attr;
2988                 struct md_op_data *op_data;
2989                 struct cl_object *obj;
2990                 struct fsxattr fa = { 0 };
2991
2992                 if (get_user(flags, (int __user *)arg))
2993                         RETURN(-EFAULT);
2994
2995                 fa.fsx_projid = ll_i2info(inode)->lli_projid;
2996                 if (flags & LUSTRE_PROJINHERIT_FL)
2997                         fa.fsx_xflags = FS_XFLAG_PROJINHERIT;
2998
2999                 rc = ll_ioctl_check_project(inode, fa.fsx_xflags,
3000                                             fa.fsx_projid);
3001                 if (rc)
3002                         RETURN(rc);
3003
3004                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
3005                                              LUSTRE_OPC_ANY, NULL);
3006                 if (IS_ERR(op_data))
3007                         RETURN(PTR_ERR(op_data));
3008
3009                 op_data->op_attr_flags = flags;
3010                 op_data->op_xvalid |= OP_XVALID_FLAGS;
3011                 rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &req);
3012                 ll_finish_md_op_data(op_data);
3013                 ptlrpc_req_finished(req);
3014                 if (rc)
3015                         RETURN(rc);
3016
3017                 ll_update_inode_flags(inode, flags);
3018
3019                 obj = ll_i2info(inode)->lli_clob;
3020                 if (obj == NULL)
3021                         RETURN(0);
3022
3023                 OBD_ALLOC_PTR(attr);
3024                 if (attr == NULL)
3025                         RETURN(-ENOMEM);
3026
3027                 rc = cl_setattr_ost(obj, attr, OP_XVALID_FLAGS, flags);
3028
3029                 OBD_FREE_PTR(attr);
3030                 RETURN(rc);
3031         }
3032         default:
3033                 RETURN(-ENOSYS);
3034         }
3035
3036         RETURN(0);
3037 }
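
     /*
      * Illustrative user-space sketch for the ioctls handled above (these are
      * the standard VFS flag ioctls; FS_PROJINHERIT_FL comes from <linux/fs.h>
      * and nothing here is specific to this file):
      *
      *     int flags;
      *
      *     ioctl(fd, FS_IOC_GETFLAGS, &flags);
      *     flags |= FS_PROJINHERIT_FL;
      *     ioctl(fd, FS_IOC_SETFLAGS, &flags);
      *
      * On Lustre the SETFLAGS path updates the MDT inode via md_setattr() and
      * then propagates the flags to the OST objects via cl_setattr_ost().
      */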
3038
3039 int ll_flush_ctx(struct inode *inode)
3040 {
3041         struct ll_sb_info  *sbi = ll_i2sbi(inode);
3042
3043         CDEBUG(D_SEC, "flush context for user %d\n",
3044                from_kuid(&init_user_ns, current_uid()));
3045
3046         obd_set_info_async(NULL, sbi->ll_md_exp,
3047                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
3048                            0, NULL, NULL);
3049         obd_set_info_async(NULL, sbi->ll_dt_exp,
3050                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
3051                            0, NULL, NULL);
3052         return 0;
3053 }
3054
3055 /* umount -f client means force down, don't save state */
3056 void ll_umount_begin(struct super_block *sb)
3057 {
3058         struct ll_sb_info *sbi = ll_s2sbi(sb);
3059         struct obd_device *obd;
3060         struct obd_ioctl_data *ioc_data;
3061         int cnt;
3062         ENTRY;
3063
3064         CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
3065                sb->s_count, atomic_read(&sb->s_active));
3066
3067         obd = class_exp2obd(sbi->ll_md_exp);
3068         if (obd == NULL) {
3069                 CERROR("Invalid MDC connection handle %#llx\n",
3070                        sbi->ll_md_exp->exp_handle.h_cookie);
3071                 EXIT;
3072                 return;
3073         }
3074         obd->obd_force = 1;
3075
3076         obd = class_exp2obd(sbi->ll_dt_exp);
3077         if (obd == NULL) {
3078                 CERROR("Invalid LOV connection handle %#llx\n",
3079                        sbi->ll_dt_exp->exp_handle.h_cookie);
3080                 EXIT;
3081                 return;
3082         }
3083         obd->obd_force = 1;
3084
3085         OBD_ALLOC_PTR(ioc_data);
3086         if (ioc_data) {
3087                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
3088                               sizeof(*ioc_data), ioc_data, NULL);
3089
3090                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
3091                               sizeof(*ioc_data), ioc_data, NULL);
3092
3093                 OBD_FREE_PTR(ioc_data);
3094         }
3095
3096         /* Really, we'd like to wait until there are no requests outstanding,
3097          * and then continue.  For now, we just periodically check for the VFS
3098          * to decrement mnt_cnt and hope to finish within 10 seconds.
3099          */
3100         cnt = 10;
3101         while (cnt > 0 &&
3102                !may_umount(sbi->ll_mnt.mnt)) {
3103                 ssleep(1);
3104                 cnt -= 1;
3105         }
3106
3107         EXIT;
3108 }
3109
3110 int ll_remount_fs(struct super_block *sb, int *flags, char *data)
3111 {
3112         struct ll_sb_info *sbi = ll_s2sbi(sb);
3113         char *profilenm = get_profile_name(sb);
3114         int err;
3115         __u32 read_only;
3116
3117         if ((*flags & MS_RDONLY) != (sb->s_flags & SB_RDONLY)) {
3118                 read_only = *flags & MS_RDONLY;
3119                 err = obd_set_info_async(NULL, sbi->ll_md_exp,
3120                                          sizeof(KEY_READ_ONLY),
3121                                          KEY_READ_ONLY, sizeof(read_only),
3122                                          &read_only, NULL);
3123                 if (err) {
3124                         LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
3125                                       profilenm, read_only ?
3126                                       "read-only" : "read-write", err);
3127                         return err;
3128                 }
3129
3130                 if (read_only)
3131                         sb->s_flags |= SB_RDONLY;
3132                 else
3133                         sb->s_flags &= ~SB_RDONLY;
3134
3135                 if (test_bit(LL_SBI_VERBOSE, sbi->ll_flags))
3136                         LCONSOLE_WARN("Remounted %s %s\n", profilenm,
3137                                       read_only ?  "read-only" : "read-write");
3138         }
3139         return 0;
3140 }
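
     /*
      * A typical trigger for the path above is "mount -o remount,ro <mountpoint>"
      * on the client: MS_RDONLY arrives in *flags, KEY_READ_ONLY is pushed to
      * the MDC export, and SB_RDONLY is set on the superblock; remounting
      * read-write simply clears the bit again.
      */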
3141
3142 /**
3143  * Clean up the open handle that is cached on the MDT side.
3144  *
3145  * In the open case, the client-side open handling thread may hit an error
3146  * after the MDT has granted the open. In that case, the client should
3147  * send a close RPC to the MDT as cleanup; otherwise, the open handle
3148  * on the MDT is leaked there until the client unmounts or is evicted.
3149  *
3150  * Furthermore, if someone unlinked the file, the open handle holds a
3151  * reference on that file/object and will block subsequent threads that
3152  * want to locate the object via FID.
3153  *
3154  * \param[in] sb        super block for this file-system
3155  * \param[in] pill      reply capsule of the original open request
3156  */
3157 void ll_open_cleanup(struct super_block *sb, struct req_capsule *pill)
3158 {
3159         struct mdt_body                 *body;
3160         struct md_op_data               *op_data;
3161         struct ptlrpc_request           *close_req = NULL;
3162         struct obd_export               *exp       = ll_s2sbi(sb)->ll_md_exp;
3163         ENTRY;
3164
3165         body = req_capsule_server_get(pill, &RMF_MDT_BODY);
3166         OBD_ALLOC_PTR(op_data);
3167         if (op_data == NULL) {
3168                 CWARN("%s: cannot allocate op_data to release open handle for "
3169                       DFID"\n", ll_s2sbi(sb)->ll_fsname, PFID(&body->mbo_fid1));
3170
3171                 RETURN_EXIT;
3172         }
3173
3174         op_data->op_fid1 = body->mbo_fid1;
3175         op_data->op_open_handle = body->mbo_open_handle;
3176         op_data->op_mod_time = ktime_get_real_seconds();
3177         md_close(exp, op_data, NULL, &close_req);
3178         ptlrpc_req_finished(close_req);
3179         ll_finish_md_op_data(op_data);
3180
3181         EXIT;
3182 }
3183
3184 /* set filesystem-wide default LMV for subdir mount if it's enabled on ROOT. */
3185 static int ll_fileset_default_lmv_fixup(struct inode *inode,
3186                                         struct lustre_md *md)
3187 {
3188         struct ll_sb_info *sbi = ll_i2sbi(inode);
3189         struct ptlrpc_request *req = NULL;
3190         union lmv_mds_md *lmm = NULL;
3191         int size = 0;
3192         int rc;
3193
3194         LASSERT(is_root_inode(inode));
3195         LASSERT(!fid_is_root(&sbi->ll_root_fid));
3196         LASSERT(!md->default_lmv);
3197
3198         rc = ll_dir_get_default_layout(inode, (void **)&lmm, &size, &req,
3199                                        OBD_MD_DEFAULT_MEA,
3200                                        GET_DEFAULT_LAYOUT_ROOT);
3201         if (rc && rc != -ENODATA)
3202                 GOTO(out, rc);
3203
3204         rc = 0;
3205         if (lmm && size) {
3206                 rc = md_unpackmd(sbi->ll_md_exp, &md->default_lmv, lmm, size);
3207                 if (rc < 0)
3208                         GOTO(out, rc);
3209
3210                 rc = 0;
3211         }
3212         EXIT;
3213 out:
3214         if (req)
3215                 ptlrpc_req_finished(req);
3216         return rc;
3217 }
3218
3219 int ll_prep_inode(struct inode **inode, struct req_capsule *pill,
3220                   struct super_block *sb, struct lookup_intent *it)
3221 {
3222         struct ll_sb_info *sbi = NULL;
3223         struct lustre_md md = { NULL };
3224         bool default_lmv_deleted = false;
3225         int rc;
3226
3227         ENTRY;
3228
3229         LASSERT(*inode || sb);
3230         sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
3231         rc = md_get_lustre_md(sbi->ll_md_exp, pill, sbi->ll_dt_exp,
3232                               sbi->ll_md_exp, &md);
3233         if (rc != 0)
3234                 GOTO(out, rc);
3235
3236         /*
3237          * Clear default_lmv only if the intent_getattr reply doesn't contain
3238          * it, but that has to be done after iget; check this early because
3239          * ll_update_lsm_md() may change md.
3240          */
3241         if (it && (it->it_op & (IT_LOOKUP | IT_GETATTR)) &&
3242             S_ISDIR(md.body->mbo_mode) && !md.default_lmv) {
3243                 if (unlikely(*inode && is_root_inode(*inode) &&
3244                              !fid_is_root(&sbi->ll_root_fid))) {
3245                         rc = ll_fileset_default_lmv_fixup(*inode, &md);
3246                         if (rc)
3247                                 GOTO(out, rc);
3248                 }
3249
3250                 if (!md.default_lmv)
3251                         default_lmv_deleted = true;
3252         }
3253
3254         if (*inode) {
3255                 rc = ll_update_inode(*inode, &md);
3256                 if (rc != 0)
3257                         GOTO(out, rc);
3258         } else {
3259                 bool api32 = test_bit(LL_SBI_32BIT_API, sbi->ll_flags);
3260                 struct lu_fid *fid1 = &md.body->mbo_fid1;
3261
3262                 LASSERT(sb != NULL);
3263
3264                 /*
3265                  * At this point the server returns the same FID that the client
3266                  * generated for the create, so using ->fid1 is okay here.
3267                  */
3268                 if (!fid_is_sane(fid1)) {
3269                         CERROR("%s: Fid is insane "DFID"\n",
3270                                 sbi->ll_fsname, PFID(fid1));
3271                         GOTO(out, rc = -EINVAL);
3272                 }
3273
3274                 *inode = ll_iget(sb, cl_fid_build_ino(fid1, api32), &md);
3275                 if (IS_ERR(*inode)) {
3276                         lmd_clear_acl(&md);
3277                         rc = PTR_ERR(*inode);
3278                         *inode = NULL;
3279                         CERROR("new_inode -fatal: rc %d\n", rc);
3280                         GOTO(out, rc);
3281                 }
3282         }
3283
3284         /* Handle a piggybacked layout lock.
3285          * A layout lock can be piggybacked on getattr and open requests.
3286          * The lsm can be applied to the inode only if it comes with a layout
3287          * lock, otherwise the correct layout may be overwritten, for example:
3288          * 1. proc1: MDT returns an lsm but does not grant the layout lock
3289          * 2. the layout is changed by another client
3290          * 3. proc2: refreshes the layout, layout lock granted
3291          * 4. proc1: applies the now-stale layout */
3292         if (it != NULL && it->it_lock_mode != 0) {
3293                 struct lustre_handle lockh;
3294                 struct ldlm_lock *lock;
3295
3296                 lockh.cookie = it->it_lock_handle;
3297                 lock = ldlm_handle2lock(&lockh);
3298                 LASSERT(lock != NULL);
3299                 if (ldlm_has_layout(lock)) {
3300                         struct cl_object_conf conf;
3301
3302                         memset(&conf, 0, sizeof(conf));
3303                         conf.coc_opc = OBJECT_CONF_SET;
3304                         conf.coc_inode = *inode;
3305                         conf.coc_lock = lock;
3306                         conf.u.coc_layout = md.layout;
3307                         (void)ll_layout_conf(*inode, &conf);
3308                 }
3309                 LDLM_LOCK_PUT(lock);
3310         }
3311
3312         if (default_lmv_deleted)
3313                 ll_update_default_lsm_md(*inode, &md);
3314
3315         /* we may want to apply some policy for foreign file/dir */
3316         if (ll_sbi_has_foreign_symlink(sbi)) {
3317                 rc = ll_manage_foreign(*inode, &md);
3318                 if (rc < 0)
3319                         GOTO(out, rc);
3320         }
3321
3322         GOTO(out, rc = 0);
3323
3324 out:
3325         /* cleanup will be done if necessary */
3326         md_free_lustre_md(sbi->ll_md_exp, &md);
3327
3328         if (rc != 0 && it != NULL && it->it_op & IT_OPEN) {
3329                 ll_intent_drop_lock(it);
3330                 ll_open_cleanup(sb != NULL ? sb : (*inode)->i_sb, pill);
3331         }
3332
3333         return rc;
3334 }
3335
3336 int ll_obd_statfs(struct inode *inode, void __user *arg)
3337 {
3338         struct ll_sb_info *sbi = NULL;
3339         struct obd_export *exp;
3340         struct obd_ioctl_data *data = NULL;
3341         __u32 type;
3342         int len = 0, rc;
3343
3344         if (inode)
3345                 sbi = ll_i2sbi(inode);
3346         if (!sbi)
3347                 GOTO(out_statfs, rc = -EINVAL);
3348
3349         rc = obd_ioctl_getdata(&data, &len, arg);
3350         if (rc)
3351                 GOTO(out_statfs, rc);
3352
3353         if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
3354             !data->ioc_pbuf1 || !data->ioc_pbuf2)
3355                 GOTO(out_statfs, rc = -EINVAL);
3356
3357         if (data->ioc_inllen1 != sizeof(__u32) ||
3358             data->ioc_inllen2 != sizeof(__u32) ||
3359             data->ioc_plen1 != sizeof(struct obd_statfs) ||
3360             data->ioc_plen2 != sizeof(struct obd_uuid))
3361                 GOTO(out_statfs, rc = -EINVAL);
3362
3363         memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
3364         if (type & LL_STATFS_LMV)
3365                 exp = sbi->ll_md_exp;
3366         else if (type & LL_STATFS_LOV)
3367                 exp = sbi->ll_dt_exp;
3368         else
3369                 GOTO(out_statfs, rc = -ENODEV);
3370
3371         rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, data, NULL);
3372         if (rc)
3373                 GOTO(out_statfs, rc);
3374 out_statfs:
3375         OBD_FREE_LARGE(data, len);
3376         return rc;
3377 }
3378
3379 /*
3380  * this is normally called in ll_finish_md_op_data(), but sometimes it needs
3381  * to be called early to avoid deadlock.
3382  */
3383 void ll_unlock_md_op_lsm(struct md_op_data *op_data)
3384 {
3385         if (op_data->op_mea2_sem) {
3386                 up_read_non_owner(op_data->op_mea2_sem);
3387                 op_data->op_mea2_sem = NULL;
3388         }
3389
3390         if (op_data->op_mea1_sem) {
3391                 up_read_non_owner(op_data->op_mea1_sem);
3392                 op_data->op_mea1_sem = NULL;
3393         }
3394 }
3395
3396 /* this function prepares the md_op_data hint for passing down to the MD stack. */
3397 struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
3398                                       struct inode *i1, struct inode *i2,
3399                                       const char *name, size_t namelen,
3400                                       __u32 mode, enum md_op_code opc,
3401                                       void *data)
3402 {
3403         struct llcrypt_name fname = { 0 };
3404         int rc;
3405
3406         LASSERT(i1 != NULL);
3407
3408         if (name == NULL) {
3409                 /* Do not reuse namelen for something else. */
3410                 if (namelen != 0)
3411                         return ERR_PTR(-EINVAL);
3412         } else {
3413                 if ((!IS_ENCRYPTED(i1) ||
3414                      (opc != LUSTRE_OPC_LOOKUP && opc != LUSTRE_OPC_CREATE)) &&
3415                     namelen > ll_i2sbi(i1)->ll_namelen)
3416                         return ERR_PTR(-ENAMETOOLONG);
3417
3418                 /* "/" is not a valid name, but it's allowed */
3419                 if (!lu_name_is_valid_2(name, namelen) &&
3420                     strncmp("/", name, namelen) != 0)
3421                         return ERR_PTR(-EINVAL);
3422         }
3423
3424         if (op_data == NULL)
3425                 OBD_ALLOC_PTR(op_data);
3426
3427         if (op_data == NULL)
3428                 return ERR_PTR(-ENOMEM);
3429
3430         ll_i2gids(op_data->op_suppgids, i1, i2);
3431         /* If the client is using a subdir mount and looks at what it sees as
3432          * /.fscrypt, interpret it as the .fscrypt dir at the root of the fs.
3433          */
3434         if (unlikely(i1->i_sb && i1->i_sb->s_root && is_root_inode(i1) &&
3435                      !fid_is_root(ll_inode2fid(i1)) &&
3436                      name && namelen == strlen(dot_fscrypt_name) &&
3437                      strncmp(name, dot_fscrypt_name, namelen) == 0))
3438                 lu_root_fid(&op_data->op_fid1);
3439         else
3440                 op_data->op_fid1 = *ll_inode2fid(i1);
3441
3442         if (S_ISDIR(i1->i_mode)) {
3443                 down_read_non_owner(&ll_i2info(i1)->lli_lsm_sem);
3444                 op_data->op_mea1_sem = &ll_i2info(i1)->lli_lsm_sem;
3445                 op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
3446                 op_data->op_default_mea1 = ll_i2info(i1)->lli_default_lsm_md;
3447         }
3448
3449         if (i2) {
3450                 op_data->op_fid2 = *ll_inode2fid(i2);
3451                 if (S_ISDIR(i2->i_mode)) {
3452                         if (i2 != i1) {
3453                                 /* i2 is typically a child of i1, and MUST be
3454                                  * further from the root to avoid deadlocks.
3455                                  */
3456                                 down_read_non_owner(&ll_i2info(i2)->lli_lsm_sem);
3457                                 op_data->op_mea2_sem =
3458                                                 &ll_i2info(i2)->lli_lsm_sem;
3459                         }
3460                         op_data->op_mea2 = ll_i2info(i2)->lli_lsm_md;
3461                 }
3462         } else {
3463                 fid_zero(&op_data->op_fid2);
3464         }
3465
3466         if (test_bit(LL_SBI_64BIT_HASH, ll_i2sbi(i1)->ll_flags))
3467                 op_data->op_cli_flags |= CLI_HASH64;
3468
3469         if (ll_need_32bit_api(ll_i2sbi(i1)))
3470                 op_data->op_cli_flags |= CLI_API32;
3471
3472         if ((i2 && is_root_inode(i2)) ||
3473             opc == LUSTRE_OPC_LOOKUP || opc == LUSTRE_OPC_CREATE) {
3474                 /* In the lookup case, ll_setup_filename() has already been
3475                  * called in ll_lookup_it(), so just take the provided name.
3476                  * Also take the provided name when dealing with the root inode.
3477                  */
3478                 fname.disk_name.name = (unsigned char *)name;
3479                 fname.disk_name.len = namelen;
3480         } else if (name && namelen) {
3481                 struct qstr dname = QSTR_INIT(name, namelen);
3482                 struct inode *dir;
3483                 struct lu_fid *pfid = NULL;
3484                 struct lu_fid fid;
3485                 int lookup;
3486
3487                 if (!S_ISDIR(i1->i_mode) && i2 && S_ISDIR(i2->i_mode)) {
3488                         /* special case when called from ll_link() */
3489                         dir = i2;
3490                         lookup = 0;
3491                 } else {
3492                         dir = i1;
3493                         lookup = (int)(opc == LUSTRE_OPC_ANY);
3494                 }
3495                 if (opc == LUSTRE_OPC_ANY && lookup)
3496                         pfid = &fid;
3497                 rc = ll_setup_filename(dir, &dname, lookup, &fname, pfid);
3498                 if (rc) {
3499                         ll_finish_md_op_data(op_data);
3500                         return ERR_PTR(rc);
3501                 }
3502                 if (pfid && !fid_is_zero(pfid)) {
3503                         if (i2 == NULL)
3504                                 op_data->op_fid2 = fid;
3505                         op_data->op_bias = MDS_FID_OP;
3506                 }
3507                 if (fname.disk_name.name &&
3508                     fname.disk_name.name != (unsigned char *)name) {
3509                         /* op_data->op_name must be freed after use */
3510                         op_data->op_flags |= MF_OPNAME_KMALLOCED;
3511                 }
3512         }
3513
3514         /* LUSTRE_OPC_LOOKUP and LUSTRE_OPC_OPEN are in fact
3515          * treated as LUSTRE_OPC_ANY
3516          */
3517         if (opc == LUSTRE_OPC_LOOKUP || opc == LUSTRE_OPC_OPEN)
3518                 op_data->op_code = LUSTRE_OPC_ANY;
3519         else
3520                 op_data->op_code = opc;
3521         op_data->op_name = fname.disk_name.name;
3522         op_data->op_namelen = fname.disk_name.len;
3523         op_data->op_mode = mode;
3524         op_data->op_mod_time = ktime_get_real_seconds();
3525         op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
3526         op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
3527         op_data->op_cap = current_cap();
3528         op_data->op_mds = 0;
3529         if ((opc == LUSTRE_OPC_CREATE) && (name != NULL) &&
3530              filename_is_volatile(name, namelen, &op_data->op_mds)) {
3531                 op_data->op_bias |= MDS_CREATE_VOLATILE;
3532         }
3533         op_data->op_data = data;
3534
3535         return op_data;
3536 }
3537
3538 void ll_finish_md_op_data(struct md_op_data *op_data)
3539 {
3540         ll_unlock_md_op_lsm(op_data);
3541         ll_security_release_secctx(op_data->op_file_secctx,
3542                                    op_data->op_file_secctx_size);
3543         if (op_data->op_flags & MF_OPNAME_KMALLOCED)
3544                 /* allocated via ll_setup_filename called
3545                  * from ll_prep_md_op_data
3546                  */
3547                 kfree(op_data->op_name);
3548         llcrypt_free_ctx(op_data->op_file_encctx, op_data->op_file_encctx_size);
3549         OBD_FREE_PTR(op_data);
3550 }
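
     #if 0
     /*
      * Minimal usage sketch for the prep/finish pair above (not compiled and
      * not part of the original source; it simply mirrors the FS_IOC_GETFLAGS
      * pattern in ll_iocontrol()).  Every call below is used with the same
      * signature elsewhere in this file.
      */
     static int example_md_getattr(struct inode *inode)
     {
             struct ll_sb_info *sbi = ll_i2sbi(inode);
             struct ptlrpc_request *req = NULL;
             struct md_op_data *op_data;
             int rc;

             op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
                                          LUSTRE_OPC_ANY, NULL);
             if (IS_ERR(op_data))
                     return PTR_ERR(op_data);

             op_data->op_valid = OBD_MD_FLFLAGS;
             rc = md_getattr(sbi->ll_md_exp, op_data, &req);
             ll_finish_md_op_data(op_data);  /* drops LSM locks, frees op_name */
             ptlrpc_req_finished(req);       /* safe even if req is NULL */
             return rc;
     }
     #endif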
3551
3552 int ll_show_options(struct seq_file *seq, struct dentry *dentry)
3553 {
3554         struct ll_sb_info *sbi;
3555         int i;
3556
3557         LASSERT(seq && dentry);
3558         sbi = ll_s2sbi(dentry->d_sb);
3559
3560         if (test_bit(LL_SBI_NOLCK, sbi->ll_flags))
3561                 seq_puts(seq, "nolock");
3562
3563         for (i = 1; ll_sbi_flags_name[i].token != LL_SBI_NUM_MOUNT_OPT; i++) {
3564                 /* match_table in some cases has patterns for both enabled and
3565                  * disabled cases. Ignore 'no'xxx versions if bit is set.
3566                  */
3567                 if (test_bit(ll_sbi_flags_name[i].token, sbi->ll_flags) &&
3568                     strncmp(ll_sbi_flags_name[i].pattern, "no", 2)) {
3569                         if (ll_sbi_flags_name[i].token ==
3570                             LL_SBI_FOREIGN_SYMLINK) {
3571                                 seq_show_option(seq, "foreign_symlink",
3572                                                 sbi->ll_foreign_symlink_prefix);
3573                         } else {
3574                                 seq_printf(seq, ",%s",
3575                                            ll_sbi_flags_name[i].pattern);
3576                         }
3577
3578                         /* You can have either localflock or flock but not
3579                          * both. If localflock is set don't print flock or
3580                          * noflock.
3581                          */
3582                         if (ll_sbi_flags_name[i].token == LL_SBI_LOCALFLOCK)
3583                                 i += 2;
3584                 } else if (!test_bit(ll_sbi_flags_name[i].token, sbi->ll_flags) &&
3585                            !strncmp(ll_sbi_flags_name[i].pattern, "no", 2)) {
3586                         seq_printf(seq, ",%s",
3587                                    ll_sbi_flags_name[i].pattern);
3588                 }
3589         }
3590
3591         llcrypt_show_test_dummy_encryption(seq, ',', dentry->d_sb);
3592
3593         RETURN(0);
3594 }
3595
3596 /**
3597  * Get the obd name selected by cmd and copy it out to user space
3598  */
3599 int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
3600 {
3601         struct ll_sb_info *sbi = ll_i2sbi(inode);
3602         struct obd_device *obd;
3603         ENTRY;
3604
3605         if (cmd == OBD_IOC_GETNAME_OLD || cmd == OBD_IOC_GETDTNAME)
3606                 obd = class_exp2obd(sbi->ll_dt_exp);
3607         else if (cmd == OBD_IOC_GETMDNAME)
3608                 obd = class_exp2obd(sbi->ll_md_exp);
3609         else
3610                 RETURN(-EINVAL);
3611
3612         if (!obd)
3613                 RETURN(-ENOENT);
3614
3615         if (copy_to_user((void __user *)arg, obd->obd_name,
3616                          strlen(obd->obd_name) + 1))
3617                 RETURN(-EFAULT);
3618
3619         RETURN(0);
3620 }
3621
3622 struct dname_buf {
3623         struct work_struct db_work;
3624         struct dentry *db_dentry;
3625         /* Let's hope the path is not too long, 32 bytes for the work struct
3626          * on my kernel
3627          */
3628         char buf[PAGE_SIZE - sizeof(struct work_struct) - sizeof(void *)];
3629 };
3630
3631 static void ll_dput_later(struct work_struct *work)
3632 {
3633         struct dname_buf *db = container_of(work, struct dname_buf, db_work);
3634
3635         dput(db->db_dentry);
3636         free_page((unsigned long)db);
3637 }
3638
3639 static char *ll_d_path(struct dentry *dentry, char *buf, int bufsize)
3640 {
3641         char *path = NULL;
3642
3643         struct path p;
3644
3645         p.dentry = dentry;
3646         p.mnt = current->fs->root.mnt;
3647         path_get(&p);
3648         path = d_path(&p, buf, bufsize);
3649         path_put(&p);
3650         return path;
3651 }
3652
3653 void ll_dirty_page_discard_warn(struct inode *inode, int ioret)
3654 {
3655         struct dname_buf *db;
3656         char  *path = NULL;
3657         struct dentry *dentry = NULL;
3658
3659         /* this can be called inside spin lock so use GFP_ATOMIC. */
3660         db = (struct dname_buf *)__get_free_page(GFP_ATOMIC);
3661         if (db != NULL) {
3662
3663                 dentry = d_find_alias(inode);
3664                 if (dentry != NULL)
3665                         path = ll_d_path(dentry, db->buf, sizeof(db->buf));
3666         }
3667
3668         /* The below message is checked in recovery-small.sh test_24b */
3669         CDEBUG(D_WARNING,
3670                "%s: dirty page discard: %s/fid: "DFID"/%s may get corrupted "
3671                "(rc %d)\n", ll_i2sbi(inode)->ll_fsname,
3672                s2lsi(inode->i_sb)->lsi_lmd->lmd_dev,
3673                PFID(ll_inode2fid(inode)),
3674                (path && !IS_ERR(path)) ? path : "", ioret);
3675
3676         if (dentry != NULL) {
3677                 /* We cannot dput here: if we happen to be the last holder
3678                  * then we can end up waiting for page evictions that
3679                  * in turn wait for RPCs that need this instance of ptlrpcd
3680                  * (calling brw_interpret->*page_completion*->vmpage_error->here)
3681                  * LU-15340
3682                  */
3683                 INIT_WORK(&db->db_work, ll_dput_later);
3684                 db->db_dentry = dentry;
3685                 schedule_work(&db->db_work);
3686         } else {
3687                 if (db != NULL)
3688                         free_page((unsigned long)db);
3689         }
3690 }
3691
3692 ssize_t ll_copy_user_md(const struct lov_user_md __user *md,
3693                         struct lov_user_md **kbuf)
3694 {
3695         struct lov_user_md      lum;
3696         ssize_t                 lum_size;
3697         ENTRY;
3698
3699         if (copy_from_user(&lum, md, sizeof(lum)))
3700                 RETURN(-EFAULT);
3701
3702         lum_size = ll_lov_user_md_size(&lum);
3703         if (lum_size < 0)
3704                 RETURN(lum_size);
3705
3706         OBD_ALLOC_LARGE(*kbuf, lum_size);
3707         if (*kbuf == NULL)
3708                 RETURN(-ENOMEM);
3709
3710         if (copy_from_user(*kbuf, md, lum_size) != 0) {
3711                 OBD_FREE_LARGE(*kbuf, lum_size);
3712                 RETURN(-EFAULT);
3713         }
3714
3715         RETURN(lum_size);
3716 }
3717
3718 /*
3719  * Compute the llite root squash state after a change of the root squash
3720  * configuration settings or the addition/removal of an LNet NID
3721  */
3722 void ll_compute_rootsquash_state(struct ll_sb_info *sbi)
3723 {
3724         struct root_squash_info *squash = &sbi->ll_squash;
3725         int i;
3726         bool matched;
3727         struct lnet_processid id;
3728
3729         /* Update norootsquash flag */
3730         spin_lock(&squash->rsi_lock);
3731         if (list_empty(&squash->rsi_nosquash_nids))
3732                 clear_bit(LL_SBI_NOROOTSQUASH, sbi->ll_flags);
3733         else {
3734                 /* Do not apply root squash if at least one of our NIDs is
3735                  * in the nosquash_nids list */
3736                 matched = false;
3737                 i = 0;
3738                 while (LNetGetId(i++, &id) != -ENOENT) {
3739                         if (nid_is_lo0(&id.nid))
3740                                 continue;
3741                         if (cfs_match_nid(lnet_nid_to_nid4(&id.nid),
3742                                           &squash->rsi_nosquash_nids)) {
3743                                 matched = true;
3744                                 break;
3745                         }
3746                 }
3747                 if (matched)
3748                         set_bit(LL_SBI_NOROOTSQUASH, sbi->ll_flags);
3749                 else
3750                         clear_bit(LL_SBI_NOROOTSQUASH, sbi->ll_flags);
3751         }
3752         spin_unlock(&squash->rsi_lock);
3753 }
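
     /*
      * Illustrative example (hypothetical NIDs): if nosquash_nids contains the
      * range 192.168.1.[1-10]@tcp and one of this client's local NIDs is
      * 192.168.1.5@tcp, the loop above matches and LL_SBI_NOROOTSQUASH is set,
      * so root is not squashed on this client; otherwise the bit is cleared
      * and the configured root squash mapping applies.
      */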
3754
3755 /**
3756  * Parse linkea content to extract information about a given hardlink
3757  *
3758  * \param[in]   ldata      - Initialized linkea data
3759  * \param[in]   linkno     - Link identifier
3760  * \param[out]  parent_fid - The entry's parent FID
3761  * \param[out]  ln         - Entry name destination buffer
3762  *
3763  * \retval 0 on success
3764  * \retval Appropriate negative error code on failure
3765  */
3766 static int ll_linkea_decode(struct linkea_data *ldata, unsigned int linkno,
3767                             struct lu_fid *parent_fid, struct lu_name *ln)
3768 {
3769         unsigned int    idx;
3770         int             rc;
3771         ENTRY;
3772
3773         rc = linkea_init_with_rec(ldata);
3774         if (rc < 0)
3775                 RETURN(rc);
3776
3777         if (linkno >= ldata->ld_leh->leh_reccount)
3778                 /* beyond last link */
3779                 RETURN(-ENODATA);
3780
3781         linkea_first_entry(ldata);
3782         for (idx = 0; ldata->ld_lee != NULL; idx++) {
3783                 linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen, ln,
3784                                     parent_fid);
3785                 if (idx == linkno)
3786                         break;
3787
3788                 linkea_next_entry(ldata);
3789         }
3790
3791         if (idx < linkno)
3792                 RETURN(-ENODATA);
3793
3794         RETURN(0);
3795 }
3796
3797 /**
3798  * Get parent FID and name of an identified link. Operation is performed for
3799  * a given link number, letting the caller iterate over linkno to list one or
3800  * all links of an entry.
3801  *
3802  * \param[in]     file - File descriptor against which to perform the operation
3803  * \param[in,out] arg  - User-filled structure containing the linkno to operate
3804  *                       on and the available size. It is eventually filled with
3805  *                       the requested information or left untouched on error
3806  *
3807  * \retval - 0 on success
3808  * \retval - Appropriate negative error code on failure
3809  */
3810 int ll_getparent(struct file *file, struct getparent __user *arg)
3811 {
3812         struct inode            *inode = file_inode(file);
3813         struct linkea_data      *ldata;
3814         struct lu_buf            buf = LU_BUF_NULL;
3815         struct lu_name           ln;
3816         struct lu_fid            parent_fid;
3817         __u32                    linkno;
3818         __u32                    name_size;
3819         int                      rc;
3820
3821         ENTRY;
3822
3823         if (!capable(CAP_DAC_READ_SEARCH) &&
3824             !test_bit(LL_SBI_USER_FID2PATH, ll_i2sbi(inode)->ll_flags))
3825                 RETURN(-EPERM);
3826
3827         if (get_user(name_size, &arg->gp_name_size))
3828                 RETURN(-EFAULT);
3829
3830         if (get_user(linkno, &arg->gp_linkno))
3831                 RETURN(-EFAULT);
3832
3833         if (name_size > PATH_MAX)
3834                 RETURN(-EINVAL);
3835
3836         OBD_ALLOC(ldata, sizeof(*ldata));
3837         if (ldata == NULL)
3838                 RETURN(-ENOMEM);
3839
3840         rc = linkea_data_new(ldata, &buf);
3841         if (rc < 0)
3842                 GOTO(ldata_free, rc);
3843
3844         rc = ll_xattr_list(inode, XATTR_NAME_LINK, XATTR_TRUSTED_T, buf.lb_buf,
3845                            buf.lb_len, OBD_MD_FLXATTR);
3846         if (rc < 0)
3847                 GOTO(lb_free, rc);
3848
3849         rc = ll_linkea_decode(ldata, linkno, &parent_fid, &ln);
3850         if (rc < 0)
3851                 GOTO(lb_free, rc);
3852
3853         if (ln.ln_namelen >= name_size)
3854                 GOTO(lb_free, rc = -EOVERFLOW);
3855
3856         if (copy_to_user(&arg->gp_fid, &parent_fid, sizeof(arg->gp_fid)))
3857                 GOTO(lb_free, rc = -EFAULT);
3858
3859         if (copy_to_user(&arg->gp_name, ln.ln_name, ln.ln_namelen))
3860                 GOTO(lb_free, rc = -EFAULT);
3861
3862         if (put_user('\0', arg->gp_name + ln.ln_namelen))
3863                 GOTO(lb_free, rc = -EFAULT);
3864
3865 lb_free:
3866         lu_buf_free(&buf);
3867 ldata_free:
3868         OBD_FREE(ldata, sizeof(*ldata));
3869
3870         RETURN(rc);
3871 }
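
     /*
      * Illustrative user-space sketch (assumptions: the ioctl is exposed to
      * applications as LL_IOC_GETPARENT and struct getparent is available from
      * the Lustre user-space headers; only the gp_* field names are confirmed
      * by the code above):
      *
      *     struct getparent *gp = malloc(sizeof(*gp) + NAME_MAX + 1);
      *
      *     gp->gp_linkno = 0;
      *     gp->gp_name_size = NAME_MAX + 1;
      *     if (ioctl(fd, LL_IOC_GETPARENT, gp) == 0)
      *             printf("parent of link %u: %s\n", gp->gp_linkno, gp->gp_name);
      *
      * gp->gp_fid then holds the parent FID; incrementing gp_linkno walks the
      * remaining hard links until the request fails with ENODATA.
      */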