LU-13601 llite: avoid needless large stats alloc
[fs/lustre-release.git] / lustre / llite / llite_lib.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lustre/llite/llite_lib.c
32  *
33  * Lustre Light Super operations
34  */
35
36 #define DEBUG_SUBSYSTEM S_LLITE
37
38 #include <linux/cpu.h>
39 #include <linux/module.h>
40 #include <linux/random.h>
41 #include <linux/statfs.h>
42 #include <linux/time.h>
43 #include <linux/types.h>
44 #include <libcfs/linux/linux-uuid.h>
45 #include <linux/version.h>
46 #include <linux/mm.h>
47 #include <linux/user_namespace.h>
48 #include <linux/delay.h>
49 #include <linux/uidgid.h>
50 #include <linux/fs_struct.h>
51
52 #ifndef HAVE_CPUS_READ_LOCK
53 #include <libcfs/linux/linux-cpu.h>
54 #endif
55 #include <libcfs/linux/linux-misc.h>
56 #include <uapi/linux/lustre/lustre_ioctl.h>
57 #ifdef HAVE_UAPI_LINUX_MOUNT_H
58 #include <uapi/linux/mount.h>
59 #endif
60
61 #include <lustre_ha.h>
62 #include <lustre_dlm.h>
63 #include <lprocfs_status.h>
64 #include <lustre_disk.h>
65 #include <uapi/linux/lustre/lustre_param.h>
66 #include <lustre_log.h>
67 #include <cl_object.h>
68 #include <obd_cksum.h>
69 #include "llite_internal.h"
70
71 struct kmem_cache *ll_file_data_slab;
72
73 #ifndef log2
74 #define log2(n) ffz(~(n))
75 #endif
76
77 /**
78  * If only one core is visible to Lustre, async readahead will be
79  * disabled.  To avoid massive oversubscription, we use 1/2 of the
80  * active cores as the default maximum number of concurrent async
81  * readahead requests.
82  */
83 static inline unsigned int ll_get_ra_async_max_active(void)
84 {
85         return cfs_cpt_weight(cfs_cpt_tab, CFS_CPT_ANY) >> 1;
86 }
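/*
 * For example: with 32 CPUs visible in the client's CPU partition
 * table, ll_get_ra_async_max_active() above returns 16 by default
 * (illustrative numbers, not taken from the source).
 */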
87
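/*
 * Allocate and initialize the per-mount ll_sb_info: PCC state, the
 * readahead workqueue and default readahead limits, the shared page
 * LRU (ll_cache), foreign symlink defaults, statahead limits, default
 * mount flags, root squash and file heat settings.  Returns the new
 * sbi on success or an ERR_PTR() on failure.
 */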
88 static struct ll_sb_info *ll_init_sbi(void)
89 {
90         struct ll_sb_info *sbi = NULL;
91         unsigned long pages;
92         unsigned long lru_page_max;
93         struct sysinfo si;
94         int rc;
95
96         ENTRY;
97
98         OBD_ALLOC_PTR(sbi);
99         if (sbi == NULL)
100                 RETURN(ERR_PTR(-ENOMEM));
101
102         rc = pcc_super_init(&sbi->ll_pcc_super);
103         if (rc < 0)
104                 GOTO(out_sbi, rc);
105
106         spin_lock_init(&sbi->ll_lock);
107         mutex_init(&sbi->ll_lco.lco_lock);
108         spin_lock_init(&sbi->ll_pp_extent_lock);
109         spin_lock_init(&sbi->ll_process_lock);
110         sbi->ll_rw_stats_on = 0;
111         sbi->ll_statfs_max_age = OBD_STATFS_CACHE_SECONDS;
112
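        /*
         * Size the client page LRU from low memory: the cl_cache created
         * below may hold up to half of (totalram - totalhigh) pages by
         * default.
         */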
113         si_meminfo(&si);
114         pages = si.totalram - si.totalhigh;
115         lru_page_max = pages / 2;
116
117         sbi->ll_ra_info.ra_async_max_active = ll_get_ra_async_max_active();
118         sbi->ll_ra_info.ll_readahead_wq =
119                 cfs_cpt_bind_workqueue("ll-readahead-wq", cfs_cpt_tab,
120                                        0, CFS_CPT_ANY,
121                                        sbi->ll_ra_info.ra_async_max_active);
122         if (IS_ERR(sbi->ll_ra_info.ll_readahead_wq))
123                 GOTO(out_pcc, rc = PTR_ERR(sbi->ll_ra_info.ll_readahead_wq));
124
125         /* initialize ll_cache data */
126         sbi->ll_cache = cl_cache_init(lru_page_max);
127         if (sbi->ll_cache == NULL)
128                 GOTO(out_destroy_ra, rc = -ENOMEM);
129
130         /* initialize foreign symlink prefix path */
131         OBD_ALLOC(sbi->ll_foreign_symlink_prefix, sizeof("/mnt/"));
132         if (sbi->ll_foreign_symlink_prefix == NULL)
133                 GOTO(out_destroy_ra, rc = -ENOMEM);
134         memcpy(sbi->ll_foreign_symlink_prefix, "/mnt/", sizeof("/mnt/"));
135         sbi->ll_foreign_symlink_prefix_size = sizeof("/mnt/");
136
137         /* initialize foreign symlink upcall path, none by default */
138         OBD_ALLOC(sbi->ll_foreign_symlink_upcall, sizeof("none"));
139         if (sbi->ll_foreign_symlink_upcall == NULL)
140                 GOTO(out_destroy_ra, rc = -ENOMEM);
141         memcpy(sbi->ll_foreign_symlink_upcall, "none", sizeof("none"));
142         sbi->ll_foreign_symlink_upcall_items = NULL;
143         sbi->ll_foreign_symlink_upcall_nb_items = 0;
144         init_rwsem(&sbi->ll_foreign_symlink_sem);
145         /* foreign symlink support (LL_SBI_FOREIGN_SYMLINK in ll_flags)
146          * not enabled by default
147          */
148
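        /*
         * Default readahead tuning: total readahead is capped at 1/32 of
         * available (low) memory, bounded by SBI_DEFAULT_READ_AHEAD_MAX;
         * per-file readahead at 1/4 of that, bounded by
         * SBI_DEFAULT_READ_AHEAD_PER_FILE_MAX; and the async readahead
         * threshold defaults to the per-file limit.
         */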
149         sbi->ll_ra_info.ra_max_pages =
150                 min(pages / 32, SBI_DEFAULT_READ_AHEAD_MAX);
151         sbi->ll_ra_info.ra_max_pages_per_file =
152                 min(sbi->ll_ra_info.ra_max_pages / 4,
153                     SBI_DEFAULT_READ_AHEAD_PER_FILE_MAX);
154         sbi->ll_ra_info.ra_async_pages_per_file_threshold =
155                                 sbi->ll_ra_info.ra_max_pages_per_file;
156         sbi->ll_ra_info.ra_range_pages = SBI_DEFAULT_RA_RANGE_PAGES;
157         sbi->ll_ra_info.ra_max_read_ahead_whole_pages = -1;
158         atomic_set(&sbi->ll_ra_info.ra_async_inflight, 0);
159
160         set_bit(LL_SBI_VERBOSE, sbi->ll_flags);
161 #ifdef ENABLE_CHECKSUM
162         set_bit(LL_SBI_CHECKSUM, sbi->ll_flags);
163 #endif
164 #ifdef ENABLE_FLOCK
165         set_bit(LL_SBI_FLOCK, sbi->ll_flags);
166 #endif
167
168 #ifdef HAVE_LRU_RESIZE_SUPPORT
169         set_bit(LL_SBI_LRU_RESIZE, sbi->ll_flags);
170 #endif
171         set_bit(LL_SBI_LAZYSTATFS, sbi->ll_flags);
172
173         /* metadata statahead is enabled by default */
174         sbi->ll_sa_running_max = LL_SA_RUNNING_DEF;
175         sbi->ll_sa_max = LL_SA_RPC_DEF;
176         atomic_set(&sbi->ll_sa_total, 0);
177         atomic_set(&sbi->ll_sa_wrong, 0);
178         atomic_set(&sbi->ll_sa_running, 0);
179         atomic_set(&sbi->ll_agl_total, 0);
180         set_bit(LL_SBI_AGL_ENABLED, sbi->ll_flags);
181         set_bit(LL_SBI_FAST_READ, sbi->ll_flags);
182         set_bit(LL_SBI_TINY_WRITE, sbi->ll_flags);
183         set_bit(LL_SBI_PARALLEL_DIO, sbi->ll_flags);
184         ll_sbi_set_encrypt(sbi, true);
185
186         /* root squash */
187         sbi->ll_squash.rsi_uid = 0;
188         sbi->ll_squash.rsi_gid = 0;
189         INIT_LIST_HEAD(&sbi->ll_squash.rsi_nosquash_nids);
190         spin_lock_init(&sbi->ll_squash.rsi_lock);
191
192         /* Per-filesystem file heat */
193         sbi->ll_heat_decay_weight = SBI_DEFAULT_HEAT_DECAY_WEIGHT;
194         sbi->ll_heat_period_second = SBI_DEFAULT_HEAT_PERIOD_SECOND;
195
196         /* Per-fs open heat level before requesting open lock */
197         sbi->ll_oc_thrsh_count = SBI_DEFAULT_OPENCACHE_THRESHOLD_COUNT;
198         sbi->ll_oc_max_ms = SBI_DEFAULT_OPENCACHE_THRESHOLD_MAX_MS;
199         sbi->ll_oc_thrsh_ms = SBI_DEFAULT_OPENCACHE_THRESHOLD_MS;
200         RETURN(sbi);
201 out_destroy_ra:
202         if (sbi->ll_foreign_symlink_prefix)
203                 OBD_FREE(sbi->ll_foreign_symlink_prefix, sizeof("/mnt/"));
204         if (sbi->ll_cache) {
205                 cl_cache_decref(sbi->ll_cache);
206                 sbi->ll_cache = NULL;
207         }
208         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
209 out_pcc:
210         pcc_super_fini(&sbi->ll_pcc_super);
211 out_sbi:
212         OBD_FREE_PTR(sbi);
213         RETURN(ERR_PTR(rc));
214 }
215
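/*
 * Tear down everything ll_init_sbi() and mount option parsing set up:
 * the nosquash NID list, readahead workqueue, ll_cache reference,
 * foreign symlink prefix/upcall buffers, rw stats and PCC state, then
 * free the sbi itself.
 */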
216 static void ll_free_sbi(struct super_block *sb)
217 {
218         struct ll_sb_info *sbi = ll_s2sbi(sb);
219         ENTRY;
220
221         if (sbi != NULL) {
222                 if (!list_empty(&sbi->ll_squash.rsi_nosquash_nids))
223                         cfs_free_nidlist(&sbi->ll_squash.rsi_nosquash_nids);
224                 if (sbi->ll_ra_info.ll_readahead_wq)
225                         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
226                 if (sbi->ll_cache != NULL) {
227                         cl_cache_decref(sbi->ll_cache);
228                         sbi->ll_cache = NULL;
229                 }
230                 if (sbi->ll_foreign_symlink_prefix) {
231                         OBD_FREE(sbi->ll_foreign_symlink_prefix,
232                                  sbi->ll_foreign_symlink_prefix_size);
233                         sbi->ll_foreign_symlink_prefix = NULL;
234                 }
235                 if (sbi->ll_foreign_symlink_upcall) {
236                         OBD_FREE(sbi->ll_foreign_symlink_upcall,
237                                  strlen(sbi->ll_foreign_symlink_upcall) +
238                                        1);
239                         sbi->ll_foreign_symlink_upcall = NULL;
240                 }
241                 if (sbi->ll_foreign_symlink_upcall_items) {
242                         int i;
243                         int nb_items = sbi->ll_foreign_symlink_upcall_nb_items;
244                         struct ll_foreign_symlink_upcall_item *items =
245                                 sbi->ll_foreign_symlink_upcall_items;
246
247                         for (i = 0 ; i < nb_items; i++)
248                                 if (items[i].type == STRING_TYPE)
249                                         OBD_FREE(items[i].string,
250                                                        items[i].size);
251
252                         OBD_FREE_LARGE(items, nb_items *
253                                 sizeof(struct ll_foreign_symlink_upcall_item));
254                         sbi->ll_foreign_symlink_upcall_items = NULL;
255                 }
256                 ll_free_rw_stats_info(sbi);
257                 pcc_super_fini(&sbi->ll_pcc_super);
258                 OBD_FREE(sbi, sizeof(*sbi));
259         }
260         EXIT;
261 }
262
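/*
 * Connect this super block to its metadata (md) and data (dt) stacks:
 * negotiate connect flags with the MDC/LMV and OSC/LOV devices, fetch
 * statfs and connect data, fill in the VFS super_block fields, look up
 * the root FID and build the root inode/dentry, and finally publish
 * sysfs links for the md/dt devices.
 */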
263 static int client_common_fill_super(struct super_block *sb, char *md, char *dt)
264 {
265         struct inode *root = NULL;
266         struct ll_sb_info *sbi = ll_s2sbi(sb);
267         struct obd_statfs *osfs = NULL;
268         struct ptlrpc_request *request = NULL;
269         struct obd_connect_data *data = NULL;
270         struct obd_uuid *uuid;
271         struct md_op_data *op_data;
272         struct lustre_md lmd;
273         u64 valid;
274         int size, err, checksum;
275         bool api32;
276
277         ENTRY;
278         sbi->ll_md_obd = class_name2obd(md);
279         if (!sbi->ll_md_obd) {
280                 CERROR("MD %s: not setup or attached\n", md);
281                 RETURN(-EINVAL);
282         }
283
284         OBD_ALLOC_PTR(data);
285         if (data == NULL)
286                 RETURN(-ENOMEM);
287
288         OBD_ALLOC_PTR(osfs);
289         if (osfs == NULL) {
290                 OBD_FREE_PTR(data);
291                 RETURN(-ENOMEM);
292         }
293
294         /* Pass the client page size via ocd_grant_blkbits; the server should
295          * report back its backend blocksize for grant calculation purposes. */
296         data->ocd_grant_blkbits = PAGE_SHIFT;
297
298         /* indicate MDT features supported by this client */
299         data->ocd_connect_flags = OBD_CONNECT_IBITS    | OBD_CONNECT_NODEVOH  |
300                                   OBD_CONNECT_ATTRFID  | OBD_CONNECT_GRANT |
301                                   OBD_CONNECT_VERSION  | OBD_CONNECT_BRW_SIZE |
302                                   OBD_CONNECT_SRVLOCK  |
303                                   OBD_CONNECT_MDS_CAPA | OBD_CONNECT_OSS_CAPA |
304                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID     |
305                                   OBD_CONNECT_AT       | OBD_CONNECT_LOV_V3   |
306                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
307                                   OBD_CONNECT_64BITHASH |
308                                   OBD_CONNECT_EINPROGRESS |
309                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
310                                   OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS|
311                                   OBD_CONNECT_MAX_EASIZE |
312                                   OBD_CONNECT_FLOCK_DEAD |
313                                   OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK |
314                                   OBD_CONNECT_OPEN_BY_FID |
315                                   OBD_CONNECT_DIR_STRIPE |
316                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_CKSUM |
317                                   OBD_CONNECT_SUBTREE |
318                                   OBD_CONNECT_MULTIMODRPCS |
319                                   OBD_CONNECT_GRANT_PARAM |
320                                   OBD_CONNECT_GRANT_SHRINK |
321                                   OBD_CONNECT_SHORTIO | OBD_CONNECT_FLAGS2;
322
323         data->ocd_connect_flags2 = OBD_CONNECT2_DIR_MIGRATE |
324                                    OBD_CONNECT2_SUM_STATFS |
325                                    OBD_CONNECT2_OVERSTRIPING |
326                                    OBD_CONNECT2_FLR |
327                                    OBD_CONNECT2_LOCK_CONVERT |
328                                    OBD_CONNECT2_ARCHIVE_ID_ARRAY |
329                                    OBD_CONNECT2_INC_XID |
330                                    OBD_CONNECT2_LSOM |
331                                    OBD_CONNECT2_ASYNC_DISCARD |
332                                    OBD_CONNECT2_PCC |
333                                    OBD_CONNECT2_CRUSH | OBD_CONNECT2_LSEEK |
334                                    OBD_CONNECT2_GETATTR_PFID |
335                                    OBD_CONNECT2_DOM_LVB |
336                                    OBD_CONNECT2_REP_MBITS |
337                                    OBD_CONNECT2_ATOMIC_OPEN_LOCK;
338
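        /*
         * Note that the flags above only advertise what this client
         * supports; the server replies with the subset it supports as
         * well, which is what ends up in exp_connect_data and
         * exp_connect_flags() below.
         */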
339 #ifdef HAVE_LRU_RESIZE_SUPPORT
340         if (test_bit(LL_SBI_LRU_RESIZE, sbi->ll_flags))
341                 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
342 #endif
343         data->ocd_connect_flags |= OBD_CONNECT_ACL_FLAGS;
344
345         data->ocd_cksum_types = obd_cksum_types_supported_client();
346
347         if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
348                 /* flag the mdc connection as lightweight; only used for test
349                  * purposes, use with care */
350                 data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
351
352         data->ocd_ibits_known = MDS_INODELOCK_FULL;
353         data->ocd_version = LUSTRE_VERSION_CODE;
354
355         if (sb->s_flags & SB_RDONLY)
356                 data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
357         if (test_bit(LL_SBI_USER_XATTR, sbi->ll_flags))
358                 data->ocd_connect_flags |= OBD_CONNECT_XATTR;
359
360 #ifdef SB_NOSEC
361         /* Setting this indicates we correctly support S_NOSEC (See kernel
362          * commit 9e1f1de02c2275d7172e18dc4e7c2065777611bf)
363          */
364         sb->s_flags |= SB_NOSEC;
365 #endif
366         sbi->ll_fop = ll_select_file_operations(sbi);
367
368         /* always ping even if server suppress_pings */
369         if (test_bit(LL_SBI_ALWAYS_PING, sbi->ll_flags))
370                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
371
372         obd_connect_set_secctx(data);
373         if (ll_sbi_has_encrypt(sbi))
374                 obd_connect_set_enc(data);
375
376 #if defined(CONFIG_SECURITY)
377         data->ocd_connect_flags2 |= OBD_CONNECT2_SELINUX_POLICY;
378 #endif
379
380         data->ocd_brw_size = MD_MAX_BRW_SIZE;
381
382         err = obd_connect(NULL, &sbi->ll_md_exp, sbi->ll_md_obd,
383                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
384         if (err == -EBUSY) {
385                 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
386                                    "recovery, of which this client is not a "
387                                    "part. Please wait for recovery to complete,"
388                                    " abort, or time out.\n", md);
389                 GOTO(out, err);
390         } else if (err) {
391                 CERROR("cannot connect to %s: rc = %d\n", md, err);
392                 GOTO(out, err);
393         }
394
395         sbi->ll_md_exp->exp_connect_data = *data;
396
397         err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
398                            LUSTRE_SEQ_METADATA);
399         if (err) {
400                 CERROR("%s: Can't init metadata layer FID infrastructure, "
401                        "rc = %d\n", sbi->ll_md_exp->exp_obd->obd_name, err);
402                 GOTO(out_md, err);
403         }
404
405         /* For mount, we only need fs info from MDT0; in DNE this also
406          * ensures the client can be mounted as long as MDT0 is
407          * available */
408         err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
409                         ktime_get_seconds() - sbi->ll_statfs_max_age,
410                         OBD_STATFS_FOR_MDT0);
411         if (err)
412                 GOTO(out_md_fid, err);
413
414         /* This needs to be after statfs to ensure connect has finished.
415          * Note that "data" does NOT contain the valid connect reply.
416          * If connecting to a 1.8 server there will be no LMV device, so
417          * we can access the MDC export directly and exp_connect_flags will
418          * be non-zero, but if accessing an upgraded 2.1 server it will
419          * have the correct flags filled in.
420          * XXX: fill in the LMV exp_connect_flags from MDC(s). */
421         valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
422         if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
423             valid != CLIENT_CONNECT_MDT_REQD) {
424                 char *buf;
425
426                 OBD_ALLOC_WAIT(buf, PAGE_SIZE);
427                 obd_connect_flags2str(buf, PAGE_SIZE,
428                                       valid ^ CLIENT_CONNECT_MDT_REQD, 0, ",");
429                 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support "
430                                    "feature(s) needed for correct operation "
431                                    "of this client (%s). Please upgrade "
432                                    "server or downgrade client.\n",
433                                    sbi->ll_md_exp->exp_obd->obd_name, buf);
434                 OBD_FREE(buf, PAGE_SIZE);
435                 GOTO(out_md_fid, err = -EPROTO);
436         }
437
438         size = sizeof(*data);
439         err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
440                            KEY_CONN_DATA,  &size, data);
441         if (err) {
442                 CERROR("%s: Get connect data failed: rc = %d\n",
443                        sbi->ll_md_exp->exp_obd->obd_name, err);
444                 GOTO(out_md_fid, err);
445         }
446
447         LASSERT(osfs->os_bsize);
448         sb->s_blocksize = osfs->os_bsize;
449         sb->s_blocksize_bits = log2(osfs->os_bsize);
450         sb->s_magic = LL_SUPER_MAGIC;
451         sb->s_maxbytes = MAX_LFS_FILESIZE;
452         sbi->ll_namelen = osfs->os_namelen;
453         sbi->ll_mnt.mnt = current->fs->root.mnt;
454
455         if (test_bit(LL_SBI_USER_XATTR, sbi->ll_flags) &&
456             !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
457                 LCONSOLE_INFO("Disabling user_xattr feature because "
458                               "it is not supported on the server\n");
459                 clear_bit(LL_SBI_USER_XATTR, sbi->ll_flags);
460         }
461
462         if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
463 #ifdef SB_POSIXACL
464                 sb->s_flags |= SB_POSIXACL;
465 #endif
466                 set_bit(LL_SBI_ACL, sbi->ll_flags);
467         } else {
468                 LCONSOLE_INFO("client wants to enable acl, but the MDT does not support it\n");
469 #ifdef SB_POSIXACL
470                 sb->s_flags &= ~SB_POSIXACL;
471 #endif
472                 clear_bit(LL_SBI_ACL, sbi->ll_flags);
473         }
474
475         if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
476                 set_bit(LL_SBI_64BIT_HASH, sbi->ll_flags);
477
478         if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK)
479                 set_bit(LL_SBI_LAYOUT_LOCK, sbi->ll_flags);
480
481         if (obd_connect_has_secctx(data))
482                 set_bit(LL_SBI_FILE_SECCTX, sbi->ll_flags);
483
484         if (ll_sbi_has_encrypt(sbi) && !obd_connect_has_enc(data)) {
485                 if (ll_sbi_has_test_dummy_encryption(sbi))
486                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
487                                       sbi->ll_fsname,
488                                       sbi->ll_md_exp->exp_obd->obd_name);
489                 ll_sbi_set_encrypt(sbi, false);
490         }
491
492         if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
493                 if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
494                         LCONSOLE_INFO("%s: disabling xattr cache due to "
495                                       "unknown maximum xattr size.\n", dt);
496                 } else if (!sbi->ll_xattr_cache_set) {
497                         /* If xattr_cache was already set (to either 0 or 1)
498                          * while processing the llog, do not override it here. */
499                         set_bit(LL_SBI_XATTR_CACHE, sbi->ll_flags);
500                         sbi->ll_xattr_cache_enabled = 1;
501                 }
502         }
503
504         sbi->ll_dt_obd = class_name2obd(dt);
505         if (!sbi->ll_dt_obd) {
506                 CERROR("DT %s: not setup or attached\n", dt);
507                 GOTO(out_md_fid, err = -ENODEV);
508         }
509
510         /* Pass the client page size via ocd_grant_blkbits; the server should
511          * report back its backend blocksize for grant calculation purposes. */
512         data->ocd_grant_blkbits = PAGE_SHIFT;
513
514         /* indicate OST features supported by this client */
515         data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
516                                   OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
517                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
518                                   OBD_CONNECT_SRVLOCK |
519                                   OBD_CONNECT_AT | OBD_CONNECT_OSS_CAPA |
520                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
521                                   OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
522                                   OBD_CONNECT_EINPROGRESS |
523                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
524                                   OBD_CONNECT_LAYOUTLOCK |
525                                   OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK |
526                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_SHORTIO |
527                                   OBD_CONNECT_FLAGS2 | OBD_CONNECT_GRANT_SHRINK;
528         data->ocd_connect_flags2 = OBD_CONNECT2_LOCKAHEAD |
529                                    OBD_CONNECT2_INC_XID | OBD_CONNECT2_LSEEK |
530                                    OBD_CONNECT2_REP_MBITS;
531
532         if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_GRANT_PARAM))
533                 data->ocd_connect_flags |= OBD_CONNECT_GRANT_PARAM;
534
535         /* OBD_CONNECT_CKSUM should always be set, even if checksums are
536          * disabled by default, because it can still be enabled on the
537          * fly via /sys. As a consequence, we still need to come to an
538          * agreement on the supported algorithms at connect time
539          */
540         data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
541
542         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
543                 data->ocd_cksum_types = OBD_CKSUM_ADLER;
544         else
545                 data->ocd_cksum_types = obd_cksum_types_supported_client();
546
547 #ifdef HAVE_LRU_RESIZE_SUPPORT
548         data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
549 #endif
550         /* always ping even if server suppress_pings */
551         if (test_bit(LL_SBI_ALWAYS_PING, sbi->ll_flags))
552                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
553
554         if (ll_sbi_has_encrypt(sbi))
555                 obd_connect_set_enc(data);
556
557         CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d "
558                "ocd_grant: %d\n", data->ocd_connect_flags,
559                data->ocd_version, data->ocd_grant);
560
561         sbi->ll_dt_obd->obd_upcall.onu_owner = &sbi->ll_lco;
562         sbi->ll_dt_obd->obd_upcall.onu_upcall = cl_ocd_update;
563
564         data->ocd_brw_size = DT_MAX_BRW_SIZE;
565
566         err = obd_connect(NULL, &sbi->ll_dt_exp, sbi->ll_dt_obd,
567                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
568         if (err == -EBUSY) {
569                 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
570                                    "recovery, of which this client is not a "
571                                    "part.  Please wait for recovery to "
572                                    "complete, abort, or time out.\n", dt);
573                 GOTO(out_md, err);
574         } else if (err) {
575                 CERROR("%s: Cannot connect to %s: rc = %d\n",
576                        sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
577                 GOTO(out_md, err);
578         }
579
580         if (ll_sbi_has_encrypt(sbi) &&
581             !obd_connect_has_enc(&sbi->ll_dt_obd->u.lov.lov_ocd)) {
582                 if (ll_sbi_has_test_dummy_encryption(sbi))
583                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
584                                       sbi->ll_fsname, dt);
585                 ll_sbi_set_encrypt(sbi, false);
586         } else if (ll_sbi_has_test_dummy_encryption(sbi)) {
587                 LCONSOLE_WARN("Test dummy encryption mode enabled\n");
588         }
589
590         sbi->ll_dt_exp->exp_connect_data = *data;
591
592         /* Don't change value if it was specified in the config log */
593         if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages == -1) {
594                 sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
595                         max_t(unsigned long, SBI_DEFAULT_READ_AHEAD_WHOLE_MAX,
596                               (data->ocd_brw_size >> PAGE_SHIFT));
597                 if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages >
598                     sbi->ll_ra_info.ra_max_pages_per_file)
599                         sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
600                                 sbi->ll_ra_info.ra_max_pages_per_file;
601         }
602
603         err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
604                            LUSTRE_SEQ_METADATA);
605         if (err) {
606                 CERROR("%s: Can't init data layer FID infrastructure, "
607                        "rc = %d\n", sbi->ll_dt_exp->exp_obd->obd_name, err);
608                 GOTO(out_dt, err);
609         }
610
611         mutex_lock(&sbi->ll_lco.lco_lock);
612         sbi->ll_lco.lco_flags = data->ocd_connect_flags;
613         sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
614         sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
615         mutex_unlock(&sbi->ll_lco.lco_lock);
616
617         fid_zero(&sbi->ll_root_fid);
618         err = md_get_root(sbi->ll_md_exp, get_mount_fileset(sb),
619                            &sbi->ll_root_fid);
620         if (err) {
621                 CERROR("cannot mds_connect: rc = %d\n", err);
622                 GOTO(out_lock_cn_cb, err);
623         }
624         if (!fid_is_sane(&sbi->ll_root_fid)) {
625                 CERROR("%s: Invalid root fid "DFID" during mount\n",
626                        sbi->ll_md_exp->exp_obd->obd_name,
627                        PFID(&sbi->ll_root_fid));
628                 GOTO(out_lock_cn_cb, err = -EINVAL);
629         }
630         CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
631
632         sb->s_op = &lustre_super_operations;
633         sb->s_xattr = ll_xattr_handlers;
634 #if THREAD_SIZE >= 8192 /*b=17630*/
635         sb->s_export_op = &lustre_export_operations;
636 #endif
637 #ifdef HAVE_LUSTRE_CRYPTO
638         llcrypt_set_ops(sb, &lustre_cryptops);
639 #endif
640
641         /* make root inode
642          * XXX: move this to after cbd setup? */
643         valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE;
644         if (test_bit(LL_SBI_ACL, sbi->ll_flags))
645                 valid |= OBD_MD_FLACL;
646
647         OBD_ALLOC_PTR(op_data);
648         if (op_data == NULL)
649                 GOTO(out_lock_cn_cb, err = -ENOMEM);
650
651         op_data->op_fid1 = sbi->ll_root_fid;
652         op_data->op_mode = 0;
653         op_data->op_valid = valid;
654
655         err = md_getattr(sbi->ll_md_exp, op_data, &request);
656
657         OBD_FREE_PTR(op_data);
658         if (err) {
659                 CERROR("%s: md_getattr failed for root: rc = %d\n",
660                        sbi->ll_md_exp->exp_obd->obd_name, err);
661                 GOTO(out_lock_cn_cb, err);
662         }
663
664         err = md_get_lustre_md(sbi->ll_md_exp, &request->rq_pill,
665                                sbi->ll_dt_exp, sbi->ll_md_exp, &lmd);
666         if (err) {
667                 CERROR("failed to understand root inode md: rc = %d\n", err);
668                 ptlrpc_req_finished(request);
669                 GOTO(out_lock_cn_cb, err);
670         }
671
672         LASSERT(fid_is_sane(&sbi->ll_root_fid));
673         api32 = test_bit(LL_SBI_32BIT_API, sbi->ll_flags);
674         root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid, api32), &lmd);
675         md_free_lustre_md(sbi->ll_md_exp, &lmd);
676         ptlrpc_req_finished(request);
677
678         if (IS_ERR(root)) {
679                 lmd_clear_acl(&lmd);
680                 err = IS_ERR(root) ? PTR_ERR(root) : -EBADF;
681                 root = NULL;
682                 CERROR("%s: bad ll_iget() for root: rc = %d\n",
683                        sbi->ll_fsname, err);
684                 GOTO(out_root, err);
685         }
686
687         checksum = test_bit(LL_SBI_CHECKSUM, sbi->ll_flags);
688         if (sbi->ll_checksum_set) {
689                 err = obd_set_info_async(NULL, sbi->ll_dt_exp,
690                                          sizeof(KEY_CHECKSUM), KEY_CHECKSUM,
691                                          sizeof(checksum), &checksum, NULL);
692                 if (err) {
693                         CERROR("%s: Set checksum failed: rc = %d\n",
694                                sbi->ll_dt_exp->exp_obd->obd_name, err);
695                         GOTO(out_root, err);
696                 }
697         }
698         cl_sb_init(sb);
699
700         sb->s_root = d_make_root(root);
701         if (sb->s_root == NULL) {
702                 err = -ENOMEM;
703                 CERROR("%s: can't make root dentry: rc = %d\n",
704                        sbi->ll_fsname, err);
705                 GOTO(out_root, err);
706         }
707
708         sbi->ll_sdev_orig = sb->s_dev;
709
710         /* We set sb->s_dev equal on all lustre clients in order to support
711          * NFS export clustering.  NFSD requires that the FSID be the same
712          * on all clients. */
713         /* s_dev is also used in lt_compare() to compare two fs, but that is
714          * only a node-local comparison. */
715         uuid = obd_get_uuid(sbi->ll_md_exp);
716         if (uuid != NULL)
717                 sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
718
719         if (data != NULL)
720                 OBD_FREE_PTR(data);
721         if (osfs != NULL)
722                 OBD_FREE_PTR(osfs);
723
724         if (sbi->ll_dt_obd) {
725                 err = sysfs_create_link(&sbi->ll_kset.kobj,
726                                         &sbi->ll_dt_obd->obd_kset.kobj,
727                                         sbi->ll_dt_obd->obd_type->typ_name);
728                 if (err < 0) {
729                         CERROR("%s: could not register %s in llite: rc = %d\n",
730                                dt, sbi->ll_fsname, err);
731                         err = 0;
732                 }
733         }
734
735         if (sbi->ll_md_obd) {
736                 err = sysfs_create_link(&sbi->ll_kset.kobj,
737                                         &sbi->ll_md_obd->obd_kset.kobj,
738                                         sbi->ll_md_obd->obd_type->typ_name);
739                 if (err < 0) {
740                         CERROR("%s: could not register %s in llite: rc = %d\n",
741                                md, sbi->ll_fsname, err);
742                         err = 0;
743                 }
744         }
745
746         RETURN(err);
747 out_root:
748         iput(root);
749 out_lock_cn_cb:
750         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
751 out_dt:
752         obd_disconnect(sbi->ll_dt_exp);
753         sbi->ll_dt_exp = NULL;
754         sbi->ll_dt_obd = NULL;
755 out_md_fid:
756         obd_fid_fini(sbi->ll_md_exp->exp_obd);
757 out_md:
758         obd_disconnect(sbi->ll_md_exp);
759         sbi->ll_md_exp = NULL;
760         sbi->ll_md_obd = NULL;
761 out:
762         if (data != NULL)
763                 OBD_FREE_PTR(data);
764         if (osfs != NULL)
765                 OBD_FREE_PTR(osfs);
766         return err;
767 }
768
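/*
 * Query the maximum LOV EA size from the data export, then the maximum
 * (LMV) EA size from the metadata export; both are written to *lmmsize,
 * with the MDC value left there on return.
 */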
769 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
770 {
771         int size, rc;
772
773         size = sizeof(*lmmsize);
774         rc = obd_get_info(NULL, sbi->ll_dt_exp, sizeof(KEY_MAX_EASIZE),
775                           KEY_MAX_EASIZE, &size, lmmsize);
776         if (rc != 0) {
777                 CERROR("%s: cannot get max LOV EA size: rc = %d\n",
778                        sbi->ll_dt_exp->exp_obd->obd_name, rc);
779                 RETURN(rc);
780         }
781
782         CDEBUG(D_INFO, "max LOV ea size: %d\n", *lmmsize);
783
784         size = sizeof(int);
785         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
786                           KEY_MAX_EASIZE, &size, lmmsize);
787         if (rc)
788                 CERROR("Get max mdsize error rc %d\n", rc);
789
790         CDEBUG(D_INFO, "max LMV ea size: %d\n", *lmmsize);
791
792         RETURN(rc);
793 }
794
795 /**
796  * Get the value of the default_easize parameter.
797  *
798  * \see client_obd::cl_default_mds_easize
799  *
800  * \param[in] sbi       superblock info for this filesystem
801  * \param[out] lmmsize  pointer to storage location for value
802  *
803  * \retval 0            on success
804  * \retval negative     negated errno on failure
805  */
806 int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
807 {
808         int size, rc;
809
810         size = sizeof(int);
811         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
812                          KEY_DEFAULT_EASIZE, &size, lmmsize);
813         if (rc)
814                 CERROR("Get default mdsize error rc %d\n", rc);
815
816         RETURN(rc);
817 }
818
819 /**
820  * Set the default_easize parameter to the given value.
821  *
822  * \see client_obd::cl_default_mds_easize
823  *
824  * \param[in] sbi       superblock info for this filesystem
825  * \param[in] lmmsize   the size to set
826  *
827  * \retval 0            on success
828  * \retval negative     negated errno on failure
829  */
830 int ll_set_default_mdsize(struct ll_sb_info *sbi, int lmmsize)
831 {
832         int rc;
833
834         if (lmmsize < sizeof(struct lov_mds_md) ||
835             lmmsize > OBD_MAX_DEFAULT_EA_SIZE)
836                 return -EINVAL;
837
838         rc = obd_set_info_async(NULL, sbi->ll_md_exp,
839                                 sizeof(KEY_DEFAULT_EASIZE), KEY_DEFAULT_EASIZE,
840                                 sizeof(int), &lmmsize, NULL);
841
842         RETURN(rc);
843 }
844
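/*
 * Undo client_common_fill_super(): release cl_object state, shut down
 * the FID clients, disconnect from the data and metadata exports, and
 * unregister the per-super debugfs entries.
 */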
845 static void client_common_put_super(struct super_block *sb)
846 {
847         struct ll_sb_info *sbi = ll_s2sbi(sb);
848         ENTRY;
849
850         cl_sb_fini(sb);
851
852         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
853         obd_disconnect(sbi->ll_dt_exp);
854         sbi->ll_dt_exp = NULL;
855
856         ll_debugfs_unregister_super(sb);
857
858         obd_fid_fini(sbi->ll_md_exp->exp_obd);
859         obd_disconnect(sbi->ll_md_exp);
860         sbi->ll_md_exp = NULL;
861
862         EXIT;
863 }
864
865 void ll_kill_super(struct super_block *sb)
866 {
867         struct ll_sb_info *sbi;
868         ENTRY;
869
870         /* sb not initialized? */
871         if (!(sb->s_flags & SB_ACTIVE))
872                 return;
873
874         sbi = ll_s2sbi(sb);
875         /* We need to restore s_dev, which was changed for clustered NFS,
876          * before put_super because newer kernels cache s_dev, and changing
877          * sb->s_dev in put_super does not affect the real device removal */
878         if (sbi) {
879                 sb->s_dev = sbi->ll_sdev_orig;
880
881                 /* wait for running statahead threads to quit */
882                 while (atomic_read(&sbi->ll_sa_running) > 0)
883                         schedule_timeout_uninterruptible(
884                                 cfs_time_seconds(1) >> 3);
885         }
886
887         EXIT;
888 }
889
890 /* Since this table is used by ll_sbi_flags_seq_show(), make sure
891  * the name you want displayed for a token that is listed more than
892  * once below appears first.  For example, we want
893  * "checksum" displayed, not "nochecksum",
894  * for the sbi_flags.
895  */
896 static const match_table_t ll_sbi_flags_name = {
897         {LL_SBI_NOLCK,                  "nolock"},
898         {LL_SBI_CHECKSUM,               "checksum"},
899         {LL_SBI_CHECKSUM,               "nochecksum"},
900         {LL_SBI_LOCALFLOCK,             "localflock"},
901         {LL_SBI_FLOCK,                  "flock"},
902         {LL_SBI_FLOCK,                  "noflock"},
903         {LL_SBI_USER_XATTR,             "user_xattr"},
904         {LL_SBI_USER_XATTR,             "nouser_xattr"},
905         {LL_SBI_LRU_RESIZE,             "lruresize"},
906         {LL_SBI_LRU_RESIZE,             "nolruresize"},
907         {LL_SBI_LAZYSTATFS,             "lazystatfs"},
908         {LL_SBI_LAZYSTATFS,             "nolazystatfs"},
909         {LL_SBI_32BIT_API,              "32bitapi"},
910         {LL_SBI_USER_FID2PATH,          "user_fid2path"},
911         {LL_SBI_USER_FID2PATH,          "nouser_fid2path"},
912         {LL_SBI_VERBOSE,                "verbose"},
913         {LL_SBI_VERBOSE,                "noverbose"},
914         {LL_SBI_ALWAYS_PING,            "always_ping"},
915         {LL_SBI_TEST_DUMMY_ENCRYPTION,  "test_dummy_encryption"},
916         {LL_SBI_ENCRYPT,                "encrypt"},
917         {LL_SBI_ENCRYPT,                "noencrypt"},
918         {LL_SBI_FOREIGN_SYMLINK,        "foreign_symlink=%s"},
919         {LL_SBI_NUM_MOUNT_OPT,          NULL},
920
921         {LL_SBI_ACL,                    "acl"},
922         {LL_SBI_AGL_ENABLED,            "agl"},
923         {LL_SBI_64BIT_HASH,             "64bit_hash"},
924         {LL_SBI_LAYOUT_LOCK,            "layout"},
925         {LL_SBI_XATTR_CACHE,            "xattr_cache"},
926         {LL_SBI_NOROOTSQUASH,           "norootsquash"},
927         {LL_SBI_FAST_READ,              "fast_read"},
928         {LL_SBI_FILE_SECCTX,            "file_secctx"},
929         {LL_SBI_TINY_WRITE,             "tiny_write"},
930         {LL_SBI_FILE_HEAT,              "file_heat"},
931         {LL_SBI_PARALLEL_DIO,           "parallel_dio"},
932 };
933
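/*
 * Show the currently set sbi flags using the first matching name in
 * ll_sbi_flags_name[], producing a space-separated list such as
 * "checksum flock lruresize ..." (illustrative output).
 */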
934 int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
935 {
936         struct super_block *sb = m->private;
937         int i;
938
939         for (i = 0; i < LL_SBI_NUM_FLAGS; i++) {
940                 int j;
941
942                 if (!test_bit(i, ll_s2sbi(sb)->ll_flags))
943                         continue;
944
945                 for (j = 0; j < ARRAY_SIZE(ll_sbi_flags_name); j++) {
946                         if (ll_sbi_flags_name[j].token == i &&
947                             ll_sbi_flags_name[j].pattern) {
948                                 seq_printf(m, "%s ",
949                                            ll_sbi_flags_name[j].pattern);
950                                 break;
951                         }
952                 }
953         }
954         seq_puts(m, "\b\n");
955         return 0;
956 }
957
958 /* non-client-specific mount options are parsed in lmd_parse */
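/*
 * Illustrative example (not from the source): a mount such as
 *   mount -t lustre -o flock,nouser_xattr,always_ping <mgsnode>:/<fsname> /mnt/client
 * reaches this function with options "flock,nouser_xattr,always_ping";
 * each comma-separated token is matched against ll_sbi_flags_name above
 * and sets or clears the corresponding LL_SBI_* bit.
 */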
959 static int ll_options(char *options, struct super_block *sb)
960 {
961         struct ll_sb_info *sbi = ll_s2sbi(sb);
962         char *s2, *s1, *opts;
963
964         ENTRY;
965         if (!options)
966                 RETURN(0);
967
968         /* Don't stomp on lmd_opts */
969         opts = kstrdup(options, GFP_KERNEL);
970         if (!opts)
971                 RETURN(-ENOMEM);
972         s1 = opts;
973         s2 = opts;
974
975         CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
976
977         while ((s1 = strsep(&opts, ",")) != NULL) {
978                 substring_t args[MAX_OPT_ARGS];
979                 bool turn_off = false;
980                 int token;
981
982                 if (!*s1)
983                         continue;
984
985                 CDEBUG(D_SUPER, "next opt=%s\n", s1);
986
987                 if (strncmp(s1, "no", 2) == 0)
988                         turn_off = true;
989
990                 /*
991                  * Initialize args struct so we know whether arg was
992                  * found; some options take optional arguments.
993                  */
994                 args[0].to = NULL;
995                 args[0].from = NULL;
996                 token = match_token(s1, ll_sbi_flags_name, args);
997                 if (token == LL_SBI_NUM_MOUNT_OPT) {
998                         if (match_wildcard("context", s1) ||
999                             match_wildcard("fscontext", s1) ||
1000                             match_wildcard("defcontext", s1) ||
1001                             match_wildcard("rootcontext", s1))
1002                                 continue;
1003
1004                         LCONSOLE_ERROR_MSG(0x152,
1005                                            "Unknown option '%s', won't mount.\n",
1006                                            s1);
1007                         RETURN(-EINVAL);
1008                 }
1009
1010                 switch (token) {
1011                 case LL_SBI_NOLCK:
1012                 case LL_SBI_32BIT_API:
1013                 case LL_SBI_64BIT_HASH:
1014                 case LL_SBI_ALWAYS_PING:
1015                         set_bit(token, sbi->ll_flags);
1016                         break;
1017
1018                 case LL_SBI_FLOCK:
1019                         clear_bit(LL_SBI_LOCALFLOCK, sbi->ll_flags);
1020                         if (turn_off)
1021                                 clear_bit(LL_SBI_FLOCK, sbi->ll_flags);
1022                         else
1023                                 set_bit(token, sbi->ll_flags);
1024                         break;
1025
1026                 case LL_SBI_LOCALFLOCK:
1027                         clear_bit(LL_SBI_FLOCK, sbi->ll_flags);
1028                         set_bit(token, sbi->ll_flags);
1029                         break;
1030
1031                 case LL_SBI_CHECKSUM:
1032                         sbi->ll_checksum_set = 1;
1033                         /* fall through */
1034                 case LL_SBI_USER_XATTR:
1035                 case LL_SBI_USER_FID2PATH:
1036                 case LL_SBI_LRU_RESIZE:
1037                 case LL_SBI_LAZYSTATFS:
1038                 case LL_SBI_VERBOSE:
1039                         if (turn_off)
1040                                 clear_bit(token, sbi->ll_flags);
1041                         else
1042                                 set_bit(token, sbi->ll_flags);
1043                         break;
1044                 case LL_SBI_TEST_DUMMY_ENCRYPTION: {
1045 #ifdef HAVE_LUSTRE_CRYPTO
1046                         set_bit(token, sbi->ll_flags);
1047 #else
1048                         LCONSOLE_WARN("Test dummy encryption mount option ignored: encryption not supported\n");
1049 #endif
1050                         break;
1051                 }
1052                 case LL_SBI_ENCRYPT:
1053 #ifdef HAVE_LUSTRE_CRYPTO
1054                         if (turn_off)
1055                                 clear_bit(token, sbi->ll_flags);
1056                         else
1057                                 set_bit(token, sbi->ll_flags);
1058 #else
1059                         LCONSOLE_WARN("noencrypt or encrypt mount option ignored: encryption not supported\n");
1060 #endif
1061                         break;
1062                 case LL_SBI_FOREIGN_SYMLINK:
1063                         /* non-default prefix provided ? */
1064                         if (args->from) {
1065                                 size_t old_len;
1066                                 char *old;
1067
1068                                 /* path must be absolute */
1069                                 if (args->from[0] != '/') {
1070                                         LCONSOLE_ERROR_MSG(0x152,
1071                                                            "foreign prefix '%s' must be an absolute path\n",
1072                                                            args->from);
1073                                         RETURN(-EINVAL);
1074                                 }
1075
1076                                 old_len = sbi->ll_foreign_symlink_prefix_size;
1077                                 old = sbi->ll_foreign_symlink_prefix;
1078                                 /* alloc for path length and '\0' */
1079                                 sbi->ll_foreign_symlink_prefix = match_strdup(args);
1080                                 if (!sbi->ll_foreign_symlink_prefix) {
1081                                         /* restore previous */
1082                                         sbi->ll_foreign_symlink_prefix = old;
1083                                         sbi->ll_foreign_symlink_prefix_size =
1084                                                 old_len;
1085                                         RETURN(-ENOMEM);
1086                                 }
1087                                 sbi->ll_foreign_symlink_prefix_size =
1088                                         args->to - args->from + 1;
1089                                 OBD_ALLOC_POST(sbi->ll_foreign_symlink_prefix,
1090                                                sbi->ll_foreign_symlink_prefix_size,
1091                                                "kmalloced");
1092                                 if (old)
1093                                         OBD_FREE(old, old_len);
1094
1095                                 /* enable foreign symlink support */
1096                                 set_bit(token, sbi->ll_flags);
1097                         } else {
1098                                 LCONSOLE_ERROR_MSG(0x152,
1099                                                    "invalid %s option\n", s1);
1100                         }
1101                 /* fall through */
1102                 default:
1103                         break;
1104                 }
1105         }
1106         kfree(opts);
1107         RETURN(0);
1108 }
1109
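/*
 * Initialize a freshly allocated ll_inode_info.  Common fields (locks,
 * open handle counters, xattr and layout state) are set up first; the
 * rest depends on whether the inode is a directory (statahead state) or
 * a regular file (size/truncate locks, readahead and AGL lists, file
 * heat and PCC state).
 */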
1110 void ll_lli_init(struct ll_inode_info *lli)
1111 {
1112         lli->lli_inode_magic = LLI_INODE_MAGIC;
1113         lli->lli_flags = 0;
1114         rwlock_init(&lli->lli_lock);
1115         lli->lli_posix_acl = NULL;
1116         /* Do not set lli_fid, it has been initialized already. */
1117         fid_zero(&lli->lli_pfid);
1118         lli->lli_mds_read_och = NULL;
1119         lli->lli_mds_write_och = NULL;
1120         lli->lli_mds_exec_och = NULL;
1121         lli->lli_open_fd_read_count = 0;
1122         lli->lli_open_fd_write_count = 0;
1123         lli->lli_open_fd_exec_count = 0;
1124         mutex_init(&lli->lli_och_mutex);
1125         spin_lock_init(&lli->lli_agl_lock);
1126         spin_lock_init(&lli->lli_layout_lock);
1127         ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
1128         lli->lli_clob = NULL;
1129
1130         init_rwsem(&lli->lli_xattrs_list_rwsem);
1131         mutex_init(&lli->lli_xattrs_enq_lock);
1132
1133         LASSERT(lli->lli_vfs_inode.i_mode != 0);
1134         if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
1135                 lli->lli_opendir_key = NULL;
1136                 lli->lli_sai = NULL;
1137                 spin_lock_init(&lli->lli_sa_lock);
1138                 lli->lli_opendir_pid = 0;
1139                 lli->lli_sa_enabled = 0;
1140                 init_rwsem(&lli->lli_lsm_sem);
1141         } else {
1142                 mutex_init(&lli->lli_size_mutex);
1143                 mutex_init(&lli->lli_setattr_mutex);
1144                 lli->lli_symlink_name = NULL;
1145                 ll_trunc_sem_init(&lli->lli_trunc_sem);
1146                 range_lock_tree_init(&lli->lli_write_tree);
1147                 init_rwsem(&lli->lli_glimpse_sem);
1148                 lli->lli_glimpse_time = ktime_set(0, 0);
1149                 INIT_LIST_HEAD(&lli->lli_agl_list);
1150                 lli->lli_agl_index = 0;
1151                 lli->lli_async_rc = 0;
1152                 spin_lock_init(&lli->lli_heat_lock);
1153                 obd_heat_clear(lli->lli_heat_instances, OBD_HEAT_COUNT);
1154                 lli->lli_heat_flags = 0;
1155                 mutex_init(&lli->lli_pcc_lock);
1156                 lli->lli_pcc_state = PCC_STATE_FL_NONE;
1157                 lli->lli_pcc_inode = NULL;
1158                 lli->lli_pcc_dsflags = PCC_DATASET_INVALID;
1159                 lli->lli_pcc_generation = 0;
1160                 mutex_init(&lli->lli_group_mutex);
1161                 lli->lli_group_users = 0;
1162                 lli->lli_group_gid = 0;
1163         }
1164         mutex_init(&lli->lli_layout_mutex);
1165         memset(lli->lli_jobid, 0, sizeof(lli->lli_jobid));
1166         /* ll_cl_context initialize */
1167         INIT_LIST_HEAD(&lli->lli_lccs);
1168 }
1169
1170 #define MAX_STRING_SIZE 128
1171
1172 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1173
1174 #define LSI_BDI_INITIALIZED     0x00400000
1175
1176 #ifndef HAVE_BDI_CAP_MAP_COPY
1177 # define BDI_CAP_MAP_COPY       0
1178 #endif
1179
1180 static int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
1181 {
1182         struct  lustre_sb_info *lsi = s2lsi(sb);
1183         char buf[MAX_STRING_SIZE];
1184         va_list args;
1185         int err;
1186
1187         err = bdi_init(&lsi->lsi_bdi);
1188         if (err)
1189                 return err;
1190
1191         lsi->lsi_flags |= LSI_BDI_INITIALIZED;
1192         lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY;
1193         lsi->lsi_bdi.name = "lustre";
1194         va_start(args, fmt);
1195         vsnprintf(buf, MAX_STRING_SIZE, fmt, args);
1196         va_end(args);
1197         err = bdi_register(&lsi->lsi_bdi, NULL, "%s", buf);
1199         if (!err)
1200                 sb->s_bdi = &lsi->lsi_bdi;
1201
1202         return err;
1203 }
1204 #endif /* !HAVE_SUPER_SETUP_BDI_NAME */
1205
1206 int ll_fill_super(struct super_block *sb)
1207 {
1208         struct  lustre_profile *lprof = NULL;
1209         struct  lustre_sb_info *lsi = s2lsi(sb);
1210         struct  ll_sb_info *sbi = NULL;
1211         char    *dt = NULL, *md = NULL;
1212         char    *profilenm = get_profile_name(sb);
1213         struct config_llog_instance *cfg;
1214         /* %p for void* in printf needs 16+2 characters: 0xffffffffffffffff */
1215         const int instlen = LUSTRE_MAXINSTANCE + 2;
1216         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1217         char name[MAX_STRING_SIZE];
1218         int md_len = 0;
1219         int dt_len = 0;
1220         uuid_t uuid;
1221         char *ptr;
1222         int len;
1223         int err;
1224
1225         ENTRY;
1226         /* for ASLR, to map between cfg_instance and hashed ptr */
1227         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1228                profilenm, cfg_instance, sb);
1229
1230         OBD_RACE(OBD_FAIL_LLITE_RACE_MOUNT);
1231
1232         OBD_ALLOC_PTR(cfg);
1233         if (cfg == NULL)
1234                 GOTO(out_free_cfg, err = -ENOMEM);
1235
1236         /* client additional sb info */
1237         lsi->lsi_llsbi = sbi = ll_init_sbi();
1238         if (IS_ERR(sbi))
1239                 GOTO(out_free_cfg, err = PTR_ERR(sbi));
1240
1241         err = ll_options(lsi->lsi_lmd->lmd_opts, sb);
1242         if (err)
1243                 GOTO(out_free_cfg, err);
1244
1245         /* kernels >= 2.6.38 store dentry operations in sb->s_d_op. */
1246         sb->s_d_op = &ll_d_ops;
1247
1248         /* UUID handling */
1249         generate_random_uuid(uuid.b);
1250         snprintf(sbi->ll_sb_uuid.uuid, sizeof(sbi->ll_sb_uuid), "%pU", uuid.b);
1251
1252         CDEBUG(D_CONFIG, "llite sb uuid: %s\n", sbi->ll_sb_uuid.uuid);
1253
1254         /* Get fsname */
1255         len = strlen(profilenm);
1256         ptr = strrchr(profilenm, '-');
1257         if (ptr && (strcmp(ptr, "-client") == 0))
1258                 len -= 7;
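        /* e.g. a profile name of "lustre-client" yields fsname "lustre" */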
1259
1260         if (len > LUSTRE_MAXFSNAME) {
1261                 if (unlikely(len >= MAX_STRING_SIZE))
1262                         len = MAX_STRING_SIZE - 1;
1263                 strncpy(name, profilenm, len);
1264                 name[len] = '\0';
1265                 err = -ENAMETOOLONG;
1266                 CERROR("%s: fsname longer than %u characters: rc = %d\n",
1267                        name, LUSTRE_MAXFSNAME, err);
1268                 GOTO(out_free_cfg, err);
1269         }
1270         strncpy(sbi->ll_fsname, profilenm, len);
1271         sbi->ll_fsname[len] = '\0';
1272
1273         /* Mount info */
1274         snprintf(name, sizeof(name), "%.*s-%016lx", len,
1275                  profilenm, cfg_instance);
1276
1277         err = super_setup_bdi_name(sb, "%s", name);
1278         if (err)
1279                 GOTO(out_free_cfg, err);
1280
1281         /* Call ll_debugfs_register_super() before lustre_process_log()
1282          * so that "llite.*.*" params can be processed correctly.
1283          */
1284         err = ll_debugfs_register_super(sb, name);
1285         if (err < 0) {
1286                 CERROR("%s: could not register mountpoint in llite: rc = %d\n",
1287                        sbi->ll_fsname, err);
1288                 err = 0;
1289         }
1290
1291         /* The cfg_instance is a value unique to this super, in case some
1292          * joker tries to mount the same fs at two mount points.
1293          */
1294         cfg->cfg_instance = cfg_instance;
1295         cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
1296         cfg->cfg_callback = class_config_llog_handler;
1297         cfg->cfg_sub_clds = CONFIG_SUB_CLIENT;
1298         /* set up client obds */
1299         err = lustre_process_log(sb, profilenm, cfg);
1300         if (err < 0)
1301                 GOTO(out_debugfs, err);
1302
1303         /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
1304         lprof = class_get_profile(profilenm);
1305         if (lprof == NULL) {
1306                 LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be"
1307                                    " read from the MGS.  Does that filesystem "
1308                                    "exist?\n", profilenm);
1309                 GOTO(out_debugfs, err = -EINVAL);
1310         }
1311         CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
1312                lprof->lp_md, lprof->lp_dt);
1313
1314         dt_len = strlen(lprof->lp_dt) + instlen + 2;
1315         OBD_ALLOC(dt, dt_len);
1316         if (!dt)
1317                 GOTO(out_profile, err = -ENOMEM);
1318         snprintf(dt, dt_len - 1, "%s-%016lx", lprof->lp_dt, cfg_instance);
1319
1320         md_len = strlen(lprof->lp_md) + instlen + 2;
1321         OBD_ALLOC(md, md_len);
1322         if (!md)
1323                 GOTO(out_free_dt, err = -ENOMEM);
1324         snprintf(md, md_len - 1, "%s-%016lx", lprof->lp_md, cfg_instance);
1325
1326         /* connections, registrations, sb setup */
1327         err = client_common_fill_super(sb, md, dt);
1328         if (err < 0)
1329                 GOTO(out_free_md, err);
1330
1331         sbi->ll_client_common_fill_super_succeeded = 1;
1332
1333 out_free_md:
1334         if (md)
1335                 OBD_FREE(md, md_len);
1336 out_free_dt:
1337         if (dt)
1338                 OBD_FREE(dt, dt_len);
1339 out_profile:
1340         if (lprof)
1341                 class_put_profile(lprof);
1342 out_debugfs:
1343         if (err < 0)
1344                 ll_debugfs_unregister_super(sb);
1345 out_free_cfg:
1346         if (cfg)
1347                 OBD_FREE_PTR(cfg);
1348
1349         if (err)
1350                 ll_put_super(sb);
1351         else if (test_bit(LL_SBI_VERBOSE, sbi->ll_flags))
1352                 LCONSOLE_WARN("Mounted %s\n", profilenm);
1353         RETURN(err);
1354 } /* ll_fill_super */
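/*
 * Illustrative sketch (not part of the original file): how ll_fill_super()
 * above derives the per-mount instance name from a client profile such as
 * "lustrefs-client".  The helper name and parameters are hypothetical and
 * only mirror the logic above.
 */
static void example_build_instance_name(const char *profilenm,
                                        unsigned long cfg_instance,
                                        char *name, size_t namelen)
{
        int len = strlen(profilenm);
        const char *ptr = strrchr(profilenm, '-');

        /* strip a trailing "-client" so only the fsname remains */
        if (ptr && strcmp(ptr, "-client") == 0)
                len -= 7;

        /* e.g. "lustrefs-0000000012345678" uniquely names this mount */
        snprintf(name, namelen, "%.*s-%016lx", len, profilenm, cfg_instance);
}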
1355
1356 void ll_put_super(struct super_block *sb)
1357 {
1358         struct config_llog_instance cfg, params_cfg;
1359         struct obd_device *obd;
1360         struct lustre_sb_info *lsi = s2lsi(sb);
1361         struct ll_sb_info *sbi = ll_s2sbi(sb);
1362         char *profilenm = get_profile_name(sb);
1363         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1364         long ccc_count;
1365         int next, force = 1, rc = 0;
1366         ENTRY;
1367
1368         if (IS_ERR(sbi))
1369                 GOTO(out_no_sbi, 0);
1370
1371         /* Should replace instance_id with something better for ASLR */
1372         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1373                profilenm, cfg_instance, sb);
1374
1375         cfg.cfg_instance = cfg_instance;
1376         lustre_end_log(sb, profilenm, &cfg);
1377
1378         params_cfg.cfg_instance = cfg_instance;
1379         lustre_end_log(sb, PARAMS_FILENAME, &params_cfg);
1380
1381         if (sbi->ll_md_exp) {
1382                 obd = class_exp2obd(sbi->ll_md_exp);
1383                 if (obd)
1384                         force = obd->obd_force;
1385         }
1386
1387         /* Wait for unstable pages to be committed to stable storage */
1388         if (force == 0) {
1389                 rc = l_wait_event_abortable(
1390                         sbi->ll_cache->ccc_unstable_waitq,
1391                         atomic_long_read(&sbi->ll_cache->ccc_unstable_nr) == 0);
1392         }
1393
1394         ccc_count = atomic_long_read(&sbi->ll_cache->ccc_unstable_nr);
1395         if (force == 0 && rc != -ERESTARTSYS)
1396                 LASSERTF(ccc_count == 0, "count: %li\n", ccc_count);
1397
1398         /* We need to set force before the lov_disconnect in
1399          * lustre_common_put_super, since lov_disconnect cleans up the OSCs too.
1400          */
1401         if (force) {
1402                 next = 0;
1403                 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
1404                                                      &next)) != NULL) {
1405                         obd->obd_force = force;
1406                 }
1407         }
1408
1409         if (sbi->ll_client_common_fill_super_succeeded) {
1410                 /* Only if client_common_fill_super succeeded */
1411                 client_common_put_super(sb);
1412         }
1413
1414         next = 0;
1415         while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)))
1416                 class_manual_cleanup(obd);
1417
1418         if (test_bit(LL_SBI_VERBOSE, sbi->ll_flags))
1419                 LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");
1420
1421         if (profilenm)
1422                 class_del_profile(profilenm);
1423
1424 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1425         if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
1426                 bdi_destroy(&lsi->lsi_bdi);
1427                 lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
1428         }
1429 #endif
1430
1431         ll_free_sbi(sb);
1432         lsi->lsi_llsbi = NULL;
1433 out_no_sbi:
1434         lustre_common_put_super(sb);
1435
1436         cl_env_cache_purge(~0);
1437
1438         EXIT;
1439 } /* ll_put_super */
1440
1441 struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
1442 {
1443         struct inode *inode = NULL;
1444
1445         /* NOTE: we depend on atomic igrab() -bzzz */
1446         lock_res_and_lock(lock);
1447         if (lock->l_resource->lr_lvb_inode) {
1448                 struct ll_inode_info * lli;
1449                 lli = ll_i2info(lock->l_resource->lr_lvb_inode);
1450                 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1451                         inode = igrab(lock->l_resource->lr_lvb_inode);
1452                 } else {
1453                         inode = lock->l_resource->lr_lvb_inode;
1454                         LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ?  D_INFO :
1455                                          D_WARNING, lock, "lr_lvb_inode %p is "
1456                                          "bogus: magic %08x",
1457                                          lock->l_resource->lr_lvb_inode,
1458                                          lli->lli_inode_magic);
1459                         inode = NULL;
1460                 }
1461         }
1462         unlock_res_and_lock(lock);
1463         return inode;
1464 }
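/*
 * Illustrative sketch (not part of the original file): a hypothetical caller
 * of ll_inode_from_resource_lock() above.  The function returns the result
 * of igrab(), so the caller owns a reference and must drop it with iput();
 * NULL means no (or a bogus) inode was attached to the resource.
 */
static void example_use_resource_inode(struct ldlm_lock *lock)
{
        struct inode *inode = ll_inode_from_resource_lock(lock);

        if (inode) {
                CDEBUG(D_INODE, "resource inode "DFID"\n",
                       PFID(ll_inode2fid(inode)));
                iput(inode);
        }
}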
1465
1466 void ll_dir_clear_lsm_md(struct inode *inode)
1467 {
1468         struct ll_inode_info *lli = ll_i2info(inode);
1469
1470         LASSERT(S_ISDIR(inode->i_mode));
1471
1472         if (lli->lli_lsm_md) {
1473                 lmv_free_memmd(lli->lli_lsm_md);
1474                 lli->lli_lsm_md = NULL;
1475         }
1476
1477         if (lli->lli_default_lsm_md) {
1478                 lmv_free_memmd(lli->lli_default_lsm_md);
1479                 lli->lli_default_lsm_md = NULL;
1480         }
1481 }
1482
1483 static struct inode *ll_iget_anon_dir(struct super_block *sb,
1484                                       const struct lu_fid *fid,
1485                                       struct lustre_md *md)
1486 {
1487         struct ll_sb_info *sbi = ll_s2sbi(sb);
1488         struct ll_inode_info *lli;
1489         struct mdt_body *body = md->body;
1490         struct inode *inode;
1491         ino_t ino;
1492
1493         ENTRY;
1494
1495         LASSERT(md->lmv);
1496         ino = cl_fid_build_ino(fid, test_bit(LL_SBI_32BIT_API, sbi->ll_flags));
1497         inode = iget_locked(sb, ino);
1498         if (inode == NULL) {
1499                 CERROR("%s: failed to get simple inode "DFID": rc = -ENOENT\n",
1500                        sbi->ll_fsname, PFID(fid));
1501                 RETURN(ERR_PTR(-ENOENT));
1502         }
1503
1504         lli = ll_i2info(inode);
1505         if (inode->i_state & I_NEW) {
1506                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
1507                                 (body->mbo_mode & S_IFMT);
1508                 LASSERTF(S_ISDIR(inode->i_mode), "Not slave inode "DFID"\n",
1509                          PFID(fid));
1510
1511                 inode->i_mtime.tv_sec = 0;
1512                 inode->i_atime.tv_sec = 0;
1513                 inode->i_ctime.tv_sec = 0;
1514                 inode->i_rdev = 0;
1515
1516 #ifdef HAVE_BACKING_DEV_INFO
1517                 /* initializing backing dev info. */
1518                 inode->i_mapping->backing_dev_info =
1519                                                 &s2lsi(inode->i_sb)->lsi_bdi;
1520 #endif
1521                 inode->i_op = &ll_dir_inode_operations;
1522                 inode->i_fop = &ll_dir_operations;
1523                 lli->lli_fid = *fid;
1524                 ll_lli_init(lli);
1525
1526                 /* master object FID */
1527                 lli->lli_pfid = body->mbo_fid1;
1528                 CDEBUG(D_INODE, "lli %p slave "DFID" master "DFID"\n",
1529                        lli, PFID(fid), PFID(&lli->lli_pfid));
1530                 unlock_new_inode(inode);
1531         } else {
1532                 /* In directory restripe/auto-split, a plain directory may
1533                  * be transformed into a stripe; set its pfid here, otherwise
1534                  * ll_lock_cancel_bits() can't find the master inode.
1535                  */
1536                 lli->lli_pfid = body->mbo_fid1;
1537         }
1538
1539         RETURN(inode);
1540 }
1541
1542 static int ll_init_lsm_md(struct inode *inode, struct lustre_md *md)
1543 {
1544         struct lu_fid *fid;
1545         struct lmv_stripe_md *lsm = md->lmv;
1546         struct ll_inode_info *lli = ll_i2info(inode);
1547         int i;
1548
1549         LASSERT(lsm != NULL);
1550
1551         CDEBUG(D_INODE, "%s: "DFID" set dir layout:\n",
1552                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1553         lsm_md_dump(D_INODE, lsm);
1554
1555         if (!lmv_dir_striped(lsm))
1556                 goto out;
1557
1558         /* XXX sigh, this lsm_root initialization should be in the
1559          * LMV layer, but it needs ll_iget, so for now we
1560          * keep it here. */
1561         for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
1562                 fid = &lsm->lsm_md_oinfo[i].lmo_fid;
1563                 LASSERT(lsm->lsm_md_oinfo[i].lmo_root == NULL);
1564
1565                 if (!fid_is_sane(fid))
1566                         continue;
1567
1568                 /* Unfortunately ll_iget will call ll_update_inode,
1569                  * where the initialization of a slave inode is slightly
1570                  * different, so lsm_md is reset to NULL there to avoid
1571                  * initializing the lsm for a slave inode. */
1572                 lsm->lsm_md_oinfo[i].lmo_root =
1573                                 ll_iget_anon_dir(inode->i_sb, fid, md);
1574                 if (IS_ERR(lsm->lsm_md_oinfo[i].lmo_root)) {
1575                         int rc = PTR_ERR(lsm->lsm_md_oinfo[i].lmo_root);
1576
1577                         lsm->lsm_md_oinfo[i].lmo_root = NULL;
1578                         while (i-- > 0) {
1579                                 iput(lsm->lsm_md_oinfo[i].lmo_root);
1580                                 lsm->lsm_md_oinfo[i].lmo_root = NULL;
1581                         }
1582                         return rc;
1583                 }
1584         }
1585 out:
1586         lli->lli_lsm_md = lsm;
1587
1588         return 0;
1589 }
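/*
 * Illustrative sketch (not part of the original file): the unwind pattern
 * used in ll_init_lsm_md() above.  If grabbing the i-th slave root fails,
 * every root grabbed so far is released with iput() before returning, so no
 * inode references are leaked.  The names here are generic placeholders.
 */
static int example_grab_all_or_none(struct inode **roots, int count,
                                    struct inode *(*grab)(int idx))
{
        int i;

        for (i = 0; i < count; i++) {
                roots[i] = grab(i);
                if (IS_ERR(roots[i])) {
                        int rc = PTR_ERR(roots[i]);

                        roots[i] = NULL;
                        while (i-- > 0) {
                                /* drop the references we already hold */
                                iput(roots[i]);
                                roots[i] = NULL;
                        }
                        return rc;
                }
        }

        return 0;
}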
1590
1591 static void ll_update_default_lsm_md(struct inode *inode, struct lustre_md *md)
1592 {
1593         struct ll_inode_info *lli = ll_i2info(inode);
1594
1595         ENTRY;
1596
1597         if (!md->default_lmv) {
1598                 /* clear default lsm */
1599                 if (lli->lli_default_lsm_md) {
1600                         down_write(&lli->lli_lsm_sem);
1601                         if (lli->lli_default_lsm_md) {
1602                                 lmv_free_memmd(lli->lli_default_lsm_md);
1603                                 lli->lli_default_lsm_md = NULL;
1604                         }
1605                         up_write(&lli->lli_lsm_sem);
1606                 }
1607                 RETURN_EXIT;
1608         }
1609
1610         if (lli->lli_default_lsm_md) {
1611                 /* do nothing if the default lsm is unchanged */
1612                 down_read(&lli->lli_lsm_sem);
1613                 if (lli->lli_default_lsm_md &&
1614                     lsm_md_eq(lli->lli_default_lsm_md, md->default_lmv)) {
1615                         up_read(&lli->lli_lsm_sem);
1616                         RETURN_EXIT;
1617                 }
1618                 up_read(&lli->lli_lsm_sem);
1619         }
1620
1621         down_write(&lli->lli_lsm_sem);
1622         if (lli->lli_default_lsm_md)
1623                 lmv_free_memmd(lli->lli_default_lsm_md);
1624         lli->lli_default_lsm_md = md->default_lmv;
1625         lsm_md_dump(D_INODE, md->default_lmv);
1626         md->default_lmv = NULL;
1627         up_write(&lli->lli_lsm_sem);
1628         RETURN_EXIT;
1629 }
1630
1631 static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
1632 {
1633         struct ll_inode_info *lli = ll_i2info(inode);
1634         struct lmv_stripe_md *lsm = md->lmv;
1635         struct cl_attr  *attr;
1636         int rc = 0;
1637
1638         ENTRY;
1639
1640         LASSERT(S_ISDIR(inode->i_mode));
1641         CDEBUG(D_INODE, "update lsm %p of "DFID"\n", lli->lli_lsm_md,
1642                PFID(ll_inode2fid(inode)));
1643
1644         /* update default LMV */
1645         if (md->default_lmv)
1646                 ll_update_default_lsm_md(inode, md);
1647
1648         /* After dir migration/restripe, a stripe may be turned into a
1649          * directory; in this case, zero out its lli_pfid.
1650          */
1651         if (unlikely(fid_is_norm(&lli->lli_pfid)))
1652                 fid_zero(&lli->lli_pfid);
1653
1654         /*
1655          * No striping information in the request: the lustre_md from the
1656          * request does not include the stripe EA, see ll_md_setattr().
1657          */
1658         if (!lsm)
1659                 RETURN(0);
1660
1661         /*
1662          * normally dir layout doesn't change, only take read lock to check
1663          * that to avoid blocking other MD operations.
1664          */
1665         down_read(&lli->lli_lsm_sem);
1666
1667         /* some concurrent lookup initialized the lsm and it is unchanged */
1668         if (lli->lli_lsm_md && lsm_md_eq(lli->lli_lsm_md, lsm))
1669                 GOTO(unlock, rc = 0);
1670
1671         /* If the dir layout doesn't match, check whether the version has
1672          * increased, which means the layout has changed; this happens in
1673          * dir split/merge and lfsck.
1674          *
1675          * A foreign LMV should not change.
1676          */
1677         if (lli->lli_lsm_md && lmv_dir_striped(lli->lli_lsm_md) &&
1678             lsm->lsm_md_layout_version <=
1679             lli->lli_lsm_md->lsm_md_layout_version) {
1680                 CERROR("%s: "DFID" dir layout mismatch:\n",
1681                        ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1682                 lsm_md_dump(D_ERROR, lli->lli_lsm_md);
1683                 lsm_md_dump(D_ERROR, lsm);
1684                 GOTO(unlock, rc = -EINVAL);
1685         }
1686
1687         up_read(&lli->lli_lsm_sem);
1688         down_write(&lli->lli_lsm_sem);
1689         /* clear existing lsm */
1690         if (lli->lli_lsm_md) {
1691                 lmv_free_memmd(lli->lli_lsm_md);
1692                 lli->lli_lsm_md = NULL;
1693         }
1694
1695         rc = ll_init_lsm_md(inode, md);
1696         up_write(&lli->lli_lsm_sem);
1697
1698         if (rc)
1699                 RETURN(rc);
1700
1701         /* Set md->lmv to NULL so that freeing the lustre_md later will not
1702          * free this lsm.
1703          */
1704         md->lmv = NULL;
1705
1706         /* md_merge_attr() may take a long time; since the lsm is already
1707          * set, switch to the read lock.
1708          */
1709         down_read(&lli->lli_lsm_sem);
1710
1711         if (!lmv_dir_striped(lli->lli_lsm_md))
1712                 GOTO(unlock, rc = 0);
1713
1714         OBD_ALLOC_PTR(attr);
1715         if (!attr)
1716                 GOTO(unlock, rc = -ENOMEM);
1717
1718         /* validate the lsm */
1719         rc = md_merge_attr(ll_i2mdexp(inode), lli->lli_lsm_md, attr,
1720                            ll_md_blocking_ast);
1721         if (!rc) {
1722                 if (md->body->mbo_valid & OBD_MD_FLNLINK)
1723                         md->body->mbo_nlink = attr->cat_nlink;
1724                 if (md->body->mbo_valid & OBD_MD_FLSIZE)
1725                         md->body->mbo_size = attr->cat_size;
1726                 if (md->body->mbo_valid & OBD_MD_FLATIME)
1727                         md->body->mbo_atime = attr->cat_atime;
1728                 if (md->body->mbo_valid & OBD_MD_FLCTIME)
1729                         md->body->mbo_ctime = attr->cat_ctime;
1730                 if (md->body->mbo_valid & OBD_MD_FLMTIME)
1731                         md->body->mbo_mtime = attr->cat_mtime;
1732         }
1733
1734         OBD_FREE_PTR(attr);
1735         GOTO(unlock, rc);
1736 unlock:
1737         up_read(&lli->lli_lsm_sem);
1738
1739         return rc;
1740 }
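/*
 * Illustrative sketch (not part of the original file): the lock-upgrade
 * idiom used by ll_update_lsm_md() and ll_update_default_lsm_md() above.
 * The layout is first checked under the read side of the semaphore; only
 * when it must be replaced is the lock dropped and re-taken for write, and
 * the state re-checked, because another thread may have updated it in the
 * window in between.  The callbacks are generic placeholders.
 */
static void example_check_then_update(struct rw_semaphore *sem,
                                      bool (*unchanged)(void *state),
                                      void (*update)(void *state),
                                      void *state)
{
        down_read(sem);
        if (unchanged(state)) {         /* fast path: nothing to do */
                up_read(sem);
                return;
        }
        up_read(sem);

        down_write(sem);                /* slow path: re-check, then update */
        if (!unchanged(state))
                update(state);
        up_write(sem);
}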
1741
1742 void ll_clear_inode(struct inode *inode)
1743 {
1744         struct ll_inode_info *lli = ll_i2info(inode);
1745         struct ll_sb_info *sbi = ll_i2sbi(inode);
1746
1747         ENTRY;
1748
1749         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
1750                PFID(ll_inode2fid(inode)), inode);
1751
1752         if (S_ISDIR(inode->i_mode)) {
1753                 /* these should have been cleared in ll_file_release */
1754                 LASSERT(lli->lli_opendir_key == NULL);
1755                 LASSERT(lli->lli_sai == NULL);
1756                 LASSERT(lli->lli_opendir_pid == 0);
1757         } else {
1758                 pcc_inode_free(inode);
1759         }
1760
1761         md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
1762
1763         LASSERT(!lli->lli_open_fd_write_count);
1764         LASSERT(!lli->lli_open_fd_read_count);
1765         LASSERT(!lli->lli_open_fd_exec_count);
1766
1767         if (lli->lli_mds_write_och)
1768                 ll_md_real_close(inode, FMODE_WRITE);
1769         if (lli->lli_mds_exec_och)
1770                 ll_md_real_close(inode, FMODE_EXEC);
1771         if (lli->lli_mds_read_och)
1772                 ll_md_real_close(inode, FMODE_READ);
1773
1774         if (S_ISLNK(inode->i_mode) && lli->lli_symlink_name) {
1775                 OBD_FREE(lli->lli_symlink_name,
1776                          strlen(lli->lli_symlink_name) + 1);
1777                 lli->lli_symlink_name = NULL;
1778         }
1779
1780         ll_xattr_cache_destroy(inode);
1781
1782         forget_all_cached_acls(inode);
1783         lli_clear_acl(lli);
1784         lli->lli_inode_magic = LLI_INODE_DEAD;
1785
1786         if (S_ISDIR(inode->i_mode))
1787                 ll_dir_clear_lsm_md(inode);
1788         else if (S_ISREG(inode->i_mode) && !is_bad_inode(inode))
1789                 LASSERT(list_empty(&lli->lli_agl_list));
1790
1791         /*
1792          * XXX This has to be done before lsm is freed below, because
1793          * cl_object still uses inode lsm.
1794          */
1795         cl_inode_fini(inode);
1796
1797         llcrypt_put_encryption_info(inode);
1798
1799         EXIT;
1800 }
1801
1802 static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data)
1803 {
1804         struct lustre_md md;
1805         struct inode *inode = dentry->d_inode;
1806         struct ll_sb_info *sbi = ll_i2sbi(inode);
1807         struct ptlrpc_request *request = NULL;
1808         int rc, ia_valid;
1809
1810         ENTRY;
1811
1812         op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
1813                                      LUSTRE_OPC_ANY, NULL);
1814         if (IS_ERR(op_data))
1815                 RETURN(PTR_ERR(op_data));
1816
1817         /* If this is a chgrp of a regular file, we want to reserve enough
1818          * quota to cover the entire file size.
1819          */
1820         if (S_ISREG(inode->i_mode) && op_data->op_attr.ia_valid & ATTR_GID &&
1821             from_kgid(&init_user_ns, op_data->op_attr.ia_gid) !=
1822             from_kgid(&init_user_ns, inode->i_gid)) {
1823                 op_data->op_xvalid |= OP_XVALID_BLOCKS;
1824                 op_data->op_attr_blocks = inode->i_blocks;
1825         }
1826
1827
1828         rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &request);
1829         if (rc) {
1830                 ptlrpc_req_finished(request);
1831                 if (rc == -ENOENT) {
1832                         clear_nlink(inode);
1833                         /* Unlinked special device node? Or just a race?
1834                          * Pretend we did everything. */
1835                         if (!S_ISREG(inode->i_mode) &&
1836                             !S_ISDIR(inode->i_mode)) {
1837                                 ia_valid = op_data->op_attr.ia_valid;
1838                                 op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
1839                                 rc = simple_setattr(dentry, &op_data->op_attr);
1840                                 op_data->op_attr.ia_valid = ia_valid;
1841                         }
1842                 } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
1843                         CERROR("md_setattr fails: rc = %d\n", rc);
1844                 }
1845                 RETURN(rc);
1846         }
1847
1848         rc = md_get_lustre_md(sbi->ll_md_exp, &request->rq_pill, sbi->ll_dt_exp,
1849                               sbi->ll_md_exp, &md);
1850         if (rc) {
1851                 ptlrpc_req_finished(request);
1852                 RETURN(rc);
1853         }
1854
1855         ia_valid = op_data->op_attr.ia_valid;
1856         /* The inode size will be set in ll_setattr_ost; it can't be done now
1857          * since the dirty cache is not cleared yet. */
1858         op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
1859         if (S_ISREG(inode->i_mode))
1860                 inode_lock(inode);
1861         rc = simple_setattr(dentry, &op_data->op_attr);
1862         if (S_ISREG(inode->i_mode))
1863                 inode_unlock(inode);
1864         op_data->op_attr.ia_valid = ia_valid;
1865
1866         rc = ll_update_inode(inode, &md);
1867         ptlrpc_req_finished(request);
1868
1869         RETURN(rc);
1870 }
1871
1872 /**
1873  * Zero portion of page that is part of @inode.
1874  * This implies, if necessary:
1875  * - taking cl_lock on range corresponding to concerned page
1876  * - grabbing vm page
1877  * - associating cl_page
1878  * - proceeding to clio read
1879  * - zeroing range in page
1880  * - proceeding to cl_page flush
1881  * - releasing cl_lock
1882  *
1883  * \param[in] inode     inode
1884  * \param[in] index     page index
1885  * \param[in] offset    offset in page to start zero from
1886  * \param[in] len       len to zero
1887  *
1888  * \retval 0            on success
1889  * \retval negative     errno on failure
1890  */
1891 int ll_io_zero_page(struct inode *inode, pgoff_t index, pgoff_t offset,
1892                     unsigned len)
1893 {
1894         struct ll_inode_info *lli = ll_i2info(inode);
1895         struct cl_object *clob = lli->lli_clob;
1896         __u16 refcheck;
1897         struct lu_env *env = NULL;
1898         struct cl_io *io = NULL;
1899         struct cl_page *clpage = NULL;
1900         struct page *vmpage = NULL;
1901         unsigned from = index << PAGE_SHIFT;
1902         struct cl_lock *lock = NULL;
1903         struct cl_lock_descr *descr = NULL;
1904         struct cl_2queue *queue = NULL;
1905         struct cl_sync_io *anchor = NULL;
1906         bool holdinglock = false;
1907         bool lockedbymyself = true;
1908         int rc;
1909
1910         ENTRY;
1911
1912         env = cl_env_get(&refcheck);
1913         if (IS_ERR(env))
1914                 RETURN(PTR_ERR(env));
1915
1916         io = vvp_env_thread_io(env);
1917         io->ci_obj = clob;
1918         rc = cl_io_rw_init(env, io, CIT_WRITE, from, PAGE_SIZE);
1919         if (rc)
1920                 GOTO(putenv, rc);
1921
1922         lock = vvp_env_lock(env);
1923         descr = &lock->cll_descr;
1924         descr->cld_obj   = io->ci_obj;
1925         descr->cld_start = cl_index(io->ci_obj, from);
1926         descr->cld_end   = cl_index(io->ci_obj, from + PAGE_SIZE - 1);
1927         descr->cld_mode  = CLM_WRITE;
1928         descr->cld_enq_flags = CEF_MUST | CEF_NONBLOCK;
1929
1930         /* request lock for page */
1931         rc = cl_lock_request(env, io, lock);
1932         /* -ECANCELED indicates a matching lock with a different extent
1933          * was already present, and -EEXIST indicates a matching lock
1934          * on exactly the same extent was already present.
1935          * In both cases it means we are covered.
1936          */
1937         if (rc == -ECANCELED || rc == -EEXIST)
1938                 rc = 0;
1939         else if (rc < 0)
1940                 GOTO(iofini, rc);
1941         else
1942                 holdinglock = true;
1943
1944         /* grab page */
1945         vmpage = grab_cache_page_nowait(inode->i_mapping, index);
1946         if (vmpage == NULL)
1947                 GOTO(rellock, rc = -EOPNOTSUPP);
1948
1949         if (!PageDirty(vmpage)) {
1950                 /* associate cl_page */
1951                 clpage = cl_page_find(env, clob, vmpage->index,
1952                                       vmpage, CPT_CACHEABLE);
1953                 if (IS_ERR(clpage))
1954                         GOTO(pagefini, rc = PTR_ERR(clpage));
1955
1956                 cl_page_assume(env, io, clpage);
1957         }
1958
1959         if (!PageUptodate(vmpage) && !PageDirty(vmpage) &&
1960             !PageWriteback(vmpage)) {
1961                 /* read page */
1962                 /* set PagePrivate2 to detect special case of empty page
1963                  * in osc_brw_fini_request()
1964                  */
1965                 SetPagePrivate2(vmpage);
1966                 rc = ll_io_read_page(env, io, clpage, NULL);
1967                 if (!PagePrivate2(vmpage))
1968                         /* PagePrivate2 was cleared in osc_brw_fini_request()
1969                          * meaning we read an empty page. In this case, in order
1970                          * to avoid allocating an unnecessary block in the truncated
1971                          * file, we must not zero and write as below. Subsequent
1972                          * server-side truncate will handle things correctly.
1973                          */
1974                         GOTO(clpfini, rc = 0);
1975                 ClearPagePrivate2(vmpage);
1976                 if (rc)
1977                         GOTO(clpfini, rc);
1978                 lockedbymyself = trylock_page(vmpage);
1979                 cl_page_assume(env, io, clpage);
1980         }
1981
1982         /* zero range in page */
1983         zero_user(vmpage, offset, len);
1984
1985         if (holdinglock && clpage) {
1986                 /* explicitly write newly modified page */
1987                 queue = &io->ci_queue;
1988                 cl_2queue_init(queue);
1989                 anchor = &vvp_env_info(env)->vti_anchor;
1990                 cl_sync_io_init(anchor, 1);
1991                 clpage->cp_sync_io = anchor;
1992                 cl_2queue_add(queue, clpage, true);
1993                 rc = cl_io_submit_rw(env, io, CRT_WRITE, queue);
1994                 if (rc)
1995                         GOTO(queuefini1, rc);
1996                 rc = cl_sync_io_wait(env, anchor, 0);
1997                 if (rc)
1998                         GOTO(queuefini2, rc);
1999                 cl_page_assume(env, io, clpage);
2000
2001 queuefini2:
2002                 cl_2queue_discard(env, io, queue);
2003 queuefini1:
2004                 cl_2queue_disown(env, io, queue);
2005                 cl_2queue_fini(env, queue);
2006         }
2007
2008 clpfini:
2009         if (clpage)
2010                 cl_page_put(env, clpage);
2011 pagefini:
2012         if (lockedbymyself) {
2013                 unlock_page(vmpage);
2014                 put_page(vmpage);
2015         }
2016 rellock:
2017         if (holdinglock)
2018                 cl_lock_release(env, lock);
2019 iofini:
2020         cl_io_fini(env, io);
2021 putenv:
2022         if (env)
2023                 cl_env_put(env, &refcheck);
2024
2025         RETURN(rc);
2026 }
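/*
 * Illustrative sketch (not part of the original file): how a caller such as
 * ll_setattr_raw() below computes the ll_io_zero_page() arguments when
 * truncating an encrypted file to a size that is not PAGE_SIZE aligned;
 * only the tail of the last partial page needs to be wiped.  The helper
 * name is hypothetical.
 */
static int example_zero_partial_tail(struct inode *inode, loff_t newsize)
{
        pgoff_t offset = newsize & (PAGE_SIZE - 1);

        if (offset == 0)        /* size is page aligned, nothing to wipe */
                return 0;

        return ll_io_zero_page(inode, newsize >> PAGE_SHIFT,
                               offset, PAGE_SIZE - offset);
}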
2027
2028 /* If this inode has objects allocated to it (lsm != NULL), then the OST
2029  * object(s) determine the file size and mtime.  Otherwise, the MDS will
2030  * keep these values until such a time that objects are allocated for it.
2031  * We do the MDS operations first, as it is checking permissions for us.
2032  * We don't do the MDS RPC if there is nothing that we want to store there;
2033  * otherwise there is no harm in updating mtime/atime on the MDS if we are
2034  * going to do an RPC anyway.
2035  *
2036  * If we are doing a truncate, we will send the mtime and ctime updates
2037  * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
2038  * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
2039  * at the same time.
2040  *
2041  * In case of HSM import, we only set the attributes on the MDS.
2042  */
2043 int ll_setattr_raw(struct dentry *dentry, struct iattr *attr,
2044                    enum op_xvalid xvalid, bool hsm_import)
2045 {
2046         struct inode *inode = dentry->d_inode;
2047         struct ll_inode_info *lli = ll_i2info(inode);
2048         struct md_op_data *op_data = NULL;
2049         ktime_t kstart = ktime_get();
2050         int rc = 0;
2051
2052         ENTRY;
2053
2054         CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, "
2055                "valid %x, hsm_import %d\n",
2056                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid),
2057                inode, i_size_read(inode), attr->ia_size, attr->ia_valid,
2058                hsm_import);
2059
2060         if (attr->ia_valid & ATTR_SIZE) {
2061                 /* Check new size against VFS/VM file size limit and rlimit */
2062                 rc = inode_newsize_ok(inode, attr->ia_size);
2063                 if (rc)
2064                         RETURN(rc);
2065
2066                 /* The maximum Lustre file size is variable, based on the
2067                  * OST maximum object size and number of stripes.  This
2068                  * needs another check in addition to the VFS check above. */
2069                 if (attr->ia_size > ll_file_maxbytes(inode)) {
2070                         CDEBUG(D_INODE,"file "DFID" too large %llu > %llu\n",
2071                                PFID(&lli->lli_fid), attr->ia_size,
2072                                ll_file_maxbytes(inode));
2073                         RETURN(-EFBIG);
2074                 }
2075
2076                 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
2077         }
2078
2079         /* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
2080         if (attr->ia_valid & TIMES_SET_FLAGS) {
2081                 if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
2082                     !capable(CAP_FOWNER))
2083                         RETURN(-EPERM);
2084         }
2085
2086         /* We mark all of the fields "set" so MDS/OST does not re-set them */
2087         if (!(xvalid & OP_XVALID_CTIME_SET) &&
2088              (attr->ia_valid & ATTR_CTIME)) {
2089                 attr->ia_ctime = current_time(inode);
2090                 xvalid |= OP_XVALID_CTIME_SET;
2091         }
2092         if (!(attr->ia_valid & ATTR_ATIME_SET) &&
2093             (attr->ia_valid & ATTR_ATIME)) {
2094                 attr->ia_atime = current_time(inode);
2095                 attr->ia_valid |= ATTR_ATIME_SET;
2096         }
2097         if (!(attr->ia_valid & ATTR_MTIME_SET) &&
2098             (attr->ia_valid & ATTR_MTIME)) {
2099                 attr->ia_mtime = current_time(inode);
2100                 attr->ia_valid |= ATTR_MTIME_SET;
2101         }
2102
2103         if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
2104                 CDEBUG(D_INODE, "setting mtime %lld, ctime %lld, now = %lld\n",
2105                        (s64)attr->ia_mtime.tv_sec, (s64)attr->ia_ctime.tv_sec,
2106                        ktime_get_real_seconds());
2107
2108         if (S_ISREG(inode->i_mode))
2109                 inode_unlock(inode);
2110
2111         /* We always do an MDS RPC, even if we're only changing the size;
2112          * only the MDS knows whether truncate() should fail with -ETXTBSY */
2113
2114         OBD_ALLOC_PTR(op_data);
2115         if (op_data == NULL)
2116                 GOTO(out, rc = -ENOMEM);
2117
2118         if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
2119                 /* If we are changing the file size, the file content is
2120                  * modified; flag it.
2121                  */
2122                 xvalid |= OP_XVALID_OWNEROVERRIDE;
2123                 op_data->op_bias |= MDS_DATA_MODIFIED;
2124                 clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
2125         }
2126
2127         if (attr->ia_valid & ATTR_FILE) {
2128                 struct ll_file_data *fd = attr->ia_file->private_data;
2129
2130                 if (fd->fd_lease_och)
2131                         op_data->op_bias |= MDS_TRUNC_KEEP_LEASE;
2132         }
2133
2134         op_data->op_attr = *attr;
2135         op_data->op_xvalid = xvalid;
2136
2137         rc = ll_md_setattr(dentry, op_data);
2138         if (rc)
2139                 GOTO(out, rc);
2140
2141         if (!S_ISREG(inode->i_mode) || hsm_import)
2142                 GOTO(out, rc = 0);
2143
2144         if (attr->ia_valid & (ATTR_SIZE | ATTR_ATIME | ATTR_ATIME_SET |
2145                               ATTR_MTIME | ATTR_MTIME_SET | ATTR_CTIME) ||
2146             xvalid & OP_XVALID_CTIME_SET) {
2147                 bool cached = false;
2148
2149                 rc = pcc_inode_setattr(inode, attr, &cached);
2150                 if (cached) {
2151                         if (rc) {
2152                                 CERROR("%s: PCC inode "DFID" setattr failed: "
2153                                        "rc = %d\n",
2154                                        ll_i2sbi(inode)->ll_fsname,
2155                                        PFID(&lli->lli_fid), rc);
2156                                 GOTO(out, rc);
2157                         }
2158                 } else {
2159                         unsigned int flags = 0;
2160
2161                         /* For truncate and utimes sending attributes to OSTs,
2162                          * setting mtime/atime to the past will be performed
2163                          * under PW [0:EOF] extent lock (new_size:EOF for
2164                          * truncate). It may seem excessive to send mtime/atime
2165                          * updates to OSTs when not setting times to past, but
2166                          * it is necessary due to possible time
2167                          * de-synchronization between MDT inode and OST objects
2168                          */
2169                         if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) {
2170                                 xvalid |= OP_XVALID_FLAGS;
2171                                 flags = LUSTRE_ENCRYPT_FL;
2172                                 /* Call to ll_io_zero_page is not necessary if
2173                                  * truncating on PAGE_SIZE boundary, because
2174                                  * whole pages will be wiped.
2175                                  * In case of Direct IO, all we need is to set
2176                                  * new size.
2177                                  */
2178                                 if (attr->ia_valid & ATTR_SIZE &&
2179                                     attr->ia_size & ~PAGE_MASK &&
2180                                     !(attr->ia_valid & ATTR_FILE &&
2181                                       attr->ia_file->f_flags & O_DIRECT)) {
2182                                         pgoff_t offset =
2183                                                 attr->ia_size & (PAGE_SIZE - 1);
2184
2185                                         rc = ll_io_zero_page(inode,
2186                                                     attr->ia_size >> PAGE_SHIFT,
2187                                                     offset, PAGE_SIZE - offset);
2188                                         if (rc)
2189                                                 GOTO(out, rc);
2190                                 }
2191                         }
2192                         rc = cl_setattr_ost(lli->lli_clob, attr, xvalid, flags);
2193                 }
2194         }
2195
2196         /* If the file was restored, it needs to set the dirty flag.
2197          *
2198          * We've already sent MDS_DATA_MODIFIED flag in
2199          * ll_md_setattr() for truncate. However, the MDT refuses to
2200          * set the HS_DIRTY flag on released files, so we have to set
2201          * it again if the file has been restored. Please check how
2202          * LLIF_DATA_MODIFIED is set in vvp_io_setattr_fini().
2203          *
2204          * Please notice that if the file is not released, the previous
2205          * MDS_DATA_MODIFIED has taken effect and usually
2206          * LLIF_DATA_MODIFIED is not set (see vvp_io_setattr_fini()).
2207          * This way we can save an RPC for common open + trunc
2208          * operation. */
2209         if (test_and_clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags)) {
2210                 struct hsm_state_set hss = {
2211                         .hss_valid = HSS_SETMASK,
2212                         .hss_setmask = HS_DIRTY,
2213                 };
2214                 int rc2;
2215
2216                 rc2 = ll_hsm_state_set(inode, &hss);
2217                 /* Truncate and write can happen at the same time, so the
2218                  * file can be marked modified even though it was not
2219                  * restored from the released state; ll_hsm_state_set() is
2220                  * then not applicable to the file and rc2 < 0 is normal in
2221                  * this case. */
2222                 if (rc2 < 0)
2223                         CDEBUG(D_INFO, DFID "HSM set dirty failed: rc2 = %d\n",
2224                                PFID(ll_inode2fid(inode)), rc2);
2225         }
2226
2227         EXIT;
2228 out:
2229         if (op_data != NULL)
2230                 ll_finish_md_op_data(op_data);
2231
2232         if (S_ISREG(inode->i_mode)) {
2233                 inode_lock(inode);
2234                 if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
2235                         inode_dio_wait(inode);
2236                 /* Once we've got the i_mutex, it's safe to set the S_NOSEC
2237                  * flag.  ll_update_inode (called from ll_md_setattr) clears
2238                  * inode flags, so there is a gap where S_NOSEC is not set.
2239                  * This can cause a writer to take the i_mutex unnecessarily,
2240                  * but this is safe to do and should be rare. */
2241                 inode_has_no_xattr(inode);
2242         }
2243
2244         if (!rc)
2245                 ll_stats_ops_tally(ll_i2sbi(inode), attr->ia_valid & ATTR_SIZE ?
2246                                         LPROC_LL_TRUNC : LPROC_LL_SETATTR,
2247                                    ktime_us_delta(ktime_get(), kstart));
2248
2249         return rc;
2250 }
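/*
 * Illustrative sketch (not part of the original file): a minimal truncate
 * going through ll_setattr_raw() above.  It assumes the caller holds the
 * inode lock for regular files, as the VFS does before calling ->setattr,
 * since ll_setattr_raw() drops and re-takes it.  The helper name is
 * hypothetical.
 */
static int example_truncate(struct dentry *dentry, loff_t newsize)
{
        struct iattr attr = {
                .ia_valid = ATTR_SIZE,
                .ia_size  = newsize,
        };

        /* not an HSM import, no extra xvalid flags */
        return ll_setattr_raw(dentry, &attr, 0, false);
}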
2251
2252 int ll_setattr(struct dentry *de, struct iattr *attr)
2253 {
2254         int mode = de->d_inode->i_mode;
2255         enum op_xvalid xvalid = 0;
2256         int rc;
2257
2258         rc = llcrypt_prepare_setattr(de, attr);
2259         if (rc)
2260                 return rc;
2261
2262         if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
2263                               (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
2264                 xvalid |= OP_XVALID_OWNEROVERRIDE;
2265
2266         if (((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
2267                                (ATTR_SIZE|ATTR_MODE)) &&
2268             (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
2269              (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2270               !(attr->ia_mode & S_ISGID))))
2271                 attr->ia_valid |= ATTR_FORCE;
2272
2273         if ((attr->ia_valid & ATTR_MODE) &&
2274             (mode & S_ISUID) &&
2275             !(attr->ia_mode & S_ISUID) &&
2276             !(attr->ia_valid & ATTR_KILL_SUID))
2277                 attr->ia_valid |= ATTR_KILL_SUID;
2278
2279         if ((attr->ia_valid & ATTR_MODE) &&
2280             ((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2281             !(attr->ia_mode & S_ISGID) &&
2282             !(attr->ia_valid & ATTR_KILL_SGID))
2283                 attr->ia_valid |= ATTR_KILL_SGID;
2284
2285         return ll_setattr_raw(de, attr, xvalid, false);
2286 }
2287
2288 int ll_statfs_internal(struct ll_sb_info *sbi, struct obd_statfs *osfs,
2289                        u32 flags)
2290 {
2291         struct obd_statfs obd_osfs = { 0 };
2292         time64_t max_age;
2293         int rc;
2294
2295         ENTRY;
2296         max_age = ktime_get_seconds() - sbi->ll_statfs_max_age;
2297
2298         if (test_bit(LL_SBI_LAZYSTATFS, sbi->ll_flags))
2299                 flags |= OBD_STATFS_NODELAY;
2300
2301         rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
2302         if (rc)
2303                 RETURN(rc);
2304
2305         osfs->os_type = LL_SUPER_MAGIC;
2306
2307         CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n",
2308               osfs->os_bavail, osfs->os_blocks, osfs->os_ffree, osfs->os_files);
2309
2310         if (osfs->os_state & OS_STATFS_SUM)
2311                 GOTO(out, rc);
2312
2313         rc = obd_statfs(NULL, sbi->ll_dt_exp, &obd_osfs, max_age, flags);
2314         if (rc) /* Possibly a filesystem with no OSTs.  Report MDT totals. */
2315                 GOTO(out, rc = 0);
2316
2317         CDEBUG(D_SUPER, "OSC blocks %llu/%llu objects %llu/%llu\n",
2318                obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
2319                obd_osfs.os_files);
2320
2321         osfs->os_bsize = obd_osfs.os_bsize;
2322         osfs->os_blocks = obd_osfs.os_blocks;
2323         osfs->os_bfree = obd_osfs.os_bfree;
2324         osfs->os_bavail = obd_osfs.os_bavail;
2325
2326         /* If we have _some_ OSTs, but don't have as many free objects on the
2327          * OSTs as inodes on the MDTs, reduce the reported number of inodes
2328          * to compensate, so that the "inodes in use" number is correct.
2329          * This should be kept in sync with lod_statfs() behaviour.
2330          */
2331         if (obd_osfs.os_files && obd_osfs.os_ffree < osfs->os_ffree) {
2332                 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
2333                                  obd_osfs.os_ffree;
2334                 osfs->os_ffree = obd_osfs.os_ffree;
2335         }
2336
2337 out:
2338         RETURN(rc);
2339 }
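/*
 * Illustrative sketch (not part of the original file): the inode-count
 * compensation at the end of ll_statfs_internal() above, with made-up
 * numbers.  If the MDT reports 1000 total / 900 free inodes (100 in use)
 * but the OSTs only have 500 free objects, the combined result is
 * (1000 - 900) + 500 = 600 total and 500 free, so "inodes in use" still
 * reads 100.
 */
static void example_clamp_inode_counts(struct obd_statfs *mdt,
                                       const struct obd_statfs *ost)
{
        if (ost->os_files && ost->os_ffree < mdt->os_ffree) {
                mdt->os_files = (mdt->os_files - mdt->os_ffree) +
                                ost->os_ffree;
                mdt->os_ffree = ost->os_ffree;
        }
}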
2340
2341 static int ll_statfs_project(struct inode *inode, struct kstatfs *sfs)
2342 {
2343         struct if_quotactl qctl = {
2344                 .qc_cmd = LUSTRE_Q_GETQUOTA,
2345                 .qc_type = PRJQUOTA,
2346                 .qc_valid = QC_GENERAL,
2347         };
2348         u64 limit, curblock;
2349         int ret;
2350
2351         qctl.qc_id = ll_i2info(inode)->lli_projid;
2352         ret = quotactl_ioctl(inode->i_sb, &qctl);
2353         if (ret) {
2354                 /* ignore errors if the project ID does not have
2355                  * a quota limit or the feature is unsupported.
2356                  */
2357                 if (ret == -ESRCH || ret == -EOPNOTSUPP)
2358                         ret = 0;
2359                 return ret;
2360         }
2361
2362         limit = ((qctl.qc_dqblk.dqb_bsoftlimit ?
2363                  qctl.qc_dqblk.dqb_bsoftlimit :
2364                  qctl.qc_dqblk.dqb_bhardlimit) * 1024) / sfs->f_bsize;
2365         if (limit && sfs->f_blocks > limit) {
2366                 curblock = (qctl.qc_dqblk.dqb_curspace +
2367                                 sfs->f_bsize - 1) / sfs->f_bsize;
2368                 sfs->f_blocks = limit;
2369                 sfs->f_bfree = sfs->f_bavail =
2370                         (sfs->f_blocks > curblock) ?
2371                         (sfs->f_blocks - curblock) : 0;
2372         }
2373
2374         limit = qctl.qc_dqblk.dqb_isoftlimit ?
2375                 qctl.qc_dqblk.dqb_isoftlimit :
2376                 qctl.qc_dqblk.dqb_ihardlimit;
2377         if (limit && sfs->f_files > limit) {
2378                 sfs->f_files = limit;
2379                 sfs->f_ffree = (sfs->f_files >
2380                         qctl.qc_dqblk.dqb_curinodes) ?
2381                         (sfs->f_files - qctl.qc_dqblk.dqb_curinodes) : 0;
2382         }
2383
2384         return 0;
2385 }
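/*
 * Illustrative sketch (not part of the original file): the block-limit
 * clamping done in ll_statfs_project() above, pulled out on its own.  The
 * quota limit is reported in KiB and the current usage in bytes, so both
 * are converted to filesystem blocks before f_blocks/f_bfree/f_bavail are
 * clamped.  The helper name is hypothetical.
 */
static void example_clamp_project_blocks(struct kstatfs *sfs, u64 limit_kib,
                                         u64 curspace_bytes)
{
        u64 limit = (limit_kib * 1024) / sfs->f_bsize;
        u64 curblock = (curspace_bytes + sfs->f_bsize - 1) / sfs->f_bsize;

        if (limit && sfs->f_blocks > limit) {
                sfs->f_blocks = limit;
                sfs->f_bfree = sfs->f_bavail =
                        (sfs->f_blocks > curblock) ?
                        (sfs->f_blocks - curblock) : 0;
        }
}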
2386
2387 int ll_statfs(struct dentry *de, struct kstatfs *sfs)
2388 {
2389         struct super_block *sb = de->d_sb;
2390         struct obd_statfs osfs;
2391         __u64 fsid = huge_encode_dev(sb->s_dev);
2392         ktime_t kstart = ktime_get();
2393         int rc;
2394
2395         CDEBUG(D_VFSTRACE, "VFS Op:sb=%s (%p)\n", sb->s_id, sb);
2396
2397         /* Some amount of caching on the client is allowed */
2398         rc = ll_statfs_internal(ll_s2sbi(sb), &osfs, OBD_STATFS_SUM);
2399         if (rc)
2400                 return rc;
2401
2402         statfs_unpack(sfs, &osfs);
2403
2404         /* We need to downshift for all 32-bit kernels, because we can't
2405          * tell if the kernel is being called via sys_statfs64() or not.
2406          * Stop before overflowing f_bsize - in which case it is better
2407          * to just risk EOVERFLOW if caller is using old sys_statfs(). */
2408         if (sizeof(long) < 8) {
2409                 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
2410                         sfs->f_bsize <<= 1;
2411
2412                         osfs.os_blocks >>= 1;
2413                         osfs.os_bfree >>= 1;
2414                         osfs.os_bavail >>= 1;
2415                 }
2416         }
2417
2418         sfs->f_blocks = osfs.os_blocks;
2419         sfs->f_bfree = osfs.os_bfree;
2420         sfs->f_bavail = osfs.os_bavail;
2421         sfs->f_fsid.val[0] = (__u32)fsid;
2422         sfs->f_fsid.val[1] = (__u32)(fsid >> 32);
2423         if (ll_i2info(de->d_inode)->lli_projid)
2424                 return ll_statfs_project(de->d_inode, sfs);
2425
2426         ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STATFS,
2427                            ktime_us_delta(ktime_get(), kstart));
2428
2429         return 0;
2430 }
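/*
 * Illustrative sketch (not part of the original file): the 32-bit downshift
 * loop from ll_statfs() above, shown in isolation.  While the block count
 * does not fit in an unsigned long, the block size is doubled and all block
 * counts halved, so the reported capacity stays the same; e.g. on a 32-bit
 * kernel, 2^33 blocks of 4 KiB end up reported as 2^31 blocks of 16 KiB.
 */
static void example_statfs_downshift(struct kstatfs *sfs,
                                     struct obd_statfs *osfs)
{
        if (sizeof(long) < 8) {
                while (osfs->os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
                        sfs->f_bsize <<= 1;
                        osfs->os_blocks >>= 1;
                        osfs->os_bfree >>= 1;
                        osfs->os_bavail >>= 1;
                }
        }
}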
2431
2432 void ll_inode_size_lock(struct inode *inode)
2433 {
2434         struct ll_inode_info *lli;
2435
2436         LASSERT(!S_ISDIR(inode->i_mode));
2437
2438         lli = ll_i2info(inode);
2439         mutex_lock(&lli->lli_size_mutex);
2440 }
2441
2442 void ll_inode_size_unlock(struct inode *inode)
2443 {
2444         struct ll_inode_info *lli;
2445
2446         lli = ll_i2info(inode);
2447         mutex_unlock(&lli->lli_size_mutex);
2448 }
2449
2450 void ll_update_inode_flags(struct inode *inode, unsigned int ext_flags)
2451 {
2452         /* do not clear encryption flag */
2453         ext_flags |= ll_inode_to_ext_flags(inode->i_flags) & LUSTRE_ENCRYPT_FL;
2454         inode->i_flags = ll_ext_to_inode_flags(ext_flags);
2455         if (ext_flags & LUSTRE_PROJINHERIT_FL)
2456                 set_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags);
2457         else
2458                 clear_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags);
2459 }
2460
2461 int ll_update_inode(struct inode *inode, struct lustre_md *md)
2462 {
2463         struct ll_inode_info *lli = ll_i2info(inode);
2464         struct mdt_body *body = md->body;
2465         struct ll_sb_info *sbi = ll_i2sbi(inode);
2466         bool api32;
2467         int rc = 0;
2468
2469         if (body->mbo_valid & OBD_MD_FLEASIZE) {
2470                 rc = cl_file_inode_init(inode, md);
2471                 if (rc)
2472                         return rc;
2473         }
2474
2475         if (S_ISDIR(inode->i_mode)) {
2476                 rc = ll_update_lsm_md(inode, md);
2477                 if (rc != 0)
2478                         return rc;
2479         }
2480
2481         if (body->mbo_valid & OBD_MD_FLACL)
2482                 lli_replace_acl(lli, md);
2483
2484         api32 = test_bit(LL_SBI_32BIT_API, sbi->ll_flags);
2485         inode->i_ino = cl_fid_build_ino(&body->mbo_fid1, api32);
2486         inode->i_generation = cl_fid_build_gen(&body->mbo_fid1);
2487
2488         if (body->mbo_valid & OBD_MD_FLATIME) {
2489                 if (body->mbo_atime > inode->i_atime.tv_sec)
2490                         inode->i_atime.tv_sec = body->mbo_atime;
2491                 lli->lli_atime = body->mbo_atime;
2492         }
2493
2494         if (body->mbo_valid & OBD_MD_FLMTIME) {
2495                 if (body->mbo_mtime > inode->i_mtime.tv_sec) {
2496                         CDEBUG(D_INODE,
2497                                "setting ino %lu mtime from %lld to %llu\n",
2498                                inode->i_ino, (s64)inode->i_mtime.tv_sec,
2499                                body->mbo_mtime);
2500                         inode->i_mtime.tv_sec = body->mbo_mtime;
2501                 }
2502                 lli->lli_mtime = body->mbo_mtime;
2503         }
2504
2505         if (body->mbo_valid & OBD_MD_FLCTIME) {
2506                 if (body->mbo_ctime > inode->i_ctime.tv_sec)
2507                         inode->i_ctime.tv_sec = body->mbo_ctime;
2508                 lli->lli_ctime = body->mbo_ctime;
2509         }
2510
2511         if (body->mbo_valid & OBD_MD_FLBTIME)
2512                 lli->lli_btime = body->mbo_btime;
2513
2514         /* Clear i_flags to remove S_NOSEC before permissions are updated */
2515         if (body->mbo_valid & OBD_MD_FLFLAGS)
2516                 ll_update_inode_flags(inode, body->mbo_flags);
2517         if (body->mbo_valid & OBD_MD_FLMODE)
2518                 inode->i_mode = (inode->i_mode & S_IFMT) |
2519                                 (body->mbo_mode & ~S_IFMT);
2520
2521         if (body->mbo_valid & OBD_MD_FLTYPE)
2522                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
2523                                 (body->mbo_mode & S_IFMT);
2524
2525         LASSERT(inode->i_mode != 0);
2526         if (body->mbo_valid & OBD_MD_FLUID)
2527                 inode->i_uid = make_kuid(&init_user_ns, body->mbo_uid);
2528         if (body->mbo_valid & OBD_MD_FLGID)
2529                 inode->i_gid = make_kgid(&init_user_ns, body->mbo_gid);
2530         if (body->mbo_valid & OBD_MD_FLPROJID)
2531                 lli->lli_projid = body->mbo_projid;
2532         if (body->mbo_valid & OBD_MD_FLNLINK) {
2533                 spin_lock(&inode->i_lock);
2534                 set_nlink(inode, body->mbo_nlink);
2535                 spin_unlock(&inode->i_lock);
2536         }
2537         if (body->mbo_valid & OBD_MD_FLRDEV)
2538                 inode->i_rdev = old_decode_dev(body->mbo_rdev);
2539
2540         if (body->mbo_valid & OBD_MD_FLID) {
2541                 /* FID shouldn't be changed! */
2542                 if (fid_is_sane(&lli->lli_fid)) {
2543                         LASSERTF(lu_fid_eq(&lli->lli_fid, &body->mbo_fid1),
2544                                  "Trying to change FID "DFID
2545                                  " to the "DFID", inode "DFID"(%p)\n",
2546                                  PFID(&lli->lli_fid), PFID(&body->mbo_fid1),
2547                                  PFID(ll_inode2fid(inode)), inode);
2548                 } else {
2549                         lli->lli_fid = body->mbo_fid1;
2550                 }
2551         }
2552
2553         LASSERT(fid_seq(&lli->lli_fid) != 0);
2554
2555         lli->lli_attr_valid = body->mbo_valid;
2556         if (body->mbo_valid & OBD_MD_FLSIZE) {
2557                 i_size_write(inode, body->mbo_size);
2558
2559                 CDEBUG(D_VFSTRACE, "inode="DFID", updating i_size %llu\n",
2560                        PFID(ll_inode2fid(inode)),
2561                        (unsigned long long)body->mbo_size);
2562
2563                 if (body->mbo_valid & OBD_MD_FLBLOCKS)
2564                         inode->i_blocks = body->mbo_blocks;
2565         } else {
2566                 if (body->mbo_valid & OBD_MD_FLLAZYSIZE)
2567                         lli->lli_lazysize = body->mbo_size;
2568                 if (body->mbo_valid & OBD_MD_FLLAZYBLOCKS)
2569                         lli->lli_lazyblocks = body->mbo_blocks;
2570         }
2571
2572         if (body->mbo_valid & OBD_MD_TSTATE) {
2573                 /* Set LLIF_FILE_RESTORING if a restore is ongoing and
2574                  * clear it when done, to ensure we start glimpsing
2575                  * updated attrs again
2576                  */
2577                 if (body->mbo_t_state & MS_RESTORE)
2578                         set_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
2579                 else
2580                         clear_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
2581         }
2582
2583         return 0;
2584 }
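/*
 * Illustrative sketch (not part of the original file): the i_mode merging
 * done in ll_update_inode() above.  The file type bits (S_IFMT) and the
 * permission bits arrive under separate valid flags, so each update keeps
 * the half of i_mode it does not carry.  The helper name is hypothetical.
 */
static umode_t example_merge_mode(umode_t cur, umode_t new_mode,
                                  bool have_mode, bool have_type)
{
        if (have_mode)          /* keep the type, take the permissions */
                cur = (cur & S_IFMT) | (new_mode & ~S_IFMT);
        if (have_type)          /* keep the permissions, take the type */
                cur = (cur & ~S_IFMT) | (new_mode & S_IFMT);

        return cur;
}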
2585
2586 /* update directory depth to ROOT, called after LOOKUP lock is fetched. */
2587 void ll_update_dir_depth(struct inode *dir, struct inode *inode)
2588 {
2589         struct ll_inode_info *lli;
2590
2591         if (!S_ISDIR(inode->i_mode))
2592                 return;
2593
2594         if (inode == dir)
2595                 return;
2596
2597         lli = ll_i2info(inode);
2598         lli->lli_depth = ll_i2info(dir)->lli_depth + 1;
2599         CDEBUG(D_INODE, DFID" depth %hu\n", PFID(&lli->lli_fid), lli->lli_depth);
2600 }
2601
2602 void ll_truncate_inode_pages_final(struct inode *inode)
2603 {
2604         struct address_space *mapping = &inode->i_data;
2605         unsigned long nrpages;
2606         unsigned long flags;
2607
2608         truncate_inode_pages_final(mapping);
2609
2610         /* Workaround for LU-118: Note nrpages may not be totally updated when
2611          * truncate_inode_pages() returns, as there can be a page in the process
2612          * of deletion (inside __delete_from_page_cache()) in the specified
2613          * range. Thus mapping->nrpages can be non-zero when this function
2614          * returns even after truncation of the whole mapping.  Only do this if
2615          * nrpages isn't already zero.
2616          */
2617         nrpages = mapping->nrpages;
2618         if (nrpages) {
2619                 ll_xa_lock_irqsave(&mapping->i_pages, flags);
2620                 nrpages = mapping->nrpages;
2621                 ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
2622         } /* Workaround end */
2623
2624         LASSERTF(nrpages == 0, "%s: inode="DFID"(%p) nrpages=%lu, "
2625                  "see https://jira.whamcloud.com/browse/LU-118\n",
2626                  ll_i2sbi(inode)->ll_fsname,
2627                  PFID(ll_inode2fid(inode)), inode, nrpages);
2628 }
2629
2630 int ll_read_inode2(struct inode *inode, void *opaque)
2631 {
2632         struct lustre_md *md = opaque;
2633         struct ll_inode_info *lli = ll_i2info(inode);
2634         int     rc;
2635         ENTRY;
2636
2637         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
2638                PFID(&lli->lli_fid), inode);
2639
2640         /* Core attributes from the MDS first.  This is a new inode, and
2641          * the VFS doesn't zero times in the core inode so we have to do
2642          * it ourselves.  They will be overwritten by either MDS or OST
2643          * attributes - we just need to make sure they aren't newer.
2644          */
2645         inode->i_mtime.tv_sec = 0;
2646         inode->i_atime.tv_sec = 0;
2647         inode->i_ctime.tv_sec = 0;
2648         inode->i_rdev = 0;
2649         rc = ll_update_inode(inode, md);
2650         if (rc != 0)
2651                 RETURN(rc);
2652
2653         /* OIDEBUG(inode); */
2654
2655 #ifdef HAVE_BACKING_DEV_INFO
2656         /* initializing backing dev info. */
2657         inode->i_mapping->backing_dev_info = &s2lsi(inode->i_sb)->lsi_bdi;
2658 #endif
2659         if (S_ISREG(inode->i_mode)) {
2660                 struct ll_sb_info *sbi = ll_i2sbi(inode);
2661                 inode->i_op = &ll_file_inode_operations;
2662                 inode->i_fop = sbi->ll_fop;
2663                 inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
2664                 EXIT;
2665         } else if (S_ISDIR(inode->i_mode)) {
2666                 inode->i_op = &ll_dir_inode_operations;
2667                 inode->i_fop = &ll_dir_operations;
2668                 EXIT;
2669         } else if (S_ISLNK(inode->i_mode)) {
2670                 inode->i_op = &ll_fast_symlink_inode_operations;
2671                 EXIT;
2672         } else {
2673                 inode->i_op = &ll_special_inode_operations;
2674
2675                 init_special_inode(inode, inode->i_mode,
2676                                    inode->i_rdev);
2677
2678                 EXIT;
2679         }
2680
2681         return 0;
2682 }
2683
2684 void ll_delete_inode(struct inode *inode)
2685 {
2686         struct ll_inode_info *lli = ll_i2info(inode);
2687         ENTRY;
2688
2689         if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL) {
2690                 /* This is the last chance to write out dirty pages,
2691                  * otherwise we may lose data during umount.
2692                  *
2693                  * If i_nlink is 0 then just discard the data. This is safe
2694                  * because the local inode only gets i_nlink 0 from the server
2695                  * on the last unlink, so the file is not open anywhere else.
2696                  */
2697                 cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, inode->i_nlink ?
2698                                    CL_FSYNC_LOCAL : CL_FSYNC_DISCARD, 1);
2699         }
2700
2701         ll_truncate_inode_pages_final(inode);
2702         ll_clear_inode(inode);
2703         clear_inode(inode);
2704
2705         EXIT;
2706 }
2707
2708 int ll_iocontrol(struct inode *inode, struct file *file,
2709                  unsigned int cmd, unsigned long arg)
2710 {
2711         struct ll_sb_info *sbi = ll_i2sbi(inode);
2712         struct ptlrpc_request *req = NULL;
2713         int rc, flags = 0;
2714         ENTRY;
2715
2716         switch (cmd) {
2717         case FS_IOC_GETFLAGS: {
2718                 struct mdt_body *body;
2719                 struct md_op_data *op_data;
2720
2721                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
2722                                              0, 0, LUSTRE_OPC_ANY,
2723                                              NULL);
2724                 if (IS_ERR(op_data))
2725                         RETURN(PTR_ERR(op_data));
2726
2727                 op_data->op_valid = OBD_MD_FLFLAGS;
2728                 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
2729                 ll_finish_md_op_data(op_data);
2730                 if (rc) {
2731                         CERROR("%s: failure inode "DFID": rc = %d\n",
2732                                sbi->ll_md_exp->exp_obd->obd_name,
2733                                PFID(ll_inode2fid(inode)), rc);
2734                         RETURN(-abs(rc));
2735                 }
2736
2737                 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
2738
2739                 flags = body->mbo_flags;
2740
2741                 ptlrpc_req_finished(req);
2742
2743                 RETURN(put_user(flags, (int __user *)arg));
2744         }
2745         case FS_IOC_SETFLAGS: {
2746                 struct iattr *attr;
2747                 struct md_op_data *op_data;
2748                 struct cl_object *obj;
2749                 struct fsxattr fa = { 0 };
2750
2751                 if (get_user(flags, (int __user *)arg))
2752                         RETURN(-EFAULT);
2753
2754                 fa.fsx_projid = ll_i2info(inode)->lli_projid;
2755                 if (flags & LUSTRE_PROJINHERIT_FL)
2756                         fa.fsx_xflags = FS_XFLAG_PROJINHERIT;
2757
2758                 rc = ll_ioctl_check_project(inode, fa.fsx_xflags,
2759                                             fa.fsx_projid);
2760                 if (rc)
2761                         RETURN(rc);
2762
2763                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
2764                                              LUSTRE_OPC_ANY, NULL);
2765                 if (IS_ERR(op_data))
2766                         RETURN(PTR_ERR(op_data));
2767
2768                 op_data->op_attr_flags = flags;
2769                 op_data->op_xvalid |= OP_XVALID_FLAGS;
2770                 rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &req);
2771                 ll_finish_md_op_data(op_data);
2772                 ptlrpc_req_finished(req);
2773                 if (rc)
2774                         RETURN(rc);
2775
2776                 ll_update_inode_flags(inode, flags);
2777
2778                 obj = ll_i2info(inode)->lli_clob;
2779                 if (obj == NULL)
2780                         RETURN(0);
2781
2782                 OBD_ALLOC_PTR(attr);
2783                 if (attr == NULL)
2784                         RETURN(-ENOMEM);
2785
2786                 rc = cl_setattr_ost(obj, attr, OP_XVALID_FLAGS, flags);
2787
2788                 OBD_FREE_PTR(attr);
2789                 RETURN(rc);
2790         }
2791         default:
2792                 RETURN(-ENOSYS);
2793         }
2794
2795         RETURN(0);
2796 }
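
/*
 * Usage sketch (illustrative, not part of this file): the FS_IOC_GETFLAGS and
 * FS_IOC_SETFLAGS cases above are the same ioctls used by lsattr(1) and
 * chattr(1).  Assuming a hypothetical file /mnt/lustre/foo on a mounted
 * client, a minimal test program would do roughly the following (an int is
 * used for the flags word, matching the get_user()/put_user() calls above;
 * which flag bits are honoured depends on the server):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int fd = open("/mnt/lustre/foo", O_RDONLY);
 *	int flags;
 *
 *	if (fd >= 0 && ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
 *		flags |= FS_NOATIME_FL;
 *		ioctl(fd, FS_IOC_SETFLAGS, &flags);
 *	}
 *	close(fd);
 *
 * On the GET side the handler above fetches the flags from the MDT with
 * md_getattr(); on the SET side it pushes them with md_setattr() and, when a
 * cl_object exists, mirrors them to the OST objects via cl_setattr_ost().
 */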
2797
2798 int ll_flush_ctx(struct inode *inode)
2799 {
2800         struct ll_sb_info  *sbi = ll_i2sbi(inode);
2801
2802         CDEBUG(D_SEC, "flush context for user %d\n",
2803                from_kuid(&init_user_ns, current_uid()));
2804
2805         obd_set_info_async(NULL, sbi->ll_md_exp,
2806                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2807                            0, NULL, NULL);
2808         obd_set_info_async(NULL, sbi->ll_dt_exp,
2809                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2810                            0, NULL, NULL);
2811         return 0;
2812 }
2813
2814 /* umount -f client means force down, don't save state */
2815 void ll_umount_begin(struct super_block *sb)
2816 {
2817         struct ll_sb_info *sbi = ll_s2sbi(sb);
2818         struct obd_device *obd;
2819         struct obd_ioctl_data *ioc_data;
2820         int cnt;
2821         ENTRY;
2822
2823         CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
2824                sb->s_count, atomic_read(&sb->s_active));
2825
2826         obd = class_exp2obd(sbi->ll_md_exp);
2827         if (obd == NULL) {
2828                 CERROR("Invalid MDC connection handle %#llx\n",
2829                        sbi->ll_md_exp->exp_handle.h_cookie);
2830                 EXIT;
2831                 return;
2832         }
2833         obd->obd_force = 1;
2834
2835         obd = class_exp2obd(sbi->ll_dt_exp);
2836         if (obd == NULL) {
2837                 CERROR("Invalid LOV connection handle %#llx\n",
2838                        sbi->ll_dt_exp->exp_handle.h_cookie);
2839                 EXIT;
2840                 return;
2841         }
2842         obd->obd_force = 1;
2843
2844         OBD_ALLOC_PTR(ioc_data);
2845         if (ioc_data) {
2846                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
2847                               sizeof *ioc_data, ioc_data, NULL);
2848
2849                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
2850                               sizeof *ioc_data, ioc_data, NULL);
2851
2852                 OBD_FREE_PTR(ioc_data);
2853         }
2854
2855         /* Really, we'd like to wait until there are no requests outstanding,
2856          * and then continue.  For now, we just periodically check whether the
2857          * VFS has decremented mnt_cnt, and hope to finish within 10 seconds.
2858          */
2859         cnt = 10;
2860         while (cnt > 0 &&
2861                !may_umount(sbi->ll_mnt.mnt)) {
2862                 ssleep(1);
2863                 cnt -= 1;
2864         }
2865
2866         EXIT;
2867 }
2868
2869 int ll_remount_fs(struct super_block *sb, int *flags, char *data)
2870 {
2871         struct ll_sb_info *sbi = ll_s2sbi(sb);
2872         char *profilenm = get_profile_name(sb);
2873         int err;
2874         __u32 read_only;
2875
2876         if ((*flags & MS_RDONLY) != (sb->s_flags & SB_RDONLY)) {
2877                 read_only = *flags & MS_RDONLY;
2878                 err = obd_set_info_async(NULL, sbi->ll_md_exp,
2879                                          sizeof(KEY_READ_ONLY),
2880                                          KEY_READ_ONLY, sizeof(read_only),
2881                                          &read_only, NULL);
2882                 if (err) {
2883                         LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
2884                                       profilenm, read_only ?
2885                                       "read-only" : "read-write", err);
2886                         return err;
2887                 }
2888
2889                 if (read_only)
2890                         sb->s_flags |= SB_RDONLY;
2891                 else
2892                         sb->s_flags &= ~SB_RDONLY;
2893
2894                 if (test_bit(LL_SBI_VERBOSE, sbi->ll_flags))
2895                         LCONSOLE_WARN("Remounted %s %s\n", profilenm,
2896                                       read_only ?  "read-only" : "read-write");
2897         }
2898         return 0;
2899 }
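
/*
 * Usage sketch (illustrative, not part of this file): this path runs when the
 * client is remounted, e.g. "mount -o remount,ro /mnt/lustre" (the mount
 * point is a hypothetical example), which corresponds roughly to:
 *
 *	#include <sys/mount.h>
 *
 *	mount(NULL, "/mnt/lustre", NULL, MS_REMOUNT | MS_RDONLY, NULL);
 *
 * The new read-only state is sent to the MDC export via KEY_READ_ONLY before
 * SB_RDONLY is toggled on the superblock, so a failed server update leaves
 * the local mount flags unchanged.
 */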
2900
2901 /**
2902  * Clean up the open handle that is cached on the MDT side.
2903  *
2904  * In the open case, the client-side open handling thread may hit an error
2905  * after the MDT has granted the open. In that case, the client should send
2906  * a close RPC to the MDT as cleanup; otherwise, the open handle is leaked
2907  * on the MDT until the client unmounts or is evicted.
2908  *
2909  * Furthermore, if someone unlinks the file, the open handle still holds a
2910  * reference on the file/object, which blocks subsequent threads that want
2911  * to locate the object via FID.
2912  *
2913  * \param[in] sb        super block for this file system
2914  * \param[in] pill      request capsule of the original open request
2915  */
2916 void ll_open_cleanup(struct super_block *sb, struct req_capsule *pill)
2917 {
2918         struct mdt_body                 *body;
2919         struct md_op_data               *op_data;
2920         struct ptlrpc_request           *close_req = NULL;
2921         struct obd_export               *exp       = ll_s2sbi(sb)->ll_md_exp;
2922         ENTRY;
2923
2924         body = req_capsule_server_get(pill, &RMF_MDT_BODY);
2925         OBD_ALLOC_PTR(op_data);
2926         if (op_data == NULL) {
2927                 CWARN("%s: cannot allocate op_data to release open handle for "
2928                       DFID"\n", ll_s2sbi(sb)->ll_fsname, PFID(&body->mbo_fid1));
2929
2930                 RETURN_EXIT;
2931         }
2932
2933         op_data->op_fid1 = body->mbo_fid1;
2934         op_data->op_open_handle = body->mbo_open_handle;
2935         op_data->op_mod_time = ktime_get_real_seconds();
2936         md_close(exp, op_data, NULL, &close_req);
2937         ptlrpc_req_finished(close_req);
2938         ll_finish_md_op_data(op_data);
2939
2940         EXIT;
2941 }
2942
2943 int ll_prep_inode(struct inode **inode, struct req_capsule *pill,
2944                   struct super_block *sb, struct lookup_intent *it)
2945 {
2946         struct ll_sb_info *sbi = NULL;
2947         struct lustre_md md = { NULL };
2948         bool default_lmv_deleted = false;
2949         int rc;
2950
2951         ENTRY;
2952
2953         LASSERT(*inode || sb);
2954         sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
2955         rc = md_get_lustre_md(sbi->ll_md_exp, pill, sbi->ll_dt_exp,
2956                               sbi->ll_md_exp, &md);
2957         if (rc != 0)
2958                 GOTO(out, rc);
2959
2960         /*
2961          * Clear default_lmv only if the intent_getattr reply doesn't contain
2962          * it, but that must be done after iget; check the condition early here
2963          * because ll_update_lsm_md() may change md.
2964          */
2965         if (it && (it->it_op & (IT_LOOKUP | IT_GETATTR)) &&
2966             S_ISDIR(md.body->mbo_mode) && !md.default_lmv)
2967                 default_lmv_deleted = true;
2968
2969         if (*inode) {
2970                 rc = ll_update_inode(*inode, &md);
2971                 if (rc != 0)
2972                         GOTO(out, rc);
2973         } else {
2974                 bool api32 = test_bit(LL_SBI_32BIT_API, sbi->ll_flags);
2975                 struct lu_fid *fid1 = &md.body->mbo_fid1;
2976
2977                 LASSERT(sb != NULL);
2978
2979                 /*
2980                  * At this point the server returns the same FID that the
2981                  * client generated at create time, so using ->fid1 is okay.
2982                  */
2983                 if (!fid_is_sane(fid1)) {
2984                         CERROR("%s: Fid is insane "DFID"\n",
2985                                 sbi->ll_fsname, PFID(fid1));
2986                         GOTO(out, rc = -EINVAL);
2987                 }
2988
2989                 *inode = ll_iget(sb, cl_fid_build_ino(fid1, api32), &md);
2990                 if (IS_ERR(*inode)) {
2991                         lmd_clear_acl(&md);
2992                         rc = PTR_ERR(*inode);
2993                         *inode = NULL;
2994                         CERROR("new_inode -fatal: rc %d\n", rc);
2995                         GOTO(out, rc);
2996                 }
2997         }
2998
2999         /* Handle a piggybacked layout lock.
3000          * A layout lock can be piggybacked on getattr and open requests.
3001          * The lsm can be applied to the inode only if it comes with a layout
3002          * lock; otherwise the correct layout may be overwritten, for example:
3003          * 1. proc1: MDT returns an lsm but does not grant the layout lock
3004          * 2. the layout is changed by another client
3005          * 3. proc2: refreshes the layout and is granted the layout lock
3006          * 4. proc1: applies the now-stale layout */
3007         if (it != NULL && it->it_lock_mode != 0) {
3008                 struct lustre_handle lockh;
3009                 struct ldlm_lock *lock;
3010
3011                 lockh.cookie = it->it_lock_handle;
3012                 lock = ldlm_handle2lock(&lockh);
3013                 LASSERT(lock != NULL);
3014                 if (ldlm_has_layout(lock)) {
3015                         struct cl_object_conf conf;
3016
3017                         memset(&conf, 0, sizeof(conf));
3018                         conf.coc_opc = OBJECT_CONF_SET;
3019                         conf.coc_inode = *inode;
3020                         conf.coc_lock = lock;
3021                         conf.u.coc_layout = md.layout;
3022                         (void)ll_layout_conf(*inode, &conf);
3023                 }
3024                 LDLM_LOCK_PUT(lock);
3025         }
3026
3027         if (default_lmv_deleted)
3028                 ll_update_default_lsm_md(*inode, &md);
3029
3030         /* we may want to apply some policy for foreign file/dir */
3031         if (ll_sbi_has_foreign_symlink(sbi)) {
3032                 rc = ll_manage_foreign(*inode, &md);
3033                 if (rc < 0)
3034                         GOTO(out, rc);
3035         }
3036
3037         GOTO(out, rc = 0);
3038
3039 out:
3040         /* cleanup will be done if necessary */
3041         md_free_lustre_md(sbi->ll_md_exp, &md);
3042
3043         if (rc != 0 && it != NULL && it->it_op & IT_OPEN) {
3044                 ll_intent_drop_lock(it);
3045                 ll_open_cleanup(sb != NULL ? sb : (*inode)->i_sb, pill);
3046         }
3047
3048         return rc;
3049 }
3050
3051 int ll_obd_statfs(struct inode *inode, void __user *arg)
3052 {
3053         struct ll_sb_info *sbi = NULL;
3054         struct obd_export *exp;
3055         struct obd_ioctl_data *data = NULL;
3056         __u32 type;
3057         int len = 0, rc;
3058
3059         if (inode)
3060                 sbi = ll_i2sbi(inode);
3061         if (!sbi)
3062                 GOTO(out_statfs, rc = -EINVAL);
3063
3064         rc = obd_ioctl_getdata(&data, &len, arg);
3065         if (rc)
3066                 GOTO(out_statfs, rc);
3067
3068         if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
3069             !data->ioc_pbuf1 || !data->ioc_pbuf2)
3070                 GOTO(out_statfs, rc = -EINVAL);
3071
3072         if (data->ioc_inllen1 != sizeof(__u32) ||
3073             data->ioc_inllen2 != sizeof(__u32) ||
3074             data->ioc_plen1 != sizeof(struct obd_statfs) ||
3075             data->ioc_plen2 != sizeof(struct obd_uuid))
3076                 GOTO(out_statfs, rc = -EINVAL);
3077
3078         memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
3079         if (type & LL_STATFS_LMV)
3080                 exp = sbi->ll_md_exp;
3081         else if (type & LL_STATFS_LOV)
3082                 exp = sbi->ll_dt_exp;
3083         else
3084                 GOTO(out_statfs, rc = -ENODEV);
3085
3086         rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, data, NULL);
3087         if (rc)
3088                 GOTO(out_statfs, rc);
3089 out_statfs:
3090         OBD_FREE_LARGE(data, len);
3091         return rc;
3092 }
3093
3094 /*
3095  * This is normally called from ll_finish_md_op_data(), but sometimes it needs
3096  * to be called early to avoid deadlock.
3097  */
3098 void ll_unlock_md_op_lsm(struct md_op_data *op_data)
3099 {
3100         if (op_data->op_mea2_sem) {
3101                 up_read_non_owner(op_data->op_mea2_sem);
3102                 op_data->op_mea2_sem = NULL;
3103         }
3104
3105         if (op_data->op_mea1_sem) {
3106                 up_read_non_owner(op_data->op_mea1_sem);
3107                 op_data->op_mea1_sem = NULL;
3108         }
3109 }
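
/*
 * Usage sketch (illustrative, not part of this file): a caller that needs to
 * drop the striping semaphores before ll_finish_md_op_data() pairs the
 * helpers roughly as follows (the md_getattr() step is just an example of an
 * MD operation issued while op_data is live):
 *
 *	op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
 *				     LUSTRE_OPC_ANY, NULL);
 *	if (IS_ERR(op_data))
 *		RETURN(PTR_ERR(op_data));
 *	rc = md_getattr(sbi->ll_md_exp, op_data, &req);
 *	ll_unlock_md_op_lsm(op_data);    <-- early release of lli_lsm_sem
 *	...                              <-- work that must not hold lli_lsm_sem
 *	ll_finish_md_op_data(op_data);
 *
 * Because op_mea1_sem/op_mea2_sem are set to NULL after up_read_non_owner(),
 * the later ll_finish_md_op_data() will not drop the semaphores twice.
 */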
3110
3111 /* this function prepares md_op_data hint for passing it down to MD stack. */
3112 struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
3113                                       struct inode *i1, struct inode *i2,
3114                                       const char *name, size_t namelen,
3115                                       __u32 mode, enum md_op_code opc,
3116                                       void *data)
3117 {
3118         struct llcrypt_name fname = { 0 };
3119         int rc;
3120
3121         LASSERT(i1 != NULL);
3122
3123         if (name == NULL) {
3124                 /* Do not reuse namelen for something else. */
3125                 if (namelen != 0)
3126                         return ERR_PTR(-EINVAL);
3127         } else {
3128                 if (namelen > ll_i2sbi(i1)->ll_namelen)
3129                         return ERR_PTR(-ENAMETOOLONG);
3130
3131                 /* "/" is not a valid name, but it is allowed */
3132                 if (!lu_name_is_valid_2(name, namelen) &&
3133                     strncmp("/", name, namelen) != 0)
3134                         return ERR_PTR(-EINVAL);
3135         }
3136
3137         if (op_data == NULL)
3138                 OBD_ALLOC_PTR(op_data);
3139
3140         if (op_data == NULL)
3141                 return ERR_PTR(-ENOMEM);
3142
3143         ll_i2gids(op_data->op_suppgids, i1, i2);
3144         op_data->op_fid1 = *ll_inode2fid(i1);
3145
3146         if (S_ISDIR(i1->i_mode)) {
3147                 down_read_non_owner(&ll_i2info(i1)->lli_lsm_sem);
3148                 op_data->op_mea1_sem = &ll_i2info(i1)->lli_lsm_sem;
3149                 op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
3150                 op_data->op_default_mea1 = ll_i2info(i1)->lli_default_lsm_md;
3151         }
3152
3153         if (i2) {
3154                 op_data->op_fid2 = *ll_inode2fid(i2);
3155                 if (S_ISDIR(i2->i_mode)) {
3156                         if (i2 != i1) {
3157                                 /* i2 is typically a child of i1, and MUST be
3158                                  * further from the root to avoid deadlocks.
3159                                  */
3160                                 down_read_non_owner(&ll_i2info(i2)->lli_lsm_sem);
3161                                 op_data->op_mea2_sem =
3162                                                 &ll_i2info(i2)->lli_lsm_sem;
3163                         }
3164                         op_data->op_mea2 = ll_i2info(i2)->lli_lsm_md;
3165                 }
3166         } else {
3167                 fid_zero(&op_data->op_fid2);
3168         }
3169
3170         if (test_bit(LL_SBI_64BIT_HASH, ll_i2sbi(i1)->ll_flags))
3171                 op_data->op_cli_flags |= CLI_HASH64;
3172
3173         if (ll_need_32bit_api(ll_i2sbi(i1)))
3174                 op_data->op_cli_flags |= CLI_API32;
3175
3176         if (opc == LUSTRE_OPC_LOOKUP || opc == LUSTRE_OPC_CREATE) {
3177                 /* In the lookup case, ll_setup_filename() has already been
3178                  * called in ll_lookup_it(), so just take the provided name.
3179                  */
3180                 fname.disk_name.name = (unsigned char *)name;
3181                 fname.disk_name.len = namelen;
3182         } else if (name && namelen) {
3183                 struct qstr dname = QSTR_INIT(name, namelen);
3184                 struct inode *dir;
3185                 struct lu_fid *pfid = NULL;
3186                 struct lu_fid fid;
3187                 int lookup;
3188
3189                 if (!S_ISDIR(i1->i_mode) && i2 && S_ISDIR(i2->i_mode)) {
3190                         /* special case when called from ll_link() */
3191                         dir = i2;
3192                         lookup = 0;
3193                 } else {
3194                         dir = i1;