1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lustre/llite/llite_lib.c
32  *
33  * Lustre Light Super operations
34  */
35
36 #define DEBUG_SUBSYSTEM S_LLITE
37
38 #include <linux/cpu.h>
39 #include <linux/module.h>
40 #include <linux/random.h>
41 #include <linux/statfs.h>
42 #include <linux/time.h>
43 #include <linux/file.h>
44 #include <linux/types.h>
45 #include <libcfs/linux/linux-uuid.h>
46 #include <linux/version.h>
47 #include <linux/mm.h>
48 #include <linux/user_namespace.h>
49 #include <linux/delay.h>
50 #include <linux/uidgid.h>
51 #include <linux/fs_struct.h>
52
53 #ifndef HAVE_CPUS_READ_LOCK
54 #include <libcfs/linux/linux-cpu.h>
55 #endif
56 #include <libcfs/linux/linux-misc.h>
57 #include <uapi/linux/lustre/lustre_ioctl.h>
58 #ifdef HAVE_UAPI_LINUX_MOUNT_H
59 #include <uapi/linux/mount.h>
60 #endif
61
62 #include <lustre_ha.h>
63 #include <lustre_dlm.h>
64 #include <lprocfs_status.h>
65 #include <lustre_disk.h>
66 #include <uapi/linux/lustre/lustre_param.h>
67 #include <lustre_log.h>
68 #include <cl_object.h>
69 #include <obd_cksum.h>
70 #include "llite_internal.h"
71
72 struct kmem_cache *ll_file_data_slab;
73
74 #ifndef log2
75 #define log2(n) ffz(~(n))
76 #endif
77
78 /**
79  * If there is only one core visible to Lustre, async readahead will
80  * be disabled.  To avoid massive oversubscription, use 1/2 of the
81  * active cores as the default maximum number of concurrent async
82  * readahead requests.
83  */
84 static inline unsigned int ll_get_ra_async_max_active(void)
85 {
86         return cfs_cpt_weight(cfs_cpt_tab, CFS_CPT_ANY) >> 1;
87 }
88
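/**
 * Allocate and initialize the per-mount ll_sb_info: PCC state, the
 * readahead workqueue and limits, the client LRU page cache, foreign
 * symlink defaults, statahead and open-cache tunables, and the default
 * mount flags.  Returns the new ll_sb_info, or an ERR_PTR on failure.
 */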
89 static struct ll_sb_info *ll_init_sbi(void)
90 {
91         struct ll_sb_info *sbi = NULL;
92         unsigned long pages;
93         unsigned long lru_page_max;
94         struct sysinfo si;
95         int rc;
96
97         ENTRY;
98
99         OBD_ALLOC_PTR(sbi);
100         if (sbi == NULL)
101                 RETURN(ERR_PTR(-ENOMEM));
102
103         rc = pcc_super_init(&sbi->ll_pcc_super);
104         if (rc < 0)
105                 GOTO(out_sbi, rc);
106
107         spin_lock_init(&sbi->ll_lock);
108         mutex_init(&sbi->ll_lco.lco_lock);
109         spin_lock_init(&sbi->ll_pp_extent_lock);
110         spin_lock_init(&sbi->ll_process_lock);
111         sbi->ll_rw_stats_on = 0;
112         sbi->ll_statfs_max_age = OBD_STATFS_CACHE_SECONDS;
113
114         si_meminfo(&si);
115         pages = si.totalram - si.totalhigh;
116         lru_page_max = pages / 2;
117
118         sbi->ll_ra_info.ra_async_max_active = ll_get_ra_async_max_active();
119         sbi->ll_ra_info.ll_readahead_wq =
120                 cfs_cpt_bind_workqueue("ll-readahead-wq", cfs_cpt_tab,
121                                        0, CFS_CPT_ANY,
122                                        sbi->ll_ra_info.ra_async_max_active);
123         if (IS_ERR(sbi->ll_ra_info.ll_readahead_wq))
124                 GOTO(out_pcc, rc = PTR_ERR(sbi->ll_ra_info.ll_readahead_wq));
125
126         /* initialize ll_cache data */
127         sbi->ll_cache = cl_cache_init(lru_page_max);
128         if (sbi->ll_cache == NULL)
129                 GOTO(out_destroy_ra, rc = -ENOMEM);
130
131         /* initialize foreign symlink prefix path */
132         OBD_ALLOC(sbi->ll_foreign_symlink_prefix, sizeof("/mnt/"));
133         if (sbi->ll_foreign_symlink_prefix == NULL)
134                 GOTO(out_destroy_ra, rc = -ENOMEM);
135         memcpy(sbi->ll_foreign_symlink_prefix, "/mnt/", sizeof("/mnt/"));
136         sbi->ll_foreign_symlink_prefix_size = sizeof("/mnt/");
137
138         /* initialize foreign symlink upcall path, none by default */
139         OBD_ALLOC(sbi->ll_foreign_symlink_upcall, sizeof("none"));
140         if (sbi->ll_foreign_symlink_upcall == NULL)
141                 GOTO(out_destroy_ra, rc = -ENOMEM);
142         memcpy(sbi->ll_foreign_symlink_upcall, "none", sizeof("none"));
143         sbi->ll_foreign_symlink_upcall_items = NULL;
144         sbi->ll_foreign_symlink_upcall_nb_items = 0;
145         init_rwsem(&sbi->ll_foreign_symlink_sem);
146         /* foreign symlink support (LL_SBI_FOREIGN_SYMLINK in ll_flags)
147          * not enabled by default
148          */
149
150         sbi->ll_ra_info.ra_max_pages =
151                 min(pages / 32, SBI_DEFAULT_READ_AHEAD_MAX);
152         sbi->ll_ra_info.ra_max_pages_per_file =
153                 min(sbi->ll_ra_info.ra_max_pages / 4,
154                     SBI_DEFAULT_READ_AHEAD_PER_FILE_MAX);
155         sbi->ll_ra_info.ra_async_pages_per_file_threshold =
156                                 sbi->ll_ra_info.ra_max_pages_per_file;
157         sbi->ll_ra_info.ra_range_pages = SBI_DEFAULT_RA_RANGE_PAGES;
158         sbi->ll_ra_info.ra_max_read_ahead_whole_pages = -1;
159         atomic_set(&sbi->ll_ra_info.ra_async_inflight, 0);
160
161         set_bit(LL_SBI_VERBOSE, sbi->ll_flags);
162 #ifdef ENABLE_CHECKSUM
163         set_bit(LL_SBI_CHECKSUM, sbi->ll_flags);
164 #endif
165 #ifdef ENABLE_FLOCK
166         set_bit(LL_SBI_FLOCK, sbi->ll_flags);
167 #endif
168
169 #ifdef HAVE_LRU_RESIZE_SUPPORT
170         set_bit(LL_SBI_LRU_RESIZE, sbi->ll_flags);
171 #endif
172         set_bit(LL_SBI_LAZYSTATFS, sbi->ll_flags);
173
174         /* metadata statahead is enabled by default */
175         sbi->ll_sa_running_max = LL_SA_RUNNING_DEF;
176         sbi->ll_sa_max = LL_SA_RPC_DEF;
177         atomic_set(&sbi->ll_sa_total, 0);
178         atomic_set(&sbi->ll_sa_wrong, 0);
179         atomic_set(&sbi->ll_sa_running, 0);
180         atomic_set(&sbi->ll_agl_total, 0);
181         set_bit(LL_SBI_AGL_ENABLED, sbi->ll_flags);
182         set_bit(LL_SBI_FAST_READ, sbi->ll_flags);
183         set_bit(LL_SBI_TINY_WRITE, sbi->ll_flags);
184         set_bit(LL_SBI_PARALLEL_DIO, sbi->ll_flags);
185         ll_sbi_set_encrypt(sbi, true);
186
187         /* root squash */
188         sbi->ll_squash.rsi_uid = 0;
189         sbi->ll_squash.rsi_gid = 0;
190         INIT_LIST_HEAD(&sbi->ll_squash.rsi_nosquash_nids);
191         spin_lock_init(&sbi->ll_squash.rsi_lock);
192
193         /* Per-filesystem file heat */
194         sbi->ll_heat_decay_weight = SBI_DEFAULT_HEAT_DECAY_WEIGHT;
195         sbi->ll_heat_period_second = SBI_DEFAULT_HEAT_PERIOD_SECOND;
196
197         /* Per-fs open heat level before requesting open lock */
198         sbi->ll_oc_thrsh_count = SBI_DEFAULT_OPENCACHE_THRESHOLD_COUNT;
199         sbi->ll_oc_max_ms = SBI_DEFAULT_OPENCACHE_THRESHOLD_MAX_MS;
200         sbi->ll_oc_thrsh_ms = SBI_DEFAULT_OPENCACHE_THRESHOLD_MS;
201         RETURN(sbi);
202 out_destroy_ra:
203         if (sbi->ll_foreign_symlink_prefix)
204                 OBD_FREE(sbi->ll_foreign_symlink_prefix, sizeof("/mnt/"));
205         if (sbi->ll_cache) {
206                 cl_cache_decref(sbi->ll_cache);
207                 sbi->ll_cache = NULL;
208         }
209         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
210 out_pcc:
211         pcc_super_fini(&sbi->ll_pcc_super);
212 out_sbi:
213         OBD_FREE_PTR(sbi);
214         RETURN(ERR_PTR(rc));
215 }
216
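/**
 * Release everything set up by ll_init_sbi() for this superblock: the
 * nosquash NID list, readahead workqueue, LRU cache reference, foreign
 * symlink buffers, rw stats and PCC state, then the ll_sb_info itself.
 */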
217 static void ll_free_sbi(struct super_block *sb)
218 {
219         struct ll_sb_info *sbi = ll_s2sbi(sb);
220         ENTRY;
221
222         if (sbi != NULL) {
223                 if (!list_empty(&sbi->ll_squash.rsi_nosquash_nids))
224                         cfs_free_nidlist(&sbi->ll_squash.rsi_nosquash_nids);
225                 if (sbi->ll_ra_info.ll_readahead_wq)
226                         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
227                 if (sbi->ll_cache != NULL) {
228                         cl_cache_decref(sbi->ll_cache);
229                         sbi->ll_cache = NULL;
230                 }
231                 if (sbi->ll_foreign_symlink_prefix) {
232                         OBD_FREE(sbi->ll_foreign_symlink_prefix,
233                                  sbi->ll_foreign_symlink_prefix_size);
234                         sbi->ll_foreign_symlink_prefix = NULL;
235                 }
236                 if (sbi->ll_foreign_symlink_upcall) {
237                         OBD_FREE(sbi->ll_foreign_symlink_upcall,
238                                  strlen(sbi->ll_foreign_symlink_upcall) +
239                                        1);
240                         sbi->ll_foreign_symlink_upcall = NULL;
241                 }
242                 if (sbi->ll_foreign_symlink_upcall_items) {
243                         int i;
244                         int nb_items = sbi->ll_foreign_symlink_upcall_nb_items;
245                         struct ll_foreign_symlink_upcall_item *items =
246                                 sbi->ll_foreign_symlink_upcall_items;
247
248                         for (i = 0 ; i < nb_items; i++)
249                                 if (items[i].type == STRING_TYPE)
250                                         OBD_FREE(items[i].string,
251                                                        items[i].size);
252
253                         OBD_FREE_LARGE(items, nb_items *
254                                 sizeof(struct ll_foreign_symlink_upcall_item));
255                         sbi->ll_foreign_symlink_upcall_items = NULL;
256                 }
257                 ll_free_rw_stats_info(sbi);
258                 pcc_super_fini(&sbi->ll_pcc_super);
259                 OBD_FREE(sbi, sizeof(*sbi));
260         }
261         EXIT;
262 }
263
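/**
 * Connect this client to its metadata (\a md) and data (\a dt) targets,
 * negotiate the connect flags, fetch the root FID and build the root
 * inode and dentry for \a sb.  Called from ll_fill_super() once the
 * client config log has been processed.
 */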
264 static int client_common_fill_super(struct super_block *sb, char *md, char *dt)
265 {
266         struct inode *root = NULL;
267         struct ll_sb_info *sbi = ll_s2sbi(sb);
268         struct obd_statfs *osfs = NULL;
269         struct ptlrpc_request *request = NULL;
270         struct obd_connect_data *data = NULL;
271         struct obd_uuid *uuid;
272         struct md_op_data *op_data;
273         struct lustre_md lmd;
274         u64 valid;
275         int size, err, checksum;
276         bool api32;
277
278         ENTRY;
279         sbi->ll_md_obd = class_name2obd(md);
280         if (!sbi->ll_md_obd) {
281                 CERROR("MD %s: not setup or attached\n", md);
282                 RETURN(-EINVAL);
283         }
284
285         OBD_ALLOC_PTR(data);
286         if (data == NULL)
287                 RETURN(-ENOMEM);
288
289         OBD_ALLOC_PTR(osfs);
290         if (osfs == NULL) {
291                 OBD_FREE_PTR(data);
292                 RETURN(-ENOMEM);
293         }
294
295         /* pass client page size via ocd_grant_blkbits; the server should
296          * report back its backend blocksize for grant calculation purposes */
297         data->ocd_grant_blkbits = PAGE_SHIFT;
298
299         /* indicate MDT features supported by this client */
300         data->ocd_connect_flags = OBD_CONNECT_IBITS    | OBD_CONNECT_NODEVOH  |
301                                   OBD_CONNECT_ATTRFID  | OBD_CONNECT_GRANT |
302                                   OBD_CONNECT_VERSION  | OBD_CONNECT_BRW_SIZE |
303                                   OBD_CONNECT_SRVLOCK  |
304                                   OBD_CONNECT_MDS_CAPA | OBD_CONNECT_OSS_CAPA |
305                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID     |
306                                   OBD_CONNECT_AT       | OBD_CONNECT_LOV_V3   |
307                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
308                                   OBD_CONNECT_64BITHASH |
309                                   OBD_CONNECT_EINPROGRESS |
310                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
311                                   OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS|
312                                   OBD_CONNECT_MAX_EASIZE |
313                                   OBD_CONNECT_FLOCK_DEAD |
314                                   OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK |
315                                   OBD_CONNECT_OPEN_BY_FID |
316                                   OBD_CONNECT_DIR_STRIPE |
317                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_CKSUM |
318                                   OBD_CONNECT_SUBTREE |
319                                   OBD_CONNECT_MULTIMODRPCS |
320                                   OBD_CONNECT_GRANT_PARAM |
321                                   OBD_CONNECT_GRANT_SHRINK |
322                                   OBD_CONNECT_SHORTIO | OBD_CONNECT_FLAGS2;
323
324         data->ocd_connect_flags2 = OBD_CONNECT2_DIR_MIGRATE |
325                                    OBD_CONNECT2_SUM_STATFS |
326                                    OBD_CONNECT2_OVERSTRIPING |
327                                    OBD_CONNECT2_FLR |
328                                    OBD_CONNECT2_LOCK_CONVERT |
329                                    OBD_CONNECT2_ARCHIVE_ID_ARRAY |
330                                    OBD_CONNECT2_INC_XID |
331                                    OBD_CONNECT2_LSOM |
332                                    OBD_CONNECT2_ASYNC_DISCARD |
333                                    OBD_CONNECT2_PCC |
334                                    OBD_CONNECT2_CRUSH | OBD_CONNECT2_LSEEK |
335                                    OBD_CONNECT2_GETATTR_PFID |
336                                    OBD_CONNECT2_DOM_LVB |
337                                    OBD_CONNECT2_REP_MBITS |
338                                    OBD_CONNECT2_ATOMIC_OPEN_LOCK;
339
340 #ifdef HAVE_LRU_RESIZE_SUPPORT
341         if (test_bit(LL_SBI_LRU_RESIZE, sbi->ll_flags))
342                 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
343 #endif
344         data->ocd_connect_flags |= OBD_CONNECT_ACL_FLAGS;
345
346         data->ocd_cksum_types = obd_cksum_types_supported_client();
347
348         if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
349                 /* flag mdc connection as lightweight, only used for test
350                  * purposes, use with care */
351                 data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
352
353         data->ocd_ibits_known = MDS_INODELOCK_FULL;
354         data->ocd_version = LUSTRE_VERSION_CODE;
355
356         if (sb->s_flags & SB_RDONLY)
357                 data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
358         if (test_bit(LL_SBI_USER_XATTR, sbi->ll_flags))
359                 data->ocd_connect_flags |= OBD_CONNECT_XATTR;
360
361 #ifdef SB_NOSEC
362         /* Setting this indicates we correctly support S_NOSEC (See kernel
363          * commit 9e1f1de02c2275d7172e18dc4e7c2065777611bf)
364          */
365         sb->s_flags |= SB_NOSEC;
366 #endif
367         sbi->ll_fop = ll_select_file_operations(sbi);
368
369         /* always ping even if server suppress_pings */
370         if (test_bit(LL_SBI_ALWAYS_PING, sbi->ll_flags))
371                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
372
373         obd_connect_set_secctx(data);
374         if (ll_sbi_has_encrypt(sbi))
375                 obd_connect_set_enc(data);
376
377 #if defined(CONFIG_SECURITY)
378         data->ocd_connect_flags2 |= OBD_CONNECT2_SELINUX_POLICY;
379 #endif
380
381         data->ocd_brw_size = MD_MAX_BRW_SIZE;
382
383         err = obd_connect(NULL, &sbi->ll_md_exp, sbi->ll_md_obd,
384                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
385         if (err == -EBUSY) {
386                 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
387                                    "recovery, of which this client is not a "
388                                    "part. Please wait for recovery to complete,"
389                                    " abort, or time out.\n", md);
390                 GOTO(out, err);
391         } else if (err) {
392                 CERROR("cannot connect to %s: rc = %d\n", md, err);
393                 GOTO(out, err);
394         }
395
396         sbi->ll_md_exp->exp_connect_data = *data;
397
398         err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
399                            LUSTRE_SEQ_METADATA);
400         if (err) {
401                 CERROR("%s: Can't init metadata layer FID infrastructure, "
402                        "rc = %d\n", sbi->ll_md_exp->exp_obd->obd_name, err);
403                 GOTO(out_md, err);
404         }
405
406         /* For mount, we only need fs info from MDT0; in DNE this also
407          * ensures the client can be mounted as long as MDT0 is
408          * available */
409         err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
410                         ktime_get_seconds() - sbi->ll_statfs_max_age,
411                         OBD_STATFS_FOR_MDT0);
412         if (err)
413                 GOTO(out_md_fid, err);
414
415         /* This needs to be after statfs to ensure connect has finished.
416          * Note that "data" does NOT contain the valid connect reply.
417          * If connecting to a 1.8 server there will be no LMV device, so
418          * we can access the MDC export directly and exp_connect_flags will
419          * be non-zero, but if accessing an upgraded 2.1 server it will
420          * have the correct flags filled in.
421          * XXX: fill in the LMV exp_connect_flags from MDC(s). */
422         valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
423         if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
424             valid != CLIENT_CONNECT_MDT_REQD) {
425                 char *buf;
426
427                 OBD_ALLOC_WAIT(buf, PAGE_SIZE);
428                 obd_connect_flags2str(buf, PAGE_SIZE,
429                                       valid ^ CLIENT_CONNECT_MDT_REQD, 0, ",");
430                 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support "
431                                    "feature(s) needed for correct operation "
432                                    "of this client (%s). Please upgrade "
433                                    "server or downgrade client.\n",
434                                    sbi->ll_md_exp->exp_obd->obd_name, buf);
435                 OBD_FREE(buf, PAGE_SIZE);
436                 GOTO(out_md_fid, err = -EPROTO);
437         }
438
439         size = sizeof(*data);
440         err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
441                            KEY_CONN_DATA,  &size, data);
442         if (err) {
443                 CERROR("%s: Get connect data failed: rc = %d\n",
444                        sbi->ll_md_exp->exp_obd->obd_name, err);
445                 GOTO(out_md_fid, err);
446         }
447
448         LASSERT(osfs->os_bsize);
449         sb->s_blocksize = osfs->os_bsize;
450         sb->s_blocksize_bits = log2(osfs->os_bsize);
451         sb->s_magic = LL_SUPER_MAGIC;
452         sb->s_maxbytes = MAX_LFS_FILESIZE;
453         sbi->ll_namelen = osfs->os_namelen;
454         sbi->ll_mnt.mnt = current->fs->root.mnt;
455
456         if (test_bit(LL_SBI_USER_XATTR, sbi->ll_flags) &&
457             !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
458                 LCONSOLE_INFO("Disabling user_xattr feature because "
459                               "it is not supported on the server\n");
460                 clear_bit(LL_SBI_USER_XATTR, sbi->ll_flags);
461         }
462
463         if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
464 #ifdef SB_POSIXACL
465                 sb->s_flags |= SB_POSIXACL;
466 #endif
467                 set_bit(LL_SBI_ACL, sbi->ll_flags);
468         } else {
469                 LCONSOLE_INFO("client wants to enable acl, but mdt does not!\n");
470 #ifdef SB_POSIXACL
471                 sb->s_flags &= ~SB_POSIXACL;
472 #endif
473                 clear_bit(LL_SBI_ACL, sbi->ll_flags);
474         }
475
476         if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
477                 set_bit(LL_SBI_64BIT_HASH, sbi->ll_flags);
478
479         if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK)
480                 set_bit(LL_SBI_LAYOUT_LOCK, sbi->ll_flags);
481
482         if (obd_connect_has_secctx(data))
483                 set_bit(LL_SBI_FILE_SECCTX, sbi->ll_flags);
484
485         if (ll_sbi_has_encrypt(sbi) && !obd_connect_has_enc(data)) {
486                 if (ll_sbi_has_test_dummy_encryption(sbi))
487                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
488                                       sbi->ll_fsname,
489                                       sbi->ll_md_exp->exp_obd->obd_name);
490                 ll_sbi_set_encrypt(sbi, false);
491         }
492
493         if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
494                 if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
495                         LCONSOLE_INFO("%s: disabling xattr cache due to "
496                                       "unknown maximum xattr size.\n", dt);
497                 } else if (!sbi->ll_xattr_cache_set) {
498                         /* If xattr_cache was already set (no matter to 0 or 1)
499                          * while processing the llog, it won't be enabled here. */
500                         set_bit(LL_SBI_XATTR_CACHE, sbi->ll_flags);
501                         sbi->ll_xattr_cache_enabled = 1;
502                 }
503         }
504
505         sbi->ll_dt_obd = class_name2obd(dt);
506         if (!sbi->ll_dt_obd) {
507                 CERROR("DT %s: not setup or attached\n", dt);
508                 GOTO(out_md_fid, err = -ENODEV);
509         }
510
511         /* pass client page size via ocd_grant_blkbits; the server should
512          * report back its backend blocksize for grant calculation purposes */
513         data->ocd_grant_blkbits = PAGE_SHIFT;
514
515         /* indicate OST features supported by this client */
516         data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
517                                   OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
518                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
519                                   OBD_CONNECT_SRVLOCK |
520                                   OBD_CONNECT_AT | OBD_CONNECT_OSS_CAPA |
521                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
522                                   OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
523                                   OBD_CONNECT_EINPROGRESS |
524                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
525                                   OBD_CONNECT_LAYOUTLOCK |
526                                   OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK |
527                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_SHORTIO |
528                                   OBD_CONNECT_FLAGS2 | OBD_CONNECT_GRANT_SHRINK;
529         data->ocd_connect_flags2 = OBD_CONNECT2_LOCKAHEAD |
530                                    OBD_CONNECT2_INC_XID | OBD_CONNECT2_LSEEK |
531                                    OBD_CONNECT2_REP_MBITS;
532
533         if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_GRANT_PARAM))
534                 data->ocd_connect_flags |= OBD_CONNECT_GRANT_PARAM;
535
536         /* OBD_CONNECT_CKSUM should always be set, even if checksums are
537          * disabled by default, because it can still be enabled on the
538          * fly via /sys. As a consequence, we still need to come to an
539          * agreement on the supported algorithms at connect time
540          */
541         data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
542
543         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
544                 data->ocd_cksum_types = OBD_CKSUM_ADLER;
545         else
546                 data->ocd_cksum_types = obd_cksum_types_supported_client();
547
548 #ifdef HAVE_LRU_RESIZE_SUPPORT
549         data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
550 #endif
551         /* always ping even if server suppress_pings */
552         if (test_bit(LL_SBI_ALWAYS_PING, sbi->ll_flags))
553                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
554
555         if (ll_sbi_has_encrypt(sbi))
556                 obd_connect_set_enc(data);
557
558         CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d "
559                "ocd_grant: %d\n", data->ocd_connect_flags,
560                data->ocd_version, data->ocd_grant);
561
562         sbi->ll_dt_obd->obd_upcall.onu_owner = &sbi->ll_lco;
563         sbi->ll_dt_obd->obd_upcall.onu_upcall = cl_ocd_update;
564
565         data->ocd_brw_size = DT_MAX_BRW_SIZE;
566
567         err = obd_connect(NULL, &sbi->ll_dt_exp, sbi->ll_dt_obd,
568                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
569         if (err == -EBUSY) {
570                 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
571                                    "recovery, of which this client is not a "
572                                    "part.  Please wait for recovery to "
573                                    "complete, abort, or time out.\n", dt);
574                 GOTO(out_md, err);
575         } else if (err) {
576                 CERROR("%s: Cannot connect to %s: rc = %d\n",
577                        sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
578                 GOTO(out_md, err);
579         }
580
581         if (ll_sbi_has_encrypt(sbi) &&
582             !obd_connect_has_enc(&sbi->ll_dt_obd->u.lov.lov_ocd)) {
583                 if (ll_sbi_has_test_dummy_encryption(sbi))
584                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
585                                       sbi->ll_fsname, dt);
586                 ll_sbi_set_encrypt(sbi, false);
587         } else if (ll_sbi_has_test_dummy_encryption(sbi)) {
588                 LCONSOLE_WARN("Test dummy encryption mode enabled\n");
589         }
590
591         sbi->ll_dt_exp->exp_connect_data = *data;
592
593         /* Don't change value if it was specified in the config log */
594         if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages == -1) {
595                 sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
596                         max_t(unsigned long, SBI_DEFAULT_READ_AHEAD_WHOLE_MAX,
597                               (data->ocd_brw_size >> PAGE_SHIFT));
598                 if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages >
599                     sbi->ll_ra_info.ra_max_pages_per_file)
600                         sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
601                                 sbi->ll_ra_info.ra_max_pages_per_file;
602         }
603
604         err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
605                            LUSTRE_SEQ_METADATA);
606         if (err) {
607                 CERROR("%s: Can't init data layer FID infrastructure, "
608                        "rc = %d\n", sbi->ll_dt_exp->exp_obd->obd_name, err);
609                 GOTO(out_dt, err);
610         }
611
612         mutex_lock(&sbi->ll_lco.lco_lock);
613         sbi->ll_lco.lco_flags = data->ocd_connect_flags;
614         sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
615         sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
616         mutex_unlock(&sbi->ll_lco.lco_lock);
617
618         fid_zero(&sbi->ll_root_fid);
619         err = md_get_root(sbi->ll_md_exp, get_mount_fileset(sb),
620                            &sbi->ll_root_fid);
621         if (err) {
622                 CERROR("cannot mds_connect: rc = %d\n", err);
623                 GOTO(out_lock_cn_cb, err);
624         }
625         if (!fid_is_sane(&sbi->ll_root_fid)) {
626                 CERROR("%s: Invalid root fid "DFID" during mount\n",
627                        sbi->ll_md_exp->exp_obd->obd_name,
628                        PFID(&sbi->ll_root_fid));
629                 GOTO(out_lock_cn_cb, err = -EINVAL);
630         }
631         CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
632
633         sb->s_op = &lustre_super_operations;
634         sb->s_xattr = ll_xattr_handlers;
635 #if THREAD_SIZE >= 8192 /*b=17630*/
636         sb->s_export_op = &lustre_export_operations;
637 #endif
638 #ifdef HAVE_LUSTRE_CRYPTO
639         llcrypt_set_ops(sb, &lustre_cryptops);
640 #endif
641
642         /* make root inode
643          * XXX: move this to after cbd setup? */
644         valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE;
645         if (test_bit(LL_SBI_ACL, sbi->ll_flags))
646                 valid |= OBD_MD_FLACL;
647
648         OBD_ALLOC_PTR(op_data);
649         if (op_data == NULL)
650                 GOTO(out_lock_cn_cb, err = -ENOMEM);
651
652         op_data->op_fid1 = sbi->ll_root_fid;
653         op_data->op_mode = 0;
654         op_data->op_valid = valid;
655
656         err = md_getattr(sbi->ll_md_exp, op_data, &request);
657
658         OBD_FREE_PTR(op_data);
659         if (err) {
660                 CERROR("%s: md_getattr failed for root: rc = %d\n",
661                        sbi->ll_md_exp->exp_obd->obd_name, err);
662                 GOTO(out_lock_cn_cb, err);
663         }
664
665         err = md_get_lustre_md(sbi->ll_md_exp, &request->rq_pill,
666                                sbi->ll_dt_exp, sbi->ll_md_exp, &lmd);
667         if (err) {
668                 CERROR("failed to understand root inode md: rc = %d\n", err);
669                 ptlrpc_req_finished(request);
670                 GOTO(out_lock_cn_cb, err);
671         }
672
673         LASSERT(fid_is_sane(&sbi->ll_root_fid));
674         api32 = test_bit(LL_SBI_32BIT_API, sbi->ll_flags);
675         root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid, api32), &lmd);
676         md_free_lustre_md(sbi->ll_md_exp, &lmd);
677         ptlrpc_req_finished(request);
678
679         if (IS_ERR(root)) {
680                 lmd_clear_acl(&lmd);
681                 err = IS_ERR(root) ? PTR_ERR(root) : -EBADF;
682                 root = NULL;
683                 CERROR("%s: bad ll_iget() for root: rc = %d\n",
684                        sbi->ll_fsname, err);
685                 GOTO(out_root, err);
686         }
687
688         checksum = test_bit(LL_SBI_CHECKSUM, sbi->ll_flags);
689         if (sbi->ll_checksum_set) {
690                 err = obd_set_info_async(NULL, sbi->ll_dt_exp,
691                                          sizeof(KEY_CHECKSUM), KEY_CHECKSUM,
692                                          sizeof(checksum), &checksum, NULL);
693                 if (err) {
694                         CERROR("%s: Set checksum failed: rc = %d\n",
695                                sbi->ll_dt_exp->exp_obd->obd_name, err);
696                         GOTO(out_root, err);
697                 }
698         }
699         cl_sb_init(sb);
700
701         sb->s_root = d_make_root(root);
702         if (sb->s_root == NULL) {
703                 err = -ENOMEM;
704                 CERROR("%s: can't make root dentry: rc = %d\n",
705                        sbi->ll_fsname, err);
706                 GOTO(out_root, err);
707         }
708
709         sbi->ll_sdev_orig = sb->s_dev;
710
711         /* We set sb->s_dev equal on all lustre clients in order to support
712          * NFS export clustering.  NFSD requires that the FSID be the same
713          * on all clients. */
714         /* s_dev is also used in lt_compare() to compare two fs, but that is
715          * only a node-local comparison. */
716         uuid = obd_get_uuid(sbi->ll_md_exp);
717         if (uuid != NULL)
718                 sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
719
720         if (data != NULL)
721                 OBD_FREE_PTR(data);
722         if (osfs != NULL)
723                 OBD_FREE_PTR(osfs);
724
725         if (sbi->ll_dt_obd) {
726                 err = sysfs_create_link(&sbi->ll_kset.kobj,
727                                         &sbi->ll_dt_obd->obd_kset.kobj,
728                                         sbi->ll_dt_obd->obd_type->typ_name);
729                 if (err < 0) {
730                         CERROR("%s: could not register %s in llite: rc = %d\n",
731                                dt, sbi->ll_fsname, err);
732                         err = 0;
733                 }
734         }
735
736         if (sbi->ll_md_obd) {
737                 err = sysfs_create_link(&sbi->ll_kset.kobj,
738                                         &sbi->ll_md_obd->obd_kset.kobj,
739                                         sbi->ll_md_obd->obd_type->typ_name);
740                 if (err < 0) {
741                         CERROR("%s: could not register %s in llite: rc = %d\n",
742                                md, sbi->ll_fsname, err);
743                         err = 0;
744                 }
745         }
746
747         RETURN(err);
748 out_root:
749         iput(root);
750 out_lock_cn_cb:
751         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
752 out_dt:
753         obd_disconnect(sbi->ll_dt_exp);
754         sbi->ll_dt_exp = NULL;
755         sbi->ll_dt_obd = NULL;
756 out_md_fid:
757         obd_fid_fini(sbi->ll_md_exp->exp_obd);
758 out_md:
759         obd_disconnect(sbi->ll_md_exp);
760         sbi->ll_md_exp = NULL;
761         sbi->ll_md_obd = NULL;
762 out:
763         if (data != NULL)
764                 OBD_FREE_PTR(data);
765         if (osfs != NULL)
766                 OBD_FREE_PTR(osfs);
767         return err;
768 }
769
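/**
 * Query the maximum LOV EA size from the data export and the maximum
 * LMV EA size from the metadata export; the metadata value is what is
 * finally returned in \a lmmsize.
 */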
770 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
771 {
772         int size, rc;
773
774         size = sizeof(*lmmsize);
775         rc = obd_get_info(NULL, sbi->ll_dt_exp, sizeof(KEY_MAX_EASIZE),
776                           KEY_MAX_EASIZE, &size, lmmsize);
777         if (rc != 0) {
778                 CERROR("%s: cannot get max LOV EA size: rc = %d\n",
779                        sbi->ll_dt_exp->exp_obd->obd_name, rc);
780                 RETURN(rc);
781         }
782
783         CDEBUG(D_INFO, "max LOV ea size: %d\n", *lmmsize);
784
785         size = sizeof(int);
786         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
787                           KEY_MAX_EASIZE, &size, lmmsize);
788         if (rc)
789                 CERROR("Get max mdsize error rc %d\n", rc);
790
791         CDEBUG(D_INFO, "max LMV ea size: %d\n", *lmmsize);
792
793         RETURN(rc);
794 }
795
796 /**
797  * Get the value of the default_easize parameter.
798  *
799  * \see client_obd::cl_default_mds_easize
800  *
801  * \param[in] sbi       superblock info for this filesystem
802  * \param[out] lmmsize  pointer to storage location for value
803  *
804  * \retval 0            on success
805  * \retval negative     negated errno on failure
806  */
807 int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
808 {
809         int size, rc;
810
811         size = sizeof(int);
812         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
813                          KEY_DEFAULT_EASIZE, &size, lmmsize);
814         if (rc)
815                 CERROR("Get default mdsize error rc %d\n", rc);
816
817         RETURN(rc);
818 }
819
820 /**
821  * Set the default_easize parameter to the given value.
822  *
823  * \see client_obd::cl_default_mds_easize
824  *
825  * \param[in] sbi       superblock info for this filesystem
826  * \param[in] lmmsize   the size to set
827  *
828  * \retval 0            on success
829  * \retval negative     negated errno on failure
830  */
831 int ll_set_default_mdsize(struct ll_sb_info *sbi, int lmmsize)
832 {
833         int rc;
834
835         if (lmmsize < sizeof(struct lov_mds_md) ||
836             lmmsize > OBD_MAX_DEFAULT_EA_SIZE)
837                 return -EINVAL;
838
839         rc = obd_set_info_async(NULL, sbi->ll_md_exp,
840                                 sizeof(KEY_DEFAULT_EASIZE), KEY_DEFAULT_EASIZE,
841                                 sizeof(int), &lmmsize, NULL);
842
843         RETURN(rc);
844 }
845
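/**
 * Undo client_common_fill_super(): finish the cl_object layer for
 * \a sb, then shut down FID handling and disconnect from the data and
 * metadata exports.
 */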
846 static void client_common_put_super(struct super_block *sb)
847 {
848         struct ll_sb_info *sbi = ll_s2sbi(sb);
849         ENTRY;
850
851         cl_sb_fini(sb);
852
853         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
854         obd_disconnect(sbi->ll_dt_exp);
855         sbi->ll_dt_exp = NULL;
856
857         ll_debugfs_unregister_super(sb);
858
859         obd_fid_fini(sbi->ll_md_exp->exp_obd);
860         obd_disconnect(sbi->ll_md_exp);
861         sbi->ll_md_exp = NULL;
862
863         EXIT;
864 }
865
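/**
 * Prepare \a sb for being killed: restore the original s_dev (changed
 * for NFS export clustering) and wait for running statahead threads to
 * stop before the superblock goes away.
 */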
866 void ll_kill_super(struct super_block *sb)
867 {
868         struct ll_sb_info *sbi;
869         ENTRY;
870
871         /* sb not initialized? */
872         if (!(sb->s_flags & SB_ACTIVE))
873                 return;
874
875         sbi = ll_s2sbi(sb);
876         /* we need to restore s_dev, which was changed for clustered NFS,
877          * before put_super, because new kernels cache s_dev and changing
878          * sb->s_dev in put_super does not affect the real device removal */
879         if (sbi) {
880                 sb->s_dev = sbi->ll_sdev_orig;
881
882                 /* wait for running statahead threads to quit */
883                 while (atomic_read(&sbi->ll_sa_running) > 0)
884                         schedule_timeout_uninterruptible(
885                                 cfs_time_seconds(1) >> 3);
886         }
887
888         EXIT;
889 }
890
891 /* Since this table is also used by ll_sbi_flags_seq_show(), make
892  * sure that the name you want displayed for a token that is
893  * listed more than once below is listed first.  For example, we
894  * want "checksum" displayed, not "nochecksum", for the
895  * sbi_flags.
896  */
897 static const match_table_t ll_sbi_flags_name = {
898         {LL_SBI_NOLCK,                  "nolock"},
899         {LL_SBI_CHECKSUM,               "checksum"},
900         {LL_SBI_CHECKSUM,               "nochecksum"},
901         {LL_SBI_LOCALFLOCK,             "localflock"},
902         {LL_SBI_FLOCK,                  "flock"},
903         {LL_SBI_FLOCK,                  "noflock"},
904         {LL_SBI_USER_XATTR,             "user_xattr"},
905         {LL_SBI_USER_XATTR,             "nouser_xattr"},
906         {LL_SBI_LRU_RESIZE,             "lruresize"},
907         {LL_SBI_LRU_RESIZE,             "nolruresize"},
908         {LL_SBI_LAZYSTATFS,             "lazystatfs"},
909         {LL_SBI_LAZYSTATFS,             "nolazystatfs"},
910         {LL_SBI_32BIT_API,              "32bitapi"},
911         {LL_SBI_USER_FID2PATH,          "user_fid2path"},
912         {LL_SBI_USER_FID2PATH,          "nouser_fid2path"},
913         {LL_SBI_VERBOSE,                "verbose"},
914         {LL_SBI_VERBOSE,                "noverbose"},
915         {LL_SBI_ALWAYS_PING,            "always_ping"},
916         {LL_SBI_TEST_DUMMY_ENCRYPTION,  "test_dummy_encryption"},
917         {LL_SBI_ENCRYPT,                "encrypt"},
918         {LL_SBI_ENCRYPT,                "noencrypt"},
919         {LL_SBI_FOREIGN_SYMLINK,        "foreign_symlink=%s"},
920         {LL_SBI_NUM_MOUNT_OPT,          NULL},
921
922         {LL_SBI_ACL,                    "acl"},
923         {LL_SBI_AGL_ENABLED,            "agl"},
924         {LL_SBI_64BIT_HASH,             "64bit_hash"},
925         {LL_SBI_LAYOUT_LOCK,            "layout"},
926         {LL_SBI_XATTR_CACHE,            "xattr_cache"},
927         {LL_SBI_NOROOTSQUASH,           "norootsquash"},
928         {LL_SBI_FAST_READ,              "fast_read"},
929         {LL_SBI_FILE_SECCTX,            "file_secctx"},
930         {LL_SBI_TINY_WRITE,             "tiny_write"},
931         {LL_SBI_FILE_HEAT,              "file_heat"},
932         {LL_SBI_PARALLEL_DIO,           "parallel_dio"},
933 };
934
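/* Print the currently set sbi flags of this mount, using the first
 * matching name from ll_sbi_flags_name for each bit.
 */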
935 int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
936 {
937         struct super_block *sb = m->private;
938         int i;
939
940         for (i = 0; i < LL_SBI_NUM_FLAGS; i++) {
941                 int j;
942
943                 if (!test_bit(i, ll_s2sbi(sb)->ll_flags))
944                         continue;
945
946                 for (j = 0; j < ARRAY_SIZE(ll_sbi_flags_name); j++) {
947                         if (ll_sbi_flags_name[j].token == i &&
948                             ll_sbi_flags_name[j].pattern) {
949                                 seq_printf(m, "%s ",
950                                            ll_sbi_flags_name[j].pattern);
951                                 break;
952                         }
953                 }
954         }
955         seq_puts(m, "\b\n");
956         return 0;
957 }
958
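/* Example (illustrative only, the device and mount point are made up):
 *   mount -t lustre -o flock,user_xattr,lazystatfs mgsnode@tcp:/lustre /mnt/lustre
 * Each comma-separated client option below is matched against
 * ll_sbi_flags_name; a "no" prefix clears the corresponding flag for
 * most options.
 */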
959 /* non-client-specific mount options are parsed in lmd_parse */
960 static int ll_options(char *options, struct super_block *sb)
961 {
962         struct ll_sb_info *sbi = ll_s2sbi(sb);
963         char *s2, *s1, *opts;
964
965         ENTRY;
966         if (!options)
967                 RETURN(0);
968
969         /* Don't stomp on lmd_opts */
970         opts = kstrdup(options, GFP_KERNEL);
971         if (!opts)
972                 RETURN(-ENOMEM);
973         s1 = opts;
974         s2 = opts;
975
976         CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
977
978         while ((s1 = strsep(&opts, ",")) != NULL) {
979                 substring_t args[MAX_OPT_ARGS];
980                 bool turn_off = false;
981                 int token;
982
983                 if (!*s1)
984                         continue;
985
986                 CDEBUG(D_SUPER, "next opt=%s\n", s1);
987
988                 if (strncmp(s1, "no", 2) == 0)
989                         turn_off = true;
990
991                 /*
992                  * Initialize args struct so we know whether arg was
993                  * found; some options take optional arguments.
994                  */
995                 args[0].to = NULL;
996                 args[0].from = NULL;
997                 token = match_token(s1, ll_sbi_flags_name, args);
998                 if (token == LL_SBI_NUM_MOUNT_OPT) {
999                         if (match_wildcard("context", s1) ||
1000                             match_wildcard("fscontext", s1) ||
1001                             match_wildcard("defcontext", s1) ||
1002                             match_wildcard("rootcontext", s1))
1003                                 continue;
1004
1005                         LCONSOLE_ERROR_MSG(0x152,
1006                                            "Unknown option '%s', won't mount.\n",
1007                                            s1);
1008                         RETURN(-EINVAL);
1009                 }
1010
1011                 switch (token) {
1012                 case LL_SBI_NOLCK:
1013                 case LL_SBI_32BIT_API:
1014                 case LL_SBI_64BIT_HASH:
1015                 case LL_SBI_ALWAYS_PING:
1016                         set_bit(token, sbi->ll_flags);
1017                         break;
1018
1019                 case LL_SBI_FLOCK:
1020                         clear_bit(LL_SBI_LOCALFLOCK, sbi->ll_flags);
1021                         if (turn_off)
1022                                 clear_bit(LL_SBI_FLOCK, sbi->ll_flags);
1023                         else
1024                                 set_bit(token, sbi->ll_flags);
1025                         break;
1026
1027                 case LL_SBI_LOCALFLOCK:
1028                         clear_bit(LL_SBI_FLOCK, sbi->ll_flags);
1029                         set_bit(token, sbi->ll_flags);
1030                         break;
1031
1032                 case LL_SBI_CHECKSUM:
1033                         sbi->ll_checksum_set = 1;
1034                         /* fall through */
1035                 case LL_SBI_USER_XATTR:
1036                 case LL_SBI_USER_FID2PATH:
1037                 case LL_SBI_LRU_RESIZE:
1038                 case LL_SBI_LAZYSTATFS:
1039                 case LL_SBI_VERBOSE:
1040                         if (turn_off)
1041                                 clear_bit(token, sbi->ll_flags);
1042                         else
1043                                 set_bit(token, sbi->ll_flags);
1044                         break;
1045                 case LL_SBI_TEST_DUMMY_ENCRYPTION: {
1046 #ifdef HAVE_LUSTRE_CRYPTO
1047                         set_bit(token, sbi->ll_flags);
1048 #else
1049                         LCONSOLE_WARN("Test dummy encryption mount option ignored: encryption not supported\n");
1050 #endif
1051                         break;
1052                 }
1053                 case LL_SBI_ENCRYPT:
1054 #ifdef HAVE_LUSTRE_CRYPTO
1055                         if (turn_off)
1056                                 clear_bit(token, sbi->ll_flags);
1057                         else
1058                                 set_bit(token, sbi->ll_flags);
1059 #else
1060                         LCONSOLE_WARN("noencrypt or encrypt mount option ignored: encryption not supported\n");
1061 #endif
1062                         break;
1063                 case LL_SBI_FOREIGN_SYMLINK:
1064                         /* non-default prefix provided? */
1065                         if (args->from) {
1066                                 size_t old_len;
1067                                 char *old;
1068
1069                                 /* path must be absolute */
1070                                 if (args->from[0] != '/') {
1071                                         LCONSOLE_ERROR_MSG(0x152,
1072                                                            "foreign prefix '%s' must be an absolute path\n",
1073                                                            args->from);
1074                                         RETURN(-EINVAL);
1075                                 }
1076
1077                                 old_len = sbi->ll_foreign_symlink_prefix_size;
1078                                 old = sbi->ll_foreign_symlink_prefix;
1079                                 /* alloc for path length and '\0' */
1080                                 sbi->ll_foreign_symlink_prefix = match_strdup(args);
1081                                 if (!sbi->ll_foreign_symlink_prefix) {
1082                                         /* restore previous */
1083                                         sbi->ll_foreign_symlink_prefix = old;
1084                                         sbi->ll_foreign_symlink_prefix_size =
1085                                                 old_len;
1086                                         RETURN(-ENOMEM);
1087                                 }
1088                                 sbi->ll_foreign_symlink_prefix_size =
1089                                         args->to - args->from + 1;
1090                                 OBD_ALLOC_POST(sbi->ll_foreign_symlink_prefix,
1091                                                sbi->ll_foreign_symlink_prefix_size,
1092                                                "kmalloced");
1093                                 if (old)
1094                                         OBD_FREE(old, old_len);
1095
1096                                 /* enable foreign symlink support */
1097                                 set_bit(token, sbi->ll_flags);
1098                         } else {
1099                                 LCONSOLE_ERROR_MSG(0x152,
1100                                                    "invalid %s option\n", s1);
1101                         }
1102                 /* fall through */
1103                 default:
1104                         break;
1105                 }
1106         }
1107         kfree(opts);
1108         RETURN(0);
1109 }
1110
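/**
 * Initialize the Lustre-private part of a freshly allocated inode
 * (struct ll_inode_info): locks, open handle state, statahead fields
 * for directories, and I/O, heat and PCC state for other inodes.
 */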
1111 void ll_lli_init(struct ll_inode_info *lli)
1112 {
1113         lli->lli_inode_magic = LLI_INODE_MAGIC;
1114         lli->lli_flags = 0;
1115         rwlock_init(&lli->lli_lock);
1116         lli->lli_posix_acl = NULL;
1117         /* Do not set lli_fid, it has been initialized already. */
1118         fid_zero(&lli->lli_pfid);
1119         lli->lli_mds_read_och = NULL;
1120         lli->lli_mds_write_och = NULL;
1121         lli->lli_mds_exec_och = NULL;
1122         lli->lli_open_fd_read_count = 0;
1123         lli->lli_open_fd_write_count = 0;
1124         lli->lli_open_fd_exec_count = 0;
1125         mutex_init(&lli->lli_och_mutex);
1126         spin_lock_init(&lli->lli_agl_lock);
1127         spin_lock_init(&lli->lli_layout_lock);
1128         ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
1129         lli->lli_clob = NULL;
1130
1131         init_rwsem(&lli->lli_xattrs_list_rwsem);
1132         mutex_init(&lli->lli_xattrs_enq_lock);
1133
1134         LASSERT(lli->lli_vfs_inode.i_mode != 0);
1135         if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
1136                 lli->lli_opendir_key = NULL;
1137                 lli->lli_sai = NULL;
1138                 spin_lock_init(&lli->lli_sa_lock);
1139                 lli->lli_opendir_pid = 0;
1140                 lli->lli_sa_enabled = 0;
1141                 init_rwsem(&lli->lli_lsm_sem);
1142         } else {
1143                 mutex_init(&lli->lli_size_mutex);
1144                 mutex_init(&lli->lli_setattr_mutex);
1145                 lli->lli_symlink_name = NULL;
1146                 ll_trunc_sem_init(&lli->lli_trunc_sem);
1147                 range_lock_tree_init(&lli->lli_write_tree);
1148                 init_rwsem(&lli->lli_glimpse_sem);
1149                 lli->lli_glimpse_time = ktime_set(0, 0);
1150                 INIT_LIST_HEAD(&lli->lli_agl_list);
1151                 lli->lli_agl_index = 0;
1152                 lli->lli_async_rc = 0;
1153                 spin_lock_init(&lli->lli_heat_lock);
1154                 obd_heat_clear(lli->lli_heat_instances, OBD_HEAT_COUNT);
1155                 lli->lli_heat_flags = 0;
1156                 mutex_init(&lli->lli_pcc_lock);
1157                 lli->lli_pcc_state = PCC_STATE_FL_NONE;
1158                 lli->lli_pcc_inode = NULL;
1159                 lli->lli_pcc_dsflags = PCC_DATASET_INVALID;
1160                 lli->lli_pcc_generation = 0;
1161                 mutex_init(&lli->lli_group_mutex);
1162                 lli->lli_group_users = 0;
1163                 lli->lli_group_gid = 0;
1164         }
1165         mutex_init(&lli->lli_layout_mutex);
1166         memset(lli->lli_jobid, 0, sizeof(lli->lli_jobid));
1167         /* ll_cl_context initialize */
1168         INIT_LIST_HEAD(&lli->lli_lccs);
1169 }
1170
1171 #define MAX_STRING_SIZE 128
1172
1173 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1174
1175 #define LSI_BDI_INITIALIZED     0x00400000
1176
1177 #ifndef HAVE_BDI_CAP_MAP_COPY
1178 # define BDI_CAP_MAP_COPY       0
1179 #endif
1180
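/* Compatibility wrapper for kernels that lack super_setup_bdi_name():
 * initialize and register a per-mount backing_dev_info by hand.
 */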
1181 static int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
1182 {
1183         struct  lustre_sb_info *lsi = s2lsi(sb);
1184         char buf[MAX_STRING_SIZE];
1185         va_list args;
1186         int err;
1187
1188         err = bdi_init(&lsi->lsi_bdi);
1189         if (err)
1190                 return err;
1191
1192         lsi->lsi_flags |= LSI_BDI_INITIALIZED;
1193         lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY;
1194         lsi->lsi_bdi.name = "lustre";
1195         va_start(args, fmt);
1196         vsnprintf(buf, MAX_STRING_SIZE, fmt, args);
1197         va_end(args);
1198         err = bdi_register(&lsi->lsi_bdi, NULL, "%s", buf);
1200         if (!err)
1201                 sb->s_bdi = &lsi->lsi_bdi;
1202
1203         return err;
1204 }
1205 #endif /* !HAVE_SUPER_SETUP_BDI_NAME */
1206
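/**
 * Entry point for mounting a Lustre client: allocate the sbi, parse the
 * client mount options, register the debugfs entries, process the
 * client config log, then call client_common_fill_super() to connect
 * and set up the root inode.
 */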
1207 int ll_fill_super(struct super_block *sb)
1208 {
1209         struct  lustre_profile *lprof = NULL;
1210         struct  lustre_sb_info *lsi = s2lsi(sb);
1211         struct  ll_sb_info *sbi = NULL;
1212         char    *dt = NULL, *md = NULL;
1213         char    *profilenm = get_profile_name(sb);
1214         struct config_llog_instance *cfg;
1215         /* %p for void* in printf needs 16+2 characters: 0xffffffffffffffff */
1216         const int instlen = LUSTRE_MAXINSTANCE + 2;
1217         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1218         char name[MAX_STRING_SIZE];
1219         int md_len = 0;
1220         int dt_len = 0;
1221         uuid_t uuid;
1222         char *ptr;
1223         int len;
1224         int err;
1225
1226         ENTRY;
1227         /* for ASLR, to map between cfg_instance and hashed ptr */
1228         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1229                profilenm, cfg_instance, sb);
1230
1231         OBD_RACE(OBD_FAIL_LLITE_RACE_MOUNT);
1232
1233         OBD_ALLOC_PTR(cfg);
1234         if (cfg == NULL)
1235                 GOTO(out_free_cfg, err = -ENOMEM);
1236
1237         /* client additional sb info */
1238         lsi->lsi_llsbi = sbi = ll_init_sbi();
1239         if (IS_ERR(sbi))
1240                 GOTO(out_free_cfg, err = PTR_ERR(sbi));
1241
1242         err = ll_options(lsi->lsi_lmd->lmd_opts, sb);
1243         if (err)
1244                 GOTO(out_free_cfg, err);
1245
1246         /* kernels >= 2.6.38 store dentry operations in sb->s_d_op. */
1247         sb->s_d_op = &ll_d_ops;
1248
1249         /* UUID handling */
1250         generate_random_uuid(uuid.b);
1251         snprintf(sbi->ll_sb_uuid.uuid, sizeof(sbi->ll_sb_uuid), "%pU", uuid.b);
1252
1253         CDEBUG(D_CONFIG, "llite sb uuid: %s\n", sbi->ll_sb_uuid.uuid);
1254
1255         /* Get fsname */
1256         len = strlen(profilenm);
1257         ptr = strrchr(profilenm, '-');
1258         if (ptr && (strcmp(ptr, "-client") == 0))
1259                 len -= 7;
1260
1261         if (len > LUSTRE_MAXFSNAME) {
1262                 if (unlikely(len >= MAX_STRING_SIZE))
1263                         len = MAX_STRING_SIZE - 1;
1264                 strncpy(name, profilenm, len);
1265                 name[len] = '\0';
1266                 err = -ENAMETOOLONG;
1267                 CERROR("%s: fsname longer than %u characters: rc = %d\n",
1268                        name, LUSTRE_MAXFSNAME, err);
1269                 GOTO(out_free_cfg, err);
1270         }
1271         strncpy(sbi->ll_fsname, profilenm, len);
1272         sbi->ll_fsname[len] = '\0';
1273
1274         /* Mount info */
1275         snprintf(name, sizeof(name), "%.*s-%016lx", len,
1276                  profilenm, cfg_instance);
1277
1278         err = super_setup_bdi_name(sb, "%s", name);
1279         if (err)
1280                 GOTO(out_free_cfg, err);
1281
1282         /* Call ll_debugfs_register_super() before lustre_process_log()
1283          * so that "llite.*.*" params can be processed correctly.
1284          */
1285         err = ll_debugfs_register_super(sb, name);
1286         if (err < 0) {
1287                 CERROR("%s: could not register mountpoint in llite: rc = %d\n",
1288                        sbi->ll_fsname, err);
1289                 err = 0;
1290         }
1291
1292         /* The cfg_instance is a value unique to this super, in case some
1293          * joker tries to mount the same fs at two mount points.
1294          */
1295         cfg->cfg_instance = cfg_instance;
1296         cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
1297         cfg->cfg_callback = class_config_llog_handler;
1298         cfg->cfg_sub_clds = CONFIG_SUB_CLIENT;
1299         /* set up client obds */
1300         err = lustre_process_log(sb, profilenm, cfg);
1301         if (err < 0)
1302                 GOTO(out_debugfs, err);
1303
1304         /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
1305         lprof = class_get_profile(profilenm);
1306         if (lprof == NULL) {
1307                 LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be"
1308                                    " read from the MGS.  Does that filesystem "
1309                                    "exist?\n", profilenm);
1310                 GOTO(out_debugfs, err = -EINVAL);
1311         }
1312         CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
1313                lprof->lp_md, lprof->lp_dt);
1314
1315         dt_len = strlen(lprof->lp_dt) + instlen + 2;
1316         OBD_ALLOC(dt, dt_len);
1317         if (!dt)
1318                 GOTO(out_profile, err = -ENOMEM);
1319         snprintf(dt, dt_len - 1, "%s-%016lx", lprof->lp_dt, cfg_instance);
1320
1321         md_len = strlen(lprof->lp_md) + instlen + 2;
1322         OBD_ALLOC(md, md_len);
1323         if (!md)
1324                 GOTO(out_free_dt, err = -ENOMEM);
1325         snprintf(md, md_len - 1, "%s-%016lx", lprof->lp_md, cfg_instance);
1326
1327         /* connections, registrations, sb setup */
1328         err = client_common_fill_super(sb, md, dt);
1329         if (err < 0)
1330                 GOTO(out_free_md, err);
1331
1332         sbi->ll_client_common_fill_super_succeeded = 1;
1333
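             /* On success we fall through the cleanup labels below: md, dt and
              * the profile reference are only needed while setting up.
              */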
1334 out_free_md:
1335         if (md)
1336                 OBD_FREE(md, md_len);
1337 out_free_dt:
1338         if (dt)
1339                 OBD_FREE(dt, dt_len);
1340 out_profile:
1341         if (lprof)
1342                 class_put_profile(lprof);
1343 out_debugfs:
1344         if (err < 0)
1345                 ll_debugfs_unregister_super(sb);
1346 out_free_cfg:
1347         if (cfg)
1348                 OBD_FREE_PTR(cfg);
1349
1350         if (err)
1351                 ll_put_super(sb);
1352         else if (test_bit(LL_SBI_VERBOSE, sbi->ll_flags))
1353                 LCONSOLE_WARN("Mounted %s\n", profilenm);
1354         RETURN(err);
1355 } /* ll_fill_super */
1356
1357 void ll_put_super(struct super_block *sb)
1358 {
1359         struct config_llog_instance cfg, params_cfg;
1360         struct obd_device *obd;
1361         struct lustre_sb_info *lsi = s2lsi(sb);
1362         struct ll_sb_info *sbi = ll_s2sbi(sb);
1363         char *profilenm = get_profile_name(sb);
1364         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1365         long ccc_count;
1366         int next, force = 1, rc = 0;
1367         ENTRY;
1368
1369         if (IS_ERR(sbi))
1370                 GOTO(out_no_sbi, 0);
1371
1372         /* Should replace instance_id with something better for ASLR */
1373         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1374                profilenm, cfg_instance, sb);
1375
1376         cfg.cfg_instance = cfg_instance;
1377         lustre_end_log(sb, profilenm, &cfg);
1378
1379         params_cfg.cfg_instance = cfg_instance;
1380         lustre_end_log(sb, PARAMS_FILENAME, &params_cfg);
1381
1382         if (sbi->ll_md_exp) {
1383                 obd = class_exp2obd(sbi->ll_md_exp);
1384                 if (obd)
1385                         force = obd->obd_force;
1386         }
1387
1388         /* Wait for unstable pages to be committed to stable storage */
1389         if (force == 0) {
1390                 rc = l_wait_event_abortable(
1391                         sbi->ll_cache->ccc_unstable_waitq,
1392                         atomic_long_read(&sbi->ll_cache->ccc_unstable_nr) == 0);
1393         }
1394
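             /* If we waited above (force == 0) and the wait was not
              * interrupted, every unstable page must have been committed by
              * now, so the count read below is expected to be zero.
              */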
1395         ccc_count = atomic_long_read(&sbi->ll_cache->ccc_unstable_nr);
1396         if (force == 0 && rc != -ERESTARTSYS)
1397                 LASSERTF(ccc_count == 0, "count: %li\n", ccc_count);
1398
1399         /* We need to set obd_force before the lov_disconnect in
1400          * lustre_common_put_super(), since lov_disconnect cleans up
1401          * the OSCs as well. */
1402         if (force) {
1403                 next = 0;
1404                 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
1405                                                      &next)) != NULL) {
1406                         obd->obd_force = force;
1407                 }
1408         }
1409
1410         if (sbi->ll_client_common_fill_super_succeeded) {
1411                 /* Only if client_common_fill_super succeeded */
1412                 client_common_put_super(sb);
1413         }
1414
1415         next = 0;
1416         while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)))
1417                 class_manual_cleanup(obd);
1418
1419         if (test_bit(LL_SBI_VERBOSE, sbi->ll_flags))
1420                 LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");
1421
1422         if (profilenm)
1423                 class_del_profile(profilenm);
1424
1425 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1426         if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
1427                 bdi_destroy(&lsi->lsi_bdi);
1428                 lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
1429         }
1430 #endif
1431
1432         ll_free_sbi(sb);
1433         lsi->lsi_llsbi = NULL;
1434 out_no_sbi:
1435         lustre_common_put_super(sb);
1436
1437         cl_env_cache_purge(~0);
1438
1439         EXIT;
1440 } /* ll_put_super */
1441
1442 struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
1443 {
1444         struct inode *inode = NULL;
1445
1446         /* NOTE: we depend on atomic igrab() -bzzz */
1447         lock_res_and_lock(lock);
1448         if (lock->l_resource->lr_lvb_inode) {
1449                 struct ll_inode_info * lli;
1450                 lli = ll_i2info(lock->l_resource->lr_lvb_inode);
1451                 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1452                         inode = igrab(lock->l_resource->lr_lvb_inode);
1453                 } else {
1454                         inode = lock->l_resource->lr_lvb_inode;
1455                         LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ?
1456                                          D_INFO : D_WARNING, lock,
1457                                          "lr_lvb_inode %p is bogus: magic %08x",
1458                                          lock->l_resource->lr_lvb_inode,
1459                                          lli->lli_inode_magic);
1460                         inode = NULL;
1461                 }
1462         }
1463         unlock_res_and_lock(lock);
1464         return inode;
1465 }
1466
1467 void ll_dir_clear_lsm_md(struct inode *inode)
1468 {
1469         struct ll_inode_info *lli = ll_i2info(inode);
1470
1471         LASSERT(S_ISDIR(inode->i_mode));
1472
1473         if (lli->lli_lsm_md) {
1474                 lmv_free_memmd(lli->lli_lsm_md);
1475                 lli->lli_lsm_md = NULL;
1476         }
1477
1478         if (lli->lli_default_lsm_md) {
1479                 lmv_free_memmd(lli->lli_default_lsm_md);
1480                 lli->lli_default_lsm_md = NULL;
1481         }
1482 }
1483
1484 static struct inode *ll_iget_anon_dir(struct super_block *sb,
1485                                       const struct lu_fid *fid,
1486                                       struct lustre_md *md)
1487 {
1488         struct ll_sb_info *sbi = ll_s2sbi(sb);
1489         struct ll_inode_info *lli;
1490         struct mdt_body *body = md->body;
1491         struct inode *inode;
1492         ino_t ino;
1493
1494         ENTRY;
1495
1496         LASSERT(md->lmv);
1497         ino = cl_fid_build_ino(fid, test_bit(LL_SBI_32BIT_API, sbi->ll_flags));
1498         inode = iget_locked(sb, ino);
1499         if (inode == NULL) {
1500                 CERROR("%s: failed to get simple inode "DFID": rc = -ENOENT\n",
1501                        sbi->ll_fsname, PFID(fid));
1502                 RETURN(ERR_PTR(-ENOENT));
1503         }
1504
1505         lli = ll_i2info(inode);
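             /* A freshly allocated in-core inode (I_NEW) gets the minimal
              * attributes of a slave (stripe) directory before being unlocked.
              */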
1506         if (inode->i_state & I_NEW) {
1507                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
1508                                 (body->mbo_mode & S_IFMT);
1509                 LASSERTF(S_ISDIR(inode->i_mode), "Not slave inode "DFID"\n",
1510                          PFID(fid));
1511
1512                 inode->i_mtime.tv_sec = 0;
1513                 inode->i_atime.tv_sec = 0;
1514                 inode->i_ctime.tv_sec = 0;
1515                 inode->i_rdev = 0;
1516
1517 #ifdef HAVE_BACKING_DEV_INFO
1518                 /* initializing backing dev info. */
1519                 inode->i_mapping->backing_dev_info =
1520                                                 &s2lsi(inode->i_sb)->lsi_bdi;
1521 #endif
1522                 inode->i_op = &ll_dir_inode_operations;
1523                 inode->i_fop = &ll_dir_operations;
1524                 lli->lli_fid = *fid;
1525                 ll_lli_init(lli);
1526
1527                 /* master object FID */
1528                 lli->lli_pfid = body->mbo_fid1;
1529                 CDEBUG(D_INODE, "lli %p slave "DFID" master "DFID"\n",
1530                        lli, PFID(fid), PFID(&lli->lli_pfid));
1531                 unlock_new_inode(inode);
1532         } else {
1533                 /* During directory restripe/auto-split, a plain directory
1534                  * may be transformed into a stripe; set its pfid here,
1535                  * otherwise ll_lock_cancel_bits() can't find the master inode.
1536                  */
1537                 lli->lli_pfid = body->mbo_fid1;
1538         }
1539
1540         RETURN(inode);
1541 }
1542
1543 static int ll_init_lsm_md(struct inode *inode, struct lustre_md *md)
1544 {
1545         struct lu_fid *fid;
1546         struct lmv_stripe_md *lsm = md->lmv;
1547         struct ll_inode_info *lli = ll_i2info(inode);
1548         int i;
1549
1550         LASSERT(lsm != NULL);
1551
1552         CDEBUG(D_INODE, "%s: "DFID" set dir layout:\n",
1553                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1554         lsm_md_dump(D_INODE, lsm);
1555
1556         if (!lmv_dir_striped(lsm))
1557                 goto out;
1558
1559         /* XXX this lsm_root initialization really belongs in the
1560          * LMV layer, but it needs ll_iget(), so it lives here
1561          * for now. */
1562         for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
1563                 fid = &lsm->lsm_md_oinfo[i].lmo_fid;
1564                 LASSERT(lsm->lsm_md_oinfo[i].lmo_root == NULL);
1565
1566                 if (!fid_is_sane(fid))
1567                         continue;
1568
1569                 /* Unfortunately ll_iget() will call ll_update_inode(),
1570                  * where the initialization of a slave inode is slightly
1571                  * different, so it resets lsm_md to NULL to avoid
1572                  * initializing the lsm for the slave inode. */
1573                 lsm->lsm_md_oinfo[i].lmo_root =
1574                                 ll_iget_anon_dir(inode->i_sb, fid, md);
1575                 if (IS_ERR(lsm->lsm_md_oinfo[i].lmo_root)) {
1576                         int rc = PTR_ERR(lsm->lsm_md_oinfo[i].lmo_root);
1577
1578                         lsm->lsm_md_oinfo[i].lmo_root = NULL;
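                             /* drop the references taken on stripes
                              * initialized in earlier iterations
                              */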
1579                         while (i-- > 0) {
1580                                 iput(lsm->lsm_md_oinfo[i].lmo_root);
1581                                 lsm->lsm_md_oinfo[i].lmo_root = NULL;
1582                         }
1583                         return rc;
1584                 }
1585         }
1586 out:
1587         lli->lli_lsm_md = lsm;
1588
1589         return 0;
1590 }
1591
1592 static void ll_update_default_lsm_md(struct inode *inode, struct lustre_md *md)
1593 {
1594         struct ll_inode_info *lli = ll_i2info(inode);
1595
1596         ENTRY;
1597
1598         if (!md->default_lmv) {
1599                 /* clear default lsm */
1600                 if (lli->lli_default_lsm_md) {
1601                         down_write(&lli->lli_lsm_sem);
1602                         if (lli->lli_default_lsm_md) {
1603                                 lmv_free_memmd(lli->lli_default_lsm_md);
1604                                 lli->lli_default_lsm_md = NULL;
1605                         }
1606                         up_write(&lli->lli_lsm_sem);
1607                 }
1608                 RETURN_EXIT;
1609         }
1610
1611         if (lli->lli_default_lsm_md) {
1612                 /* do nothing if the default lsm hasn't changed */
1613                 down_read(&lli->lli_lsm_sem);
1614                 if (lli->lli_default_lsm_md &&
1615                     lsm_md_eq(lli->lli_default_lsm_md, md->default_lmv)) {
1616                         up_read(&lli->lli_lsm_sem);
1617                         RETURN_EXIT;
1618                 }
1619                 up_read(&lli->lli_lsm_sem);
1620         }
1621
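             /* The default layout is new or has changed: retake the semaphore
              * as writer and steal md->default_lmv so the caller does not
              * free it.
              */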
1622         down_write(&lli->lli_lsm_sem);
1623         if (lli->lli_default_lsm_md)
1624                 lmv_free_memmd(lli->lli_default_lsm_md);
1625         lli->lli_default_lsm_md = md->default_lmv;
1626         lsm_md_dump(D_INODE, md->default_lmv);
1627         md->default_lmv = NULL;
1628         up_write(&lli->lli_lsm_sem);
1629         RETURN_EXIT;
1630 }
1631
1632 static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
1633 {
1634         struct ll_inode_info *lli = ll_i2info(inode);
1635         struct lmv_stripe_md *lsm = md->lmv;
1636         struct cl_attr  *attr;
1637         int rc = 0;
1638
1639         ENTRY;
1640
1641         LASSERT(S_ISDIR(inode->i_mode));
1642         CDEBUG(D_INODE, "update lsm %p of "DFID"\n", lli->lli_lsm_md,
1643                PFID(ll_inode2fid(inode)));
1644
1645         /* update default LMV */
1646         if (md->default_lmv)
1647                 ll_update_default_lsm_md(inode, md);
1648
1649         /* after dir migration/restripe, a stripe may be turned into a
1650          * directory, in this case, zero out its lli_pfid.
1651          */
1652         if (unlikely(fid_is_norm(&lli->lli_pfid)))
1653                 fid_zero(&lli->lli_pfid);
1654
1655         /*
1656          * no striped information from request, lustre_md from req does not
1657          * include stripeEA, see ll_md_setattr()
1658          */
1659         if (!lsm)
1660                 RETURN(0);
1661
1662         /*
1663          * normally dir layout doesn't change, only take read lock to check
1664          * that to avoid blocking other MD operations.
1665          */
1666         down_read(&lli->lli_lsm_sem);
1667
1668         /* a concurrent lookup already initialized the lsm and it is unchanged */
1669         if (lli->lli_lsm_md && lsm_md_eq(lli->lli_lsm_md, lsm))
1670                 GOTO(unlock, rc = 0);
1671
1672         /* If the dir layout doesn't match, check whether the version has
1673          * increased, which means the layout has changed; this happens
1674          * during dir split/merge and lfsck.
1675          *
1676          * A foreign LMV should not change.
1677          */
1678         if (lli->lli_lsm_md && lmv_dir_striped(lli->lli_lsm_md) &&
1679             lsm->lsm_md_layout_version <=
1680             lli->lli_lsm_md->lsm_md_layout_version) {
1681                 CERROR("%s: "DFID" dir layout mismatch:\n",
1682                        ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1683                 lsm_md_dump(D_ERROR, lli->lli_lsm_md);
1684                 lsm_md_dump(D_ERROR, lsm);
1685                 GOTO(unlock, rc = -EINVAL);
1686         }
1687
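             /* The layout differs from the cached one: upgrade to the write
              * lock and rebuild the striping information from @md.
              */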
1688         up_read(&lli->lli_lsm_sem);
1689         down_write(&lli->lli_lsm_sem);
1690         /* clear existing lsm */
1691         if (lli->lli_lsm_md) {
1692                 lmv_free_memmd(lli->lli_lsm_md);
1693                 lli->lli_lsm_md = NULL;
1694         }
1695
1696         rc = ll_init_lsm_md(inode, md);
1697         up_write(&lli->lli_lsm_sem);
1698
1699         if (rc)
1700                 RETURN(rc);
1701
1702         /* set md->lmv to NULL, so the following free lustre_md will not free
1703          * this lsm.
1704          */
1705         md->lmv = NULL;
1706
1707         /* md_merge_attr() may take long, since lsm is already set, switch to
1708          * read lock.
1709          */
1710         down_read(&lli->lli_lsm_sem);
1711
1712         if (!lmv_dir_striped(lli->lli_lsm_md))
1713                 GOTO(unlock, rc = 0);
1714
1715         OBD_ALLOC_PTR(attr);
1716         if (!attr)
1717                 GOTO(unlock, rc = -ENOMEM);
1718
1719         /* validate the lsm */
1720         rc = md_merge_attr(ll_i2mdexp(inode), lli->lli_lsm_md, attr,
1721                            ll_md_blocking_ast);
1722         if (!rc) {
1723                 if (md->body->mbo_valid & OBD_MD_FLNLINK)
1724                         md->body->mbo_nlink = attr->cat_nlink;
1725                 if (md->body->mbo_valid & OBD_MD_FLSIZE)
1726                         md->body->mbo_size = attr->cat_size;
1727                 if (md->body->mbo_valid & OBD_MD_FLATIME)
1728                         md->body->mbo_atime = attr->cat_atime;
1729                 if (md->body->mbo_valid & OBD_MD_FLCTIME)
1730                         md->body->mbo_ctime = attr->cat_ctime;
1731                 if (md->body->mbo_valid & OBD_MD_FLMTIME)
1732                         md->body->mbo_mtime = attr->cat_mtime;
1733         }
1734
1735         OBD_FREE_PTR(attr);
1736         GOTO(unlock, rc);
1737 unlock:
1738         up_read(&lli->lli_lsm_sem);
1739
1740         return rc;
1741 }
1742
1743 void ll_clear_inode(struct inode *inode)
1744 {
1745         struct ll_inode_info *lli = ll_i2info(inode);
1746         struct ll_sb_info *sbi = ll_i2sbi(inode);
1747
1748         ENTRY;
1749
1750         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
1751                PFID(ll_inode2fid(inode)), inode);
1752
1753         if (S_ISDIR(inode->i_mode)) {
1754                 /* these should have been cleared in ll_file_release */
1755                 LASSERT(lli->lli_opendir_key == NULL);
1756                 LASSERT(lli->lli_sai == NULL);
1757                 LASSERT(lli->lli_opendir_pid == 0);
1758         } else {
1759                 pcc_inode_free(inode);
1760         }
1761
1762         md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
1763
1764         LASSERT(!lli->lli_open_fd_write_count);
1765         LASSERT(!lli->lli_open_fd_read_count);
1766         LASSERT(!lli->lli_open_fd_exec_count);
1767
1768         if (lli->lli_mds_write_och)
1769                 ll_md_real_close(inode, FMODE_WRITE);
1770         if (lli->lli_mds_exec_och)
1771                 ll_md_real_close(inode, FMODE_EXEC);
1772         if (lli->lli_mds_read_och)
1773                 ll_md_real_close(inode, FMODE_READ);
1774
1775         if (S_ISLNK(inode->i_mode) && lli->lli_symlink_name) {
1776                 OBD_FREE(lli->lli_symlink_name,
1777                          strlen(lli->lli_symlink_name) + 1);
1778                 lli->lli_symlink_name = NULL;
1779         }
1780
1781         ll_xattr_cache_destroy(inode);
1782
1783         forget_all_cached_acls(inode);
1784         lli_clear_acl(lli);
1785         lli->lli_inode_magic = LLI_INODE_DEAD;
1786
1787         if (S_ISDIR(inode->i_mode))
1788                 ll_dir_clear_lsm_md(inode);
1789         else if (S_ISREG(inode->i_mode) && !is_bad_inode(inode))
1790                 LASSERT(list_empty(&lli->lli_agl_list));
1791
1792         /*
1793          * XXX This has to be done before lsm is freed below, because
1794          * cl_object still uses inode lsm.
1795          */
1796         cl_inode_fini(inode);
1797
1798         llcrypt_put_encryption_info(inode);
1799
1800         EXIT;
1801 }
1802
1803 static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data)
1804 {
1805         struct lustre_md md;
1806         struct inode *inode = dentry->d_inode;
1807         struct ll_sb_info *sbi = ll_i2sbi(inode);
1808         struct ptlrpc_request *request = NULL;
1809         int rc, ia_valid;
1810
1811         ENTRY;
1812
1813         op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
1814                                      LUSTRE_OPC_ANY, NULL);
1815         if (IS_ERR(op_data))
1816                 RETURN(PTR_ERR(op_data));
1817
1818         /* If this is a chgrp of a regular file, we want to reserve enough
1819          * quota to cover the entire file size.
1820          */
1821         if (S_ISREG(inode->i_mode) && op_data->op_attr.ia_valid & ATTR_GID &&
1822             from_kgid(&init_user_ns, op_data->op_attr.ia_gid) !=
1823             from_kgid(&init_user_ns, inode->i_gid)) {
1824                 op_data->op_xvalid |= OP_XVALID_BLOCKS;
1825                 op_data->op_attr_blocks = inode->i_blocks;
1826         }
1827
1828
1829         rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &request);
1830         if (rc) {
1831                 ptlrpc_req_finished(request);
1832                 if (rc == -ENOENT) {
1833                         clear_nlink(inode);
1834                         /* Unlinked special device node? Or just a race?
1835                          * Pretend we did everything. */
1836                         if (!S_ISREG(inode->i_mode) &&
1837                             !S_ISDIR(inode->i_mode)) {
1838                                 ia_valid = op_data->op_attr.ia_valid;
1839                                 op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
1840                                 rc = simple_setattr(dentry, &op_data->op_attr);
1841                                 op_data->op_attr.ia_valid = ia_valid;
1842                         }
1843                 } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
1844                         CERROR("md_setattr fails: rc = %d\n", rc);
1845                 }
1846                 RETURN(rc);
1847         }
1848
1849         rc = md_get_lustre_md(sbi->ll_md_exp, &request->rq_pill, sbi->ll_dt_exp,
1850                               sbi->ll_md_exp, &md);
1851         if (rc) {
1852                 ptlrpc_req_finished(request);
1853                 RETURN(rc);
1854         }
1855
1856         ia_valid = op_data->op_attr.ia_valid;
1857         /* The inode size will be set in ll_setattr_ost(); we can't do it
1858          * now since the dirty cache is not cleared yet. */
1859         op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
1860         if (S_ISREG(inode->i_mode))
1861                 inode_lock(inode);
1862         rc = simple_setattr(dentry, &op_data->op_attr);
1863         if (S_ISREG(inode->i_mode))
1864                 inode_unlock(inode);
1865         op_data->op_attr.ia_valid = ia_valid;
1866
1867         rc = ll_update_inode(inode, &md);
1868         ptlrpc_req_finished(request);
1869
1870         RETURN(rc);
1871 }
1872
1873 /**
1874  * Zero a portion of the page that is part of @inode.
1875  * This implies, if necessary:
1876  * - taking cl_lock on range corresponding to concerned page
1877  * - grabbing vm page
1878  * - associating cl_page
1879  * - proceeding to clio read
1880  * - zeroing range in page
1881  * - proceeding to cl_page flush
1882  * - releasing cl_lock
1883  *
1884  * \param[in] inode     inode
1885  * \param[in] index     page index
1886  * \param[in] offset    offset in page to start zeroing from
1887  * \param[in] len       length to zero
1888  *
1889  * \retval 0            on success
1890  * \retval negative     errno on failure
1891  */
1892 int ll_io_zero_page(struct inode *inode, pgoff_t index, pgoff_t offset,
1893                     unsigned len)
1894 {
1895         struct ll_inode_info *lli = ll_i2info(inode);
1896         struct cl_object *clob = lli->lli_clob;
1897         __u16 refcheck;
1898         struct lu_env *env = NULL;
1899         struct cl_io *io = NULL;
1900         struct cl_page *clpage = NULL;
1901         struct page *vmpage = NULL;
1902         unsigned from = index << PAGE_SHIFT;
1903         struct cl_lock *lock = NULL;
1904         struct cl_lock_descr *descr = NULL;
1905         struct cl_2queue *queue = NULL;
1906         struct cl_sync_io *anchor = NULL;
1907         bool holdinglock = false;
1908         bool lockedbymyself = true;
1909         int rc;
1910
1911         ENTRY;
1912
1913         env = cl_env_get(&refcheck);
1914         if (IS_ERR(env))
1915                 RETURN(PTR_ERR(env));
1916
1917         io = vvp_env_thread_io(env);
1918         io->ci_obj = clob;
1919         rc = cl_io_rw_init(env, io, CIT_WRITE, from, PAGE_SIZE);
1920         if (rc)
1921                 GOTO(putenv, rc);
1922
1923         lock = vvp_env_lock(env);
1924         descr = &lock->cll_descr;
1925         descr->cld_obj   = io->ci_obj;
1926         descr->cld_start = cl_index(io->ci_obj, from);
1927         descr->cld_end   = cl_index(io->ci_obj, from + PAGE_SIZE - 1);
1928         descr->cld_mode  = CLM_WRITE;
1929         descr->cld_enq_flags = CEF_MUST | CEF_NONBLOCK;
1930
1931         /* request lock for page */
1932         rc = cl_lock_request(env, io, lock);
1933         /* -ECANCELED indicates a matching lock with a different extent
1934          * was already present, and -EEXIST indicates a matching lock
1935          * on exactly the same extent was already present.
1936          * In both cases it means we are covered.
1937          */
1938         if (rc == -ECANCELED || rc == -EEXIST)
1939                 rc = 0;
1940         else if (rc < 0)
1941                 GOTO(iofini, rc);
1942         else
1943                 holdinglock = true;
1944
1945         /* grab page */
1946         vmpage = grab_cache_page_nowait(inode->i_mapping, index);
1947         if (vmpage == NULL)
1948                 GOTO(rellock, rc = -EOPNOTSUPP);
1949
1950         if (!PageDirty(vmpage)) {
1951                 /* associate cl_page */
1952                 clpage = cl_page_find(env, clob, vmpage->index,
1953                                       vmpage, CPT_CACHEABLE);
1954                 if (IS_ERR(clpage))
1955                         GOTO(pagefini, rc = PTR_ERR(clpage));
1956
1957                 cl_page_assume(env, io, clpage);
1958         }
1959
1960         if (!PageUptodate(vmpage) && !PageDirty(vmpage) &&
1961             !PageWriteback(vmpage)) {
1962                 /* read page */
1963                 /* set PagePrivate2 to detect special case of empty page
1964                  * in osc_brw_fini_request()
1965                  */
1966                 SetPagePrivate2(vmpage);
1967                 rc = ll_io_read_page(env, io, clpage, NULL);
1968                 if (!PagePrivate2(vmpage))
1969                         /* PagePrivate2 was cleared in osc_brw_fini_request()
1970                          * meaning we read an empty page. In this case, in order
1971                          * to avoid allocating unnecessary block in truncated
1972                          * file, we must not zero and write as below. Subsequent
1973                          * server-side truncate will handle things correctly.
1974                          */
1975                         GOTO(clpfini, rc = 0);
1976                 ClearPagePrivate2(vmpage);
1977                 if (rc)
1978                         GOTO(clpfini, rc);
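                     /* ll_io_read_page() may have dropped the page lock; try
                      * to retake it so the page is only unlocked below if we
                      * actually hold it.
                      */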
1979                 lockedbymyself = trylock_page(vmpage);
1980                 cl_page_assume(env, io, clpage);
1981         }
1982
1983         /* zero range in page */
1984         zero_user(vmpage, offset, len);
1985
1986         if (holdinglock && clpage) {
1987                 /* explicitly write newly modified page */
1988                 queue = &io->ci_queue;
1989                 cl_2queue_init(queue);
1990                 anchor = &vvp_env_info(env)->vti_anchor;
1991                 cl_sync_io_init(anchor, 1);
1992                 clpage->cp_sync_io = anchor;
1993                 cl_2queue_add(queue, clpage, true);
1994                 rc = cl_io_submit_rw(env, io, CRT_WRITE, queue);
1995                 if (rc)
1996                         GOTO(queuefini1, rc);
1997                 rc = cl_sync_io_wait(env, anchor, 0);
1998                 if (rc)
1999                         GOTO(queuefini2, rc);
2000                 cl_page_assume(env, io, clpage);
2001
2002 queuefini2:
2003                 cl_2queue_discard(env, io, queue);
2004 queuefini1:
2005                 cl_2queue_disown(env, io, queue);
2006                 cl_2queue_fini(env, queue);
2007         }
2008
2009 clpfini:
2010         if (clpage)
2011                 cl_page_put(env, clpage);
2012 pagefini:
2013         if (lockedbymyself) {
2014                 unlock_page(vmpage);
2015                 put_page(vmpage);
2016         }
2017 rellock:
2018         if (holdinglock)
2019                 cl_lock_release(env, lock);
2020 iofini:
2021         cl_io_fini(env, io);
2022 putenv:
2023         if (env)
2024                 cl_env_put(env, &refcheck);
2025
2026         RETURN(rc);
2027 }
2028
2029 /**
2030  * Get reference file from volatile file name.
2031  * Volatile file name may look like:
2032  * <parent>/LUSTRE_VOLATILE_HDR:<mdt_index>:<random>:fd=<fd>
2033  * where fd is the open file descriptor of the reference file.
2034  *
2035  * \param[in] volatile_name     volatile file name
2036  * \param[in] volatile_len      volatile file name length
2037  * \param[out] ref_file         pointer to struct file of reference file
2038  *
2039  * \retval 0            on success
2040  * \retval negative     errno on failure
2041  */
2042 int volatile_ref_file(const char *volatile_name, int volatile_len,
2043                       struct file **ref_file)
2044 {
2045         char *p, *q, *fd_str;
2046         unsigned int fd;
             int rc;
2047
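             /* locate the ":fd=" token and parse the decimal descriptor that
              * follows, which is terminated by the next ':' or the end of
              * the name
              */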
2048         p = strnstr(volatile_name, ":fd=", volatile_len);
2049         if (!p || strlen(p + 4) == 0)
2050                 return -EINVAL;
2051
2052         q = strchrnul(p + 4, ':');
2053         fd_str = kstrndup(p + 4, q - p - 4, GFP_NOFS);
2054         if (!fd_str)
2055                 return -ENOMEM;
2056         rc = kstrtouint(fd_str, 10, &fd);
2057         kfree(fd_str);
2058         if (rc)
2059                 return -EINVAL;
2060
2061         *ref_file = fget(fd);
2062         if (!(*ref_file))
2063                 return -EINVAL;
2064         return 0;
2065 }
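
/*
 * Usage sketch (illustrative only, not part of the build; "name" and
 * "namelen" are placeholders for the caller's volatile entry name):
 *
 *	struct file *ref = NULL;
 *	struct inode *ref_inode;
 *
 *	if (!volatile_ref_file(name, namelen, &ref)) {
 *		ref_inode = file_inode(ref);
 *		... read size/attributes from ref_inode ...
 *		fput(ref);
 *	}
 */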
2066
2067 /* If this inode has objects allocated to it (lsm != NULL), then the OST
2068  * object(s) determine the file size and mtime.  Otherwise, the MDS will
2069  * keep these values until such a time that objects are allocated for it.
2070  * We do the MDS operations first, as it checks permissions for us.
2071  * We don't do the MDS RPC if there is nothing that we want to store there,
2072  * otherwise there is no harm in updating mtime/atime on the MDS if we are
2073  * going to do an RPC anyway.
2074  *
2075  * If we are doing a truncate, we will send the mtime and ctime updates
2076  * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
2077  * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
2078  * at the same time.
2079  *
2080  * In the case of an HSM import, we only set attributes on the MDS.
2081  */
2082 int ll_setattr_raw(struct dentry *dentry, struct iattr *attr,
2083                    enum op_xvalid xvalid, bool hsm_import)
2084 {
2085         struct inode *inode = dentry->d_inode;
2086         struct ll_inode_info *lli = ll_i2info(inode);
2087         struct md_op_data *op_data = NULL;
2088         ktime_t kstart = ktime_get();
2089         int rc = 0;
2090
2091         ENTRY;
2092
2093         CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, "
2094                "valid %x, hsm_import %d\n",
2095                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid),
2096                inode, i_size_read(inode), attr->ia_size, attr->ia_valid,
2097                hsm_import);
2098
2099         if (attr->ia_valid & ATTR_SIZE) {
2100                 /* Check new size against VFS/VM file size limit and rlimit */
2101                 rc = inode_newsize_ok(inode, attr->ia_size);
2102                 if (rc)
2103                         RETURN(rc);
2104
2105                 /* The maximum Lustre file size is variable, based on the
2106                  * OST maximum object size and number of stripes.  This
2107                  * needs another check in addition to the VFS check above. */
2108                 if (attr->ia_size > ll_file_maxbytes(inode)) {
2109                         CDEBUG(D_INODE, "file "DFID" too large %llu > %llu\n",
2110                                PFID(&lli->lli_fid), attr->ia_size,
2111                                ll_file_maxbytes(inode));
2112                         RETURN(-EFBIG);
2113                 }
2114
2115                 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
2116         }
2117
2118         /* POSIX: permission check for ATTR_*TIME_SET (from inode_change_ok()) */
2119         if (attr->ia_valid & TIMES_SET_FLAGS) {
2120                 if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
2121                     !capable(CAP_FOWNER))
2122                         RETURN(-EPERM);
2123         }
2124
2125         /* We mark all of the fields "set" so MDS/OST does not re-set them */
2126         if (!(xvalid & OP_XVALID_CTIME_SET) &&
2127              (attr->ia_valid & ATTR_CTIME)) {
2128                 attr->ia_ctime = current_time(inode);
2129                 xvalid |= OP_XVALID_CTIME_SET;
2130         }
2131         if (!(attr->ia_valid & ATTR_ATIME_SET) &&
2132             (attr->ia_valid & ATTR_ATIME)) {
2133                 attr->ia_atime = current_time(inode);
2134                 attr->ia_valid |= ATTR_ATIME_SET;
2135         }
2136         if (!(attr->ia_valid & ATTR_MTIME_SET) &&
2137             (attr->ia_valid & ATTR_MTIME)) {
2138                 attr->ia_mtime = current_time(inode);
2139                 attr->ia_valid |= ATTR_MTIME_SET;
2140         }
2141
2142         if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
2143                 CDEBUG(D_INODE, "setting mtime %lld, ctime %lld, now = %lld\n",
2144                        (s64)attr->ia_mtime.tv_sec, (s64)attr->ia_ctime.tv_sec,
2145                        ktime_get_real_seconds());
2146
2147         if (S_ISREG(inode->i_mode))
2148                 inode_unlock(inode);
2149
2150         /* We always do an MDS RPC, even if we're only changing the size;
2151          * only the MDS knows whether truncate() should fail with -ETXTBSY */
2152
2153         OBD_ALLOC_PTR(op_data);
2154         if (op_data == NULL)
2155                 GOTO(out, rc = -ENOMEM);
2156
2157         if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
2158                 /* If we are changing file size, file content is
2159                  * modified, flag it.
2160                  */
2161                 xvalid |= OP_XVALID_OWNEROVERRIDE;
2162                 op_data->op_bias |= MDS_DATA_MODIFIED;
2163                 clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
2164         }
2165
2166         if (attr->ia_valid & ATTR_FILE) {
2167                 struct ll_file_data *fd = attr->ia_file->private_data;
2168
2169                 if (fd->fd_lease_och)
2170                         op_data->op_bias |= MDS_TRUNC_KEEP_LEASE;
2171         }
2172
2173         op_data->op_attr = *attr;
2174         op_data->op_xvalid = xvalid;
2175
2176         rc = ll_md_setattr(dentry, op_data);
2177         if (rc)
2178                 GOTO(out, rc);
2179
2180         if (!S_ISREG(inode->i_mode) || hsm_import)
2181                 GOTO(out, rc = 0);
2182
2183         if (attr->ia_valid & (ATTR_SIZE | ATTR_ATIME | ATTR_ATIME_SET |
2184                               ATTR_MTIME | ATTR_MTIME_SET | ATTR_CTIME) ||
2185             xvalid & OP_XVALID_CTIME_SET) {
2186                 bool cached = false;
2187
2188                 rc = pcc_inode_setattr(inode, attr, &cached);
2189                 if (cached) {
2190                         if (rc) {
2191                                 CERROR("%s: PCC inode "DFID" setattr failed: "
2192                                        "rc = %d\n",
2193                                        ll_i2sbi(inode)->ll_fsname,
2194                                        PFID(&lli->lli_fid), rc);
2195                                 GOTO(out, rc);
2196                         }
2197                 } else {
2198                         unsigned int flags = 0;
2199
2200                         /* For truncate and utimes that send attributes to
2201                          * the OSTs, setting mtime/atime to the past is
2202                          * performed under a PW [0:EOF] extent lock
2203                          * (new_size:EOF for truncate). It may seem excessive
2204                          * to send mtime/atime updates to the OSTs when not
2205                          * setting times to the past, but it is necessary due
2206                          * to possible time de-synchronization between the
2207                          * MDT inode and the OST objects.
2208                          */
2208                         if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) {
2209                                 xvalid |= OP_XVALID_FLAGS;
2210                                 flags = LUSTRE_ENCRYPT_FL;
2211                                 /* A call to ll_io_zero_page() is not needed
2212                                  * if truncating on a PAGE_SIZE boundary,
2213                                  * because whole pages will be wiped.
2214                                  * In the case of Direct IO, all we need is
2215                                  * to set the new size.
2216                                  */
2217                                 if (attr->ia_valid & ATTR_SIZE &&
2218                                     attr->ia_size & ~PAGE_MASK &&
2219                                     !(attr->ia_valid & ATTR_FILE &&
2220                                       attr->ia_file->f_flags & O_DIRECT)) {
2221                                         pgoff_t offset =
2222                                                 attr->ia_size & (PAGE_SIZE - 1);
2223
2224                                         rc = ll_io_zero_page(inode,
2225                                                     attr->ia_size >> PAGE_SHIFT,
2226                                                     offset, PAGE_SIZE - offset);
2227                                         if (rc)
2228                                                 GOTO(out, rc);
2229                                 }
2230                                 /* If encrypted volatile file without the key,
2231                                  * we need to fetch size from reference file,
2232                                  * and set it on OST objects. This happens when
2233                                  * migrating or extending an encrypted file
2234                                  * without the key.
2235                                  */
2236                                 if (filename_is_volatile(dentry->d_name.name,
2237                                                          dentry->d_name.len,
2238                                                          NULL) &&
2239                                     llcrypt_require_key(inode) == -ENOKEY) {
2240                                         struct file *ref_file;
2241                                         struct inode *ref_inode;
2242                                         struct ll_inode_info *ref_lli;
2243                                         struct cl_object *ref_obj;
2244                                         struct cl_attr ref_attr = { 0 };
2245                                         struct lu_env *env;
2246                                         __u16 refcheck;
2247
2248                                         rc = volatile_ref_file(
2249                                                 dentry->d_name.name,
2250                                                 dentry->d_name.len,
2251                                                 &ref_file);
2252                                         if (rc)
2253                                                 GOTO(out, rc);
2254
2255                                         ref_inode = file_inode(ref_file);
2256                                         if (!ref_inode) {
2257                                                 fput(ref_file);
2258                                                 GOTO(out, rc = -EINVAL);
2259                                         }
2260
2261                                         env = cl_env_get(&refcheck);
2262                                         if (IS_ERR(env))
2263                                                 GOTO(out, rc = PTR_ERR(env));
2264
2265                                         ref_lli = ll_i2info(ref_inode);
2266                                         ref_obj = ref_lli->lli_clob;
2267                                         cl_object_attr_lock(ref_obj);
2268                                         rc = cl_object_attr_get(env, ref_obj,
2269                                                                 &ref_attr);
2270                                         cl_object_attr_unlock(ref_obj);
2271                                         cl_env_put(env, &refcheck);
2272                                         fput(ref_file);
2273                                         if (rc)
2274                                                 GOTO(out, rc);
2275
2276                                         attr->ia_valid |= ATTR_SIZE;
2277                                         attr->ia_size = ref_attr.cat_size;
2278                                 }
2279                         }
2280                         rc = cl_setattr_ost(lli->lli_clob, attr, xvalid, flags);
2281                 }
2282         }
2283
2284         /* If the file was restored, the HSM dirty flag needs to be set.
2285          *
2286          * We've already sent the MDS_DATA_MODIFIED flag in
2287          * ll_md_setattr() for truncate. However, the MDT refuses to
2288          * set the HS_DIRTY flag on released files, so we have to set
2289          * it again if the file has been restored. See how
2290          * LLIF_DATA_MODIFIED is set in vvp_io_setattr_fini().
2291          *
2292          * Note that if the file is not released, the previous
2293          * MDS_DATA_MODIFIED has taken effect and usually
2294          * LLIF_DATA_MODIFIED is not set (see vvp_io_setattr_fini()).
2295          * This way we can save an RPC for the common open + truncate
2296          * case. */
2297         if (test_and_clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags)) {
2298                 struct hsm_state_set hss = {
2299                         .hss_valid = HSS_SETMASK,
2300                         .hss_setmask = HS_DIRTY,
2301                 };
2302                 int rc2;
2303
2304                 rc2 = ll_hsm_state_set(inode, &hss);
2305                 /* A truncate and a write can happen at the same time, so
2306                  * the file can be marked modified even though it was not
2307                  * restored from the released state; in that case
2308                  * ll_hsm_state_set() is not applicable to the file and
2309                  * rc2 < 0 is normal. */
2310                 if (rc2 < 0)
2311                         CDEBUG(D_INFO, DFID "HSM set dirty failed: rc2 = %d\n",
2312                                PFID(ll_inode2fid(inode)), rc2);
2313         }
2314
2315         EXIT;
2316 out:
2317         if (op_data != NULL)
2318                 ll_finish_md_op_data(op_data);
2319
2320         if (S_ISREG(inode->i_mode)) {
2321                 inode_lock(inode);
2322                 if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
2323                         inode_dio_wait(inode);
2324                 /* Once we've got the i_mutex, it's safe to set the S_NOSEC
2325                  * flag.  ll_update_inode() (called from ll_md_setattr()) clears
2326                  * inode flags, so there is a gap where S_NOSEC is not set.
2327                  * This can cause a writer to take the i_mutex unnecessarily,
2328                  * but this is safe to do and should be rare. */
2329                 inode_has_no_xattr(inode);
2330         }
2331
2332         if (!rc)
2333                 ll_stats_ops_tally(ll_i2sbi(inode), attr->ia_valid & ATTR_SIZE ?
2334                                         LPROC_LL_TRUNC : LPROC_LL_SETATTR,
2335                                    ktime_us_delta(ktime_get(), kstart));
2336
2337         RETURN(rc);
2338 }
2339
2340 int ll_setattr(struct dentry *de, struct iattr *attr)
2341 {
2342         int mode = de->d_inode->i_mode;
2343         enum op_xvalid xvalid = 0;
2344         int rc;
2345
2346         rc = llcrypt_prepare_setattr(de, attr);
2347         if (rc)
2348                 return rc;
2349
2350         if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
2351                               (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
2352                 xvalid |= OP_XVALID_OWNEROVERRIDE;
2353
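             /* If a combined size+mode change clears setuid or setgid, force
              * the mode update through; also flag ATTR_KILL_SUID and
              * ATTR_KILL_SGID when a mode change drops those bits.
              */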
2354         if (((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
2355                                (ATTR_SIZE|ATTR_MODE)) &&
2356             (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
2357              (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2358               !(attr->ia_mode & S_ISGID))))
2359                 attr->ia_valid |= ATTR_FORCE;
2360
2361         if ((attr->ia_valid & ATTR_MODE) &&
2362             (mode & S_ISUID) &&
2363             !(attr->ia_mode & S_ISUID) &&
2364             !(attr->ia_valid & ATTR_KILL_SUID))
2365                 attr->ia_valid |= ATTR_KILL_SUID;
2366
2367         if ((attr->ia_valid & ATTR_MODE) &&
2368             ((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2369             !(attr->ia_mode & S_ISGID) &&
2370             !(attr->ia_valid & ATTR_KILL_SGID))
2371                 attr->ia_valid |= ATTR_KILL_SGID;
2372
2373         return ll_setattr_raw(de, attr, xvalid, false);
2374 }
2375
2376 int ll_statfs_internal(struct ll_sb_info *sbi, struct obd_statfs *osfs,
2377                        u32 flags)
2378 {
2379         struct obd_statfs obd_osfs = { 0 };
2380         time64_t max_age;
2381         int rc;
2382
2383         ENTRY;
2384         max_age = ktime_get_seconds() - sbi->ll_statfs_max_age;
2385
2386         if (test_bit(LL_SBI_LAZYSTATFS, sbi->ll_flags))
2387                 flags |= OBD_STATFS_NODELAY;
2388
2389         rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
2390         if (rc)
2391                 RETURN(rc);
2392
2393         osfs->os_type = LL_SUPER_MAGIC;
2394
2395         CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n",
2396               osfs->os_bavail, osfs->os_blocks, osfs->os_ffree, osfs->os_files);
2397
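             /* If the MDT reply already contains summed statistics for the
              * whole filesystem, skip the separate OST statfs below.
              */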
2398         if (osfs->os_state & OS_STATFS_SUM)
2399                 GOTO(out, rc);
2400
2401         rc = obd_statfs(NULL, sbi->ll_dt_exp, &obd_osfs, max_age, flags);
2402         if (rc) /* Possibly a filesystem with no OSTs.  Report MDT totals. */
2403                 GOTO(out, rc = 0);
2404
2405         CDEBUG(D_SUPER, "OSC blocks %llu/%llu objects %llu/%llu\n",
2406                obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
2407                obd_osfs.os_files);
2408
2409         osfs->os_bsize = obd_osfs.os_bsize;
2410         osfs->os_blocks = obd_osfs.os_blocks;
2411         osfs->os_bfree = obd_osfs.os_bfree;
2412         osfs->os_bavail = obd_osfs.os_bavail;
2413
2414         /* If we have _some_ OSTs, but don't have as many free objects on the
2415          * OSTs as inodes on the MDTs, reduce the reported number of inodes
2416          * to compensate, so that the "inodes in use" number is correct.
2417          * This should be kept in sync with lod_statfs() behaviour.
2418          */
2419         if (obd_osfs.os_files && obd_osfs.os_ffree < osfs->os_ffree) {
2420                 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
2421                                  obd_osfs.os_ffree;
2422                 osfs->os_ffree = obd_osfs.os_ffree;
2423         }
2424
2425 out:
2426         RETURN(rc);
2427 }
2428
2429 static int ll_statfs_project(struct inode *inode, struct kstatfs *sfs)
2430 {
2431         struct if_quotactl qctl = {
2432                 .qc_cmd = LUSTRE_Q_GETQUOTA,
2433                 .qc_type = PRJQUOTA,
2434                 .qc_valid = QC_GENERAL,
2435         };
2436         u64 limit, curblock;
2437         int ret;
2438
2439         qctl.qc_id = ll_i2info(inode)->lli_projid;
2440         ret = quotactl_ioctl(inode->i_sb, &qctl);
2441         if (ret) {
2442                 /* ignore errors if the project ID does not have
2443                  * a quota limit or the feature is unsupported.
2444                  */
2445                 if (ret == -ESRCH || ret == -EOPNOTSUPP)
2446                         ret = 0;
2447                 return ret;
2448         }
2449
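             /* Quota block limits are reported in 1KiB units; convert them to
              * f_bsize blocks and clamp the reported space to the project
              * limit.
              */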
2450         limit = ((qctl.qc_dqblk.dqb_bsoftlimit ?
2451                  qctl.qc_dqblk.dqb_bsoftlimit :
2452                  qctl.qc_dqblk.dqb_bhardlimit) * 1024) / sfs->f_bsize;
2453         if (limit && sfs->f_blocks > limit) {
2454                 curblock = (qctl.qc_dqblk.dqb_curspace +
2455                                 sfs->f_bsize - 1) / sfs->f_bsize;
2456                 sfs->f_blocks = limit;
2457                 sfs->f_bfree = sfs->f_bavail =
2458                         (sfs->f_blocks > curblock) ?
2459                         (sfs->f_blocks - curblock) : 0;
2460         }
2461
2462         limit = qctl.qc_dqblk.dqb_isoftlimit ?
2463                 qctl.qc_dqblk.dqb_isoftlimit :
2464                 qctl.qc_dqblk.dqb_ihardlimit;
2465         if (limit && sfs->f_files > limit) {
2466                 sfs->f_files = limit;
2467                 sfs->f_ffree = (sfs->f_files >
2468                         qctl.qc_dqblk.dqb_curinodes) ?
2469                         (sfs->f_files - qctl.qc_dqblk.dqb_curinodes) : 0;
2470         }
2471
2472         return 0;
2473 }
2474
2475 int ll_statfs(struct dentry *de, struct kstatfs *sfs)
2476 {
2477         struct super_block *sb = de->d_sb;
2478         struct obd_statfs osfs;
2479         __u64 fsid = huge_encode_dev(sb->s_dev);
2480         ktime_t kstart = ktime_get();
2481         int rc;
2482
2483         CDEBUG(D_VFSTRACE, "VFS Op:sb=%s (%p)\n", sb->s_id, sb);
2484
2485         /* Some amount of caching on the client is allowed */
2486         rc = ll_statfs_internal(ll_s2sbi(sb), &osfs, OBD_STATFS_SUM);
2487         if (rc)
2488                 return rc;
2489
2490         statfs_unpack(sfs, &osfs);
2491
2492         /* We need to downshift for all 32-bit kernels, because we can't
2493          * tell if the kernel is being called via sys_statfs64() or not.
2494          * Stop before overflowing f_bsize; at that point it is better
2495          * to just risk EOVERFLOW if the caller is using the old sys_statfs(). */
2496         if (sizeof(long) < 8) {
2497                 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
2498                         sfs->f_bsize <<= 1;
2499
2500                         osfs.os_blocks >>= 1;
2501                         osfs.os_bfree >>= 1;
2502                         osfs.os_bavail >>= 1;
2503                 }
2504         }
2505
2506         sfs->f_blocks = osfs.os_blocks;
2507         sfs->f_bfree = osfs.os_bfree;
2508         sfs->f_bavail = osfs.os_bavail;
2509         sfs->f_fsid.val[0] = (__u32)fsid;
2510         sfs->f_fsid.val[1] = (__u32)(fsid >> 32);
2511         if (ll_i2info(de->d_inode)->lli_projid)
2512                 return ll_statfs_project(de->d_inode, sfs);
2513
2514         ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STATFS,
2515                            ktime_us_delta(ktime_get(), kstart));
2516
2517         return 0;
2518 }
2519
2520 void ll_inode_size_lock(struct inode *inode)
2521 {
2522         struct ll_inode_info *lli;
2523
2524         LASSERT(!S_ISDIR(inode->i_mode));
2525
2526         lli = ll_i2info(inode);
2527         mutex_lock(&lli->lli_size_mutex);
2528 }
2529
2530 void ll_inode_size_unlock(struct inode *inode)
2531 {
2532         struct ll_inode_info *lli;
2533
2534         lli = ll_i2info(inode);
2535         mutex_unlock(&lli->lli_size_mutex);
2536 }
2537
2538 void ll_update_inode_flags(struct inode *inode, unsigned int ext_flags)
2539 {
2540         /* do not clear encryption flag */
2541         ext_flags |= ll_inode_to_ext_flags(inode->i_flags) & LUSTRE_ENCRYPT_FL;
2542         inode->i_flags = ll_ext_to_inode_flags(ext_flags);
2543         if (ext_flags & LUSTRE_PROJINHERIT_FL)
2544                 set_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags);
2545         else
2546                 clear_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags);
2547 }
2548
2549 int ll_update_inode(struct inode *inode, struct lustre_md *md)
2550 {
2551         struct ll_inode_info *lli = ll_i2info(inode);
2552         struct mdt_body *body = md->body;
2553         struct ll_sb_info *sbi = ll_i2sbi(inode);
2554         bool api32;
2555         int rc = 0;
2556
2557         if (body->mbo_valid & OBD_MD_FLEASIZE) {
2558                 rc = cl_file_inode_init(inode, md);
2559                 if (rc)
2560                         return rc;
2561         }
2562
2563         if (S_ISDIR(inode->i_mode)) {
2564                 rc = ll_update_lsm_md(inode, md);
2565                 if (rc != 0)
2566                         return rc;
2567         }
2568
2569         if (body->mbo_valid & OBD_MD_FLACL)
2570                 lli_replace_acl(lli, md);
2571
2572         api32 = test_bit(LL_SBI_32BIT_API, sbi->ll_flags);
2573         inode->i_ino = cl_fid_build_ino(&body->mbo_fid1, api32);
2574         inode->i_generation = cl_fid_build_gen(&body->mbo_fid1);
2575
2576         if (body->mbo_valid & OBD_MD_FLATIME) {
2577                 if (body->mbo_atime > inode->i_atime.tv_sec)
2578                         inode->i_atime.tv_sec = body->mbo_atime;
2579                 lli->lli_atime = body->mbo_atime;
2580         }
2581
2582         if (body->mbo_valid & OBD_MD_FLMTIME) {
2583                 if (body->mbo_mtime > inode->i_mtime.tv_sec) {
2584                         CDEBUG(D_INODE,
2585                                "setting ino %lu mtime from %lld to %llu\n",
2586                                inode->i_ino, (s64)inode->i_mtime.tv_sec,
2587                                body->mbo_mtime);
2588                         inode->i_mtime.tv_sec = body->mbo_mtime;
2589                 }
2590                 lli->lli_mtime = body->mbo_mtime;
2591         }
2592
2593         if (body->mbo_valid & OBD_MD_FLCTIME) {
2594                 if (body->mbo_ctime > inode->i_ctime.tv_sec)
2595                         inode->i_ctime.tv_sec = body->mbo_ctime;
2596                 lli->lli_ctime = body->mbo_ctime;
2597         }
2598
2599         if (body->mbo_valid & OBD_MD_FLBTIME)
2600                 lli->lli_btime = body->mbo_btime;
2601
2602         /* Clear i_flags to remove S_NOSEC before permissions are updated */
2603         if (body->mbo_valid & OBD_MD_FLFLAGS)
2604                 ll_update_inode_flags(inode, body->mbo_flags);
2605         if (body->mbo_valid & OBD_MD_FLMODE)
2606                 inode->i_mode = (inode->i_mode & S_IFMT) |
2607                                 (body->mbo_mode & ~S_IFMT);
2608
2609         if (body->mbo_valid & OBD_MD_FLTYPE)
2610                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
2611                                 (body->mbo_mode & S_IFMT);
2612
2613         LASSERT(inode->i_mode != 0);
2614         if (body->mbo_valid & OBD_MD_FLUID)
2615                 inode->i_uid = make_kuid(&init_user_ns, body->mbo_uid);
2616         if (body->mbo_valid & OBD_MD_FLGID)
2617                 inode->i_gid = make_kgid(&init_user_ns, body->mbo_gid);
2618         if (body->mbo_valid & OBD_MD_FLPROJID)
2619                 lli->lli_projid = body->mbo_projid;
2620         if (body->mbo_valid & OBD_MD_FLNLINK) {
2621                 spin_lock(&inode->i_lock);
2622                 set_nlink(inode, body->mbo_nlink);
2623                 spin_unlock(&inode->i_lock);
2624         }
2625         if (body->mbo_valid & OBD_MD_FLRDEV)
2626                 inode->i_rdev = old_decode_dev(body->mbo_rdev);
2627
2628         if (body->mbo_valid & OBD_MD_FLID) {
2629                 /* FID shouldn't be changed! */
2630                 if (fid_is_sane(&lli->lli_fid)) {
2631                         LASSERTF(lu_fid_eq(&lli->lli_fid, &body->mbo_fid1),
2632                                  "Trying to change FID "DFID
2633                                  " to the "DFID", inode "DFID"(%p)\n",
2634                                  PFID(&lli->lli_fid), PFID(&body->mbo_fid1),
2635                                  PFID(ll_inode2fid(inode)), inode);
2636                 } else {
2637                         lli->lli_fid = body->mbo_fid1;
2638                 }
2639         }
2640
2641         LASSERT(fid_seq(&lli->lli_fid) != 0);
2642
2643         /* For an encrypted file without the key, do not lose the
2644          * clear text size stored in lli_lazysize by ll_merge_attr();
2645          * we will need it in ll_prepare_close().
2646          */
2647         if (lli->lli_attr_valid & OBD_MD_FLLAZYSIZE && lli->lli_lazysize &&
2648             llcrypt_require_key(inode) == -ENOKEY)
2649                 lli->lli_attr_valid = body->mbo_valid | OBD_MD_FLLAZYSIZE;
2650         else
2651                 lli->lli_attr_valid = body->mbo_valid;
2652         if (body->mbo_valid & OBD_MD_FLSIZE) {
2653                 i_size_write(inode, body->mbo_size);
2654
2655                 CDEBUG(D_VFSTRACE, "inode="DFID", updating i_size %llu\n",
2656                        PFID(ll_inode2fid(inode)),
2657                        (unsigned long long)body->mbo_size);
2658
2659                 if (body->mbo_valid & OBD_MD_FLBLOCKS)
2660                         inode->i_blocks = body->mbo_blocks;
2661         } else {
2662                 if (body->mbo_valid & OBD_MD_FLLAZYSIZE)
2663                         lli->lli_lazysize = body->mbo_size;
2664                 if (body->mbo_valid & OBD_MD_FLLAZYBLOCKS)
2665                         lli->lli_lazyblocks = body->mbo_blocks;
2666         }
2667
2668         if (body->mbo_valid & OBD_MD_TSTATE) {
2669                 /* Set LLIF_FILE_RESTORING while a restore is ongoing and
2670                  * clear it when done, so that we start glimpsing
2671                  * updated attributes again.
2672                  */
2673                 if (body->mbo_t_state & MS_RESTORE)
2674                         set_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
2675                 else
2676                         clear_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
2677         }
2678
2679         return 0;
2680 }
2681
2682 /* Update directory depth relative to ROOT; called after the LOOKUP lock is fetched. */
2683 void ll_update_dir_depth(struct inode *dir, struct inode *inode)
2684 {
2685         struct ll_inode_info *lli;
2686
2687         if (!S_ISDIR(inode->i_mode))
2688                 return;
2689
2690         if (inode == dir)
2691                 return;
2692
2693         lli = ll_i2info(inode);
2694         lli->lli_depth = ll_i2info(dir)->lli_depth + 1;
2695         CDEBUG(D_INODE, DFID" depth %hu\n", PFID(&lli->lli_fid), lli->lli_depth);
2696 }
2697
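/*
 * Drop all of the inode's pages from the page cache and verify that none are
 * left behind, modulo the LU-118 race handled below.
 */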
2698 void ll_truncate_inode_pages_final(struct inode *inode)
2699 {
2700         struct address_space *mapping = &inode->i_data;
2701         unsigned long nrpages;
2702         unsigned long flags;
2703
2704         truncate_inode_pages_final(mapping);
2705
2706         /* Workaround for LU-118: nrpages may not be fully updated when
2707          * truncate_inode_pages_final() returns, as a page in the specified
2708          * range can still be in the process of deletion (inside
2709          * __delete_from_page_cache()). Thus mapping->nrpages can be non-zero
2710          * even after truncation of the whole mapping.  Only take the xarray
2711          * lock to re-read nrpages if it isn't already zero.
2712          */
2713         nrpages = mapping->nrpages;
2714         if (nrpages) {
2715                 ll_xa_lock_irqsave(&mapping->i_pages, flags);
2716                 nrpages = mapping->nrpages;
2717                 ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
2718         } /* Workaround end */
2719
2720         LASSERTF(nrpages == 0, "%s: inode="DFID"(%p) nrpages=%lu, "
2721                  "see https://jira.whamcloud.com/browse/LU-118\n",
2722                  ll_i2sbi(inode)->ll_fsname,
2723                  PFID(ll_inode2fid(inode)), inode, nrpages);
2724 }
2725
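/*
 * Initialize a newly read inode from the lustre_md passed in @opaque and set
 * up its inode/file/address_space operations according to the file type.
 */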
2726 int ll_read_inode2(struct inode *inode, void *opaque)
2727 {
2728         struct lustre_md *md = opaque;
2729         struct ll_inode_info *lli = ll_i2info(inode);
2730         int     rc;
2731         ENTRY;
2732
2733         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
2734                PFID(&lli->lli_fid), inode);
2735
2736         /* Core attributes from the MDS first.  This is a new inode, and
2737          * the VFS doesn't zero times in the core inode so we have to do
2738          * it ourselves.  They will be overwritten by either MDS or OST
2739          * attributes - we just need to make sure they aren't newer.
2740          */
2741         inode->i_mtime.tv_sec = 0;
2742         inode->i_atime.tv_sec = 0;
2743         inode->i_ctime.tv_sec = 0;
2744         inode->i_rdev = 0;
2745         rc = ll_update_inode(inode, md);
2746         if (rc != 0)
2747                 RETURN(rc);
2748
2749         /* OIDEBUG(inode); */
2750
2751 #ifdef HAVE_BACKING_DEV_INFO
2752         /* initializing backing dev info. */
2753         inode->i_mapping->backing_dev_info = &s2lsi(inode->i_sb)->lsi_bdi;
2754 #endif
2755         if (S_ISREG(inode->i_mode)) {
2756                 struct ll_sb_info *sbi = ll_i2sbi(inode);
2757                 inode->i_op = &ll_file_inode_operations;
2758                 inode->i_fop = sbi->ll_fop;
2759                 inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
2760                 EXIT;
2761         } else if (S_ISDIR(inode->i_mode)) {
2762                 inode->i_op = &ll_dir_inode_operations;
2763                 inode->i_fop = &ll_dir_operations;
2764                 EXIT;
2765         } else if (S_ISLNK(inode->i_mode)) {
2766                 inode->i_op = &ll_fast_symlink_inode_operations;
2767                 EXIT;
2768         } else {
2769                 inode->i_op = &ll_special_inode_operations;
2770
2771                 init_special_inode(inode, inode->i_mode,
2772                                    inode->i_rdev);
2773
2774                 EXIT;
2775         }
2776
2777         return 0;
2778 }
2779
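/*
 * Final inode teardown: flush dirty pages (or discard them if i_nlink is 0),
 * then drop all cached pages and clear the llite-private state.
 */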
2780 void ll_delete_inode(struct inode *inode)
2781 {
2782         struct ll_inode_info *lli = ll_i2info(inode);
2783         ENTRY;
2784
2785         if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL) {
2786                 /* This is the last chance to write out dirty pages;
2787                  * otherwise we may lose data during umount.
2788                  *
2789                  * If i_nlink is 0, just discard the data. This is safe because
2790                  * the local inode only gets i_nlink 0 from the server on the
2791                  * last unlink, so the file cannot be open anywhere else.
2792                  */
2793                 cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, inode->i_nlink ?
2794                                    CL_FSYNC_LOCAL : CL_FSYNC_DISCARD, 1);
2795         }
2796
2797         ll_truncate_inode_pages_final(inode);
2798         ll_clear_inode(inode);
2799         clear_inode(inode);
2800
2801         EXIT;
2802 }
2803
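/*
 * Handle FS_IOC_GETFLAGS/FS_IOC_SETFLAGS: fetch the flags from the MDT, or
 * push updated flags to the MDT and then to the OST objects.
 */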
2804 int ll_iocontrol(struct inode *inode, struct file *file,
2805                  unsigned int cmd, unsigned long arg)
2806 {
2807         struct ll_sb_info *sbi = ll_i2sbi(inode);
2808         struct ptlrpc_request *req = NULL;
2809         int rc, flags = 0;
2810         ENTRY;
2811
2812         switch (cmd) {
2813         case FS_IOC_GETFLAGS: {
2814                 struct mdt_body *body;
2815                 struct md_op_data *op_data;
2816
2817                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
2818                                              0, 0, LUSTRE_OPC_ANY,
2819                                              NULL);
2820                 if (IS_ERR(op_data))
2821                         RETURN(PTR_ERR(op_data));
2822
2823                 op_data->op_valid = OBD_MD_FLFLAGS;
2824                 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
2825                 ll_finish_md_op_data(op_data);
2826                 if (rc) {
2827                         CERROR("%s: failure inode "DFID": rc = %d\n",
2828                                sbi->ll_md_exp->exp_obd->obd_name,
2829                                PFID(ll_inode2fid(inode)), rc);
2830                         RETURN(-abs(rc));
2831                 }
2832
2833                 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
2834
2835                 flags = body->mbo_flags;
2836
2837                 ptlrpc_req_finished(req);
2838
2839                 RETURN(put_user(flags, (int __user *)arg));
2840         }
2841         case FS_IOC_SETFLAGS: {
2842                 struct iattr *attr;
2843                 struct md_op_data *op_data;
2844                 struct cl_object *obj;
2845                 struct fsxattr fa = { 0 };
2846
2847                 if (get_user(flags, (int __user *)arg))
2848                         RETURN(-EFAULT);
2849
2850                 fa.fsx_projid = ll_i2info(inode)->lli_projid;
2851                 if (flags & LUSTRE_PROJINHERIT_FL)
2852                         fa.fsx_xflags = FS_XFLAG_PROJINHERIT;
2853
2854                 rc = ll_ioctl_check_project(inode, fa.fsx_xflags,
2855                                             fa.fsx_projid);
2856                 if (rc)
2857                         RETURN(rc);
2858
2859                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
2860                                              LUSTRE_OPC_ANY, NULL);
2861                 if (IS_ERR(op_data))
2862                         RETURN(PTR_ERR(op_data));
2863
2864                 op_data->op_attr_flags = flags;
2865                 op_data->op_xvalid |= OP_XVALID_FLAGS;
2866                 rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &req);
2867                 ll_finish_md_op_data(op_data);
2868                 ptlrpc_req_finished(req);
2869                 if (rc)
2870                         RETURN(rc);
2871
2872                 ll_update_inode_flags(inode, flags);
2873
2874                 obj = ll_i2info(inode)->lli_clob;
2875                 if (obj == NULL)
2876                         RETURN(0);
2877
2878                 OBD_ALLOC_PTR(attr);
2879                 if (attr == NULL)
2880                         RETURN(-ENOMEM);
2881
2882                 rc = cl_setattr_ost(obj, attr, OP_XVALID_FLAGS, flags);
2883
2884                 OBD_FREE_PTR(attr);
2885                 RETURN(rc);
2886         }
2887         default:
2888                 RETURN(-ENOSYS);
2889         }
2890
2891         RETURN(0);
2892 }
2893
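/*
 * Flush the security contexts of the current user on both the metadata and
 * data exports.
 */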
2894 int ll_flush_ctx(struct inode *inode)
2895 {
2896         struct ll_sb_info  *sbi = ll_i2sbi(inode);
2897
2898         CDEBUG(D_SEC, "flush context for user %d\n",
2899                from_kuid(&init_user_ns, current_uid()));
2900
2901         obd_set_info_async(NULL, sbi->ll_md_exp,
2902                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2903                            0, NULL, NULL);
2904         obd_set_info_async(NULL, sbi->ll_dt_exp,
2905                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2906                            0, NULL, NULL);
2907         return 0;
2908 }
2909
2910 /* "umount -f" on the client means force down; don't save state */
2911 void ll_umount_begin(struct super_block *sb)
2912 {
2913         struct ll_sb_info *sbi = ll_s2sbi(sb);
2914         struct obd_device *obd;
2915         struct obd_ioctl_data *ioc_data;
2916         int cnt;
2917         ENTRY;
2918
2919         CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
2920                sb->s_count, atomic_read(&sb->s_active));
2921
2922         obd = class_exp2obd(sbi->ll_md_exp);
2923         if (obd == NULL) {
2924                 CERROR("Invalid MDC connection handle %#llx\n",
2925                        sbi->ll_md_exp->exp_handle.h_cookie);
2926                 EXIT;
2927                 return;
2928         }
2929         obd->obd_force = 1;
2930
2931         obd = class_exp2obd(sbi->ll_dt_exp);
2932         if (obd == NULL) {
2933                 CERROR("Invalid LOV connection handle %#llx\n",
2934                        sbi->ll_dt_exp->exp_handle.h_cookie);
2935                 EXIT;
2936                 return;
2937         }
2938         obd->obd_force = 1;
2939
2940         OBD_ALLOC_PTR(ioc_data);
2941         if (ioc_data) {
2942                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
2943                               sizeof(*ioc_data), ioc_data, NULL);
2944
2945                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
2946                               sizeof(*ioc_data), ioc_data, NULL);
2947
2948                 OBD_FREE_PTR(ioc_data);
2949         }
2950
2951         /* Really, we'd like to wait until there are no requests outstanding,
2952          * and then continue.  For now, we just periodically check for the VFS
2953          * to decrement mnt_cnt and hope to finish within 10 seconds.
2954          */
2955         cnt = 10;
2956         while (cnt > 0 &&
2957                !may_umount(sbi->ll_mnt.mnt)) {
2958                 ssleep(1);
2959                 cnt -= 1;
2960         }
2961
2962         EXIT;
2963 }
2964
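/*
 * Handle remount: only the read-only flag is processed, by informing the MDT
 * and toggling SB_RDONLY on the superblock.
 */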
2965 int ll_remount_fs(struct super_block *sb, int *flags, char *data)
2966 {
2967         struct ll_sb_info *sbi = ll_s2sbi(sb);
2968         char *profilenm = get_profile_name(sb);
2969         int err;
2970         __u32 read_only;
2971
2972         if ((*flags & MS_RDONLY) != (sb->s_flags & SB_RDONLY)) {
2973                 read_only = *flags & MS_RDONLY;
2974                 err = obd_set_info_async(NULL, sbi->ll_md_exp,
2975                                          sizeof(KEY_READ_ONLY),
2976                                          KEY_READ_ONLY, sizeof(read_only),
2977                                          &read_only, NULL);
2978                 if (err) {
2979                         LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
2980                                       profilenm, read_only ?
2981                                       "read-only" : "read-write", err);
2982                         return err;
2983                 }
2984
2985                 if (read_only)
2986                         sb->s_flags |= SB_RDONLY;
2987                 else
2988                         sb->s_flags &= ~SB_RDONLY;
2989
2990                 if (test_bit(LL_SBI_VERBOSE, sbi->ll_flags))
2991                         LCONSOLE_WARN("Remounted %s %s\n", profilenm,
2992                                       read_only ?  "read-only" : "read-write");
2993         }
2994         return 0;
2995 }
2996
2997 /**
2998  * Cleanup the open handle that is cached on MDT-side.
2999  *
3000  * In the open case, the client-side open handling thread may hit an error
3001  * after the MDT has granted the open. In that case, the client should
3002  * send a close RPC to the MDT as cleanup; otherwise the open handle is
3003  * leaked on the MDT until the client unmounts or is evicted.
3004  *
3005  * Furthermore, if someone unlinks the file, the open handle holds a
3006  * reference on the file/object and will block subsequent threads that
3007  * want to locate that object via its FID.
3008  *
3009  * \param[in] sb    super block for this file system
3010  * \param[in] pill  request capsule of the original open request
3011  */
3012 void ll_open_cleanup(struct super_block *sb, struct req_capsule *pill)
3013 {
3014         struct mdt_body                 *body;
3015         struct md_op_data               *op_data;
3016         struct ptlrpc_request           *close_req = NULL;
3017         struct obd_export               *exp       = ll_s2sbi(sb)->ll_md_exp;
3018         ENTRY;
3019
3020         body = req_capsule_server_get(pill, &RMF_MDT_BODY);
3021         OBD_ALLOC_PTR(op_data);
3022         if (op_data == NULL) {
3023                 CWARN("%s: cannot allocate op_data to release open handle for "
3024                       DFID"\n", ll_s2sbi(sb)->ll_fsname, PFID(&body->mbo_fid1));
3025
3026                 RETURN_EXIT;
3027         }
3028
3029         op_data->op_fid1 = body->mbo_fid1;
3030         op_data->op_open_handle = body->mbo_open_handle;
3031         op_data->op_mod_time = ktime_get_real_seconds();
3032         md_close(exp, op_data, NULL, &close_req);
3033         ptlrpc_req_finished(close_req);
3034         ll_finish_md_op_data(op_data);
3035
3036         EXIT;
3037 }
3038
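/*
 * Unpack the MD reply into a lustre_md, then update the existing inode or
 * instantiate a new one, applying a piggybacked layout lock if one was
 * granted with the intent.
 */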
3039 int ll_prep_inode(struct inode **inode, struct req_capsule *pill,
3040                   struct super_block *sb, struct lookup_intent *it)
3041 {
3042         struct ll_sb_info *sbi = NULL;
3043         struct lustre_md md = { NULL };
3044         bool default_lmv_deleted = false;
3045         int rc;
3046
3047         ENTRY;
3048
3049         LASSERT(*inode || sb);
3050         sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
3051         rc = md_get_lustre_md(sbi->ll_md_exp, pill, sbi->ll_dt_exp,
3052                               sbi->ll_md_exp, &md);
3053         if (rc != 0)
3054                 GOTO(out, rc);
3055
3056         /*
3057          * Clear default_lmv only if the intent_getattr reply doesn't contain
3058          * it. This must be done after iget, but check it early because
3059          * ll_update_lsm_md() may change md.
3060          */
3061         if (it && (it->it_op & (IT_LOOKUP | IT_GETATTR)) &&
3062             S_ISDIR(md.body->mbo_mode) && !md.default_lmv)
3063                 default_lmv_deleted = true;
3064
3065         if (*inode) {
3066                 rc = ll_update_inode(*inode, &md);
3067                 if (rc != 0)
3068                         GOTO(out, rc);
3069         } else {
3070                 bool api32 = test_bit(LL_SBI_32BIT_API, sbi->ll_flags);
3071                 struct lu_fid *fid1 = &md.body->mbo_fid1;
3072
3073                 LASSERT(sb != NULL);
3074
3075                 /*
3076                  * At this point the server returns the same FID that the
3077                  * client generated for the create, so using ->fid1 is okay.
3078                  */
3079                 if (!fid_is_sane(fid1)) {
3080                         CERROR("%s: Fid is insane "DFID"\n",
3081                                 sbi->ll_fsname, PFID(fid1));
3082                         GOTO(out, rc = -EINVAL);
3083                 }
3084
3085                 *inode = ll_iget(sb, cl_fid_build_ino(fid1, api32), &md);
3086                 if (IS_ERR(*inode)) {
3087                         lmd_clear_acl(&md);
3088                         rc = IS_ERR(*inode) ? PTR_ERR(*inode) : -ENOMEM;
3089                         *inode = NULL;
3090                         CERROR("new_inode -fatal: rc %d\n", rc);
3091                         GOTO(out, rc);
3092                 }
3093         }
3094
3095         /* Handle a piggybacked layout lock.
3096          * A layout lock can be piggybacked on getattr and open requests.
3097          * The lsm can be applied to the inode only if it comes with a layout
3098          * lock, otherwise the correct layout may be overwritten, for example:
3099          * 1. proc1: MDT returns an lsm but does not grant a layout lock
3100          * 2. the layout is changed by another client
3101          * 3. proc2: refreshes the layout, layout lock granted
3102          * 4. proc1: would apply a stale layout */
3103         if (it != NULL && it->it_lock_mode != 0) {
3104                 struct lustre_handle lockh;
3105                 struct ldlm_lock *lock;
3106
3107                 lockh.cookie = it->it_lock_handle;
3108                 lock = ldlm_handle2lock(&lockh);
3109                 LASSERT(lock != NULL);
3110                 if (ldlm_has_layout(lock)) {
3111                         struct cl_object_conf conf;
3112
3113                         memset(&conf, 0, sizeof(conf));
3114                         conf.coc_opc = OBJECT_CONF_SET;
3115                         conf.coc_inode = *inode;
3116                         conf.coc_lock = lock;
3117                         conf.u.coc_layout = md.layout;
3118                         (void)ll_layout_conf(*inode, &conf);
3119                 }
3120                 LDLM_LOCK_PUT(lock);
3121         }
3122
3123         if (default_lmv_deleted)
3124                 ll_update_default_lsm_md(*inode, &md);
3125
3126         /* we may want to apply some policy for foreign file/dir */
3127         if (ll_sbi_has_foreign_symlink(sbi)) {
3128                 rc = ll_manage_foreign(*inode, &md);
3129                 if (rc < 0)
3130                         GOTO(out, rc);
3131         }
3132
3133         GOTO(out, rc = 0);
3134
3135 out:
3136         /* cleanup will be done if necessary */
3137         md_free_lustre_md(sbi->ll_md_exp, &md);
3138
3139         if (rc != 0 && it != NULL && it->it_op & IT_OPEN) {
3140                 ll_intent_drop_lock(it);
3141                 ll_open_cleanup(sb != NULL ? sb : (*inode)->i_sb, pill);
3142         }
3143
3144         return rc;
3145 }
3146
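/*
 * IOC_OBD_STATFS handler: validate the user-supplied ioctl buffers and
 * forward the request to either the LMV (metadata) or LOV (data) export.
 */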
3147 int ll_obd_statfs(struct inode *inode, void __user *arg)
3148 {
3149         struct ll_sb_info *sbi = NULL;
3150         struct obd_export *exp;
3151         struct obd_ioctl_data *data = NULL;
3152         __u32 type;
3153         int len = 0, rc;
3154
3155         if (inode)
3156                 sbi = ll_i2sbi(inode);
3157         if (!sbi)
3158                 GOTO(out_statfs, rc = -EINVAL);
3159
3160         rc = obd_ioctl_getdata(&data, &len, arg);
3161         if (rc)
3162                 GOTO(out_statfs, rc);
3163
3164         if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
3165             !data->ioc_pbuf1 || !data->ioc_pbuf2)
3166                 GOTO(out_statfs, rc = -EINVAL);
3167
3168         if (data->ioc_inllen1 != sizeof(__u32) ||
3169             data->ioc_inllen2 != sizeof(__u32) ||
3170             data->ioc_plen1 != sizeof(struct obd_statfs) ||
3171             data->ioc_plen2 != sizeof(struct obd_uuid))
3172                 GOTO(out_statfs, rc = -EINVAL);
3173
3174         memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
3175         if (type & LL_STATFS_LMV)
3176                 exp = sbi->ll_md_exp;
3177         else if (type & LL_STATFS_LOV)
3178                 exp = sbi->ll_dt_exp;
3179         else
3180                 GOTO(out_statfs, rc = -ENODEV);
3181
3182         rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, data, NULL);
3183         if (rc)
3184                 GOTO(out_statfs, rc);
3185 out_statfs:
3186         OBD_FREE_LARGE(data, len);
3187         return rc;
3188 }
3189
3190 /*
3191  * This is normally called from ll_finish_md_op_data(), but sometimes it needs
3192  * to be called early to avoid deadlock.
3193  */
3194 void ll_unlock_md_op_lsm(struct md_op_data *op_data)
3195 {
3196         if (op_data->op_mea2_sem) {
3197                 up_read_non_owner(op_data->op_mea2_sem);
3198                 op_data->op_mea2_sem = NULL;
3199         }
3200
3201         if (op_data->op_mea1_sem) {
3202                 up_read_non_owner(op_data->op_mea1_sem);
3203                 op_data->op_mea1_sem = NULL;
3204         }
3205 }
3206
3207 /* This function prepares the md_op_data hint for passing down to the MD stack. */
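/*
 * Minimal usage sketch (mirroring the FS_IOC_GETFLAGS path in ll_iocontrol()
 * above): prepare the op_data, issue the MD request, then release it with
 * ll_finish_md_op_data():
 *
 *         op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
 *                                      LUSTRE_OPC_ANY, NULL);
 *         if (IS_ERR(op_data))
 *                 RETURN(PTR_ERR(op_data));
 *         rc = md_getattr(sbi->ll_md_exp, op_data, &req);
 *         ll_finish_md_op_data(op_data);
 */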
3208 struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
3209                                       struct inode *i1, struct inode *i2,
3210                                       const char *name, size_t namelen,
3211                                       __u32 mode, enum md_op_code opc,
3212                                       void *data)
3213 {
3214         struct llcrypt_name fname = { 0 };
3215         int rc;
3216
3217         LASSERT(i1 != NULL);
3218
3219         if (name == NULL) {
3220                 /* Do not reuse namelen for something else. */
3221                 if (namelen != 0)
3222                         return ERR_PTR(-EINVAL);
3223         } else {
3224                 if ((!IS_ENCRYPTED(i1) ||
3225                      (opc != LUSTRE_OPC_LOOKUP && opc != LUSTRE_OPC_CREATE)) &&
3226                     namelen > ll_i2sbi(i1)->ll_namelen)
3227                         return ERR_PTR(-ENAMETOOLONG);
3228
3229                 /* "/" is not a valid name, but it is allowed here */
3230                 if (!lu_name_is_valid_2(name, namelen) &&
3231                     strncmp("/", name, namelen) != 0)
3232                         return ERR_PTR(-EINVAL);
3233         }
3234
3235         if (op_data == NULL)
3236                 OBD_ALLOC_PTR(op_data);
3237
3238         if (op_data == NULL)
3239                 return ERR_PTR(-ENOMEM);
3240
3241         ll_i2gids(op_data->op_suppgids, i1, i2);
3242         op_data->op_fid1 = *ll_inode2fid(i1);
3243
3244         if (S_ISDIR(i1->i_mode)) {
3245                 down_read_non_owner(&ll_i2info(i1)->lli_lsm_sem);
3246                 op_data->op_mea1_sem = &ll_i2info(i1)->lli_lsm_sem;
3247                 op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
3248                 op_data->op_default_mea1 = ll_i2info(i1)->lli_default_lsm_md;
3249         }
3250
3251         if (i2) {
3252                 op_data->op_fid2 = *ll_inode2fid(i2);
3253                 if (S_ISDIR(i2->i_mode)) {
3254                         if (i2 != i1) {
3255                                 /* i2 is typically a child of i1, and MUST be
3256                                  * further from the root to avoid deadlocks.
3257                                  */
3258                                 down_read_non_owner(&ll_i2info(i2)->lli_lsm_sem);
3259                                 op_data->op_mea2_sem =
3260                                                 &ll_i2info(i2)->lli_lsm_sem;
3261                         }
3262                         op_data->op_mea2 = ll_i2info(i2)->lli_lsm_md;
3263                 }
3264         } else {
3265                 fid_zero(&op_data->op_fid2);
3266         }
3267
3268         if (test_bit(LL_SBI_64BIT_HASH, ll_i2sbi(i1)->ll_flags))
3269                 op_data->op_cli_flags |= CLI_HASH64;
3270
3271         if (ll_need_32bit_api(ll_i2sbi(i1)))
3272                 op_data->op_cli_flags |= CLI_API32;
3273
3274         if (opc == LUSTRE_OPC_LOOKUP || opc == LUSTRE_OPC_CREATE) {
3275                 /* In the lookup case, ll_setup_filename() has already been
3276                  * called in ll_lookup_it(), so just take the provided name.
3277                  */
3278                 fname.disk_name.name = (unsigned char *)name;
3279                 fname.disk_name.len = namelen;
3280         } else if (name && namelen) {
3281                 struct qstr dname = QSTR_INIT(name, namelen);
3282                 struct inode *dir;
3283                 struct lu_fid *pfid = NULL;
3284                 struct lu_fid fid;
3285                 int lookup;
3286
3287                 if (!S_ISDIR(i1->i_mode) && i2 && S_ISDIR(i2->i_mode)) {
3288                         /* special case when called from ll_link() */
3289                         dir = i2;
3290                         lookup = 0;
3291                 } else {
3292                         dir = i1;
3293                         lookup = (int)(opc == LUSTRE_OPC_ANY);
3294                 }
3295                 if (opc == LUSTRE_OPC_ANY && lookup)
3296                         pfid = &fid;
3297                 rc = ll_setup_filename(dir, &dname, lookup, &fname, pfid);
3298                 if (rc) {
3299                         ll_finish_md_op_data(op_data);
3300                         return ERR_PTR(rc);
3301                 }
3302                 if (pfid && !fid_is_zero(pfid)) {
3303                         if (i2 == NULL)
3304                                 op_data->op_fid2 = fid;
3305                         op_data->op_bias = MDS_FID_OP;
3306                 }
3307                 if (fname.disk_name.name &&
3308                     fname.disk_name.name != (unsigned char *)name)
3309                         /* op_data->op_name must be freed after use */
3310                         op_data->op_flags |= MF_OPNAME_KMALLOCED;
3311         }
3312
3313         /* In fact LUSTRE_OPC_LOOKUP and LUSTRE_OPC_OPEN
3314          * are treated as LUSTRE_OPC_ANY.
3315          */
3316         if (opc == LUSTRE_OPC_LOOKUP || opc == LUSTRE_OPC_OPEN)
3317                 op_data->op_code = LUSTRE_OPC_ANY;
3318         else
3319                 op_data->op_code = opc;
3320         op_data->op_name = fname.disk_name.name;
3321         op_data->op_namelen = fname.disk_name.len;
3322         op_data->op_mode = mode;
3323         op_data->op_mod_time = ktime_get_real_seconds();
3324         op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
3325         op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
3326         op_data->op_cap = current_cap();
3327         op_data->op_mds = 0;
3328         if ((opc == LUSTRE_OPC_CREATE) && (name != NULL) &&
3329              filename_is_volatile(name, namelen, &op_data->op_mds)) {
3330                 op_data->op_bias |= MDS_CREATE_VOLATILE;
3331         }
3332         op_data->op_data = data;
3333
3334         return op_data;
3335 }
3336
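/*
 * Release everything attached to an md_op_data by ll_prep_md_op_data():
 * LSM locks, security and encryption contexts, the (possibly kmalloc'ed)
 * name, and the structure itself.
 */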
3337 void ll_finish_md_op_data(struct md_op_data *op_data)
3338 {
3339         ll_unlock_md_op_lsm(op_data);
3340         ll_security_release_secctx(op_data->op_file_secctx,
3341                                    op_data->op_file_secctx_size);
3342         if (op_data->op_flags & MF_OPNAME_KMALLOCED)
3343                 /* allocated via ll_setup_filename called
3344                  * from ll_prep_md_op_data
3345                  */
3346                 kfree(op_data->op_name);
3347         llcrypt_free_ctx(op_data->op_file_encctx, op_data->op_file_encctx_size);
3348         OBD_FREE_PTR(op_data);
3349 }
3350
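/* Emit the active llite mount options for this superblock, as shown in /proc/mounts. */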
3351 int ll_show_options(struct seq_file *seq, struct dentry *dentry)
3352 {
3353         struct ll_sb_info *sbi;
3354         int i;
3355
3356         LASSERT(seq && dentry);
3357         sbi = ll_s2sbi(dentry->d_sb);
3358
3359         if (test_bit(LL_SBI_NOLCK, sbi->ll_flags))
3360                 seq_puts(seq, "nolock");
3361
3362         for (i = 1; ll_sbi_flags_name[i].token != LL_SBI_NUM_MOUNT_OPT; i++) {
3363                 /* match_table in some cases has patterns for both enabled and
3364                  * disabled cases. Ignore 'no'xxx versions if bit is set.
3365                  */
3366                 if (test_bit(ll_sbi_flags_name[i].token, sbi->ll_flags) &&
3367                     strncmp(ll_sbi_flags_name[i].pattern, "no", 2)) {
3368                         if (ll_sbi_flags_name[i].token ==
3369                             LL_SBI_FOREIGN_SYMLINK) {
3370                                 seq_show_option(seq, "foreign_symlink",
3371                                                 sbi->ll_foreign_symlink_prefix);
3372                         } else {
3373                                 seq_printf(seq, ",%s",
3374                                            ll_sbi_flags_name[i].pattern);
3375                         }
3376
3377                         /* You can have either localflock or flock but not
3378                          * both. If localflock is set don't print flock or
3379                          * noflock.
3380                          */
3381                         if (ll_sbi_flags_name[i].token == LL_SBI_LOCALFLOCK)
3382                                 i += 2;
3383                 } else if (!test_bit(ll_sbi_flags_name[i].token, sbi->ll_flags) &&
3384                            !strncmp(ll_sbi_flags_name[i].pattern, "no", 2)) {
3385                         seq_printf(seq, ",%s",
3386                                    ll_sbi_flags_name[i].pattern);
3387                 }
3388         }
3389
3390         RETURN(0);
3391 }
3392
3393 /**
3394  * Get the obd name selected by cmd and copy it out to user space.
3395  */
3396 int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
3397 {
3398         struct ll_sb_info *sbi = ll_i2sbi(inode);
3399         struct obd_device *obd;
3400         ENTRY;
3401
3402         if (cmd == OBD_IOC_GETNAME_OLD || cmd == OBD_IOC_GETDTNAME)
3403                 obd = class_exp2obd(sbi->ll_dt_exp);
3404         else if (cmd == OBD_IOC_GETMDNAME)
3405                 obd = class_exp2obd(sbi->ll_md_exp);
3406         else
3407                 RETURN(-EINVAL);
3408
3409         if (!obd)
3410                 RETURN(-ENOENT);
3411
3412         if (copy_to_user((void __user *)arg, obd->obd_name,
3413                          strlen(obd->obd_name) + 1))
3414                 RETURN(-EFAULT);
3415
3416         RETURN(0);
3417 }
3418
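/* Resolve a dentry to a path string (relative to the current root) for logging. */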
3419 static char *ll_d_path(struct dentry *dentry, char *buf, int bufsize)
3420 {
3421         char *path = NULL;
3422
3423         struct path p;
3424
3425         p.dentry = dentry;
3426         p.mnt = current->fs->root.mnt;
3427         path_get(&p);
3428         path = d_path(&p, buf, bufsize);
3429         path_put(&p);
3430         return path;
3431 }
3432
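/*
 * Warn that a dirty page is being discarded, including the file path when it
 * can be resolved; may be called under a spinlock, hence GFP_ATOMIC below.
 */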
3433 void ll_dirty_page_discard_warn(struct page *page, int ioret)
3434 {
3435         char *buf, *path = NULL;
3436         struct dentry *dentry = NULL;
3437         struct inode *inode = page->mapping->host;
3438
3439         /* This can be called inside a spin lock, so use GFP_ATOMIC. */
3440         buf = (char *)__get_free_page(GFP_ATOMIC);
3441         if (buf != NULL) {
3442                 dentry = d_find_alias(page->mapping->host);
3443                 if (dentry != NULL)
3444                         path = ll_d_path(dentry, buf, PAGE_SIZE);
3445         }
3446
3447         /* The below message is checked in recovery-small.sh test_24b */
3448         CDEBUG(D_WARNING,
3449                "%s: dirty page discard: %s/fid: "DFID"/%s may get corrupted "
3450                "(rc %d)\n", ll_i2sbi(inode)->ll_fsname,
3451                s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
3452                PFID(ll_inode2fid(inode)),
3453                (path && !IS_ERR(path)) ? path : "", ioret);
3454
3455         if (dentry != NULL)
3456                 dput(dentry);
3457
3458         if (buf != NULL)
3459                 free_page((unsigned long)buf);
3460 }
3461
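/*
 * Copy a lov_user_md from user space into a newly allocated kernel buffer of
 * the size implied by the lum header; returns that size or a negative errno.
 */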
3462 ssize_t ll_copy_user_md(const struct lov_user_md __user *md,
3463                         struct lov_user_md **kbuf)
3464 {
3465         struct lov_user_md      lum;
3466         ssize_t                 lum_size;
3467         ENTRY;
3468
3469         if (copy_from_user(&lum, md, sizeof(lum)))
3470                 RETURN(-EFAULT);
3471
3472         lum_size = ll_lov_user_md_size(&lum);
3473         if (lum_size < 0)
3474                 RETURN(lum_size);
3475
3476         OBD_ALLOC_LARGE(*kbuf, lum_size);
3477         if (*kbuf == NULL)
3478                 RETURN(-ENOMEM);
3479
3480         if (copy_from_user(*kbuf, md, lum_size) != 0) {
3481                 OBD_FREE_LARGE(*kbuf, lum_size);
3482                 RETURN(-EFAULT);
3483         }
3484
3485         RETURN(lum_size);
3486 }
3487
3488 /*
3489  * Compute the llite root squash state after a change of the root squash
3490  * configuration setting or the addition/removal of an LNet NID.
3491  */
3492 void ll_compute_rootsquash_state(struct ll_sb_info *sbi)
3493 {
3494         struct root_squash_info *squash = &sbi->ll_squash;
3495         int i;
3496         bool matched;
3497         struct lnet_process_id id;
3498
3499         /* Update norootsquash flag */
3500         spin_lock(&squash->rsi_lock);
3501         if (list_empty(&squash->rsi_nosquash_nids))
3502                 clear_bit(LL_SBI_NOROOTSQUASH, sbi->ll_flags);
3503         else {
3504                 /* Do not apply root squash if any of our NIDs is
3505                  * in the nosquash_nids list */
3506                 matched = false;
3507                 i = 0;
3508                 while (LNetGetId(i++, &id) != -ENOENT) {
3509                         if (id.nid == LNET_NID_LO_0)
3510                                 continue;
3511                         if (cfs_match_nid(id.nid, &squash->rsi_nosquash_nids)) {
3512                                 matched = true;
3513                                 break;
3514                         }
3515                 }
3516                 if (matched)
3517                         set_bit(LL_SBI_NOROOTSQUASH, sbi->ll_flags);
3518                 else
3519                         clear_bit(LL_SBI_NOROOTSQUASH, sbi->ll_flags);
3520         }
3521         spin_unlock(&squash->rsi_lock);
3522 }
3523
3524 /**
3525  * Parse linkea content to extract information about a given hardlink
3526  *
3527  * \param[in]   ldata      - Initialized linkea data
3528  * \param[in]   linkno     - Link identifier
3529  * \param[out]  parent_fid - The entry's parent FID
3530  * \param[out]  ln         - Entry name destination buffer
3531  *
3532  * \retval 0 on success
3533  * \retval Appropriate negative error code on failure
3534  */
3535 static int ll_linkea_decode(struct linkea_data *ldata, unsigned int linkno,
3536                             struct lu_fid *parent_fid, struct lu_name *ln)
3537 {
3538         unsigned int    idx;
3539         int             rc;
3540         ENTRY;
3541
3542         rc = linkea_init_with_rec(ldata);
3543         if (rc < 0)
3544                 RETURN(rc);
3545
3546         if (linkno >= ldata->ld_leh->leh_reccount)
3547                 /* beyond last link */
3548                 RETURN(-ENODATA);
3549
3550         linkea_first_entry(ldata);
3551         for (idx = 0; ldata->ld_lee != NULL; idx++) {
3552                 linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen, ln,
3553                                     parent_fid);
3554                 if (idx == linkno)
3555                         break;
3556
3557                 linkea_next_entry(ldata);
3558         }
3559
3560         if (idx < linkno)
3561                 RETURN(-ENODATA);
3562
3563         RETURN(0);
3564 }
3565
3566 /**
3567  * Get parent FID and name of an identified link. Operation is performed for
3568  * a given link number, letting the caller iterate over linkno to list one or
3569  * all links of an entry.
3570  *
3571  * \param[in]     file - File descriptor against which to perform the operation
3572  * \param[in,out] arg  - User-filled structure containing the linkno to operate
3573  *                       on and the available size. It is eventually filled with
3574  *                       the requested information or left untouched on error
3575  *
3576  * \retval - 0 on success
3577  * \retval - Appropriate negative error code on failure
3578  */
3579 int ll_getparent(struct file *file, struct getparent __user *arg)
3580 {
3581         struct inode            *inode = file_inode(file);
3582         struct linkea_data      *ldata;
3583         struct lu_buf            buf = LU_BUF_NULL;
3584         struct lu_name           ln;
3585         struct lu_fid            parent_fid;
3586         __u32                    linkno;
3587         __u32                    name_size;
3588         int                      rc;
3589
3590         ENTRY;
3591
3592         if (!capable(CAP_DAC_READ_SEARCH) &&
3593             !test_bit(LL_SBI_USER_FID2PATH, ll_i2sbi(inode)->ll_flags))
3594                 RETURN(-EPERM);
3595
3596         if (get_user(name_size, &arg->gp_name_size))
3597                 RETURN(-EFAULT);
3598
3599         if (get_user(linkno, &arg->gp_linkno))
3600                 RETURN(-EFAULT);
3601
3602         if (name_size > PATH_MAX)
3603                 RETURN(-EINVAL);
3604
3605         OBD_ALLOC(ldata, sizeof(*ldata));
3606         if (ldata == NULL)
3607                 RETURN(-ENOMEM);
3608
3609         rc = linkea_data_new(ldata, &buf);
3610         if (rc < 0)
3611                 GOTO(ldata_free, rc);
3612
3613         rc = ll_xattr_list(inode, XATTR_NAME_LINK, XATTR_TRUSTED_T, buf.lb_buf,
3614                            buf.lb_len, OBD_MD_FLXATTR);
3615         if (rc < 0)
3616                 GOTO(lb_free, rc);
3617
3618         rc = ll_linkea_decode(ldata, linkno, &parent_fid, &ln);
3619         if (rc < 0)
3620                 GOTO(lb_free, rc);
3621
3622         if (ln.ln_namelen >= name_size)
3623                 GOTO(lb_free, rc = -EOVERFLOW);
3624
3625         if (copy_to_user(&arg->gp_fid, &parent_fid, sizeof(arg->gp_fid)))
3626                 GOTO(lb_free, rc = -EFAULT);
3627
3628         if (copy_to_user(&arg->gp_name, ln.ln_name, ln.ln_namelen))
3629                 GOTO(lb_free, rc = -EFAULT);
3630
3631         if (put_user('\0', arg->gp_name + ln.ln_namelen))
3632                 GOTO(lb_free, rc = -EFAULT);
3633
3634 lb_free:
3635         lu_buf_free(&buf);
3636 ldata_free:
3637         OBD_FREE(ldata, sizeof(*ldata));
3638
3639         RETURN(rc);
3640 }