1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lustre/llite/llite_lib.c
32  *
33  * Lustre Light Super operations
34  */
35
36 #define DEBUG_SUBSYSTEM S_LLITE
37
38 #include <linux/cpu.h>
39 #include <linux/module.h>
40 #include <linux/random.h>
41 #include <linux/statfs.h>
42 #include <linux/time.h>
43 #include <linux/types.h>
44 #include <libcfs/linux/linux-uuid.h>
45 #include <linux/version.h>
46 #include <linux/mm.h>
47 #include <linux/user_namespace.h>
48 #include <linux/delay.h>
49 #include <linux/uidgid.h>
50 #include <linux/fs_struct.h>
51
52 #ifndef HAVE_CPUS_READ_LOCK
53 #include <libcfs/linux/linux-cpu.h>
54 #endif
55 #include <libcfs/linux/linux-misc.h>
56 #include <uapi/linux/lustre/lustre_ioctl.h>
57 #ifdef HAVE_UAPI_LINUX_MOUNT_H
58 #include <uapi/linux/mount.h>
59 #endif
60
61 #include <lustre_ha.h>
62 #include <lustre_dlm.h>
63 #include <lprocfs_status.h>
64 #include <lustre_disk.h>
65 #include <uapi/linux/lustre/lustre_param.h>
66 #include <lustre_log.h>
67 #include <cl_object.h>
68 #include <obd_cksum.h>
69 #include "llite_internal.h"
70
71 struct kmem_cache *ll_file_data_slab;
72
73 #ifndef log2
74 #define log2(n) ffz(~(n))
75 #endif
76
77 /**
78  * If only a single core is visible to Lustre, async readahead is
79  * disabled to avoid massive oversubscription.  Otherwise, use 1/2 of
80  * the active cores as the default maximum number of async readahead
81  * requests.
82  */
83 static inline unsigned int ll_get_ra_async_max_active(void)
84 {
85         return cfs_cpt_weight(cfs_cpt_tab, CFS_CPT_ANY) >> 1;
86 }
87
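/*
 * Allocate and initialize the client superblock info (ll_sb_info):
 * locks, PCC state, the readahead limits and workqueue, the LRU page
 * cache, foreign symlink defaults, statahead, root-squash, file heat
 * and open-cache settings.  Returns the new sbi or an ERR_PTR().
 */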
88 static struct ll_sb_info *ll_init_sbi(void)
89 {
90         struct ll_sb_info *sbi = NULL;
91         unsigned long pages;
92         unsigned long lru_page_max;
93         struct sysinfo si;
94         int rc;
95         int i;
96
97         ENTRY;
98
99         OBD_ALLOC_PTR(sbi);
100         if (sbi == NULL)
101                 RETURN(ERR_PTR(-ENOMEM));
102
103         rc = pcc_super_init(&sbi->ll_pcc_super);
104         if (rc < 0)
105                 GOTO(out_sbi, rc);
106
107         spin_lock_init(&sbi->ll_lock);
108         mutex_init(&sbi->ll_lco.lco_lock);
109         spin_lock_init(&sbi->ll_pp_extent_lock);
110         spin_lock_init(&sbi->ll_process_lock);
111         sbi->ll_rw_stats_on = 0;
112         sbi->ll_statfs_max_age = OBD_STATFS_CACHE_SECONDS;
113
114         si_meminfo(&si);
115         pages = si.totalram - si.totalhigh;
116         lru_page_max = pages / 2;
117
118         sbi->ll_ra_info.ra_async_max_active = ll_get_ra_async_max_active();
119         sbi->ll_ra_info.ll_readahead_wq =
120                 cfs_cpt_bind_workqueue("ll-readahead-wq", cfs_cpt_tab,
121                                        0, CFS_CPT_ANY,
122                                        sbi->ll_ra_info.ra_async_max_active);
123         if (IS_ERR(sbi->ll_ra_info.ll_readahead_wq))
124                 GOTO(out_pcc, rc = PTR_ERR(sbi->ll_ra_info.ll_readahead_wq));
125
126         /* initialize ll_cache data */
127         sbi->ll_cache = cl_cache_init(lru_page_max);
128         if (sbi->ll_cache == NULL)
129                 GOTO(out_destroy_ra, rc = -ENOMEM);
130
131         /* initialize foreign symlink prefix path */
132         OBD_ALLOC(sbi->ll_foreign_symlink_prefix, sizeof("/mnt/"));
133         if (sbi->ll_foreign_symlink_prefix == NULL)
134                 GOTO(out_destroy_ra, rc = -ENOMEM);
135         memcpy(sbi->ll_foreign_symlink_prefix, "/mnt/", sizeof("/mnt/"));
136         sbi->ll_foreign_symlink_prefix_size = sizeof("/mnt/");
137
138         /* initialize foreign symlink upcall path, none by default */
139         OBD_ALLOC(sbi->ll_foreign_symlink_upcall, sizeof("none"));
140         if (sbi->ll_foreign_symlink_upcall == NULL)
141                 GOTO(out_destroy_ra, rc = -ENOMEM);
142         memcpy(sbi->ll_foreign_symlink_upcall, "none", sizeof("none"));
143         sbi->ll_foreign_symlink_upcall_items = NULL;
144         sbi->ll_foreign_symlink_upcall_nb_items = 0;
145         init_rwsem(&sbi->ll_foreign_symlink_sem);
146         /* foreign symlink support (LL_SBI_FOREIGN_SYMLINK in ll_flags)
147          * not enabled by default
148          */
149
150         sbi->ll_ra_info.ra_max_pages =
151                 min(pages / 32, SBI_DEFAULT_READ_AHEAD_MAX);
152         sbi->ll_ra_info.ra_max_pages_per_file =
153                 min(sbi->ll_ra_info.ra_max_pages / 4,
154                     SBI_DEFAULT_READ_AHEAD_PER_FILE_MAX);
155         sbi->ll_ra_info.ra_async_pages_per_file_threshold =
156                                 sbi->ll_ra_info.ra_max_pages_per_file;
157         sbi->ll_ra_info.ra_range_pages = SBI_DEFAULT_RA_RANGE_PAGES;
158         sbi->ll_ra_info.ra_max_read_ahead_whole_pages = -1;
159         atomic_set(&sbi->ll_ra_info.ra_async_inflight, 0);
160
161         set_bit(LL_SBI_VERBOSE, sbi->ll_flags);
162 #ifdef ENABLE_CHECKSUM
163         set_bit(LL_SBI_CHECKSUM, sbi->ll_flags);
164 #endif
165 #ifdef ENABLE_FLOCK
166         set_bit(LL_SBI_FLOCK, sbi->ll_flags);
167 #endif
168
169 #ifdef HAVE_LRU_RESIZE_SUPPORT
170         set_bit(LL_SBI_LRU_RESIZE, sbi->ll_flags);
171 #endif
172         set_bit(LL_SBI_LAZYSTATFS, sbi->ll_flags);
173
174         for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
175                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
176                                pp_r_hist.oh_lock);
177                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
178                                pp_w_hist.oh_lock);
179         }
180
181         /* metadata statahead is enabled by default */
182         sbi->ll_sa_running_max = LL_SA_RUNNING_DEF;
183         sbi->ll_sa_max = LL_SA_RPC_DEF;
184         atomic_set(&sbi->ll_sa_total, 0);
185         atomic_set(&sbi->ll_sa_wrong, 0);
186         atomic_set(&sbi->ll_sa_running, 0);
187         atomic_set(&sbi->ll_agl_total, 0);
188         set_bit(LL_SBI_AGL_ENABLED, sbi->ll_flags);
189         set_bit(LL_SBI_FAST_READ, sbi->ll_flags);
190         set_bit(LL_SBI_TINY_WRITE, sbi->ll_flags);
191         set_bit(LL_SBI_PARALLEL_DIO, sbi->ll_flags);
192         ll_sbi_set_encrypt(sbi, true);
193
194         /* root squash */
195         sbi->ll_squash.rsi_uid = 0;
196         sbi->ll_squash.rsi_gid = 0;
197         INIT_LIST_HEAD(&sbi->ll_squash.rsi_nosquash_nids);
198         spin_lock_init(&sbi->ll_squash.rsi_lock);
199
200         /* Per-filesystem file heat */
201         sbi->ll_heat_decay_weight = SBI_DEFAULT_HEAT_DECAY_WEIGHT;
202         sbi->ll_heat_period_second = SBI_DEFAULT_HEAT_PERIOD_SECOND;
203
204         /* Per-fs open heat level before requesting open lock */
205         sbi->ll_oc_thrsh_count = SBI_DEFAULT_OPENCACHE_THRESHOLD_COUNT;
206         sbi->ll_oc_max_ms = SBI_DEFAULT_OPENCACHE_THRESHOLD_MAX_MS;
207         sbi->ll_oc_thrsh_ms = SBI_DEFAULT_OPENCACHE_THRESHOLD_MS;
208         RETURN(sbi);
209 out_destroy_ra:
210         if (sbi->ll_foreign_symlink_prefix)
211                 OBD_FREE(sbi->ll_foreign_symlink_prefix, sizeof("/mnt/"));
212         if (sbi->ll_cache) {
213                 cl_cache_decref(sbi->ll_cache);
214                 sbi->ll_cache = NULL;
215         }
216         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
217 out_pcc:
218         pcc_super_fini(&sbi->ll_pcc_super);
219 out_sbi:
220         OBD_FREE_PTR(sbi);
221         RETURN(ERR_PTR(rc));
222 }
223
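/*
 * Tear down the ll_sb_info created by ll_init_sbi(): free the
 * nosquash NID list, readahead workqueue, LRU cache reference,
 * foreign symlink buffers and PCC state, then the sbi itself.
 */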
224 static void ll_free_sbi(struct super_block *sb)
225 {
226         struct ll_sb_info *sbi = ll_s2sbi(sb);
227         ENTRY;
228
229         if (sbi != NULL) {
230                 if (!list_empty(&sbi->ll_squash.rsi_nosquash_nids))
231                         cfs_free_nidlist(&sbi->ll_squash.rsi_nosquash_nids);
232                 if (sbi->ll_ra_info.ll_readahead_wq)
233                         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
234                 if (sbi->ll_cache != NULL) {
235                         cl_cache_decref(sbi->ll_cache);
236                         sbi->ll_cache = NULL;
237                 }
238                 if (sbi->ll_foreign_symlink_prefix) {
239                         OBD_FREE(sbi->ll_foreign_symlink_prefix,
240                                  sbi->ll_foreign_symlink_prefix_size);
241                         sbi->ll_foreign_symlink_prefix = NULL;
242                 }
243                 if (sbi->ll_foreign_symlink_upcall) {
244                         OBD_FREE(sbi->ll_foreign_symlink_upcall,
245                                  strlen(sbi->ll_foreign_symlink_upcall) +
246                                        1);
247                         sbi->ll_foreign_symlink_upcall = NULL;
248                 }
249                 if (sbi->ll_foreign_symlink_upcall_items) {
250                         int i;
251                         int nb_items = sbi->ll_foreign_symlink_upcall_nb_items;
252                         struct ll_foreign_symlink_upcall_item *items =
253                                 sbi->ll_foreign_symlink_upcall_items;
254
255                         for (i = 0 ; i < nb_items; i++)
256                                 if (items[i].type == STRING_TYPE)
257                                         OBD_FREE(items[i].string,
258                                                        items[i].size);
259
260                         OBD_FREE_LARGE(items, nb_items *
261                                 sizeof(struct ll_foreign_symlink_upcall_item));
262                         sbi->ll_foreign_symlink_upcall_items = NULL;
263                 }
264                 pcc_super_fini(&sbi->ll_pcc_super);
265                 OBD_FREE(sbi, sizeof(*sbi));
266         }
267         EXIT;
268 }
269
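/*
 * Connect this mount to the metadata ("md") and data ("dt") obds named
 * in the config log, negotiate the connect flags with both, fetch the
 * root FID and root inode, and finish the generic superblock setup
 * (s_op, s_xattr, s_root, sysfs links).
 */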
270 static int client_common_fill_super(struct super_block *sb, char *md, char *dt)
271 {
272         struct inode *root = NULL;
273         struct ll_sb_info *sbi = ll_s2sbi(sb);
274         struct obd_statfs *osfs = NULL;
275         struct ptlrpc_request *request = NULL;
276         struct obd_connect_data *data = NULL;
277         struct obd_uuid *uuid;
278         struct md_op_data *op_data;
279         struct lustre_md lmd;
280         u64 valid;
281         int size, err, checksum;
282         bool api32;
283
284         ENTRY;
285         sbi->ll_md_obd = class_name2obd(md);
286         if (!sbi->ll_md_obd) {
287                 CERROR("MD %s: not setup or attached\n", md);
288                 RETURN(-EINVAL);
289         }
290
291         OBD_ALLOC_PTR(data);
292         if (data == NULL)
293                 RETURN(-ENOMEM);
294
295         OBD_ALLOC_PTR(osfs);
296         if (osfs == NULL) {
297                 OBD_FREE_PTR(data);
298                 RETURN(-ENOMEM);
299         }
300
301         /* pass the client page size via ocd_grant_blkbits; the server should
302          * report back its backend blocksize for grant calculation purposes */
303         data->ocd_grant_blkbits = PAGE_SHIFT;
304
305         /* indicate MDT features supported by this client */
306         data->ocd_connect_flags = OBD_CONNECT_IBITS    | OBD_CONNECT_NODEVOH  |
307                                   OBD_CONNECT_ATTRFID  | OBD_CONNECT_GRANT |
308                                   OBD_CONNECT_VERSION  | OBD_CONNECT_BRW_SIZE |
309                                   OBD_CONNECT_SRVLOCK  |
310                                   OBD_CONNECT_MDS_CAPA | OBD_CONNECT_OSS_CAPA |
311                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID     |
312                                   OBD_CONNECT_AT       | OBD_CONNECT_LOV_V3   |
313                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
314                                   OBD_CONNECT_64BITHASH |
315                                   OBD_CONNECT_EINPROGRESS |
316                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
317                                   OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS|
318                                   OBD_CONNECT_MAX_EASIZE |
319                                   OBD_CONNECT_FLOCK_DEAD |
320                                   OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK |
321                                   OBD_CONNECT_OPEN_BY_FID |
322                                   OBD_CONNECT_DIR_STRIPE |
323                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_CKSUM |
324                                   OBD_CONNECT_SUBTREE |
325                                   OBD_CONNECT_MULTIMODRPCS |
326                                   OBD_CONNECT_GRANT_PARAM |
327                                   OBD_CONNECT_GRANT_SHRINK |
328                                   OBD_CONNECT_SHORTIO | OBD_CONNECT_FLAGS2;
329
330         data->ocd_connect_flags2 = OBD_CONNECT2_DIR_MIGRATE |
331                                    OBD_CONNECT2_SUM_STATFS |
332                                    OBD_CONNECT2_OVERSTRIPING |
333                                    OBD_CONNECT2_FLR |
334                                    OBD_CONNECT2_LOCK_CONVERT |
335                                    OBD_CONNECT2_ARCHIVE_ID_ARRAY |
336                                    OBD_CONNECT2_INC_XID |
337                                    OBD_CONNECT2_LSOM |
338                                    OBD_CONNECT2_ASYNC_DISCARD |
339                                    OBD_CONNECT2_PCC |
340                                    OBD_CONNECT2_CRUSH | OBD_CONNECT2_LSEEK |
341                                    OBD_CONNECT2_GETATTR_PFID |
342                                    OBD_CONNECT2_DOM_LVB |
343                                    OBD_CONNECT2_REP_MBITS |
344                                    OBD_CONNECT2_ATOMIC_OPEN_LOCK;
345
346 #ifdef HAVE_LRU_RESIZE_SUPPORT
347         if (test_bit(LL_SBI_LRU_RESIZE, sbi->ll_flags))
348                 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
349 #endif
350         data->ocd_connect_flags |= OBD_CONNECT_ACL_FLAGS;
351
352         data->ocd_cksum_types = obd_cksum_types_supported_client();
353
354         if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
355                 /* flag the MDC connection as lightweight; only used for test
356                  * purposes, use with care */
357                 data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
358
359         data->ocd_ibits_known = MDS_INODELOCK_FULL;
360         data->ocd_version = LUSTRE_VERSION_CODE;
361
362         if (sb->s_flags & SB_RDONLY)
363                 data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
364         if (test_bit(LL_SBI_USER_XATTR, sbi->ll_flags))
365                 data->ocd_connect_flags |= OBD_CONNECT_XATTR;
366
367 #ifdef SB_NOSEC
368         /* Setting this indicates we correctly support S_NOSEC (See kernel
369          * commit 9e1f1de02c2275d7172e18dc4e7c2065777611bf)
370          */
371         sb->s_flags |= SB_NOSEC;
372 #endif
373         sbi->ll_fop = ll_select_file_operations(sbi);
374
375         /* always ping even if server suppress_pings */
376         if (test_bit(LL_SBI_ALWAYS_PING, sbi->ll_flags))
377                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
378
379         obd_connect_set_secctx(data);
380         if (ll_sbi_has_encrypt(sbi))
381                 obd_connect_set_enc(data);
382
383 #if defined(CONFIG_SECURITY)
384         data->ocd_connect_flags2 |= OBD_CONNECT2_SELINUX_POLICY;
385 #endif
386
387         data->ocd_brw_size = MD_MAX_BRW_SIZE;
388
389         err = obd_connect(NULL, &sbi->ll_md_exp, sbi->ll_md_obd,
390                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
391         if (err == -EBUSY) {
392                 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
393                                    "recovery, of which this client is not a "
394                                    "part. Please wait for recovery to complete,"
395                                    " abort, or time out.\n", md);
396                 GOTO(out, err);
397         } else if (err) {
398                 CERROR("cannot connect to %s: rc = %d\n", md, err);
399                 GOTO(out, err);
400         }
401
402         sbi->ll_md_exp->exp_connect_data = *data;
403
404         err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
405                            LUSTRE_SEQ_METADATA);
406         if (err) {
407                 CERROR("%s: Can't init metadata layer FID infrastructure, "
408                        "rc = %d\n", sbi->ll_md_exp->exp_obd->obd_name, err);
409                 GOTO(out_md, err);
410         }
411
412         /* For mount, we only need fs info from MDT0, and in DNE this also
413          * ensures the client can be mounted as long as MDT0 is
414          * available */
415         err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
416                         ktime_get_seconds() - sbi->ll_statfs_max_age,
417                         OBD_STATFS_FOR_MDT0);
418         if (err)
419                 GOTO(out_md_fid, err);
420
421         /* This needs to be after statfs to ensure connect has finished.
422          * Note that "data" does NOT contain the valid connect reply.
423          * If connecting to a 1.8 server there will be no LMV device, so
424          * we can access the MDC export directly and exp_connect_flags will
425          * be non-zero, but if accessing an upgraded 2.1 server it will
426          * have the correct flags filled in.
427          * XXX: fill in the LMV exp_connect_flags from MDC(s). */
428         valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
429         if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
430             valid != CLIENT_CONNECT_MDT_REQD) {
431                 char *buf;
432
433                 OBD_ALLOC_WAIT(buf, PAGE_SIZE);
434                 obd_connect_flags2str(buf, PAGE_SIZE,
435                                       valid ^ CLIENT_CONNECT_MDT_REQD, 0, ",");
436                 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support "
437                                    "feature(s) needed for correct operation "
438                                    "of this client (%s). Please upgrade "
439                                    "server or downgrade client.\n",
440                                    sbi->ll_md_exp->exp_obd->obd_name, buf);
441                 OBD_FREE(buf, PAGE_SIZE);
442                 GOTO(out_md_fid, err = -EPROTO);
443         }
444
445         size = sizeof(*data);
446         err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
447                            KEY_CONN_DATA,  &size, data);
448         if (err) {
449                 CERROR("%s: Get connect data failed: rc = %d\n",
450                        sbi->ll_md_exp->exp_obd->obd_name, err);
451                 GOTO(out_md_fid, err);
452         }
453
454         LASSERT(osfs->os_bsize);
455         sb->s_blocksize = osfs->os_bsize;
456         sb->s_blocksize_bits = log2(osfs->os_bsize);
457         sb->s_magic = LL_SUPER_MAGIC;
458         sb->s_maxbytes = MAX_LFS_FILESIZE;
459         sbi->ll_namelen = osfs->os_namelen;
460         sbi->ll_mnt.mnt = current->fs->root.mnt;
461
462         if (test_bit(LL_SBI_USER_XATTR, sbi->ll_flags) &&
463             !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
464                 LCONSOLE_INFO("Disabling user_xattr feature because "
465                               "it is not supported on the server\n");
466                 clear_bit(LL_SBI_USER_XATTR, sbi->ll_flags);
467         }
468
469         if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
470 #ifdef SB_POSIXACL
471                 sb->s_flags |= SB_POSIXACL;
472 #endif
473                 set_bit(LL_SBI_ACL, sbi->ll_flags);
474         } else {
475                 LCONSOLE_INFO("client wants to enable ACL, but the MDT does not!\n");
476 #ifdef SB_POSIXACL
477                 sb->s_flags &= ~SB_POSIXACL;
478 #endif
479                 clear_bit(LL_SBI_ACL, sbi->ll_flags);
480         }
481
482         if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
483                 set_bit(LL_SBI_64BIT_HASH, sbi->ll_flags);
484
485         if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK)
486                 set_bit(LL_SBI_LAYOUT_LOCK, sbi->ll_flags);
487
488         if (obd_connect_has_secctx(data))
489                 set_bit(LL_SBI_FILE_SECCTX, sbi->ll_flags);
490
491         if (ll_sbi_has_encrypt(sbi) && !obd_connect_has_enc(data)) {
492                 if (ll_sbi_has_test_dummy_encryption(sbi))
493                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
494                                       sbi->ll_fsname,
495                                       sbi->ll_md_exp->exp_obd->obd_name);
496                 ll_sbi_set_encrypt(sbi, false);
497         }
498
499         if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
500                 if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
501                         LCONSOLE_INFO("%s: disabling xattr cache due to "
502                                       "unknown maximum xattr size.\n", dt);
503                 } else if (!sbi->ll_xattr_cache_set) {
504                         /* If xattr_cache was already set (whether to 0 or 1)
505                          * while processing the llog, it won't be enabled here. */
506                         set_bit(LL_SBI_XATTR_CACHE, sbi->ll_flags);
507                         sbi->ll_xattr_cache_enabled = 1;
508                 }
509         }
510
511         sbi->ll_dt_obd = class_name2obd(dt);
512         if (!sbi->ll_dt_obd) {
513                 CERROR("DT %s: not setup or attached\n", dt);
514                 GOTO(out_md_fid, err = -ENODEV);
515         }
516
517         /* pass the client page size via ocd_grant_blkbits; the server should
518          * report back its backend blocksize for grant calculation purposes */
519         data->ocd_grant_blkbits = PAGE_SHIFT;
520
521         /* indicate OST features supported by this client */
522         data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
523                                   OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
524                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
525                                   OBD_CONNECT_SRVLOCK |
526                                   OBD_CONNECT_AT | OBD_CONNECT_OSS_CAPA |
527                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
528                                   OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
529                                   OBD_CONNECT_EINPROGRESS |
530                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
531                                   OBD_CONNECT_LAYOUTLOCK |
532                                   OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK |
533                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_SHORTIO |
534                                   OBD_CONNECT_FLAGS2 | OBD_CONNECT_GRANT_SHRINK;
535         data->ocd_connect_flags2 = OBD_CONNECT2_LOCKAHEAD |
536                                    OBD_CONNECT2_INC_XID | OBD_CONNECT2_LSEEK |
537                                    OBD_CONNECT2_REP_MBITS;
538
539         if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_GRANT_PARAM))
540                 data->ocd_connect_flags |= OBD_CONNECT_GRANT_PARAM;
541
542         /* OBD_CONNECT_CKSUM should always be set, even if checksums are
543          * disabled by default, because it can still be enabled on the
544          * fly via /sys. As a consequence, we still need to come to an
545          * agreement on the supported algorithms at connect time
546          */
547         data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
548
549         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
550                 data->ocd_cksum_types = OBD_CKSUM_ADLER;
551         else
552                 data->ocd_cksum_types = obd_cksum_types_supported_client();
553
554 #ifdef HAVE_LRU_RESIZE_SUPPORT
555         data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
556 #endif
557         /* always ping even if server suppress_pings */
558         if (test_bit(LL_SBI_ALWAYS_PING, sbi->ll_flags))
559                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
560
561         if (ll_sbi_has_encrypt(sbi))
562                 obd_connect_set_enc(data);
563
564         CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d "
565                "ocd_grant: %d\n", data->ocd_connect_flags,
566                data->ocd_version, data->ocd_grant);
567
568         sbi->ll_dt_obd->obd_upcall.onu_owner = &sbi->ll_lco;
569         sbi->ll_dt_obd->obd_upcall.onu_upcall = cl_ocd_update;
570
571         data->ocd_brw_size = DT_MAX_BRW_SIZE;
572
573         err = obd_connect(NULL, &sbi->ll_dt_exp, sbi->ll_dt_obd,
574                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
575         if (err == -EBUSY) {
576                 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
577                                    "recovery, of which this client is not a "
578                                    "part.  Please wait for recovery to "
579                                    "complete, abort, or time out.\n", dt);
580                 GOTO(out_md, err);
581         } else if (err) {
582                 CERROR("%s: Cannot connect to %s: rc = %d\n",
583                        sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
584                 GOTO(out_md, err);
585         }
586
587         if (ll_sbi_has_encrypt(sbi) &&
588             !obd_connect_has_enc(&sbi->ll_dt_obd->u.lov.lov_ocd)) {
589                 if (ll_sbi_has_test_dummy_encryption(sbi))
590                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
591                                       sbi->ll_fsname, dt);
592                 ll_sbi_set_encrypt(sbi, false);
593         } else if (ll_sbi_has_test_dummy_encryption(sbi)) {
594                 LCONSOLE_WARN("Test dummy encryption mode enabled\n");
595         }
596
597         sbi->ll_dt_exp->exp_connect_data = *data;
598
599         /* Don't change value if it was specified in the config log */
600         if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages == -1) {
601                 sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
602                         max_t(unsigned long, SBI_DEFAULT_READ_AHEAD_WHOLE_MAX,
603                               (data->ocd_brw_size >> PAGE_SHIFT));
604                 if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages >
605                     sbi->ll_ra_info.ra_max_pages_per_file)
606                         sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
607                                 sbi->ll_ra_info.ra_max_pages_per_file;
608         }
609
610         err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
611                            LUSTRE_SEQ_METADATA);
612         if (err) {
613                 CERROR("%s: Can't init data layer FID infrastructure, "
614                        "rc = %d\n", sbi->ll_dt_exp->exp_obd->obd_name, err);
615                 GOTO(out_dt, err);
616         }
617
618         mutex_lock(&sbi->ll_lco.lco_lock);
619         sbi->ll_lco.lco_flags = data->ocd_connect_flags;
620         sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
621         sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
622         mutex_unlock(&sbi->ll_lco.lco_lock);
623
624         fid_zero(&sbi->ll_root_fid);
625         err = md_get_root(sbi->ll_md_exp, get_mount_fileset(sb),
626                            &sbi->ll_root_fid);
627         if (err) {
628                 CERROR("cannot mds_connect: rc = %d\n", err);
629                 GOTO(out_lock_cn_cb, err);
630         }
631         if (!fid_is_sane(&sbi->ll_root_fid)) {
632                 CERROR("%s: Invalid root fid "DFID" during mount\n",
633                        sbi->ll_md_exp->exp_obd->obd_name,
634                        PFID(&sbi->ll_root_fid));
635                 GOTO(out_lock_cn_cb, err = -EINVAL);
636         }
637         CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
638
639         sb->s_op = &lustre_super_operations;
640         sb->s_xattr = ll_xattr_handlers;
641 #if THREAD_SIZE >= 8192 /*b=17630*/
642         sb->s_export_op = &lustre_export_operations;
643 #endif
644 #ifdef HAVE_LUSTRE_CRYPTO
645         llcrypt_set_ops(sb, &lustre_cryptops);
646 #endif
647
648         /* make root inode
649          * XXX: move this to after cbd setup? */
650         valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE;
651         if (test_bit(LL_SBI_ACL, sbi->ll_flags))
652                 valid |= OBD_MD_FLACL;
653
654         OBD_ALLOC_PTR(op_data);
655         if (op_data == NULL)
656                 GOTO(out_lock_cn_cb, err = -ENOMEM);
657
658         op_data->op_fid1 = sbi->ll_root_fid;
659         op_data->op_mode = 0;
660         op_data->op_valid = valid;
661
662         err = md_getattr(sbi->ll_md_exp, op_data, &request);
663
664         OBD_FREE_PTR(op_data);
665         if (err) {
666                 CERROR("%s: md_getattr failed for root: rc = %d\n",
667                        sbi->ll_md_exp->exp_obd->obd_name, err);
668                 GOTO(out_lock_cn_cb, err);
669         }
670
671         err = md_get_lustre_md(sbi->ll_md_exp, &request->rq_pill,
672                                sbi->ll_dt_exp, sbi->ll_md_exp, &lmd);
673         if (err) {
674                 CERROR("failed to understand root inode md: rc = %d\n", err);
675                 ptlrpc_req_finished(request);
676                 GOTO(out_lock_cn_cb, err);
677         }
678
679         LASSERT(fid_is_sane(&sbi->ll_root_fid));
680         api32 = test_bit(LL_SBI_32BIT_API, sbi->ll_flags);
681         root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid, api32), &lmd);
682         md_free_lustre_md(sbi->ll_md_exp, &lmd);
683         ptlrpc_req_finished(request);
684
685         if (IS_ERR(root)) {
686                 lmd_clear_acl(&lmd);
687                 err = IS_ERR(root) ? PTR_ERR(root) : -EBADF;
688                 root = NULL;
689                 CERROR("%s: bad ll_iget() for root: rc = %d\n",
690                        sbi->ll_fsname, err);
691                 GOTO(out_root, err);
692         }
693
694         checksum = test_bit(LL_SBI_CHECKSUM, sbi->ll_flags);
695         if (sbi->ll_checksum_set) {
696                 err = obd_set_info_async(NULL, sbi->ll_dt_exp,
697                                          sizeof(KEY_CHECKSUM), KEY_CHECKSUM,
698                                          sizeof(checksum), &checksum, NULL);
699                 if (err) {
700                         CERROR("%s: Set checksum failed: rc = %d\n",
701                                sbi->ll_dt_exp->exp_obd->obd_name, err);
702                         GOTO(out_root, err);
703                 }
704         }
705         cl_sb_init(sb);
706
707         sb->s_root = d_make_root(root);
708         if (sb->s_root == NULL) {
709                 err = -ENOMEM;
710                 CERROR("%s: can't make root dentry: rc = %d\n",
711                        sbi->ll_fsname, err);
712                 GOTO(out_root, err);
713         }
714
715         sbi->ll_sdev_orig = sb->s_dev;
716
717         /* We set sb->s_dev equal on all lustre clients in order to support
718          * NFS export clustering.  NFSD requires that the FSID be the same
719          * on all clients. */
720         /* s_dev is also used in lt_compare() to compare two fs, but that is
721          * only a node-local comparison. */
722         uuid = obd_get_uuid(sbi->ll_md_exp);
723         if (uuid != NULL)
724                 sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
725
726         if (data != NULL)
727                 OBD_FREE_PTR(data);
728         if (osfs != NULL)
729                 OBD_FREE_PTR(osfs);
730
731         if (sbi->ll_dt_obd) {
732                 err = sysfs_create_link(&sbi->ll_kset.kobj,
733                                         &sbi->ll_dt_obd->obd_kset.kobj,
734                                         sbi->ll_dt_obd->obd_type->typ_name);
735                 if (err < 0) {
736                         CERROR("%s: could not register %s in llite: rc = %d\n",
737                                dt, sbi->ll_fsname, err);
738                         err = 0;
739                 }
740         }
741
742         if (sbi->ll_md_obd) {
743                 err = sysfs_create_link(&sbi->ll_kset.kobj,
744                                         &sbi->ll_md_obd->obd_kset.kobj,
745                                         sbi->ll_md_obd->obd_type->typ_name);
746                 if (err < 0) {
747                         CERROR("%s: could not register %s in llite: rc = %d\n",
748                                md, sbi->ll_fsname, err);
749                         err = 0;
750                 }
751         }
752
753         RETURN(err);
754 out_root:
755         iput(root);
756 out_lock_cn_cb:
757         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
758 out_dt:
759         obd_disconnect(sbi->ll_dt_exp);
760         sbi->ll_dt_exp = NULL;
761         sbi->ll_dt_obd = NULL;
762 out_md_fid:
763         obd_fid_fini(sbi->ll_md_exp->exp_obd);
764 out_md:
765         obd_disconnect(sbi->ll_md_exp);
766         sbi->ll_md_exp = NULL;
767         sbi->ll_md_obd = NULL;
768 out:
769         if (data != NULL)
770                 OBD_FREE_PTR(data);
771         if (osfs != NULL)
772                 OBD_FREE_PTR(osfs);
773         return err;
774 }
775
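/*
 * Fetch the maximum LOV EA size from the data export and the maximum
 * LMV EA size from the metadata export.  Both are written to *lmmsize,
 * so on success it holds the value reported by the MDC.
 */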
776 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
777 {
778         int size, rc;
779
780         size = sizeof(*lmmsize);
781         rc = obd_get_info(NULL, sbi->ll_dt_exp, sizeof(KEY_MAX_EASIZE),
782                           KEY_MAX_EASIZE, &size, lmmsize);
783         if (rc != 0) {
784                 CERROR("%s: cannot get max LOV EA size: rc = %d\n",
785                        sbi->ll_dt_exp->exp_obd->obd_name, rc);
786                 RETURN(rc);
787         }
788
789         CDEBUG(D_INFO, "max LOV ea size: %d\n", *lmmsize);
790
791         size = sizeof(int);
792         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
793                           KEY_MAX_EASIZE, &size, lmmsize);
794         if (rc)
795                 CERROR("Get max mdsize error rc %d\n", rc);
796
797         CDEBUG(D_INFO, "max LMV ea size: %d\n", *lmmsize);
798
799         RETURN(rc);
800 }
801
802 /**
803  * Get the value of the default_easize parameter.
804  *
805  * \see client_obd::cl_default_mds_easize
806  *
807  * \param[in] sbi       superblock info for this filesystem
808  * \param[out] lmmsize  pointer to storage location for value
809  *
810  * \retval 0            on success
811  * \retval negative     negated errno on failure
812  */
813 int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
814 {
815         int size, rc;
816
817         size = sizeof(int);
818         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
819                          KEY_DEFAULT_EASIZE, &size, lmmsize);
820         if (rc)
821                 CERROR("Get default mdsize error rc %d\n", rc);
822
823         RETURN(rc);
824 }
825
826 /**
827  * Set the default_easize parameter to the given value.
828  *
829  * \see client_obd::cl_default_mds_easize
830  *
831  * \param[in] sbi       superblock info for this filesystem
832  * \param[in] lmmsize   the size to set
833  *
834  * \retval 0            on success
835  * \retval negative     negated errno on failure
836  */
837 int ll_set_default_mdsize(struct ll_sb_info *sbi, int lmmsize)
838 {
839         int rc;
840
841         if (lmmsize < sizeof(struct lov_mds_md) ||
842             lmmsize > OBD_MAX_DEFAULT_EA_SIZE)
843                 return -EINVAL;
844
845         rc = obd_set_info_async(NULL, sbi->ll_md_exp,
846                                 sizeof(KEY_DEFAULT_EASIZE), KEY_DEFAULT_EASIZE,
847                                 sizeof(int), &lmmsize, NULL);
848
849         RETURN(rc);
850 }
851
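/*
 * Undo client_common_fill_super(): finalize cl_object state and the
 * FID clients, then disconnect the data and metadata exports.
 */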
852 static void client_common_put_super(struct super_block *sb)
853 {
854         struct ll_sb_info *sbi = ll_s2sbi(sb);
855         ENTRY;
856
857         cl_sb_fini(sb);
858
859         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
860         obd_disconnect(sbi->ll_dt_exp);
861         sbi->ll_dt_exp = NULL;
862
863         ll_debugfs_unregister_super(sb);
864
865         obd_fid_fini(sbi->ll_md_exp->exp_obd);
866         obd_disconnect(sbi->ll_md_exp);
867         sbi->ll_md_exp = NULL;
868
869         EXIT;
870 }
871
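/*
 * Prepare the superblock for being killed: restore the original s_dev
 * (changed for NFS export clustering) and wait for any running
 * statahead threads to stop.
 */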
872 void ll_kill_super(struct super_block *sb)
873 {
874         struct ll_sb_info *sbi;
875         ENTRY;
876
877         /* sb not initialized yet? */
878         if (!(sb->s_flags & SB_ACTIVE))
879                 return;
880
881         sbi = ll_s2sbi(sb);
882         /* we need to restore s_dev from the value changed for clustered NFS
883          * before put_super, because newer kernels cache s_dev and changing
884          * sb->s_dev in put_super does not affect the real device removal */
885         if (sbi) {
886                 sb->s_dev = sbi->ll_sdev_orig;
887
888                 /* wait for running statahead threads to quit */
889                 while (atomic_read(&sbi->ll_sa_running) > 0)
890                         schedule_timeout_uninterruptible(
891                                 cfs_time_seconds(1) >> 3);
892         }
893
894         EXIT;
895 }
896
897 /* Since this table is also used by ll_sbi_flags_seq_show(), make
898  * sure that the name you want displayed for a token that is
899  * listed more than once below appears first.  For example, we
900  * want "checksum" displayed, not "nochecksum", for the
901  * sbi_flags.
902  */
903 static const match_table_t ll_sbi_flags_name = {
904         {LL_SBI_NOLCK,                  "nolock"},
905         {LL_SBI_CHECKSUM,               "checksum"},
906         {LL_SBI_CHECKSUM,               "nochecksum"},
907         {LL_SBI_LOCALFLOCK,             "localflock"},
908         {LL_SBI_FLOCK,                  "flock"},
909         {LL_SBI_FLOCK,                  "noflock"},
910         {LL_SBI_USER_XATTR,             "user_xattr"},
911         {LL_SBI_USER_XATTR,             "nouser_xattr"},
912         {LL_SBI_LRU_RESIZE,             "lruresize"},
913         {LL_SBI_LRU_RESIZE,             "nolruresize"},
914         {LL_SBI_LAZYSTATFS,             "lazystatfs"},
915         {LL_SBI_LAZYSTATFS,             "nolazystatfs"},
916         {LL_SBI_32BIT_API,              "32bitapi"},
917         {LL_SBI_USER_FID2PATH,          "user_fid2path"},
918         {LL_SBI_USER_FID2PATH,          "nouser_fid2path"},
919         {LL_SBI_VERBOSE,                "verbose"},
920         {LL_SBI_VERBOSE,                "noverbose"},
921         {LL_SBI_ALWAYS_PING,            "always_ping"},
922         {LL_SBI_TEST_DUMMY_ENCRYPTION,  "test_dummy_encryption"},
923         {LL_SBI_ENCRYPT,                "encrypt"},
924         {LL_SBI_ENCRYPT,                "noencrypt"},
925         {LL_SBI_FOREIGN_SYMLINK,        "foreign_symlink=%s"},
926         {LL_SBI_NUM_MOUNT_OPT,          NULL},
927
928         {LL_SBI_ACL,                    "acl"},
929         {LL_SBI_AGL_ENABLED,            "agl"},
930         {LL_SBI_64BIT_HASH,             "64bit_hash"},
931         {LL_SBI_LAYOUT_LOCK,            "layout"},
932         {LL_SBI_XATTR_CACHE,            "xattr_cache"},
933         {LL_SBI_NOROOTSQUASH,           "norootsquash"},
934         {LL_SBI_FAST_READ,              "fast_read"},
935         {LL_SBI_FILE_SECCTX,            "file_secctx"},
936         {LL_SBI_TINY_WRITE,             "tiny_write"},
937         {LL_SBI_FILE_HEAT,              "file_heat"},
938         {LL_SBI_PARALLEL_DIO,           "parallel_dio"},
939 };
940
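/* Print the name of every sbi flag that is currently set, using the
 * first matching entry in ll_sbi_flags_name for each bit.
 */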
941 int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
942 {
943         struct super_block *sb = m->private;
944         int i;
945
946         for (i = 0; i < LL_SBI_NUM_FLAGS; i++) {
947                 int j;
948
949                 if (!test_bit(i, ll_s2sbi(sb)->ll_flags))
950                         continue;
951
952                 for (j = 0; j < ARRAY_SIZE(ll_sbi_flags_name); j++) {
953                         if (ll_sbi_flags_name[j].token == i &&
954                             ll_sbi_flags_name[j].pattern) {
955                                 seq_printf(m, "%s ",
956                                            ll_sbi_flags_name[j].pattern);
957                                 break;
958                         }
959                 }
960         }
961         seq_puts(m, "\b\n");
962         return 0;
963 }
964
965 /* non-client-specific mount options are parsed in lmd_parse */
966 static int ll_options(char *options, struct super_block *sb)
967 {
968         struct ll_sb_info *sbi = ll_s2sbi(sb);
969         char *s2, *s1, *opts;
970
971         ENTRY;
972         if (!options)
973                 RETURN(0);
974
975         /* Don't stomp on lmd_opts */
976         opts = kstrdup(options, GFP_KERNEL);
977         if (!opts)
978                 RETURN(-ENOMEM);
979         s1 = opts;
980         s2 = opts;
981
982         CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
983
984         while ((s1 = strsep(&opts, ",")) != NULL) {
985                 substring_t args[MAX_OPT_ARGS];
986                 bool turn_off = false;
987                 int token;
988
989                 if (!*s1)
990                         continue;
991
992                 CDEBUG(D_SUPER, "next opt=%s\n", s1);
993
994                 if (strncmp(s1, "no", 2) == 0)
995                         turn_off = true;
996
997                 /*
998                  * Initialize args struct so we know whether arg was
999                  * found; some options take optional arguments.
1000                  */
1001                 args[0].to = NULL;
1002                 args[0].from = NULL;
1003                 token = match_token(s1, ll_sbi_flags_name, args);
1004                 if (token == LL_SBI_NUM_MOUNT_OPT) {
1005                         if (match_wildcard("context", s1) ||
1006                             match_wildcard("fscontext", s1) ||
1007                             match_wildcard("defcontext", s1) ||
1008                             match_wildcard("rootcontext", s1))
1009                                 continue;
1010
1011                         LCONSOLE_ERROR_MSG(0x152,
1012                                            "Unknown option '%s', won't mount.\n",
1013                                            s1);
1014                         RETURN(-EINVAL);
1015                 }
1016
1017                 switch (token) {
1018                 case LL_SBI_NOLCK:
1019                 case LL_SBI_32BIT_API:
1020                 case LL_SBI_64BIT_HASH:
1021                 case LL_SBI_ALWAYS_PING:
1022                         set_bit(token, sbi->ll_flags);
1023                         break;
1024
1025                 case LL_SBI_FLOCK:
1026                         clear_bit(LL_SBI_LOCALFLOCK, sbi->ll_flags);
1027                         if (turn_off)
1028                                 clear_bit(LL_SBI_FLOCK, sbi->ll_flags);
1029                         else
1030                                 set_bit(token, sbi->ll_flags);
1031                         break;
1032
1033                 case LL_SBI_LOCALFLOCK:
1034                         clear_bit(LL_SBI_FLOCK, sbi->ll_flags);
1035                         set_bit(token, sbi->ll_flags);
1036                         break;
1037
1038                 case LL_SBI_CHECKSUM:
1039                         sbi->ll_checksum_set = 1;
1040                         /* fall through */
1041                 case LL_SBI_USER_XATTR:
1042                 case LL_SBI_USER_FID2PATH:
1043                 case LL_SBI_LRU_RESIZE:
1044                 case LL_SBI_LAZYSTATFS:
1045                 case LL_SBI_VERBOSE:
1046                         if (turn_off)
1047                                 clear_bit(token, sbi->ll_flags);
1048                         else
1049                                 set_bit(token, sbi->ll_flags);
1050                         break;
1051                 case LL_SBI_TEST_DUMMY_ENCRYPTION: {
1052 #ifdef HAVE_LUSTRE_CRYPTO
1053                         set_bit(token, sbi->ll_flags);
1054 #else
1055                         LCONSOLE_WARN("Test dummy encryption mount option ignored: encryption not supported\n");
1056 #endif
1057                         break;
1058                 }
1059                 case LL_SBI_ENCRYPT:
1060 #ifdef HAVE_LUSTRE_CRYPTO
1061                         if (turn_off)
1062                                 clear_bit(token, sbi->ll_flags);
1063                         else
1064                                 set_bit(token, sbi->ll_flags);
1065 #else
1066                         LCONSOLE_WARN("noencrypt or encrypt mount option ignored: encryption not supported\n");
1067 #endif
1068                         break;
1069                 case LL_SBI_FOREIGN_SYMLINK:
1070                         /* non-default prefix provided ? */
1071                         if (args->from) {
1072                                 size_t old_len;
1073                                 char *old;
1074
1075                                 /* path must be absolute */
1076                                 if (args->from[0] != '/') {
1077                                         LCONSOLE_ERROR_MSG(0x152,
1078                                                            "foreign prefix '%s' must be an absolute path\n",
1079                                                            args->from);
1080                                         RETURN(-EINVAL);
1081                                 }
1082
1083                                 old_len = sbi->ll_foreign_symlink_prefix_size;
1084                                 old = sbi->ll_foreign_symlink_prefix;
1085                                 /* alloc for path length and '\0' */
1086                                 sbi->ll_foreign_symlink_prefix = match_strdup(args);
1087                                 if (!sbi->ll_foreign_symlink_prefix) {
1088                                         /* restore previous */
1089                                         sbi->ll_foreign_symlink_prefix = old;
1090                                         sbi->ll_foreign_symlink_prefix_size =
1091                                                 old_len;
1092                                         RETURN(-ENOMEM);
1093                                 }
1094                                 sbi->ll_foreign_symlink_prefix_size =
1095                                         args->to - args->from + 1;
1096                                 OBD_ALLOC_POST(sbi->ll_foreign_symlink_prefix,
1097                                                sbi->ll_foreign_symlink_prefix_size,
1098                                                "kmalloced");
1099                                 if (old)
1100                                         OBD_FREE(old, old_len);
1101
1102                                 /* enable foreign symlink support */
1103                                 set_bit(token, sbi->ll_flags);
1104                         } else {
1105                                 LCONSOLE_ERROR_MSG(0x152,
1106                                                    "invalid %s option\n", s1);
1107                         }
1108                 /* fall through */
1109                 default:
1110                         break;
1111                 }
1112         }
1113         kfree(opts);
1114         RETURN(0);
1115 }
1116
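/*
 * Initialize a freshly allocated ll_inode_info.  Directories get the
 * statahead fields; other inodes get the size/setattr mutexes, truncate
 * semaphore, range-lock tree, glimpse, heat and PCC state.
 */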
1117 void ll_lli_init(struct ll_inode_info *lli)
1118 {
1119         lli->lli_inode_magic = LLI_INODE_MAGIC;
1120         lli->lli_flags = 0;
1121         rwlock_init(&lli->lli_lock);
1122         lli->lli_posix_acl = NULL;
1123         /* Do not set lli_fid, it has been initialized already. */
1124         fid_zero(&lli->lli_pfid);
1125         lli->lli_mds_read_och = NULL;
1126         lli->lli_mds_write_och = NULL;
1127         lli->lli_mds_exec_och = NULL;
1128         lli->lli_open_fd_read_count = 0;
1129         lli->lli_open_fd_write_count = 0;
1130         lli->lli_open_fd_exec_count = 0;
1131         mutex_init(&lli->lli_och_mutex);
1132         spin_lock_init(&lli->lli_agl_lock);
1133         spin_lock_init(&lli->lli_layout_lock);
1134         ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
1135         lli->lli_clob = NULL;
1136
1137         init_rwsem(&lli->lli_xattrs_list_rwsem);
1138         mutex_init(&lli->lli_xattrs_enq_lock);
1139
1140         LASSERT(lli->lli_vfs_inode.i_mode != 0);
1141         if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
1142                 lli->lli_opendir_key = NULL;
1143                 lli->lli_sai = NULL;
1144                 spin_lock_init(&lli->lli_sa_lock);
1145                 lli->lli_opendir_pid = 0;
1146                 lli->lli_sa_enabled = 0;
1147                 init_rwsem(&lli->lli_lsm_sem);
1148         } else {
1149                 mutex_init(&lli->lli_size_mutex);
1150                 mutex_init(&lli->lli_setattr_mutex);
1151                 lli->lli_symlink_name = NULL;
1152                 ll_trunc_sem_init(&lli->lli_trunc_sem);
1153                 range_lock_tree_init(&lli->lli_write_tree);
1154                 init_rwsem(&lli->lli_glimpse_sem);
1155                 lli->lli_glimpse_time = ktime_set(0, 0);
1156                 INIT_LIST_HEAD(&lli->lli_agl_list);
1157                 lli->lli_agl_index = 0;
1158                 lli->lli_async_rc = 0;
1159                 spin_lock_init(&lli->lli_heat_lock);
1160                 obd_heat_clear(lli->lli_heat_instances, OBD_HEAT_COUNT);
1161                 lli->lli_heat_flags = 0;
1162                 mutex_init(&lli->lli_pcc_lock);
1163                 lli->lli_pcc_state = PCC_STATE_FL_NONE;
1164                 lli->lli_pcc_inode = NULL;
1165                 lli->lli_pcc_dsflags = PCC_DATASET_INVALID;
1166                 lli->lli_pcc_generation = 0;
1167                 mutex_init(&lli->lli_group_mutex);
1168                 lli->lli_group_users = 0;
1169                 lli->lli_group_gid = 0;
1170         }
1171         mutex_init(&lli->lli_layout_mutex);
1172         memset(lli->lli_jobid, 0, sizeof(lli->lli_jobid));
1173         /* initialize the ll_cl_context list */
1174         INIT_LIST_HEAD(&lli->lli_lccs);
1175 }
1176
1177 #define MAX_STRING_SIZE 128
1178
1179 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1180
1181 #define LSI_BDI_INITIALIZED     0x00400000
1182
1183 #ifndef HAVE_BDI_CAP_MAP_COPY
1184 # define BDI_CAP_MAP_COPY       0
1185 #endif
1186
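/* Compatibility helper for kernels without super_setup_bdi_name():
 * initialize and register a per-mount backing_dev_info by hand.
 */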
1187 static int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
1188 {
1189         struct  lustre_sb_info *lsi = s2lsi(sb);
1190         char buf[MAX_STRING_SIZE];
1191         va_list args;
1192         int err;
1193
1194         err = bdi_init(&lsi->lsi_bdi);
1195         if (err)
1196                 return err;
1197
1198         lsi->lsi_flags |= LSI_BDI_INITIALIZED;
1199         lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY;
1200         lsi->lsi_bdi.name = "lustre";
1201         va_start(args, fmt);
1202         vsnprintf(buf, MAX_STRING_SIZE, fmt, args);
1203         va_end(args);
1204         err = bdi_register(&lsi->lsi_bdi, NULL, "%s", buf);
1206         if (!err)
1207                 sb->s_bdi = &lsi->lsi_bdi;
1208
1209         return err;
1210 }
1211 #endif /* !HAVE_SUPER_SETUP_BDI_NAME */
1212
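/*
 * Fill in the client superblock at mount time: allocate the sbi, parse
 * the mount options, generate the sb UUID, set up the BDI and debugfs
 * entries, process the client config log, and connect the client obds
 * via client_common_fill_super().
 */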
1213 int ll_fill_super(struct super_block *sb)
1214 {
1215         struct  lustre_profile *lprof = NULL;
1216         struct  lustre_sb_info *lsi = s2lsi(sb);
1217         struct  ll_sb_info *sbi = NULL;
1218         char    *dt = NULL, *md = NULL;
1219         char    *profilenm = get_profile_name(sb);
1220         struct config_llog_instance *cfg;
1221         /* %p for void* in printf needs 16+2 characters: 0xffffffffffffffff */
1222         const int instlen = LUSTRE_MAXINSTANCE + 2;
1223         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1224         char name[MAX_STRING_SIZE];
1225         int md_len = 0;
1226         int dt_len = 0;
1227         uuid_t uuid;
1228         char *ptr;
1229         int len;
1230         int err;
1231
1232         ENTRY;
1233         /* for ASLR, to map between cfg_instance and hashed ptr */
1234         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1235                profilenm, cfg_instance, sb);
1236
1237         OBD_RACE(OBD_FAIL_LLITE_RACE_MOUNT);
1238
1239         OBD_ALLOC_PTR(cfg);
1240         if (cfg == NULL)
1241                 GOTO(out_free_cfg, err = -ENOMEM);
1242
1243         /* client additional sb info */
1244         lsi->lsi_llsbi = sbi = ll_init_sbi();
1245         if (IS_ERR(sbi))
1246                 GOTO(out_free_cfg, err = PTR_ERR(sbi));
1247
1248         err = ll_options(lsi->lsi_lmd->lmd_opts, sb);
1249         if (err)
1250                 GOTO(out_free_cfg, err);
1251
1252         /* kernel >= 2.6.38 stores dentry operations in sb->s_d_op. */
1253         sb->s_d_op = &ll_d_ops;
1254
1255         /* UUID handling */
1256         generate_random_uuid(uuid.b);
1257         snprintf(sbi->ll_sb_uuid.uuid, sizeof(sbi->ll_sb_uuid), "%pU", uuid.b);
1258
1259         CDEBUG(D_CONFIG, "llite sb uuid: %s\n", sbi->ll_sb_uuid.uuid);
1260
1261         /* Get fsname */
1262         len = strlen(profilenm);
1263         ptr = strrchr(profilenm, '-');
1264         if (ptr && (strcmp(ptr, "-client") == 0))
1265                 len -= 7;
1266
1267         if (len > LUSTRE_MAXFSNAME) {
1268                 if (unlikely(len >= MAX_STRING_SIZE))
1269                         len = MAX_STRING_SIZE - 1;
1270                 strncpy(name, profilenm, len);
1271                 name[len] = '\0';
1272                 err = -ENAMETOOLONG;
1273                 CERROR("%s: fsname longer than %u characters: rc = %d\n",
1274                        name, LUSTRE_MAXFSNAME, err);
1275                 GOTO(out_free_cfg, err);
1276         }
1277         strncpy(sbi->ll_fsname, profilenm, len);
1278         sbi->ll_fsname[len] = '\0';
1279
1280         /* Mount info */
1281         snprintf(name, sizeof(name), "%.*s-%016lx", len,
1282                  profilenm, cfg_instance);
1283
1284         err = super_setup_bdi_name(sb, "%s", name);
1285         if (err)
1286                 GOTO(out_free_cfg, err);
1287
1288         /* Call ll_debugfs_register_super() before lustre_process_log()
1289          * so that "llite.*.*" params can be processed correctly.
1290          */
1291         err = ll_debugfs_register_super(sb, name);
1292         if (err < 0) {
1293                 CERROR("%s: could not register mountpoint in llite: rc = %d\n",
1294                        sbi->ll_fsname, err);
1295                 err = 0;
1296         }
1297
1298         /* The cfg_instance is a value unique to this super, in case some
1299          * joker tries to mount the same fs at two mount points.
1300          */
1301         cfg->cfg_instance = cfg_instance;
1302         cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
1303         cfg->cfg_callback = class_config_llog_handler;
1304         cfg->cfg_sub_clds = CONFIG_SUB_CLIENT;
1305         /* set up client obds */
1306         err = lustre_process_log(sb, profilenm, cfg);
1307         if (err < 0)
1308                 GOTO(out_debugfs, err);
1309
1310         /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
1311         lprof = class_get_profile(profilenm);
1312         if (lprof == NULL) {
1313                 LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be"
1314                                    " read from the MGS.  Does that filesystem "
1315                                    "exist?\n", profilenm);
1316                 GOTO(out_debugfs, err = -EINVAL);
1317         }
1318         CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
1319                lprof->lp_md, lprof->lp_dt);
1320
1321         dt_len = strlen(lprof->lp_dt) + instlen + 2;
1322         OBD_ALLOC(dt, dt_len);
1323         if (!dt)
1324                 GOTO(out_profile, err = -ENOMEM);
1325         snprintf(dt, dt_len - 1, "%s-%016lx", lprof->lp_dt, cfg_instance);
1326
1327         md_len = strlen(lprof->lp_md) + instlen + 2;
1328         OBD_ALLOC(md, md_len);
1329         if (!md)
1330                 GOTO(out_free_dt, err = -ENOMEM);
1331         snprintf(md, md_len - 1, "%s-%016lx", lprof->lp_md, cfg_instance);
1332
1333         /* connections, registrations, sb setup */
1334         err = client_common_fill_super(sb, md, dt);
1335         if (err < 0)
1336                 GOTO(out_free_md, err);
1337
1338         sbi->ll_client_common_fill_super_succeeded = 1;
1339
1340 out_free_md:
1341         if (md)
1342                 OBD_FREE(md, md_len);
1343 out_free_dt:
1344         if (dt)
1345                 OBD_FREE(dt, dt_len);
1346 out_profile:
1347         if (lprof)
1348                 class_put_profile(lprof);
1349 out_debugfs:
1350         if (err < 0)
1351                 ll_debugfs_unregister_super(sb);
1352 out_free_cfg:
1353         if (cfg)
1354                 OBD_FREE_PTR(cfg);
1355
1356         if (err)
1357                 ll_put_super(sb);
1358         else if (test_bit(LL_SBI_VERBOSE, sbi->ll_flags))
1359                 LCONSOLE_WARN("Mounted %s\n", profilenm);
1360         RETURN(err);
1361 } /* ll_fill_super */
1362
1363 void ll_put_super(struct super_block *sb)
1364 {
1365         struct config_llog_instance cfg, params_cfg;
1366         struct obd_device *obd;
1367         struct lustre_sb_info *lsi = s2lsi(sb);
1368         struct ll_sb_info *sbi = ll_s2sbi(sb);
1369         char *profilenm = get_profile_name(sb);
1370         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1371         long ccc_count;
1372         int next, force = 1, rc = 0;
1373         ENTRY;
1374
1375         if (IS_ERR(sbi))
1376                 GOTO(out_no_sbi, 0);
1377
1378         /* Should replace instance_id with something better for ASLR */
1379         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1380                profilenm, cfg_instance, sb);
1381
1382         cfg.cfg_instance = cfg_instance;
1383         lustre_end_log(sb, profilenm, &cfg);
1384
1385         params_cfg.cfg_instance = cfg_instance;
1386         lustre_end_log(sb, PARAMS_FILENAME, &params_cfg);
1387
1388         if (sbi->ll_md_exp) {
1389                 obd = class_exp2obd(sbi->ll_md_exp);
1390                 if (obd)
1391                         force = obd->obd_force;
1392         }
1393
1394         /* Wait for unstable pages to be committed to stable storage */
1395         if (force == 0) {
1396                 rc = l_wait_event_abortable(
1397                         sbi->ll_cache->ccc_unstable_waitq,
1398                         atomic_long_read(&sbi->ll_cache->ccc_unstable_nr) == 0);
1399         }
1400
1401         ccc_count = atomic_long_read(&sbi->ll_cache->ccc_unstable_nr);
1402         if (force == 0 && rc != -ERESTARTSYS)
1403                 LASSERTF(ccc_count == 0, "count: %li\n", ccc_count);
1404
1405         /* We need to set force before the lov_disconnect in
1406          * lustre_common_put_super, since lov_disconnect cleans up OSCs too.
1407          */
1408         if (force) {
1409                 next = 0;
1410                 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
1411                                                      &next)) != NULL) {
1412                         obd->obd_force = force;
1413                 }
1414         }
1415
1416         if (sbi->ll_client_common_fill_super_succeeded) {
1417                 /* Only if client_common_fill_super succeeded */
1418                 client_common_put_super(sb);
1419         }
1420
1421         next = 0;
1422         while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)))
1423                 class_manual_cleanup(obd);
1424
1425         if (test_bit(LL_SBI_VERBOSE, sbi->ll_flags))
1426                 LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");
1427
1428         if (profilenm)
1429                 class_del_profile(profilenm);
1430
1431 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1432         if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
1433                 bdi_destroy(&lsi->lsi_bdi);
1434                 lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
1435         }
1436 #endif
1437
1438         ll_free_sbi(sb);
1439         lsi->lsi_llsbi = NULL;
1440 out_no_sbi:
1441         lustre_common_put_super(sb);
1442
1443         cl_env_cache_purge(~0);
1444
1445         EXIT;
1446 } /* ll_put_super */
1447
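/*
 * Return the inode stashed in an ldlm resource's lr_lvb_inode, taking a
 * reference with igrab(), or NULL if there is none or its magic is bogus.
 */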
1448 struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
1449 {
1450         struct inode *inode = NULL;
1451
1452         /* NOTE: we depend on atomic igrab() -bzzz */
1453         lock_res_and_lock(lock);
1454         if (lock->l_resource->lr_lvb_inode) {
1455                 struct ll_inode_info *lli;
1456                 lli = ll_i2info(lock->l_resource->lr_lvb_inode);
1457                 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1458                         inode = igrab(lock->l_resource->lr_lvb_inode);
1459                 } else {
1460                         inode = lock->l_resource->lr_lvb_inode;
1461                         LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ? D_INFO :
1462                                          D_WARNING, lock,
1463                                          "lr_lvb_inode %p is bogus: magic %08x",
1464                                          lock->l_resource->lr_lvb_inode,
1465                                          lli->lli_inode_magic);
1466                         inode = NULL;
1467                 }
1468         }
1469         unlock_res_and_lock(lock);
1470         return inode;
1471 }
1472
1473 void ll_dir_clear_lsm_md(struct inode *inode)
1474 {
1475         struct ll_inode_info *lli = ll_i2info(inode);
1476
1477         LASSERT(S_ISDIR(inode->i_mode));
1478
1479         if (lli->lli_lsm_md) {
1480                 lmv_free_memmd(lli->lli_lsm_md);
1481                 lli->lli_lsm_md = NULL;
1482         }
1483
1484         if (lli->lli_default_lsm_md) {
1485                 lmv_free_memmd(lli->lli_default_lsm_md);
1486                 lli->lli_default_lsm_md = NULL;
1487         }
1488 }
1489
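/*
 * Get or create an anonymous inode for one stripe (slave) of a striped
 * directory, keyed by the stripe FID; the master directory FID is kept in
 * lli_pfid so the master inode can be found later.
 */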
1490 static struct inode *ll_iget_anon_dir(struct super_block *sb,
1491                                       const struct lu_fid *fid,
1492                                       struct lustre_md *md)
1493 {
1494         struct ll_sb_info *sbi = ll_s2sbi(sb);
1495         struct ll_inode_info *lli;
1496         struct mdt_body *body = md->body;
1497         struct inode *inode;
1498         ino_t ino;
1499
1500         ENTRY;
1501
1502         LASSERT(md->lmv);
1503         ino = cl_fid_build_ino(fid, test_bit(LL_SBI_32BIT_API, sbi->ll_flags));
1504         inode = iget_locked(sb, ino);
1505         if (inode == NULL) {
1506                 CERROR("%s: failed to get simple inode "DFID": rc = -ENOENT\n",
1507                        sbi->ll_fsname, PFID(fid));
1508                 RETURN(ERR_PTR(-ENOENT));
1509         }
1510
1511         lli = ll_i2info(inode);
1512         if (inode->i_state & I_NEW) {
1513                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
1514                                 (body->mbo_mode & S_IFMT);
1515                 LASSERTF(S_ISDIR(inode->i_mode), "Not slave inode "DFID"\n",
1516                          PFID(fid));
1517
1518                 inode->i_mtime.tv_sec = 0;
1519                 inode->i_atime.tv_sec = 0;
1520                 inode->i_ctime.tv_sec = 0;
1521                 inode->i_rdev = 0;
1522
1523 #ifdef HAVE_BACKING_DEV_INFO
1524                 /* initializing backing dev info. */
1525                 inode->i_mapping->backing_dev_info =
1526                                                 &s2lsi(inode->i_sb)->lsi_bdi;
1527 #endif
1528                 inode->i_op = &ll_dir_inode_operations;
1529                 inode->i_fop = &ll_dir_operations;
1530                 lli->lli_fid = *fid;
1531                 ll_lli_init(lli);
1532
1533                 /* master object FID */
1534                 lli->lli_pfid = body->mbo_fid1;
1535                 CDEBUG(D_INODE, "lli %p slave "DFID" master "DFID"\n",
1536                        lli, PFID(fid), PFID(&lli->lli_pfid));
1537                 unlock_new_inode(inode);
1538         } else {
1539                 /* during directory restripe/auto-split, a plain directory
1540                  * may be transformed into a stripe; set its pfid here,
1541                  * otherwise ll_lock_cancel_bits() can't find the master inode.
1542                  */
1543                 lli->lli_pfid = body->mbo_fid1;
1544         }
1545
1546         RETURN(inode);
1547 }
1548
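/*
 * Attach md->lmv to the directory inode as lli_lsm_md; for a striped
 * directory also instantiate an anonymous slave inode (lmo_root) for each
 * valid stripe FID.
 */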
1549 static int ll_init_lsm_md(struct inode *inode, struct lustre_md *md)
1550 {
1551         struct lu_fid *fid;
1552         struct lmv_stripe_md *lsm = md->lmv;
1553         struct ll_inode_info *lli = ll_i2info(inode);
1554         int i;
1555
1556         LASSERT(lsm != NULL);
1557
1558         CDEBUG(D_INODE, "%s: "DFID" set dir layout:\n",
1559                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1560         lsm_md_dump(D_INODE, lsm);
1561
1562         if (!lmv_dir_striped(lsm))
1563                 goto out;
1564
1565         /* XXX this lsm_root initialization should be done in the
1566          * LMV layer, but it needs ll_iget, so we keep it here
1567          * for now. */
1568         for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
1569                 fid = &lsm->lsm_md_oinfo[i].lmo_fid;
1570                 LASSERT(lsm->lsm_md_oinfo[i].lmo_root == NULL);
1571
1572                 if (!fid_is_sane(fid))
1573                         continue;
1574
1575                 /* Unfortunately ll_iget will call ll_update_inode,
1576                  * where the initialization of a slave inode is slightly
1577                  * different, so it resets lsm_md to NULL to avoid
1578                  * initializing the lsm for a slave inode. */
1579                 lsm->lsm_md_oinfo[i].lmo_root =
1580                                 ll_iget_anon_dir(inode->i_sb, fid, md);
1581                 if (IS_ERR(lsm->lsm_md_oinfo[i].lmo_root)) {
1582                         int rc = PTR_ERR(lsm->lsm_md_oinfo[i].lmo_root);
1583
1584                         lsm->lsm_md_oinfo[i].lmo_root = NULL;
1585                         while (i-- > 0) {
1586                                 iput(lsm->lsm_md_oinfo[i].lmo_root);
1587                                 lsm->lsm_md_oinfo[i].lmo_root = NULL;
1588                         }
1589                         return rc;
1590                 }
1591         }
1592 out:
1593         lli->lli_lsm_md = lsm;
1594
1595         return 0;
1596 }
1597
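/*
 * Cache the default LMV carried in md (the default directory layout applied
 * to new subdirectories) on the inode, replacing or clearing any previous
 * copy under lli_lsm_sem.
 */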
1598 static void ll_update_default_lsm_md(struct inode *inode, struct lustre_md *md)
1599 {
1600         struct ll_inode_info *lli = ll_i2info(inode);
1601
1602         ENTRY;
1603
1604         if (!md->default_lmv) {
1605                 /* clear default lsm */
1606                 if (lli->lli_default_lsm_md) {
1607                         down_write(&lli->lli_lsm_sem);
1608                         if (lli->lli_default_lsm_md) {
1609                                 lmv_free_memmd(lli->lli_default_lsm_md);
1610                                 lli->lli_default_lsm_md = NULL;
1611                         }
1612                         up_write(&lli->lli_lsm_sem);
1613                 }
1614                 RETURN_EXIT;
1615         }
1616
1617         if (lli->lli_default_lsm_md) {
1618                 /* do nothing if the default lsm hasn't changed */
1619                 down_read(&lli->lli_lsm_sem);
1620                 if (lli->lli_default_lsm_md &&
1621                     lsm_md_eq(lli->lli_default_lsm_md, md->default_lmv)) {
1622                         up_read(&lli->lli_lsm_sem);
1623                         RETURN_EXIT;
1624                 }
1625                 up_read(&lli->lli_lsm_sem);
1626         }
1627
1628         down_write(&lli->lli_lsm_sem);
1629         if (lli->lli_default_lsm_md)
1630                 lmv_free_memmd(lli->lli_default_lsm_md);
1631         lli->lli_default_lsm_md = md->default_lmv;
1632         lsm_md_dump(D_INODE, md->default_lmv);
1633         md->default_lmv = NULL;
1634         up_write(&lli->lli_lsm_sem);
1635         RETURN_EXIT;
1636 }
1637
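/*
 * Update the cached directory stripe layout from md: compare under a read
 * lock first, replace it under a write lock only if it changed, then merge
 * attributes from the stripes into md->body under a read lock again.
 */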
1638 static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
1639 {
1640         struct ll_inode_info *lli = ll_i2info(inode);
1641         struct lmv_stripe_md *lsm = md->lmv;
1642         struct cl_attr  *attr;
1643         int rc = 0;
1644
1645         ENTRY;
1646
1647         LASSERT(S_ISDIR(inode->i_mode));
1648         CDEBUG(D_INODE, "update lsm %p of "DFID"\n", lli->lli_lsm_md,
1649                PFID(ll_inode2fid(inode)));
1650
1651         /* update default LMV */
1652         if (md->default_lmv)
1653                 ll_update_default_lsm_md(inode, md);
1654
1655         /* after dir migration/restripe, a stripe may be turned into a
1656          * directory; in this case, zero out its lli_pfid.
1657          */
1658         if (unlikely(fid_is_norm(&lli->lli_pfid)))
1659                 fid_zero(&lli->lli_pfid);
1660
1661         /*
1662          * no stripe information from the request: the lustre_md from req
1663          * does not include the stripe EA, see ll_md_setattr()
1664          */
1665         if (!lsm)
1666                 RETURN(0);
1667
1668         /*
1669          * normally the dir layout doesn't change, so only take a read lock
1670          * to check that, to avoid blocking other MD operations.
1671          */
1672         down_read(&lli->lli_lsm_sem);
1673
1674         /* some previous lookup initialized the lsm, and it is unchanged */
1675         if (lli->lli_lsm_md && lsm_md_eq(lli->lli_lsm_md, lsm))
1676                 GOTO(unlock, rc = 0);
1677
1678         /* if the dir layout doesn't match, check whether the version has
1679          * increased, which means the layout changed; this happens in dir
1680          * split/merge and lfsck.
1681          *
1682          * foreign LMV should not change.
1683          */
1684         if (lli->lli_lsm_md && lmv_dir_striped(lli->lli_lsm_md) &&
1685             lsm->lsm_md_layout_version <=
1686             lli->lli_lsm_md->lsm_md_layout_version) {
1687                 CERROR("%s: "DFID" dir layout mismatch:\n",
1688                        ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1689                 lsm_md_dump(D_ERROR, lli->lli_lsm_md);
1690                 lsm_md_dump(D_ERROR, lsm);
1691                 GOTO(unlock, rc = -EINVAL);
1692         }
1693
1694         up_read(&lli->lli_lsm_sem);
1695         down_write(&lli->lli_lsm_sem);
1696         /* clear existing lsm */
1697         if (lli->lli_lsm_md) {
1698                 lmv_free_memmd(lli->lli_lsm_md);
1699                 lli->lli_lsm_md = NULL;
1700         }
1701
1702         rc = ll_init_lsm_md(inode, md);
1703         up_write(&lli->lli_lsm_sem);
1704
1705         if (rc)
1706                 RETURN(rc);
1707
1708         /* set md->lmv to NULL, so the subsequent freeing of lustre_md will
1709          * not free this lsm.
1710          */
1711         md->lmv = NULL;
1712
1713         /* md_merge_attr() may take a long time; since the lsm is already
1714          * set, switch to a read lock.
1715          */
1716         down_read(&lli->lli_lsm_sem);
1717
1718         if (!lmv_dir_striped(lli->lli_lsm_md))
1719                 GOTO(unlock, rc = 0);
1720
1721         OBD_ALLOC_PTR(attr);
1722         if (!attr)
1723                 GOTO(unlock, rc = -ENOMEM);
1724
1725         /* validate the lsm */
1726         rc = md_merge_attr(ll_i2mdexp(inode), lli->lli_lsm_md, attr,
1727                            ll_md_blocking_ast);
1728         if (!rc) {
1729                 if (md->body->mbo_valid & OBD_MD_FLNLINK)
1730                         md->body->mbo_nlink = attr->cat_nlink;
1731                 if (md->body->mbo_valid & OBD_MD_FLSIZE)
1732                         md->body->mbo_size = attr->cat_size;
1733                 if (md->body->mbo_valid & OBD_MD_FLATIME)
1734                         md->body->mbo_atime = attr->cat_atime;
1735                 if (md->body->mbo_valid & OBD_MD_FLCTIME)
1736                         md->body->mbo_ctime = attr->cat_ctime;
1737                 if (md->body->mbo_valid & OBD_MD_FLMTIME)
1738                         md->body->mbo_mtime = attr->cat_mtime;
1739         }
1740
1741         OBD_FREE_PTR(attr);
1742         GOTO(unlock, rc);
1743 unlock:
1744         up_read(&lli->lli_lsm_sem);
1745
1746         return rc;
1747 }
1748
1749 void ll_clear_inode(struct inode *inode)
1750 {
1751         struct ll_inode_info *lli = ll_i2info(inode);
1752         struct ll_sb_info *sbi = ll_i2sbi(inode);
1753
1754         ENTRY;
1755
1756         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
1757                PFID(ll_inode2fid(inode)), inode);
1758
1759         if (S_ISDIR(inode->i_mode)) {
1760                 /* these should have been cleared in ll_file_release */
1761                 LASSERT(lli->lli_opendir_key == NULL);
1762                 LASSERT(lli->lli_sai == NULL);
1763                 LASSERT(lli->lli_opendir_pid == 0);
1764         } else {
1765                 pcc_inode_free(inode);
1766         }
1767
1768         md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
1769
1770         LASSERT(!lli->lli_open_fd_write_count);
1771         LASSERT(!lli->lli_open_fd_read_count);
1772         LASSERT(!lli->lli_open_fd_exec_count);
1773
1774         if (lli->lli_mds_write_och)
1775                 ll_md_real_close(inode, FMODE_WRITE);
1776         if (lli->lli_mds_exec_och)
1777                 ll_md_real_close(inode, FMODE_EXEC);
1778         if (lli->lli_mds_read_och)
1779                 ll_md_real_close(inode, FMODE_READ);
1780
1781         if (S_ISLNK(inode->i_mode) && lli->lli_symlink_name) {
1782                 OBD_FREE(lli->lli_symlink_name,
1783                          strlen(lli->lli_symlink_name) + 1);
1784                 lli->lli_symlink_name = NULL;
1785         }
1786
1787         ll_xattr_cache_destroy(inode);
1788
1789         forget_all_cached_acls(inode);
1790         lli_clear_acl(lli);
1791         lli->lli_inode_magic = LLI_INODE_DEAD;
1792
1793         if (S_ISDIR(inode->i_mode))
1794                 ll_dir_clear_lsm_md(inode);
1795         else if (S_ISREG(inode->i_mode) && !is_bad_inode(inode))
1796                 LASSERT(list_empty(&lli->lli_agl_list));
1797
1798         /*
1799          * XXX This has to be done before lsm is freed below, because
1800          * cl_object still uses inode lsm.
1801          */
1802         cl_inode_fini(inode);
1803
1804         llcrypt_put_encryption_info(inode);
1805
1806         EXIT;
1807 }
1808
1809 static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data)
1810 {
1811         struct lustre_md md;
1812         struct inode *inode = dentry->d_inode;
1813         struct ll_sb_info *sbi = ll_i2sbi(inode);
1814         struct ptlrpc_request *request = NULL;
1815         int rc, ia_valid;
1816
1817         ENTRY;
1818
1819         op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
1820                                      LUSTRE_OPC_ANY, NULL);
1821         if (IS_ERR(op_data))
1822                 RETURN(PTR_ERR(op_data));
1823
1824         /* If this is a chgrp of a regular file, we want to reserve enough
1825          * quota to cover the entire file size.
1826          */
1827         if (S_ISREG(inode->i_mode) && op_data->op_attr.ia_valid & ATTR_GID &&
1828             from_kgid(&init_user_ns, op_data->op_attr.ia_gid) !=
1829             from_kgid(&init_user_ns, inode->i_gid)) {
1830                 op_data->op_xvalid |= OP_XVALID_BLOCKS;
1831                 op_data->op_attr_blocks = inode->i_blocks;
1832         }
1833
1834
1835         rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &request);
1836         if (rc) {
1837                 ptlrpc_req_finished(request);
1838                 if (rc == -ENOENT) {
1839                         clear_nlink(inode);
1840                         /* Unlinked special device node? Or just a race?
1841                          * Pretend we have done everything. */
1842                         if (!S_ISREG(inode->i_mode) &&
1843                             !S_ISDIR(inode->i_mode)) {
1844                                 ia_valid = op_data->op_attr.ia_valid;
1845                                 op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
1846                                 rc = simple_setattr(dentry, &op_data->op_attr);
1847                                 op_data->op_attr.ia_valid = ia_valid;
1848                         }
1849                 } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
1850                         CERROR("md_setattr fails: rc = %d\n", rc);
1851                 }
1852                 RETURN(rc);
1853         }
1854
1855         rc = md_get_lustre_md(sbi->ll_md_exp, &request->rq_pill, sbi->ll_dt_exp,
1856                               sbi->ll_md_exp, &md);
1857         if (rc) {
1858                 ptlrpc_req_finished(request);
1859                 RETURN(rc);
1860         }
1861
1862         ia_valid = op_data->op_attr.ia_valid;
1863         /* inode size will be set in ll_setattr_ost; we can't do it now
1864          * since the dirty cache is not cleared yet. */
1865         op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
1866         if (S_ISREG(inode->i_mode))
1867                 inode_lock(inode);
1868         rc = simple_setattr(dentry, &op_data->op_attr);
1869         if (S_ISREG(inode->i_mode))
1870                 inode_unlock(inode);
1871         op_data->op_attr.ia_valid = ia_valid;
1872
1873         rc = ll_update_inode(inode, &md);
1874         ptlrpc_req_finished(request);
1875
1876         RETURN(rc);
1877 }
1878
1879 /**
1880  * Zero the portion of a page that belongs to @inode.
1881  * This implies, if necessary:
1882  * - taking cl_lock on range corresponding to concerned page
1883  * - grabbing vm page
1884  * - associating cl_page
1885  * - proceeding to clio read
1886  * - zeroing range in page
1887  * - proceeding to cl_page flush
1888  * - releasing cl_lock
1889  *
1890  * \param[in] inode     inode
1891  * \param[in] index     page index
1892  * \param[in] offset    offset in page to start zero from
1893  * \param[in] len       len to zero
1894  *
1895  * \retval 0            on success
1896  * \retval negative     errno on failure
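 *
 * Used e.g. when truncating an encrypted regular file to a size that is not
 * PAGE_SIZE aligned (see ll_setattr_raw() below).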
1897  */
1898 int ll_io_zero_page(struct inode *inode, pgoff_t index, pgoff_t offset,
1899                     unsigned len)
1900 {
1901         struct ll_inode_info *lli = ll_i2info(inode);
1902         struct cl_object *clob = lli->lli_clob;
1903         __u16 refcheck;
1904         struct lu_env *env = NULL;
1905         struct cl_io *io = NULL;
1906         struct cl_page *clpage = NULL;
1907         struct page *vmpage = NULL;
1908         unsigned from = index << PAGE_SHIFT;
1909         struct cl_lock *lock = NULL;
1910         struct cl_lock_descr *descr = NULL;
1911         struct cl_2queue *queue = NULL;
1912         struct cl_sync_io *anchor = NULL;
1913         bool holdinglock = false;
1914         bool lockedbymyself = true;
1915         int rc;
1916
1917         ENTRY;
1918
1919         env = cl_env_get(&refcheck);
1920         if (IS_ERR(env))
1921                 RETURN(PTR_ERR(env));
1922
1923         io = vvp_env_thread_io(env);
1924         io->ci_obj = clob;
1925         rc = cl_io_rw_init(env, io, CIT_WRITE, from, PAGE_SIZE);
1926         if (rc)
1927                 GOTO(putenv, rc);
1928
1929         lock = vvp_env_lock(env);
1930         descr = &lock->cll_descr;
1931         descr->cld_obj   = io->ci_obj;
1932         descr->cld_start = cl_index(io->ci_obj, from);
1933         descr->cld_end   = cl_index(io->ci_obj, from + PAGE_SIZE - 1);
1934         descr->cld_mode  = CLM_WRITE;
1935         descr->cld_enq_flags = CEF_MUST | CEF_NONBLOCK;
1936
1937         /* request lock for page */
1938         rc = cl_lock_request(env, io, lock);
1939         /* -ECANCELED indicates a matching lock with a different extent
1940          * was already present, and -EEXIST indicates a matching lock
1941          * on exactly the same extent was already present.
1942          * In both cases it means we are covered.
1943          */
1944         if (rc == -ECANCELED || rc == -EEXIST)
1945                 rc = 0;
1946         else if (rc < 0)
1947                 GOTO(iofini, rc);
1948         else
1949                 holdinglock = true;
1950
1951         /* grab page */
1952         vmpage = grab_cache_page_nowait(inode->i_mapping, index);
1953         if (vmpage == NULL)
1954                 GOTO(rellock, rc = -EOPNOTSUPP);
1955
1956         if (!PageDirty(vmpage)) {
1957                 /* associate cl_page */
1958                 clpage = cl_page_find(env, clob, vmpage->index,
1959                                       vmpage, CPT_CACHEABLE);
1960                 if (IS_ERR(clpage))
1961                         GOTO(pagefini, rc = PTR_ERR(clpage));
1962
1963                 cl_page_assume(env, io, clpage);
1964         }
1965
1966         if (!PageUptodate(vmpage) && !PageDirty(vmpage) &&
1967             !PageWriteback(vmpage)) {
1968                 /* read page */
1969                 /* set PagePrivate2 to detect special case of empty page
1970                  * in osc_brw_fini_request()
1971                  */
1972                 SetPagePrivate2(vmpage);
1973                 rc = ll_io_read_page(env, io, clpage, NULL);
1974                 if (!PagePrivate2(vmpage))
1975                         /* PagePrivate2 was cleared in osc_brw_fini_request()
1976                          * meaning we read an empty page. In this case, in order
1977                          * to avoid allocating unnecessary block in truncated
1978                          * file, we must not zero and write as below. Subsequent
1979                          * server-side truncate will handle things correctly.
1980                          */
1981                         GOTO(clpfini, rc = 0);
1982                 ClearPagePrivate2(vmpage);
1983                 if (rc)
1984                         GOTO(clpfini, rc);
1985                 lockedbymyself = trylock_page(vmpage);
1986                 cl_page_assume(env, io, clpage);
1987         }
1988
1989         /* zero range in page */
1990         zero_user(vmpage, offset, len);
1991
1992         if (holdinglock && clpage) {
1993                 /* explicitly write newly modified page */
1994                 queue = &io->ci_queue;
1995                 cl_2queue_init(queue);
1996                 anchor = &vvp_env_info(env)->vti_anchor;
1997                 cl_sync_io_init(anchor, 1);
1998                 clpage->cp_sync_io = anchor;
1999                 cl_2queue_add(queue, clpage, true);
2000                 rc = cl_io_submit_rw(env, io, CRT_WRITE, queue);
2001                 if (rc)
2002                         GOTO(queuefini1, rc);
2003                 rc = cl_sync_io_wait(env, anchor, 0);
2004                 if (rc)
2005                         GOTO(queuefini2, rc);
2006                 cl_page_assume(env, io, clpage);
2007
2008 queuefini2:
2009                 cl_2queue_discard(env, io, queue);
2010 queuefini1:
2011                 cl_2queue_disown(env, io, queue);
2012                 cl_2queue_fini(env, queue);
2013         }
2014
2015 clpfini:
2016         if (clpage)
2017                 cl_page_put(env, clpage);
2018 pagefini:
2019         if (lockedbymyself) {
2020                 unlock_page(vmpage);
2021                 put_page(vmpage);
2022         }
2023 rellock:
2024         if (holdinglock)
2025                 cl_lock_release(env, lock);
2026 iofini:
2027         cl_io_fini(env, io);
2028 putenv:
2029         if (env)
2030                 cl_env_put(env, &refcheck);
2031
2032         RETURN(rc);
2033 }
2034
2035 /* If this inode has objects allocated to it (lsm != NULL), then the OST
2036  * object(s) determine the file size and mtime.  Otherwise, the MDS will
2037  * keep these values until such a time that objects are allocated for it.
2038  * We do the MDS operations first, as it is checking permissions for us.
2039  * We don't do the MDS RPC if there is nothing that we want to store there,
2040  * otherwise there is no harm in updating mtime/atime on the MDS if we are
2041  * going to do an RPC anyway.
2042  *
2043  * If we are doing a truncate, we will send the mtime and ctime updates
2044  * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
2045  * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
2046  * at the same time.
2047  *
2048  * In the case of HSM import, we only set attributes on the MDS.
2049  */
2050 int ll_setattr_raw(struct dentry *dentry, struct iattr *attr,
2051                    enum op_xvalid xvalid, bool hsm_import)
2052 {
2053         struct inode *inode = dentry->d_inode;
2054         struct ll_inode_info *lli = ll_i2info(inode);
2055         struct md_op_data *op_data = NULL;
2056         ktime_t kstart = ktime_get();
2057         int rc = 0;
2058
2059         ENTRY;
2060
2061         CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, "
2062                "valid %x, hsm_import %d\n",
2063                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid),
2064                inode, i_size_read(inode), attr->ia_size, attr->ia_valid,
2065                hsm_import);
2066
2067         if (attr->ia_valid & ATTR_SIZE) {
2068                 /* Check new size against VFS/VM file size limit and rlimit */
2069                 rc = inode_newsize_ok(inode, attr->ia_size);
2070                 if (rc)
2071                         RETURN(rc);
2072
2073                 /* The maximum Lustre file size is variable, based on the
2074                  * OST maximum object size and number of stripes.  This
2075                  * needs another check in addition to the VFS check above. */
2076                 if (attr->ia_size > ll_file_maxbytes(inode)) {
2077                         CDEBUG(D_INODE, "file "DFID" too large %llu > %llu\n",
2078                                PFID(&lli->lli_fid), attr->ia_size,
2079                                ll_file_maxbytes(inode));
2080                         RETURN(-EFBIG);
2081                 }
2082
2083                 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
2084         }
2085
2086         /* POSIX: check before ATTR_*TIME_SET is set (from inode_change_ok) */
2087         if (attr->ia_valid & TIMES_SET_FLAGS) {
2088                 if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
2089                     !capable(CAP_FOWNER))
2090                         RETURN(-EPERM);
2091         }
2092
2093         /* We mark all of the fields "set" so MDS/OST does not re-set them */
2094         if (!(xvalid & OP_XVALID_CTIME_SET) &&
2095              (attr->ia_valid & ATTR_CTIME)) {
2096                 attr->ia_ctime = current_time(inode);
2097                 xvalid |= OP_XVALID_CTIME_SET;
2098         }
2099         if (!(attr->ia_valid & ATTR_ATIME_SET) &&
2100             (attr->ia_valid & ATTR_ATIME)) {
2101                 attr->ia_atime = current_time(inode);
2102                 attr->ia_valid |= ATTR_ATIME_SET;
2103         }
2104         if (!(attr->ia_valid & ATTR_MTIME_SET) &&
2105             (attr->ia_valid & ATTR_MTIME)) {
2106                 attr->ia_mtime = current_time(inode);
2107                 attr->ia_valid |= ATTR_MTIME_SET;
2108         }
2109
2110         if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
2111                 CDEBUG(D_INODE, "setting mtime %lld, ctime %lld, now = %lld\n",
2112                        (s64)attr->ia_mtime.tv_sec, (s64)attr->ia_ctime.tv_sec,
2113                        ktime_get_real_seconds());
2114
2115         if (S_ISREG(inode->i_mode))
2116                 inode_unlock(inode);
2117
2118         /* We always do an MDS RPC, even if we're only changing the size;
2119          * only the MDS knows whether truncate() should fail with -ETXTBSY */
2120
2121         OBD_ALLOC_PTR(op_data);
2122         if (op_data == NULL)
2123                 GOTO(out, rc = -ENOMEM);
2124
2125         if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
2126                 /* If we are changing the file size, the file content is
2127                  * modified, so flag it.
2128                  */
2129                 xvalid |= OP_XVALID_OWNEROVERRIDE;
2130                 op_data->op_bias |= MDS_DATA_MODIFIED;
2131                 clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
2132         }
2133
2134         if (attr->ia_valid & ATTR_FILE) {
2135                 struct ll_file_data *fd = attr->ia_file->private_data;
2136
2137                 if (fd->fd_lease_och)
2138                         op_data->op_bias |= MDS_TRUNC_KEEP_LEASE;
2139         }
2140
2141         op_data->op_attr = *attr;
2142         op_data->op_xvalid = xvalid;
2143
2144         rc = ll_md_setattr(dentry, op_data);
2145         if (rc)
2146                 GOTO(out, rc);
2147
2148         if (!S_ISREG(inode->i_mode) || hsm_import)
2149                 GOTO(out, rc = 0);
2150
2151         if (attr->ia_valid & (ATTR_SIZE | ATTR_ATIME | ATTR_ATIME_SET |
2152                               ATTR_MTIME | ATTR_MTIME_SET | ATTR_CTIME) ||
2153             xvalid & OP_XVALID_CTIME_SET) {
2154                 bool cached = false;
2155
2156                 rc = pcc_inode_setattr(inode, attr, &cached);
2157                 if (cached) {
2158                         if (rc) {
2159                                 CERROR("%s: PCC inode "DFID" setattr failed: "
2160                                        "rc = %d\n",
2161                                        ll_i2sbi(inode)->ll_fsname,
2162                                        PFID(&lli->lli_fid), rc);
2163                                 GOTO(out, rc);
2164                         }
2165                 } else {
2166                         unsigned int flags = 0;
2167
2168                         /* For truncate and for utimes that send attributes to
2169                          * OSTs, setting mtime/atime to the past is performed
2170                          * under a PW [0:EOF] extent lock (new_size:EOF for
2171                          * truncate). It may seem excessive to send mtime/atime
2172                          * updates to OSTs when not setting times to the past,
2173                          * but it is necessary due to possible time
2174                          * de-synchronization between the MDT inode and OST objects.
2175                          */
2176                         if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) {
2177                                 xvalid |= OP_XVALID_FLAGS;
2178                                 flags = LUSTRE_ENCRYPT_FL;
2179                                 /* A call to ll_io_zero_page() is not necessary
2180                                  * if truncating on a PAGE_SIZE boundary,
2181                                  * because whole pages will be wiped.
2182                                  * In the case of Direct IO, all we need is to
2183                                  * set the new size.
2184                                  */
2185                                 if (attr->ia_valid & ATTR_SIZE &&
2186                                     attr->ia_size & ~PAGE_MASK &&
2187                                     !(attr->ia_valid & ATTR_FILE &&
2188                                       attr->ia_file->f_flags & O_DIRECT)) {
2189                                         pgoff_t offset =
2190                                                 attr->ia_size & (PAGE_SIZE - 1);
2191
2192                                         rc = ll_io_zero_page(inode,
2193                                                     attr->ia_size >> PAGE_SHIFT,
2194                                                     offset, PAGE_SIZE - offset);
2195                                         if (rc)
2196                                                 GOTO(out, rc);
2197                                 }
2198                         }
2199                         rc = cl_setattr_ost(lli->lli_clob, attr, xvalid, flags);
2200                 }
2201         }
2202
2203         /* If the file was restored, we need to set the dirty flag.
2204          *
2205          * We've already sent the MDS_DATA_MODIFIED flag in
2206          * ll_md_setattr() for truncate. However, the MDT refuses to
2207          * set the HS_DIRTY flag on released files, so we have to set
2208          * it again if the file has been restored. Please check how
2209          * LLIF_DATA_MODIFIED is set in vvp_io_setattr_fini().
2210          *
2211          * Please notice that if the file is not released, the previous
2212          * MDS_DATA_MODIFIED has taken effect and usually
2213          * LLIF_DATA_MODIFIED is not set (see vvp_io_setattr_fini()).
2214          * This way we can save an RPC for the common open + truncate
2215          * operation. */
2216         if (test_and_clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags)) {
2217                 struct hsm_state_set hss = {
2218                         .hss_valid = HSS_SETMASK,
2219                         .hss_setmask = HS_DIRTY,
2220                 };
2221                 int rc2;
2222
2223                 rc2 = ll_hsm_state_set(inode, &hss);
2224                 /* truncate and write can happen at the same time, so the
2225                  * file can be marked modified even though it was not
2226                  * restored from the released state; then ll_hsm_state_set()
2227                  * is not applicable for the file and rc2 < 0 is normal in
2228                  * this case. */
2229                 if (rc2 < 0)
2230                         CDEBUG(D_INFO, DFID "HSM set dirty failed: rc2 = %d\n",
2231                                PFID(ll_inode2fid(inode)), rc2);
2232         }
2233
2234         EXIT;
2235 out:
2236         if (op_data != NULL)
2237                 ll_finish_md_op_data(op_data);
2238
2239         if (S_ISREG(inode->i_mode)) {
2240                 inode_lock(inode);
2241                 if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
2242                         inode_dio_wait(inode);
2243                 /* Once we've got the i_mutex, it's safe to set the S_NOSEC
2244                  * flag.  ll_update_inode (called from ll_md_setattr) clears
2245                  * inode flags, so there is a gap where S_NOSEC is not set.
2246                  * This can cause a writer to take the i_mutex unnecessarily,
2247                  * but this is safe to do and should be rare. */
2248                 inode_has_no_xattr(inode);
2249         }
2250
2251         if (!rc)
2252                 ll_stats_ops_tally(ll_i2sbi(inode), attr->ia_valid & ATTR_SIZE ?
2253                                         LPROC_LL_TRUNC : LPROC_LL_SETATTR,
2254                                    ktime_us_delta(ktime_get(), kstart));
2255
2256         return rc;
2257 }
2258
2259 int ll_setattr(struct dentry *de, struct iattr *attr)
2260 {
2261         int mode = de->d_inode->i_mode;
2262         enum op_xvalid xvalid = 0;
2263         int rc;
2264
2265         rc = llcrypt_prepare_setattr(de, attr);
2266         if (rc)
2267                 return rc;
2268
2269         if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
2270                               (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
2271                 xvalid |= OP_XVALID_OWNEROVERRIDE;
2272
2273         if (((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
2274                                (ATTR_SIZE|ATTR_MODE)) &&
2275             (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
2276              (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2277               !(attr->ia_mode & S_ISGID))))
2278                 attr->ia_valid |= ATTR_FORCE;
2279
2280         if ((attr->ia_valid & ATTR_MODE) &&
2281             (mode & S_ISUID) &&
2282             !(attr->ia_mode & S_ISUID) &&
2283             !(attr->ia_valid & ATTR_KILL_SUID))
2284                 attr->ia_valid |= ATTR_KILL_SUID;
2285
2286         if ((attr->ia_valid & ATTR_MODE) &&
2287             ((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2288             !(attr->ia_mode & S_ISGID) &&
2289             !(attr->ia_valid & ATTR_KILL_SGID))
2290                 attr->ia_valid |= ATTR_KILL_SGID;
2291
2292         return ll_setattr_raw(de, attr, xvalid, false);
2293 }
2294
2295 int ll_statfs_internal(struct ll_sb_info *sbi, struct obd_statfs *osfs,
2296                        u32 flags)
2297 {
2298         struct obd_statfs obd_osfs = { 0 };
2299         time64_t max_age;
2300         int rc;
2301
2302         ENTRY;
2303         max_age = ktime_get_seconds() - sbi->ll_statfs_max_age;
2304
2305         if (test_bit(LL_SBI_LAZYSTATFS, sbi->ll_flags))
2306                 flags |= OBD_STATFS_NODELAY;
2307
2308         rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
2309         if (rc)
2310                 RETURN(rc);
2311
2312         osfs->os_type = LL_SUPER_MAGIC;
2313
2314         CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n",
2315               osfs->os_bavail, osfs->os_blocks, osfs->os_ffree, osfs->os_files);
2316
2317         if (osfs->os_state & OS_STATFS_SUM)
2318                 GOTO(out, rc);
2319
2320         rc = obd_statfs(NULL, sbi->ll_dt_exp, &obd_osfs, max_age, flags);
2321         if (rc) /* Possibly a filesystem with no OSTs.  Report MDT totals. */
2322                 GOTO(out, rc = 0);
2323
2324         CDEBUG(D_SUPER, "OSC blocks %llu/%llu objects %llu/%llu\n",
2325                obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
2326                obd_osfs.os_files);
2327
2328         osfs->os_bsize = obd_osfs.os_bsize;
2329         osfs->os_blocks = obd_osfs.os_blocks;
2330         osfs->os_bfree = obd_osfs.os_bfree;
2331         osfs->os_bavail = obd_osfs.os_bavail;
2332
2333         /* If we have _some_ OSTs, but don't have as many free objects on the
2334          * OSTs as inodes on the MDTs, reduce the reported number of inodes
2335          * to compensate, so that the "inodes in use" number is correct.
2336          * This should be kept in sync with lod_statfs() behaviour.
2337          */
2338         if (obd_osfs.os_files && obd_osfs.os_ffree < osfs->os_ffree) {
2339                 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
2340                                  obd_osfs.os_ffree;
2341                 osfs->os_ffree = obd_osfs.os_ffree;
2342         }
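        /* The adjustment above means that, e.g., an MDT reporting 100 total
         * and 90 free inodes combined with 50 free OST objects yields
         * 60 total and 50 free, keeping the 10 "in use" inodes unchanged.
         */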
2343
2344 out:
2345         RETURN(rc);
2346 }
2347
2348 static int ll_statfs_project(struct inode *inode, struct kstatfs *sfs)
2349 {
2350         struct if_quotactl qctl = {
2351                 .qc_cmd = LUSTRE_Q_GETQUOTA,
2352                 .qc_type = PRJQUOTA,
2353                 .qc_valid = QC_GENERAL,
2354         };
2355         u64 limit, curblock;
2356         int ret;
2357
2358         qctl.qc_id = ll_i2info(inode)->lli_projid;
2359         ret = quotactl_ioctl(inode->i_sb, &qctl);
2360         if (ret) {
2361                 /* ignore errors if the project ID does not have
2362                  * a quota limit or the feature is unsupported.
2363                  */
2364                 if (ret == -ESRCH || ret == -EOPNOTSUPP)
2365                         ret = 0;
2366                 return ret;
2367         }
2368
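        /* The quota block limits are in 1 KiB units (hence the * 1024
         * conversion to bytes below); e.g. a 1 GiB soft limit (1048576 KiB)
         * with an f_bsize of 4096 gives limit = 262144 blocks.
         */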
2369         limit = ((qctl.qc_dqblk.dqb_bsoftlimit ?
2370                  qctl.qc_dqblk.dqb_bsoftlimit :
2371                  qctl.qc_dqblk.dqb_bhardlimit) * 1024) / sfs->f_bsize;
2372         if (limit && sfs->f_blocks > limit) {
2373                 curblock = (qctl.qc_dqblk.dqb_curspace +
2374                                 sfs->f_bsize - 1) / sfs->f_bsize;
2375                 sfs->f_blocks = limit;
2376                 sfs->f_bfree = sfs->f_bavail =
2377                         (sfs->f_blocks > curblock) ?
2378                         (sfs->f_blocks - curblock) : 0;
2379         }
2380
2381         limit = qctl.qc_dqblk.dqb_isoftlimit ?
2382                 qctl.qc_dqblk.dqb_isoftlimit :
2383                 qctl.qc_dqblk.dqb_ihardlimit;
2384         if (limit && sfs->f_files > limit) {
2385                 sfs->f_files = limit;
2386                 sfs->f_ffree = (sfs->f_files >
2387                         qctl.qc_dqblk.dqb_curinodes) ?
2388                         (sfs->f_files - qctl.qc_dqblk.dqb_curinodes) : 0;
2389         }
2390
2391         return 0;
2392 }
2393
2394 int ll_statfs(struct dentry *de, struct kstatfs *sfs)
2395 {
2396         struct super_block *sb = de->d_sb;
2397         struct obd_statfs osfs;
2398         __u64 fsid = huge_encode_dev(sb->s_dev);
2399         ktime_t kstart = ktime_get();
2400         int rc;
2401
2402         CDEBUG(D_VFSTRACE, "VFS Op:sb=%s (%p)\n", sb->s_id, sb);
2403
2404         /* Some amount of caching on the client is allowed */
2405         rc = ll_statfs_internal(ll_s2sbi(sb), &osfs, OBD_STATFS_SUM);
2406         if (rc)
2407                 return rc;
2408
2409         statfs_unpack(sfs, &osfs);
2410
2411         /* We need to downshift for all 32-bit kernels, because we can't
2412          * tell if the kernel is being called via sys_statfs64() or not.
2413          * Stop before overflowing f_bsize; at that point it is better
2414          * to just risk EOVERFLOW if the caller is using the old sys_statfs(). */
2415         if (sizeof(long) < 8) {
2416                 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
2417                         sfs->f_bsize <<= 1;
2418
2419                         osfs.os_blocks >>= 1;
2420                         osfs.os_bfree >>= 1;
2421                         osfs.os_bavail >>= 1;
2422                 }
2423         }
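        /* e.g. 2^33 blocks of 4 KiB would be reported as 2^31 blocks of
         * 16 KiB on a 32-bit client.
         */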
2424
2425         sfs->f_blocks = osfs.os_blocks;
2426         sfs->f_bfree = osfs.os_bfree;
2427         sfs->f_bavail = osfs.os_bavail;
2428         sfs->f_fsid.val[0] = (__u32)fsid;
2429         sfs->f_fsid.val[1] = (__u32)(fsid >> 32);
2430         if (ll_i2info(de->d_inode)->lli_projid)
2431                 return ll_statfs_project(de->d_inode, sfs);
2432
2433         ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STATFS,
2434                            ktime_us_delta(ktime_get(), kstart));
2435
2436         return 0;
2437 }
2438
2439 void ll_inode_size_lock(struct inode *inode)
2440 {
2441         struct ll_inode_info *lli;
2442
2443         LASSERT(!S_ISDIR(inode->i_mode));
2444
2445         lli = ll_i2info(inode);
2446         mutex_lock(&lli->lli_size_mutex);
2447 }
2448
2449 void ll_inode_size_unlock(struct inode *inode)
2450 {
2451         struct ll_inode_info *lli;
2452
2453         lli = ll_i2info(inode);
2454         mutex_unlock(&lli->lli_size_mutex);
2455 }
2456
2457 void ll_update_inode_flags(struct inode *inode, unsigned int ext_flags)
2458 {
2459         /* do not clear encryption flag */
2460         ext_flags |= ll_inode_to_ext_flags(inode->i_flags) & LUSTRE_ENCRYPT_FL;
2461         inode->i_flags = ll_ext_to_inode_flags(ext_flags);
2462         if (ext_flags & LUSTRE_PROJINHERIT_FL)
2463                 set_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags);
2464         else
2465                 clear_bit(LLIF_PROJECT_INHERIT, &ll_i2info(inode)->lli_flags);
2466 }
2467
2468 int ll_update_inode(struct inode *inode, struct lustre_md *md)
2469 {
2470         struct ll_inode_info *lli = ll_i2info(inode);
2471         struct mdt_body *body = md->body;
2472         struct ll_sb_info *sbi = ll_i2sbi(inode);
2473         bool api32;
2474         int rc = 0;
2475
2476         if (body->mbo_valid & OBD_MD_FLEASIZE) {
2477                 rc = cl_file_inode_init(inode, md);
2478                 if (rc)
2479                         return rc;
2480         }
2481
2482         if (S_ISDIR(inode->i_mode)) {
2483                 rc = ll_update_lsm_md(inode, md);
2484                 if (rc != 0)
2485                         return rc;
2486         }
2487
2488         if (body->mbo_valid & OBD_MD_FLACL)
2489                 lli_replace_acl(lli, md);
2490
2491         api32 = test_bit(LL_SBI_32BIT_API, sbi->ll_flags);
2492         inode->i_ino = cl_fid_build_ino(&body->mbo_fid1, api32);
2493         inode->i_generation = cl_fid_build_gen(&body->mbo_fid1);
2494
2495         if (body->mbo_valid & OBD_MD_FLATIME) {
2496                 if (body->mbo_atime > inode->i_atime.tv_sec)
2497                         inode->i_atime.tv_sec = body->mbo_atime;
2498                 lli->lli_atime = body->mbo_atime;
2499         }
2500
2501         if (body->mbo_valid & OBD_MD_FLMTIME) {
2502                 if (body->mbo_mtime > inode->i_mtime.tv_sec) {
2503                         CDEBUG(D_INODE,
2504                                "setting ino %lu mtime from %lld to %llu\n",
2505                                inode->i_ino, (s64)inode->i_mtime.tv_sec,
2506                                body->mbo_mtime);
2507                         inode->i_mtime.tv_sec = body->mbo_mtime;
2508                 }
2509                 lli->lli_mtime = body->mbo_mtime;
2510         }
2511
2512         if (body->mbo_valid & OBD_MD_FLCTIME) {
2513                 if (body->mbo_ctime > inode->i_ctime.tv_sec)
2514                         inode->i_ctime.tv_sec = body->mbo_ctime;
2515                 lli->lli_ctime = body->mbo_ctime;
2516         }
2517
2518         if (body->mbo_valid & OBD_MD_FLBTIME)
2519                 lli->lli_btime = body->mbo_btime;
2520
2521         /* Clear i_flags to remove S_NOSEC before permissions are updated */
2522         if (body->mbo_valid & OBD_MD_FLFLAGS)
2523                 ll_update_inode_flags(inode, body->mbo_flags);
2524         if (body->mbo_valid & OBD_MD_FLMODE)
2525                 inode->i_mode = (inode->i_mode & S_IFMT) |
2526                                 (body->mbo_mode & ~S_IFMT);
2527
2528         if (body->mbo_valid & OBD_MD_FLTYPE)
2529                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
2530                                 (body->mbo_mode & S_IFMT);
2531
2532         LASSERT(inode->i_mode != 0);
2533         if (body->mbo_valid & OBD_MD_FLUID)
2534                 inode->i_uid = make_kuid(&init_user_ns, body->mbo_uid);
2535         if (body->mbo_valid & OBD_MD_FLGID)
2536                 inode->i_gid = make_kgid(&init_user_ns, body->mbo_gid);
2537         if (body->mbo_valid & OBD_MD_FLPROJID)
2538                 lli->lli_projid = body->mbo_projid;
2539         if (body->mbo_valid & OBD_MD_FLNLINK) {
2540                 spin_lock(&inode->i_lock);
2541                 set_nlink(inode, body->mbo_nlink);
2542                 spin_unlock(&inode->i_lock);
2543         }
2544         if (body->mbo_valid & OBD_MD_FLRDEV)
2545                 inode->i_rdev = old_decode_dev(body->mbo_rdev);
2546
2547         if (body->mbo_valid & OBD_MD_FLID) {
2548                 /* FID shouldn't be changed! */
2549                 if (fid_is_sane(&lli->lli_fid)) {
2550                         LASSERTF(lu_fid_eq(&lli->lli_fid, &body->mbo_fid1),
2551                                  "Trying to change FID "DFID
2552                                  " to the "DFID", inode "DFID"(%p)\n",
2553                                  PFID(&lli->lli_fid), PFID(&body->mbo_fid1),
2554                                  PFID(ll_inode2fid(inode)), inode);
2555                 } else {
2556                         lli->lli_fid = body->mbo_fid1;
2557                 }
2558         }
2559
2560         LASSERT(fid_seq(&lli->lli_fid) != 0);
2561
2562         lli->lli_attr_valid = body->mbo_valid;
2563         if (body->mbo_valid & OBD_MD_FLSIZE) {
2564                 i_size_write(inode, body->mbo_size);
2565
2566                 CDEBUG(D_VFSTRACE, "inode="DFID", updating i_size %llu\n",
2567                        PFID(ll_inode2fid(inode)),
2568                        (unsigned long long)body->mbo_size);
2569
2570                 if (body->mbo_valid & OBD_MD_FLBLOCKS)
2571                         inode->i_blocks = body->mbo_blocks;
2572         } else {
2573                 if (body->mbo_valid & OBD_MD_FLLAZYSIZE)
2574                         lli->lli_lazysize = body->mbo_size;
2575                 if (body->mbo_valid & OBD_MD_FLLAZYBLOCKS)
2576                         lli->lli_lazyblocks = body->mbo_blocks;
2577         }
2578
2579         if (body->mbo_valid & OBD_MD_TSTATE) {
2580                 /* Set LLIF_FILE_RESTORING if a restore is ongoing and
2581                  * clear it when done, to ensure we start glimpsing
2582                  * updated attrs again
2583                  */
2584                 if (body->mbo_t_state & MS_RESTORE)
2585                         set_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
2586                 else
2587                         clear_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
2588         }
2589
2590         return 0;
2591 }
2592
2593 /* update directory depth to ROOT, called after LOOKUP lock is fetched. */
2594 void ll_update_dir_depth(struct inode *dir, struct inode *inode)
2595 {
2596         struct ll_inode_info *lli;
2597
2598         if (!S_ISDIR(inode->i_mode))
2599                 return;
2600
2601         if (inode == dir)
2602                 return;
2603
2604         lli = ll_i2info(inode);
2605         lli->lli_depth = ll_i2info(dir)->lli_depth + 1;
2606         CDEBUG(D_INODE, DFID" depth %hu\n", PFID(&lli->lli_fid), lli->lli_depth);
2607 }
2608
2609 void ll_truncate_inode_pages_final(struct inode *inode)
2610 {
2611         struct address_space *mapping = &inode->i_data;
2612         unsigned long nrpages;
2613         unsigned long flags;
2614
2615         truncate_inode_pages_final(mapping);
2616
2617         /* Workaround for LU-118: Note nrpages may not be totally updated when
2618          * truncate_inode_pages() returns, as there can be a page in the process
2619          * of deletion (inside __delete_from_page_cache()) in the specified
2620          * range. Thus mapping->nrpages can be non-zero when this function
2621          * returns even after truncation of the whole mapping.  Only do this if
2622          * nrpages isn't already zero.
2623          */
2624         nrpages = mapping->nrpages;
2625         if (nrpages) {
2626                 ll_xa_lock_irqsave(&mapping->i_pages, flags);
2627                 nrpages = mapping->nrpages;
2628                 ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
2629         } /* Workaround end */
2630
2631         LASSERTF(nrpages == 0, "%s: inode="DFID"(%p) nrpages=%lu, "
2632                  "see https://jira.whamcloud.com/browse/LU-118\n",
2633                  ll_i2sbi(inode)->ll_fsname,
2634                  PFID(ll_inode2fid(inode)), inode, nrpages);
2635 }
2636
2637 int ll_read_inode2(struct inode *inode, void *opaque)
2638 {
2639         struct lustre_md *md = opaque;
2640         struct ll_inode_info *lli = ll_i2info(inode);
2641         int     rc;
2642         ENTRY;
2643
2644         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
2645                PFID(&lli->lli_fid), inode);
2646
2647         /* Core attributes from the MDS first.  This is a new inode, and
2648          * the VFS doesn't zero times in the core inode so we have to do
2649          * it ourselves.  They will be overwritten by either MDS or OST
2650          * attributes - we just need to make sure they aren't newer.
2651          */
2652         inode->i_mtime.tv_sec = 0;
2653         inode->i_atime.tv_sec = 0;
2654         inode->i_ctime.tv_sec = 0;
2655         inode->i_rdev = 0;
2656         rc = ll_update_inode(inode, md);
2657         if (rc != 0)
2658                 RETURN(rc);
2659
2660         /* OIDEBUG(inode); */
2661
2662 #ifdef HAVE_BACKING_DEV_INFO
2663         /* initializing backing dev info. */
2664         inode->i_mapping->backing_dev_info = &s2lsi(inode->i_sb)->lsi_bdi;
2665 #endif
2666         if (S_ISREG(inode->i_mode)) {
2667                 struct ll_sb_info *sbi = ll_i2sbi(inode);
2668                 inode->i_op = &ll_file_inode_operations;
2669                 inode->i_fop = sbi->ll_fop;
2670                 inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
2671                 EXIT;
2672         } else if (S_ISDIR(inode->i_mode)) {
2673                 inode->i_op = &ll_dir_inode_operations;
2674                 inode->i_fop = &ll_dir_operations;
2675                 EXIT;
2676         } else if (S_ISLNK(inode->i_mode)) {
2677                 inode->i_op = &ll_fast_symlink_inode_operations;
2678                 EXIT;
2679         } else {
2680                 inode->i_op = &ll_special_inode_operations;
2681
2682                 init_special_inode(inode, inode->i_mode,
2683                                    inode->i_rdev);
2684
2685                 EXIT;
2686         }
2687
2688         return 0;
2689 }
2690
2691 void ll_delete_inode(struct inode *inode)
2692 {
2693         struct ll_inode_info *lli = ll_i2info(inode);
2694         ENTRY;
2695
2696         if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL) {
2697                 /* This is the last chance to write out dirty pages;
2698                  * otherwise we may lose data during umount.
2699                  *
2700                  * If i_nlink is 0 then just discard the data. This is safe
2701                  * because the local inode only gets i_nlink 0 from the server
2702                  * on the last unlink, so the file cannot be open elsewhere.
2703                  */
2704                 cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, inode->i_nlink ?
2705                                    CL_FSYNC_LOCAL : CL_FSYNC_DISCARD, 1);
2706         }
2707
2708         ll_truncate_inode_pages_final(inode);
2709         ll_clear_inode(inode);
2710         clear_inode(inode);
2711
2712         EXIT;
2713 }
2714
2715 int ll_iocontrol(struct inode *inode, struct file *file,
2716                  unsigned int cmd, unsigned long arg)
2717 {
2718         struct ll_sb_info *sbi = ll_i2sbi(inode);
2719         struct ptlrpc_request *req = NULL;
2720         int rc, flags = 0;
2721         ENTRY;
2722
2723         switch (cmd) {
2724         case FS_IOC_GETFLAGS: {
2725                 struct mdt_body *body;
2726                 struct md_op_data *op_data;
2727
2728                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
2729                                              0, 0, LUSTRE_OPC_ANY,
2730                                              NULL);
2731                 if (IS_ERR(op_data))
2732                         RETURN(PTR_ERR(op_data));
2733
2734                 op_data->op_valid = OBD_MD_FLFLAGS;
2735                 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
2736                 ll_finish_md_op_data(op_data);
2737                 if (rc) {
2738                         CERROR("%s: failure inode "DFID": rc = %d\n",
2739                                sbi->ll_md_exp->exp_obd->obd_name,
2740                                PFID(ll_inode2fid(inode)), rc);
2741                         RETURN(-abs(rc));
2742                 }
2743
2744                 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
2745
2746                 flags = body->mbo_flags;
2747
2748                 ptlrpc_req_finished(req);
2749
2750                 RETURN(put_user(flags, (int __user *)arg));
2751         }
2752         case FS_IOC_SETFLAGS: {
2753                 struct iattr *attr;
2754                 struct md_op_data *op_data;
2755                 struct cl_object *obj;
2756                 struct fsxattr fa = { 0 };
2757
2758                 if (get_user(flags, (int __user *)arg))
2759                         RETURN(-EFAULT);
2760
2761                 fa.fsx_projid = ll_i2info(inode)->lli_projid;
2762                 if (flags & LUSTRE_PROJINHERIT_FL)
2763                         fa.fsx_xflags = FS_XFLAG_PROJINHERIT;
2764
2765                 rc = ll_ioctl_check_project(inode, fa.fsx_xflags,
2766                                             fa.fsx_projid);
2767                 if (rc)
2768                         RETURN(rc);
2769
2770                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
2771                                              LUSTRE_OPC_ANY, NULL);
2772                 if (IS_ERR(op_data))
2773                         RETURN(PTR_ERR(op_data));
2774
2775                 op_data->op_attr_flags = flags;
2776                 op_data->op_xvalid |= OP_XVALID_FLAGS;
2777                 rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &req);
2778                 ll_finish_md_op_data(op_data);
2779                 ptlrpc_req_finished(req);
2780                 if (rc)
2781                         RETURN(rc);
2782
2783                 ll_update_inode_flags(inode, flags);
2784
2785                 obj = ll_i2info(inode)->lli_clob;
2786                 if (obj == NULL)
2787                         RETURN(0);
2788
2789                 OBD_ALLOC_PTR(attr);
2790                 if (attr == NULL)
2791                         RETURN(-ENOMEM);
2792
2793                 rc = cl_setattr_ost(obj, attr, OP_XVALID_FLAGS, flags);
2794
2795                 OBD_FREE_PTR(attr);
2796                 RETURN(rc);
2797         }
2798         default:
2799                 RETURN(-ENOSYS);
2800         }
2801
2802         RETURN(0);
2803 }
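/*
 * Illustrative userspace sketch (not part of Lustre, hedged): the two ioctls
 * handled above are the standard FS_IOC_GETFLAGS/FS_IOC_SETFLAGS from
 * <linux/fs.h>, so a chattr(1)-style tool drives this path roughly as:
 *
 *      int flags;
 *      int fd = open("/mnt/lustre/file", O_RDONLY);    // hypothetical path
 *
 *      if (fd >= 0 && ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
 *              flags |= FS_IMMUTABLE_FL;               // example flag change
 *              ioctl(fd, FS_IOC_SETFLAGS, &flags);
 *      }
 *      close(fd);
 */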
2804
2805 int ll_flush_ctx(struct inode *inode)
2806 {
2807         struct ll_sb_info  *sbi = ll_i2sbi(inode);
2808
2809         CDEBUG(D_SEC, "flush context for user %d\n",
2810                from_kuid(&init_user_ns, current_uid()));
2811
2812         obd_set_info_async(NULL, sbi->ll_md_exp,
2813                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2814                            0, NULL, NULL);
2815         obd_set_info_async(NULL, sbi->ll_dt_exp,
2816                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2817                            0, NULL, NULL);
2818         return 0;
2819 }
2820
2821 /* umount -f on the client means force down, don't save state */
2822 void ll_umount_begin(struct super_block *sb)
2823 {
2824         struct ll_sb_info *sbi = ll_s2sbi(sb);
2825         struct obd_device *obd;
2826         struct obd_ioctl_data *ioc_data;
2827         int cnt;
2828         ENTRY;
2829
2830         CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
2831                sb->s_count, atomic_read(&sb->s_active));
2832
2833         obd = class_exp2obd(sbi->ll_md_exp);
2834         if (obd == NULL) {
2835                 CERROR("Invalid MDC connection handle %#llx\n",
2836                        sbi->ll_md_exp->exp_handle.h_cookie);
2837                 EXIT;
2838                 return;
2839         }
2840         obd->obd_force = 1;
2841
2842         obd = class_exp2obd(sbi->ll_dt_exp);
2843         if (obd == NULL) {
2844                 CERROR("Invalid LOV connection handle %#llx\n",
2845                        sbi->ll_dt_exp->exp_handle.h_cookie);
2846                 EXIT;
2847                 return;
2848         }
2849         obd->obd_force = 1;
2850
2851         OBD_ALLOC_PTR(ioc_data);
2852         if (ioc_data) {
2853                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
2854                               sizeof *ioc_data, ioc_data, NULL);
2855
2856                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
2857                               sizeof *ioc_data, ioc_data, NULL);
2858
2859                 OBD_FREE_PTR(ioc_data);
2860         }
2861
2862         /* Ideally we would wait until there are no requests outstanding
2863          * and then continue.  For now, we just periodically check
2864          * may_umount() and hope to finish within 10 seconds.
2865          */
2866         cnt = 10;
2867         while (cnt > 0 &&
2868                !may_umount(sbi->ll_mnt.mnt)) {
2869                 ssleep(1);
2870                 cnt -= 1;
2871         }
2872
2873         EXIT;
2874 }
2875
2876 int ll_remount_fs(struct super_block *sb, int *flags, char *data)
2877 {
2878         struct ll_sb_info *sbi = ll_s2sbi(sb);
2879         char *profilenm = get_profile_name(sb);
2880         int err;
2881         __u32 read_only;
2882
2883         if ((*flags & MS_RDONLY) != (sb->s_flags & SB_RDONLY)) {
2884                 read_only = *flags & MS_RDONLY;
2885                 err = obd_set_info_async(NULL, sbi->ll_md_exp,
2886                                          sizeof(KEY_READ_ONLY),
2887                                          KEY_READ_ONLY, sizeof(read_only),
2888                                          &read_only, NULL);
2889                 if (err) {
2890                         LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
2891                                       profilenm, read_only ?
2892                                       "read-only" : "read-write", err);
2893                         return err;
2894                 }
2895
2896                 if (read_only)
2897                         sb->s_flags |= SB_RDONLY;
2898                 else
2899                         sb->s_flags &= ~SB_RDONLY;
2900
2901                 if (test_bit(LL_SBI_VERBOSE, sbi->ll_flags))
2902                         LCONSOLE_WARN("Remounted %s %s\n", profilenm,
2903                                       read_only ?  "read-only" : "read-write");
2904         }
2905         return 0;
2906 }
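/*
 * Usage note (descriptive, not authoritative): "mount -o remount,ro <mnt>"
 * ends up here with MS_RDONLY set in *flags; the new read-only state is
 * pushed to the MDC via KEY_READ_ONLY before sb->s_flags is updated, so the
 * MDT is informed of the change before the local superblock flips.
 */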
2907
2908 /**
2909  * Clean up an open handle that is cached on the MDT side.
2910  *
2911  * In the open case, the client-side open handling thread may hit an
2912  * error after the MDT has granted the open. In that case, the client
2913  * should send a close RPC to the MDT as cleanup; otherwise, the open
2914  * handle is leaked on the MDT until the client unmounts or is evicted.
2915  *
2916  * Furthermore, if someone unlinks the file, the open handle holds a
2917  * reference on the file/object and will block subsequent threads that
2918  * want to locate that object via FID.
2919  *
2920  * \param[in] sb    super block for this file system
2921  * \param[in] pill  request capsule holding the original open reply
2922  */
2923 void ll_open_cleanup(struct super_block *sb, struct req_capsule *pill)
2924 {
2925         struct mdt_body                 *body;
2926         struct md_op_data               *op_data;
2927         struct ptlrpc_request           *close_req = NULL;
2928         struct obd_export               *exp       = ll_s2sbi(sb)->ll_md_exp;
2929         ENTRY;
2930
2931         body = req_capsule_server_get(pill, &RMF_MDT_BODY);
2932         OBD_ALLOC_PTR(op_data);
2933         if (op_data == NULL) {
2934                 CWARN("%s: cannot allocate op_data to release open handle for "
2935                       DFID"\n", ll_s2sbi(sb)->ll_fsname, PFID(&body->mbo_fid1));
2936
2937                 RETURN_EXIT;
2938         }
2939
2940         op_data->op_fid1 = body->mbo_fid1;
2941         op_data->op_open_handle = body->mbo_open_handle;
2942         op_data->op_mod_time = ktime_get_real_seconds();
2943         md_close(exp, op_data, NULL, &close_req);
2944         ptlrpc_req_finished(close_req);
2945         ll_finish_md_op_data(op_data);
2946
2947         EXIT;
2948 }
2949
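/*
 * Overview comment (added for clarity): ll_prep_inode() builds or refreshes
 * a client inode from an MDS reply capsule. Roughly:
 *   1. unpack the reply into struct lustre_md via md_get_lustre_md()
 *   2. update an existing *inode, or instantiate a new one with ll_iget()
 *   3. apply a piggybacked layout lock if the intent carried one
 *   4. update the cached default LMV for directories when the reply shows
 *      it was removed or changed
 * On error with an IT_OPEN intent, ll_open_cleanup() is called so the open
 * handle cached on the MDT is not leaked.
 */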
2950 int ll_prep_inode(struct inode **inode, struct req_capsule *pill,
2951                   struct super_block *sb, struct lookup_intent *it)
2952 {
2953         struct ll_sb_info *sbi = NULL;
2954         struct lustre_md md = { NULL };
2955         bool default_lmv_deleted = false;
2956         int rc;
2957
2958         ENTRY;
2959
2960         LASSERT(*inode || sb);
2961         sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
2962         rc = md_get_lustre_md(sbi->ll_md_exp, pill, sbi->ll_dt_exp,
2963                               sbi->ll_md_exp, &md);
2964         if (rc != 0)
2965                 GOTO(out, rc);
2966
2967         /*
2968          * Clear default_lmv only if the intent_getattr reply doesn't contain
2969          * it, but do that after iget. Check this early because
2970          * ll_update_lsm_md() may change md.
2971          */
2972         if (it && (it->it_op & (IT_LOOKUP | IT_GETATTR)) &&
2973             S_ISDIR(md.body->mbo_mode) && !md.default_lmv)
2974                 default_lmv_deleted = true;
2975
2976         if (*inode) {
2977                 rc = ll_update_inode(*inode, &md);
2978                 if (rc != 0)
2979                         GOTO(out, rc);
2980         } else {
2981                 bool api32 = test_bit(LL_SBI_32BIT_API, sbi->ll_flags);
2982                 struct lu_fid *fid1 = &md.body->mbo_fid1;
2983
2984                 LASSERT(sb != NULL);
2985
2986                 /*
2987                  * At this point the server returns the same FID that the
2988                  * client generated for the create, so using ->fid1 is okay.
2989                  */
2990                 if (!fid_is_sane(fid1)) {
2991                         CERROR("%s: Fid is insane "DFID"\n",
2992                                 sbi->ll_fsname, PFID(fid1));
2993                         GOTO(out, rc = -EINVAL);
2994                 }
2995
2996                 *inode = ll_iget(sb, cl_fid_build_ino(fid1, api32), &md);
2997                 if (IS_ERR(*inode)) {
2998                         lmd_clear_acl(&md);
2999                         rc = IS_ERR(*inode) ? PTR_ERR(*inode) : -ENOMEM;
3000                         *inode = NULL;
3001                         CERROR("new_inode -fatal: rc %d\n", rc);
3002                         GOTO(out, rc);
3003                 }
3004         }
3005
3006         /* Handle a piggybacked layout lock.
3007          * A layout lock can be piggybacked on getattr and open requests.
3008          * The lsm can be applied to the inode only if it comes with a layout
3009          * lock, otherwise the correct layout may be overwritten, for example:
3010          * 1. proc1: MDT returns an lsm but does not grant the layout lock
3011          * 2. the layout is changed by another client
3012          * 3. proc2: refreshes the layout and the layout lock is granted
3013          * 4. proc1: applies the now-stale layout */
3014         if (it != NULL && it->it_lock_mode != 0) {
3015                 struct lustre_handle lockh;
3016                 struct ldlm_lock *lock;
3017
3018                 lockh.cookie = it->it_lock_handle;
3019                 lock = ldlm_handle2lock(&lockh);
3020                 LASSERT(lock != NULL);
3021                 if (ldlm_has_layout(lock)) {
3022                         struct cl_object_conf conf;
3023
3024                         memset(&conf, 0, sizeof(conf));
3025                         conf.coc_opc = OBJECT_CONF_SET;
3026                         conf.coc_inode = *inode;
3027                         conf.coc_lock = lock;
3028                         conf.u.coc_layout = md.layout;
3029                         (void)ll_layout_conf(*inode, &conf);
3030                 }
3031                 LDLM_LOCK_PUT(lock);
3032         }
3033
3034         if (default_lmv_deleted)
3035                 ll_update_default_lsm_md(*inode, &md);
3036
3037         /* we may want to apply some policy for foreign file/dir */
3038         if (ll_sbi_has_foreign_symlink(sbi)) {
3039                 rc = ll_manage_foreign(*inode, &md);
3040                 if (rc < 0)
3041                         GOTO(out, rc);
3042         }
3043
3044         GOTO(out, rc = 0);
3045
3046 out:
3047         /* cleanup will be done if necessary */
3048         md_free_lustre_md(sbi->ll_md_exp, &md);
3049
3050         if (rc != 0 && it != NULL && it->it_op & IT_OPEN) {
3051                 ll_intent_drop_lock(it);
3052                 ll_open_cleanup(sb != NULL ? sb : (*inode)->i_sb, pill);
3053         }
3054
3055         return rc;
3056 }
3057
3058 int ll_obd_statfs(struct inode *inode, void __user *arg)
3059 {
3060         struct ll_sb_info *sbi = NULL;
3061         struct obd_export *exp;
3062         struct obd_ioctl_data *data = NULL;
3063         __u32 type;
3064         int len = 0, rc;
3065
3066         if (inode)
3067                 sbi = ll_i2sbi(inode);
3068         if (!sbi)
3069                 GOTO(out_statfs, rc = -EINVAL);
3070
3071         rc = obd_ioctl_getdata(&data, &len, arg);
3072         if (rc)
3073                 GOTO(out_statfs, rc);
3074
3075         if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
3076             !data->ioc_pbuf1 || !data->ioc_pbuf2)
3077                 GOTO(out_statfs, rc = -EINVAL);
3078
3079         if (data->ioc_inllen1 != sizeof(__u32) ||
3080             data->ioc_inllen2 != sizeof(__u32) ||
3081             data->ioc_plen1 != sizeof(struct obd_statfs) ||
3082             data->ioc_plen2 != sizeof(struct obd_uuid))
3083                 GOTO(out_statfs, rc = -EINVAL);
3084
3085         memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
3086         if (type & LL_STATFS_LMV)
3087                 exp = sbi->ll_md_exp;
3088         else if (type & LL_STATFS_LOV)
3089                 exp = sbi->ll_dt_exp;
3090         else
3091                 GOTO(out_statfs, rc = -ENODEV);
3092
3093         rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, data, NULL);
3094         if (rc)
3095                 GOTO(out_statfs, rc);
3096 out_statfs:
3097         OBD_FREE_LARGE(data, len);
3098         return rc;
3099 }
3100
3101 /*
3102  * This is normally called from ll_finish_md_op_data(), but sometimes it
3103  * needs to be called early to avoid deadlock.
3104  */
3105 void ll_unlock_md_op_lsm(struct md_op_data *op_data)
3106 {
3107         if (op_data->op_mea2_sem) {
3108                 up_read_non_owner(op_data->op_mea2_sem);
3109                 op_data->op_mea2_sem = NULL;
3110         }
3111
3112         if (op_data->op_mea1_sem) {
3113                 up_read_non_owner(op_data->op_mea1_sem);
3114                 op_data->op_mea1_sem = NULL;
3115         }
3116 }
3117
3118 /* This function prepares the md_op_data hint for passing down to the MD stack. */
3119 struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
3120                                       struct inode *i1, struct inode *i2,
3121                                       const char *name, size_t namelen,
3122                                       __u32 mode, enum md_op_code opc,
3123                                       void *data)
3124 {
3125         struct llcrypt_name fname = { 0 };
3126         int rc;
3127
3128         LASSERT(i1 != NULL);
3129
3130         if (name == NULL) {
3131                 /* Do not reuse namelen for something else. */
3132                 if (namelen != 0)
3133                         return ERR_PTR(-EINVAL);
3134         } else {
3135                 if (namelen > ll_i2sbi(i1)->ll_namelen)
3136                         return ERR_PTR(-ENAMETOOLONG);
3137
3138                 /* "/" is not a valid name, but it is allowed */
3139                 if (!lu_name_is_valid_2(name, namelen) &&
3140                     strncmp("/", name, namelen) != 0)
3141                         return ERR_PTR(-EINVAL);
3142         }
3143
3144         if (op_data == NULL)
3145                 OBD_ALLOC_PTR(op_data);
3146
3147         if (op_data == NULL)
3148                 return ERR_PTR(-ENOMEM);
3149
3150         ll_i2gids(op_data->op_suppgids, i1, i2);
3151         op_data->op_fid1 = *ll_inode2fid(i1);
3152
3153         if (S_ISDIR(i1->i_mode)) {
3154                 down_read_non_owner(&ll_i2info(i1)->lli_lsm_sem);
3155                 op_data->op_mea1_sem = &ll_i2info(i1)->lli_lsm_sem;
3156                 op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
3157                 op_data->op_default_mea1 = ll_i2info(i1)->lli_default_lsm_md;
3158         }
3159
3160         if (i2) {
3161                 op_data->op_fid2 = *ll_inode2fid(i2);
3162                 if (S_ISDIR(i2->i_mode)) {
3163                         if (i2 != i1) {
3164                                 /* i2 is typically a child of i1, and MUST be
3165                                  * further from the root to avoid deadlocks.
3166                                  */
3167                                 down_read_non_owner(&ll_i2info(i2)->lli_lsm_sem);
3168                                 op_data->op_mea2_sem =
3169                                                 &ll_i2info(i2)->lli_lsm_sem;
3170                         }
3171                         op_data->op_mea2 = ll_i2info(i2)->lli_lsm_md;
3172                 }
3173         } else {
3174                 fid_zero(&op_data->op_fid2);
3175         }
3176
3177         if (test_bit(LL_SBI_64BIT_HASH, ll_i2sbi(i1)->ll_flags))
3178                 op_data->op_cli_flags |= CLI_HASH64;
3179
3180         if (ll_need_32bit_api(ll_i2sbi(i1)))
3181                 op_data->op_cli_flags |= CLI_API32;
3182
3183         if (opc == LUSTRE_OPC_LOOKUP || opc == LUSTRE_OPC_CREATE) {
3184                 /* In case of lookup, ll_setup_filename() has already been
3185                  * called in ll_lookup_it(), so just take the provided name.
3186                  */
3187                 fname.disk_name.name = (unsigned char *)name;
3188                 fname.disk_name.len = namelen;
3189         } else if (name && namelen) {
3190                 struct qstr dname = QSTR_INIT(name, namelen);
3191                 struct inode *dir;
3192                 struct lu_fid *pfid = NULL;
3193                 struct lu_fid fid;
3194                 int lookup;
3195
3196                 if (!S_ISDIR(i1->i_mode) && i2 && S_ISDIR(i2->i_mode)) {
3197                         /* special case when called from ll_link() */
3198                         dir = i2;
3199                         lookup = 0;
3200                 } else {
3201                         dir = i1;
3202                         lookup = (int)(opc == LUSTRE_OPC_ANY);
3203                 }
3204                 if (opc == LUSTRE_OPC_ANY && lookup)
3205                         pfid = &fid;
3206                 rc = ll_setup_filename(dir, &dname, lookup, &fname, pfid);
3207                 if (rc) {
3208                         ll_finish_md_op_data(op_data);
3209                         return ERR_PTR(rc);
3210                 }
3211                 if (pfid && !fid_is_zero(pfid)) {
3212                         if (i2 == NULL)
3213                                 op_data->op_fid2 = fid;
3214                         op_data->op_bias = MDS_FID_OP;
3215                 }
3216                 if (fname.disk_name.name &&
3217                     fname.disk_name.name != (unsigned char *)name)
3218                         /* op_data->op_name must be freed after use */
3219                         op_data->op_flags |= MF_OPNAME_KMALLOCED;
3220         }
3221
3222         /* LUSTRE_OPC_LOOKUP, LUSTRE_OPC_OPEN and LUSTRE_OPC_MIGR are
3223          * all recorded as LUSTRE_OPC_ANY in op_code
3224          */
3225         if (opc == LUSTRE_OPC_LOOKUP || opc == LUSTRE_OPC_OPEN ||
3226             opc == LUSTRE_OPC_MIGR)
3227                 op_data->op_code = LUSTRE_OPC_ANY;
3228         else
3229                 op_data->op_code = opc;
3230         op_data->op_name = fname.disk_name.name;
3231         op_data->op_namelen = fname.disk_name.len;
3232         op_data->op_mode = mode;
3233         op_data->op_mod_time = ktime_get_real_seconds();
3234         op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
3235         op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
3236         op_data->op_cap = current_cap();
3237         op_data->op_mds = 0;
3238         if ((opc == LUSTRE_OPC_CREATE) && (name != NULL) &&
3239              filename_is_volatile(name, namelen, &op_data->op_mds)) {
3240                 op_data->op_bias |= MDS_CREATE_VOLATILE;
3241         }
3242         op_data->op_data = data;
3243
3244         return op_data;
3245 }
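/*
 * Pairing sketch (illustrative, not taken from a specific caller): a
 * successful ll_prep_md_op_data() must always be matched by
 * ll_finish_md_op_data() so the lli_lsm_sem read locks taken above and any
 * kmalloc'ed disk name (MF_OPNAME_KMALLOCED) are released, e.g.:
 *
 *      op_data = ll_prep_md_op_data(NULL, dir, NULL, name, namelen, 0,
 *                                   LUSTRE_OPC_ANY, NULL);
 *      if (IS_ERR(op_data))
 *              return PTR_ERR(op_data);
 *      rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);  // example MD op
 *      ll_finish_md_op_data(op_data);
 */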
3246
3247 void ll_finish_md_op_data(struct md_op_data *op_data)
3248 {
3249         ll_unlock_md_op_lsm(op_data);
3250         ll_security_release_secctx(op_data->op_file_secctx,
3251                                    op_data->op_file_secctx_size);
3252         if (op_data->op_flags & MF_OPNAME_KMALLOCED)
3253                 /* allocated via ll_setup_filename called
3254                  * from ll_prep_md_op_data
3255                  */
3256                 kfree(op_data->op_name);
3257         llcrypt_free_ctx(op_data->op_file_encctx, op_data->op_file_encctx_size);
3258         OBD_FREE_PTR(op_data);
3259 }
3260
3261 int ll_show_options(struct seq_file *seq, struct dentry *dentry)
3262 {
3263         struct ll_sb_info *sbi;
3264         int i;
3265
3266         LASSERT(seq && dentry);
3267         sbi = ll_s2sbi(dentry->d_sb);
3268
3269         if (test_bit(LL_SBI_NOLCK, sbi->ll_flags))
3270                 seq_puts(seq, "nolock");
3271
3272         for (i = 1; ll_sbi_flags_name[i].token != LL_SBI_NUM_MOUNT_OPT; i++) {
3273                 /* match_table in some cases has patterns for both enabled and
3274                  * disabled cases. Ignore the 'no'xxx version if the bit is set.
3275                  */
3276                 if (test_bit(ll_sbi_flags_name[i].token, sbi->ll_flags) &&
3277                     strncmp(ll_sbi_flags_name[i].pattern, "no", 2)) {
3278                         if (ll_sbi_flags_name[i].token ==
3279                             LL_SBI_FOREIGN_SYMLINK) {
3280                                 seq_show_option(seq, "foreign_symlink",
3281                                                 sbi->ll_foreign_symlink_prefix);
3282                         } else {
3283                                 seq_printf(seq, ",%s",
3284                                            ll_sbi_flags_name[i].pattern);
3285                         }
3286
3287                         /* You can have either localflock or flock but not
3288                          * both. If localflock is set don't print flock or
3289                          * noflock.
3290                          */
3291                         if (ll_sbi_flags_name[i].token == LL_SBI_LOCALFLOCK)
3292                                 i += 2;
3293                 } else if (!test_bit(ll_sbi_flags_name[i].token, sbi->ll_flags) &&
3294                            !strncmp(ll_sbi_flags_name[i].pattern, "no", 2)) {
3295                         seq_printf(seq, ",%s",
3296                                    ll_sbi_flags_name[i].pattern);
3297                 }
3298         }
3299
3300         RETURN(0);
3301 }
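/*
 * Usage note (descriptive): the string built above is what appears in the
 * options column of /proc/mounts and mount(8) output for a Lustre mount,
 * e.g. something like ",flock,user_xattr" depending on which ll_flags bits
 * are set for this superblock.
 */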
3302
3303 /**
3304  * Get the obd name for the given cmd and copy it out to user space.
3305  */
3306 int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
3307 {
3308         struct ll_sb_info *sbi = ll_i2sbi(inode);
3309         struct obd_device *obd;
3310         ENTRY;
3311
3312         if (cmd == OBD_IOC_GETNAME_OLD || cmd == OBD_IOC_GETDTNAME)
3313                 obd = class_exp2obd(sbi->ll_dt_exp);
3314         else if (cmd == OBD_IOC_GETMDNAME)
3315                 obd = class_exp2obd(sbi->ll_md_exp);
3316         else
3317                 RETURN(-EINVAL);
3318
3319         if (!obd)
3320                 RETURN(-ENOENT);
3321
3322         if (copy_to_user((void __user *)arg, obd->obd_name,
3323                          strlen(obd->obd_name) + 1))
3324                 RETURN(-EFAULT);
3325
3326         RETURN(0);
3327 }
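/*
 * Hedged userspace sketch (assumes the ioctl numbers and MAX_OBD_NAME from
 * the lustre_user.h UAPI headers): "lfs getname"-style tools call this with
 * a plain character buffer:
 *
 *      char name[MAX_OBD_NAME];
 *
 *      if (ioctl(fd, OBD_IOC_GETDTNAME, name) == 0)
 *              printf("%s\n", name);   // client data (LOV/LMV) device name
 */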
3328
3329 static char *ll_d_path(struct dentry *dentry, char *buf, int bufsize)
3330 {
3331         char *path = NULL;
3332
3333         struct path p;
3334
3335         p.dentry = dentry;
3336         p.mnt = current->fs->root.mnt;
3337         path_get(&p);
3338         path = d_path(&p, buf, bufsize);
3339         path_put(&p);
3340         return path;
3341 }
3342
3343 void ll_dirty_page_discard_warn(struct page *page, int ioret)
3344 {
3345         char *buf, *path = NULL;
3346         struct dentry *dentry = NULL;
3347         struct inode *inode = page->mapping->host;
3348
3349         /* This can be called inside a spin lock, so use GFP_ATOMIC. */
3350         buf = (char *)__get_free_page(GFP_ATOMIC);
3351         if (buf != NULL) {
3352                 dentry = d_find_alias(page->mapping->host);
3353                 if (dentry != NULL)
3354                         path = ll_d_path(dentry, buf, PAGE_SIZE);
3355         }
3356
3357         /* The below message is checked in recovery-small.sh test_24b */
3358         CDEBUG(D_WARNING,
3359                "%s: dirty page discard: %s/fid: "DFID"/%s may get corrupted "
3360                "(rc %d)\n", ll_i2sbi(inode)->ll_fsname,
3361                s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
3362                PFID(ll_inode2fid(inode)),
3363                (path && !IS_ERR(path)) ? path : "", ioret);
3364
3365         if (dentry != NULL)
3366                 dput(dentry);
3367
3368         if (buf != NULL)
3369                 free_page((unsigned long)buf);
3370 }
3371
3372 ssize_t ll_copy_user_md(const struct lov_user_md __user *md,
3373                         struct lov_user_md **kbuf)
3374 {
3375         struct lov_user_md      lum;
3376         ssize_t                 lum_size;
3377         ENTRY;
3378
3379         if (copy_from_user(&lum, md, sizeof(lum)))
3380                 RETURN(-EFAULT);
3381
3382         lum_size = ll_lov_user_md_size(&lum);
3383         if (lum_size < 0)
3384                 RETURN(lum_size);
3385
3386         OBD_ALLOC_LARGE(*kbuf, lum_size);
3387         if (*kbuf == NULL)
3388                 RETURN(-ENOMEM);
3389
3390         if (copy_from_user(*kbuf, md, lum_size) != 0) {
3391                 OBD_FREE_LARGE(*kbuf, lum_size);
3392                 RETURN(-EFAULT);
3393         }
3394
3395         RETURN(lum_size);
3396 }
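/*
 * Caller-side sketch (illustrative): the returned size is also the size the
 * caller must pass back when freeing the buffer allocated above:
 *
 *      lum_size = ll_copy_user_md(arg, &klum);
 *      if (lum_size < 0)
 *              return lum_size;
 *      // ... use klum (kernel copy of the user's lov_user_md) ...
 *      OBD_FREE_LARGE(klum, lum_size);
 */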
3397
3398 /*
3399  * Compute the llite root squash state after a change to the root squash
3400  * configuration or the addition/removal of an LNet NID.
3401  */
3402 void ll_compute_rootsquash_state(struct ll_sb_info *sbi)
3403 {
3404         struct root_squash_info *squash = &sbi->ll_squash;
3405         int i;
3406         bool matched;
3407         struct lnet_process_id id;
3408
3409         /* Update norootsquash flag */
3410         spin_lock(&squash->rsi_lock);
3411         if (list_empty(&squash->rsi_nosquash_nids))
3412                 clear_bit(LL_SBI_NOROOTSQUASH, sbi->ll_flags);
3413         else {
3414                 /* Do not apply root squash if at least one of our NIDs is
3415                  * in the nosquash_nids list */
3416                 matched = false;
3417                 i = 0;
3418                 while (LNetGetId(i++, &id) != -ENOENT) {
3419                         if (id.nid == LNET_NID_LO_0)
3420                                 continue;
3421                         if (cfs_match_nid(id.nid, &squash->rsi_nosquash_nids)) {
3422                                 matched = true;
3423                                 break;
3424                         }
3425                 }
3426                 if (matched)
3427                         set_bit(LL_SBI_NOROOTSQUASH, sbi->ll_flags);
3428                 else
3429                         clear_bit(LL_SBI_NOROOTSQUASH, sbi->ll_flags);
3430         }
3431         spin_unlock(&squash->rsi_lock);
3432 }
3433
3434 /**
3435  * Parse linkea content to extract information about a given hardlink
3436  *
3437  * \param[in]   ldata      - Initialized linkea data
3438  * \param[in]   linkno     - Link identifier
3439  * \param[out]  parent_fid - The entry's parent FID
3440  * \param[out]  ln         - Entry name destination buffer
3441  *
3442  * \retval 0 on success
3443  * \retval Appropriate negative error code on failure
3444  */
3445 static int ll_linkea_decode(struct linkea_data *ldata, unsigned int linkno,
3446                             struct lu_fid *parent_fid, struct lu_name *ln)
3447 {
3448         unsigned int    idx;
3449         int             rc;
3450         ENTRY;
3451
3452         rc = linkea_init_with_rec(ldata);
3453         if (rc < 0)
3454                 RETURN(rc);
3455
3456         if (linkno >= ldata->ld_leh->leh_reccount)
3457                 /* beyond last link */
3458                 RETURN(-ENODATA);
3459
3460         linkea_first_entry(ldata);
3461         for (idx = 0; ldata->ld_lee != NULL; idx++) {
3462                 linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen, ln,
3463                                     parent_fid);
3464                 if (idx == linkno)
3465                         break;
3466
3467                 linkea_next_entry(ldata);
3468         }
3469
3470         if (idx < linkno)
3471                 RETURN(-ENODATA);
3472
3473         RETURN(0);
3474 }
3475
3476 /**
3477  * Get the parent FID and name of an identified link. The operation is
3478  * performed for a given link number, letting the caller iterate over
3479  * linkno to list one or all links of an entry.
3480  *
3481  * \param[in]     file - File descriptor against which to perform the operation
3482  * \param[in,out] arg  - User-filled structure containing the linkno to operate
3483  *                       on and the available size. It is eventually filled with
3484  *                       the requested information or left untouched on error
3485  *
3486  * \retval - 0 on success
3487  * \retval - Appropriate negative error code on failure
3488  */
3489 int ll_getparent(struct file *file, struct getparent __user *arg)
3490 {
3491         struct inode            *inode = file_inode(file);
3492         struct linkea_data      *ldata;
3493         struct lu_buf            buf = LU_BUF_NULL;
3494         struct lu_name           ln;
3495         struct lu_fid            parent_fid;
3496         __u32                    linkno;
3497         __u32                    name_size;
3498         int                      rc;
3499
3500         ENTRY;
3501
3502         if (!capable(CAP_DAC_READ_SEARCH) &&
3503             !test_bit(LL_SBI_USER_FID2PATH, ll_i2sbi(inode)->ll_flags))
3504                 RETURN(-EPERM);
3505
3506         if (get_user(name_size, &arg->gp_name_size))
3507                 RETURN(-EFAULT);
3508
3509         if (get_user(linkno, &arg->gp_linkno))
3510                 RETURN(-EFAULT);
3511
3512         if (name_size > PATH_MAX)
3513                 RETURN(-EINVAL);
3514
3515         OBD_ALLOC(ldata, sizeof(*ldata));
3516         if (ldata == NULL)
3517                 RETURN(-ENOMEM);
3518
3519         rc = linkea_data_new(ldata, &buf);
3520         if (rc < 0)
3521                 GOTO(ldata_free, rc);
3522
3523         rc = ll_xattr_list(inode, XATTR_NAME_LINK, XATTR_TRUSTED_T, buf.lb_buf,
3524                            buf.lb_len, OBD_MD_FLXATTR);
3525         if (rc < 0)
3526                 GOTO(lb_free, rc);
3527
3528         rc = ll_linkea_decode(ldata, linkno, &parent_fid, &ln);
3529         if (rc < 0)
3530                 GOTO(lb_free, rc);
3531
3532         if (ln.ln_namelen >= name_size)
3533                 GOTO(lb_free, rc = -EOVERFLOW);
3534
3535         if (copy_to_user(&arg->gp_fid, &parent_fid, sizeof(arg->gp_fid)))
3536                 GOTO(lb_free, rc = -EFAULT);
3537
3538         if (copy_to_user(&arg->gp_name, ln.ln_name, ln.ln_namelen))
3539                 GOTO(lb_free, rc = -EFAULT);
3540
3541         if (put_user('\0', arg->gp_name + ln.ln_namelen))
3542                 GOTO(lb_free, rc = -EFAULT);
3543
3544 lb_free:
3545         lu_buf_free(&buf);
3546 ldata_free:
3547         OBD_FREE(ldata, sizeof(*ldata));
3548
3549         RETURN(rc);
3550 }
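/*
 * Hedged userspace sketch (assumes LL_IOC_GETPARENT and struct getparent
 * from the lustre_user.h UAPI header); this is roughly how a path2parent
 * helper would query the first hardlink of an open file:
 *
 *      size_t name_size = PATH_MAX;
 *      struct getparent *gp = malloc(sizeof(*gp) + name_size);
 *
 *      gp->gp_linkno = 0;              // first link
 *      gp->gp_name_size = name_size;
 *      if (ioctl(fd, LL_IOC_GETPARENT, gp) == 0)
 *              printf(DFID"/%s\n", PFID(&gp->gp_fid), gp->gp_name);
 *      free(gp);
 */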