1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/llite/llite_lib.c
33  *
34  * Lustre Light Super operations
35  */
36
37 #define DEBUG_SUBSYSTEM S_LLITE
38
39 #include <linux/cpu.h>
40 #include <linux/module.h>
41 #include <linux/random.h>
42 #include <linux/statfs.h>
43 #include <linux/time.h>
44 #include <linux/types.h>
45 #include <libcfs/linux/linux-uuid.h>
46 #include <linux/version.h>
47 #include <linux/mm.h>
48 #include <linux/user_namespace.h>
49 #include <linux/delay.h>
50 #include <linux/uidgid.h>
51 #include <linux/security.h>
52 #include <linux/fs_struct.h>
53
54 #ifndef HAVE_CPUS_READ_LOCK
55 #include <libcfs/linux/linux-cpu.h>
56 #endif
57 #include <uapi/linux/lustre/lustre_ioctl.h>
58 #ifdef HAVE_UAPI_LINUX_MOUNT_H
59 #include <uapi/linux/mount.h>
60 #endif
61
62 #include <lustre_ha.h>
63 #include <lustre_dlm.h>
64 #include <lprocfs_status.h>
65 #include <lustre_disk.h>
66 #include <uapi/linux/lustre/lustre_param.h>
67 #include <lustre_log.h>
68 #include <cl_object.h>
69 #include <obd_cksum.h>
70 #include "llite_internal.h"
71
72 struct kmem_cache *ll_file_data_slab;
73
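/* log2() of a power-of-two value: ffz(~(n)) is the index of the lowest
 * set bit of n, e.g. log2(4096) == 12 */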
74 #ifndef log2
75 #define log2(n) ffz(~(n))
76 #endif
77
78 /**
79  * If there is only one core visible to Lustre, async readahead
80  * will be disabled. To avoid massive oversubscription, we use
81  * 1/2 of the active cores as the default maximum number of
82  * async readahead requests.
83  */
84 static inline unsigned int ll_get_ra_async_max_active(void)
85 {
86         return cfs_cpt_weight(cfs_cpt_tab, CFS_CPT_ANY) >> 1;
87 }
88
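/**
 * Allocate and initialize the per-mount ll_sb_info: set up the PCC
 * super, the async readahead workqueue, the shared LRU page cache
 * (sized to half of low memory), the readahead and statahead defaults,
 * and the default LL_SBI_* flags.
 */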
89 static struct ll_sb_info *ll_init_sbi(void)
90 {
91         struct ll_sb_info *sbi = NULL;
92         unsigned long pages;
93         unsigned long lru_page_max;
94         struct sysinfo si;
95         int rc;
96         int i;
97
98         ENTRY;
99
100         OBD_ALLOC_PTR(sbi);
101         if (sbi == NULL)
102                 RETURN(ERR_PTR(-ENOMEM));
103
104         rc = pcc_super_init(&sbi->ll_pcc_super);
105         if (rc < 0)
106                 GOTO(out_sbi, rc);
107
108         spin_lock_init(&sbi->ll_lock);
109         mutex_init(&sbi->ll_lco.lco_lock);
110         spin_lock_init(&sbi->ll_pp_extent_lock);
111         spin_lock_init(&sbi->ll_process_lock);
112         sbi->ll_rw_stats_on = 0;
113         sbi->ll_statfs_max_age = OBD_STATFS_CACHE_SECONDS;
114
115         si_meminfo(&si);
116         pages = si.totalram - si.totalhigh;
117         lru_page_max = pages / 2;
118
119         sbi->ll_ra_info.ra_async_max_active = ll_get_ra_async_max_active();
120         sbi->ll_ra_info.ll_readahead_wq =
121                 cfs_cpt_bind_workqueue("ll-readahead-wq", cfs_cpt_tab,
122                                        0, CFS_CPT_ANY,
123                                        sbi->ll_ra_info.ra_async_max_active);
124         if (IS_ERR(sbi->ll_ra_info.ll_readahead_wq))
125                 GOTO(out_pcc, rc = PTR_ERR(sbi->ll_ra_info.ll_readahead_wq));
126
127         /* initialize ll_cache data */
128         sbi->ll_cache = cl_cache_init(lru_page_max);
129         if (sbi->ll_cache == NULL)
130                 GOTO(out_destroy_ra, rc = -ENOMEM);
131
132         sbi->ll_ra_info.ra_max_pages =
133                 min(pages / 32, SBI_DEFAULT_READ_AHEAD_MAX);
134         sbi->ll_ra_info.ra_max_pages_per_file =
135                 min(sbi->ll_ra_info.ra_max_pages / 4,
136                     SBI_DEFAULT_READ_AHEAD_PER_FILE_MAX);
137         sbi->ll_ra_info.ra_async_pages_per_file_threshold =
138                                 sbi->ll_ra_info.ra_max_pages_per_file;
139         sbi->ll_ra_info.ra_range_pages = SBI_DEFAULT_RA_RANGE_PAGES;
140         sbi->ll_ra_info.ra_max_read_ahead_whole_pages = -1;
141         atomic_set(&sbi->ll_ra_info.ra_async_inflight, 0);
142
143         sbi->ll_flags |= LL_SBI_VERBOSE;
144 #ifdef ENABLE_CHECKSUM
145         sbi->ll_flags |= LL_SBI_CHECKSUM;
146 #endif
147 #ifdef ENABLE_FLOCK
148         sbi->ll_flags |= LL_SBI_FLOCK;
149 #endif
150
151 #ifdef HAVE_LRU_RESIZE_SUPPORT
152         sbi->ll_flags |= LL_SBI_LRU_RESIZE;
153 #endif
154         sbi->ll_flags |= LL_SBI_LAZYSTATFS;
155
156         for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
157                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
158                                pp_r_hist.oh_lock);
159                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
160                                pp_w_hist.oh_lock);
161         }
162
163         /* metadata statahead is enabled by default */
164         sbi->ll_sa_running_max = LL_SA_RUNNING_DEF;
165         sbi->ll_sa_max = LL_SA_RPC_DEF;
166         atomic_set(&sbi->ll_sa_total, 0);
167         atomic_set(&sbi->ll_sa_wrong, 0);
168         atomic_set(&sbi->ll_sa_running, 0);
169         atomic_set(&sbi->ll_agl_total, 0);
170         sbi->ll_flags |= LL_SBI_AGL_ENABLED;
171         sbi->ll_flags |= LL_SBI_FAST_READ;
172         sbi->ll_flags |= LL_SBI_TINY_WRITE;
173         ll_sbi_set_encrypt(sbi, true);
174
175         /* root squash */
176         sbi->ll_squash.rsi_uid = 0;
177         sbi->ll_squash.rsi_gid = 0;
178         INIT_LIST_HEAD(&sbi->ll_squash.rsi_nosquash_nids);
179         spin_lock_init(&sbi->ll_squash.rsi_lock);
180
181         /* Per-filesystem file heat */
182         sbi->ll_heat_decay_weight = SBI_DEFAULT_HEAT_DECAY_WEIGHT;
183         sbi->ll_heat_period_second = SBI_DEFAULT_HEAT_PERIOD_SECOND;
184         RETURN(sbi);
185 out_destroy_ra:
186         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
187 out_pcc:
188         pcc_super_fini(&sbi->ll_pcc_super);
189 out_sbi:
190         OBD_FREE_PTR(sbi);
191         RETURN(ERR_PTR(rc));
192 }
193
194 static void ll_free_sbi(struct super_block *sb)
195 {
196         struct ll_sb_info *sbi = ll_s2sbi(sb);
197         ENTRY;
198
199         if (sbi != NULL) {
200                 if (!list_empty(&sbi->ll_squash.rsi_nosquash_nids))
201                         cfs_free_nidlist(&sbi->ll_squash.rsi_nosquash_nids);
202                 if (sbi->ll_ra_info.ll_readahead_wq)
203                         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
204                 if (sbi->ll_cache != NULL) {
205                         cl_cache_decref(sbi->ll_cache);
206                         sbi->ll_cache = NULL;
207                 }
208                 pcc_super_fini(&sbi->ll_pcc_super);
209                 OBD_FREE(sbi, sizeof(*sbi));
210         }
211         EXIT;
212 }
213
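/**
 * Connect this client to the metadata (md) and data (dt) obd devices
 * created from the config log, negotiate the connect flags with both,
 * initialize FID handling, fetch the root FID from the MDT and build
 * the root inode and dentry for the superblock.
 */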
214 static int client_common_fill_super(struct super_block *sb, char *md, char *dt)
215 {
216         struct inode *root = NULL;
217         struct ll_sb_info *sbi = ll_s2sbi(sb);
218         struct obd_statfs *osfs = NULL;
219         struct ptlrpc_request *request = NULL;
220         struct obd_connect_data *data = NULL;
221         struct obd_uuid *uuid;
222         struct md_op_data *op_data;
223         struct lustre_md lmd;
224         u64 valid;
225         int size, err, checksum;
226
227         ENTRY;
228         sbi->ll_md_obd = class_name2obd(md);
229         if (!sbi->ll_md_obd) {
230                 CERROR("MD %s: not setup or attached\n", md);
231                 RETURN(-EINVAL);
232         }
233
234         OBD_ALLOC_PTR(data);
235         if (data == NULL)
236                 RETURN(-ENOMEM);
237
238         OBD_ALLOC_PTR(osfs);
239         if (osfs == NULL) {
240                 OBD_FREE_PTR(data);
241                 RETURN(-ENOMEM);
242         }
243
244         /* Pass the client page size via ocd_grant_blkbits; the server should
245          * report back its backend blocksize for grant calculation purposes */
246         data->ocd_grant_blkbits = PAGE_SHIFT;
247
248         /* indicate MDT features supported by this client */
249         data->ocd_connect_flags = OBD_CONNECT_IBITS    | OBD_CONNECT_NODEVOH  |
250                                   OBD_CONNECT_ATTRFID  | OBD_CONNECT_GRANT |
251                                   OBD_CONNECT_VERSION  | OBD_CONNECT_BRW_SIZE |
252                                   OBD_CONNECT_SRVLOCK  | OBD_CONNECT_TRUNCLOCK|
253                                   OBD_CONNECT_MDS_CAPA | OBD_CONNECT_OSS_CAPA |
254                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID     |
255                                   OBD_CONNECT_AT       | OBD_CONNECT_LOV_V3   |
256                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
257                                   OBD_CONNECT_64BITHASH |
258                                   OBD_CONNECT_EINPROGRESS |
259                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
260                                   OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS|
261                                   OBD_CONNECT_MAX_EASIZE |
262                                   OBD_CONNECT_FLOCK_DEAD |
263                                   OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK |
264                                   OBD_CONNECT_OPEN_BY_FID |
265                                   OBD_CONNECT_DIR_STRIPE |
266                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_CKSUM |
267                                   OBD_CONNECT_SUBTREE |
268                                   OBD_CONNECT_MULTIMODRPCS |
269                                   OBD_CONNECT_GRANT_PARAM |
270                                   OBD_CONNECT_SHORTIO | OBD_CONNECT_FLAGS2;
271
272         data->ocd_connect_flags2 = OBD_CONNECT2_DIR_MIGRATE |
273                                    OBD_CONNECT2_SUM_STATFS |
274                                    OBD_CONNECT2_OVERSTRIPING |
275                                    OBD_CONNECT2_FLR |
276                                    OBD_CONNECT2_LOCK_CONVERT |
277                                    OBD_CONNECT2_ARCHIVE_ID_ARRAY |
278                                    OBD_CONNECT2_INC_XID |
279                                    OBD_CONNECT2_LSOM |
280                                    OBD_CONNECT2_ASYNC_DISCARD |
281                                    OBD_CONNECT2_PCC |
282                                    OBD_CONNECT2_CRUSH | OBD_CONNECT2_LSEEK |
283                                    OBD_CONNECT2_GETATTR_PFID |
284                                    OBD_CONNECT2_DOM_LVB;
285
286 #ifdef HAVE_LRU_RESIZE_SUPPORT
287         if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
288                 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
289 #endif
290         data->ocd_connect_flags |= OBD_CONNECT_ACL_FLAGS;
291
292         data->ocd_cksum_types = obd_cksum_types_supported_client();
293
294         if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
295                 /* flag mdc connection as lightweight, only used for test
296                  * purposes; use with care */
297                 data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
298
299         data->ocd_ibits_known = MDS_INODELOCK_FULL;
300         data->ocd_version = LUSTRE_VERSION_CODE;
301
302         if (sb->s_flags & SB_RDONLY)
303                 data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
304         if (sbi->ll_flags & LL_SBI_USER_XATTR)
305                 data->ocd_connect_flags |= OBD_CONNECT_XATTR;
306
307 #ifdef SB_NOSEC
308         /* Setting this indicates we correctly support S_NOSEC (See kernel
309          * commit 9e1f1de02c2275d7172e18dc4e7c2065777611bf)
310          */
311         sb->s_flags |= SB_NOSEC;
312 #endif
313
314         if (sbi->ll_flags & LL_SBI_FLOCK)
315                 sbi->ll_fop = &ll_file_operations_flock;
316         else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
317                 sbi->ll_fop = &ll_file_operations;
318         else
319                 sbi->ll_fop = &ll_file_operations_noflock;
320
321         /* always ping even if server suppress_pings */
322         if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
323                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
324
325         obd_connect_set_secctx(data);
326         if (ll_sbi_has_encrypt(sbi))
327                 obd_connect_set_enc(data);
328
329 #if defined(CONFIG_SECURITY)
330         data->ocd_connect_flags2 |= OBD_CONNECT2_SELINUX_POLICY;
331 #endif
332
333         data->ocd_brw_size = MD_MAX_BRW_SIZE;
334
335         err = obd_connect(NULL, &sbi->ll_md_exp, sbi->ll_md_obd,
336                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
337         if (err == -EBUSY) {
338                 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
339                                    "recovery, of which this client is not a "
340                                    "part. Please wait for recovery to complete,"
341                                    " abort, or time out.\n", md);
342                 GOTO(out, err);
343         } else if (err) {
344                 CERROR("cannot connect to %s: rc = %d\n", md, err);
345                 GOTO(out, err);
346         }
347
348         sbi->ll_md_exp->exp_connect_data = *data;
349
350         err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
351                            LUSTRE_SEQ_METADATA);
352         if (err) {
353                 CERROR("%s: Can't init metadata layer FID infrastructure, "
354                        "rc = %d\n", sbi->ll_md_exp->exp_obd->obd_name, err);
355                 GOTO(out_md, err);
356         }
357
358         /* For mount, we only need fs info from MDT0; in DNE this also
359          * ensures the client can be mounted as long as MDT0 is
360          * available */
361         err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
362                         ktime_get_seconds() - sbi->ll_statfs_max_age,
363                         OBD_STATFS_FOR_MDT0);
364         if (err)
365                 GOTO(out_md_fid, err);
366
367         /* This needs to be after statfs to ensure connect has finished.
368          * Note that "data" does NOT contain the valid connect reply.
369          * If connecting to a 1.8 server there will be no LMV device, so
370          * we can access the MDC export directly and exp_connect_flags will
371          * be non-zero, but if accessing an upgraded 2.1 server it will
372          * have the correct flags filled in.
373          * XXX: fill in the LMV exp_connect_flags from MDC(s). */
374         valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
375         if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
376             valid != CLIENT_CONNECT_MDT_REQD) {
377                 char *buf;
378
379                 OBD_ALLOC_WAIT(buf, PAGE_SIZE);
380                 obd_connect_flags2str(buf, PAGE_SIZE,
381                                       valid ^ CLIENT_CONNECT_MDT_REQD, 0, ",");
382                 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support "
383                                    "feature(s) needed for correct operation "
384                                    "of this client (%s). Please upgrade "
385                                    "server or downgrade client.\n",
386                                    sbi->ll_md_exp->exp_obd->obd_name, buf);
387                 OBD_FREE(buf, PAGE_SIZE);
388                 GOTO(out_md_fid, err = -EPROTO);
389         }
390
391         size = sizeof(*data);
392         err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
393                            KEY_CONN_DATA,  &size, data);
394         if (err) {
395                 CERROR("%s: Get connect data failed: rc = %d\n",
396                        sbi->ll_md_exp->exp_obd->obd_name, err);
397                 GOTO(out_md_fid, err);
398         }
399
400         LASSERT(osfs->os_bsize);
401         sb->s_blocksize = osfs->os_bsize;
402         sb->s_blocksize_bits = log2(osfs->os_bsize);
403         sb->s_magic = LL_SUPER_MAGIC;
404         sb->s_maxbytes = MAX_LFS_FILESIZE;
405         sbi->ll_namelen = osfs->os_namelen;
406         sbi->ll_mnt.mnt = current->fs->root.mnt;
407
408         if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
409             !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
410                 LCONSOLE_INFO("Disabling user_xattr feature because "
411                               "it is not supported on the server\n");
412                 sbi->ll_flags &= ~LL_SBI_USER_XATTR;
413         }
414
415         if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
416 #ifdef SB_POSIXACL
417                 sb->s_flags |= SB_POSIXACL;
418 #endif
419                 sbi->ll_flags |= LL_SBI_ACL;
420         } else {
421                 LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
422 #ifdef SB_POSIXACL
423                 sb->s_flags &= ~SB_POSIXACL;
424 #endif
425                 sbi->ll_flags &= ~LL_SBI_ACL;
426         }
427
428         if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
429                 sbi->ll_flags |= LL_SBI_64BIT_HASH;
430
431         if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK)
432                 sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;
433
434         if (obd_connect_has_secctx(data))
435                 sbi->ll_flags |= LL_SBI_FILE_SECCTX;
436
437         if (ll_sbi_has_encrypt(sbi) && !obd_connect_has_enc(data)) {
438                 if (ll_sbi_has_test_dummy_encryption(sbi))
439                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
440                                       sbi->ll_fsname,
441                                       sbi->ll_md_exp->exp_obd->obd_name);
442                 ll_sbi_set_encrypt(sbi, false);
443         }
444
445         if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
446                 if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
447                         LCONSOLE_INFO("%s: disabling xattr cache due to "
448                                       "unknown maximum xattr size.\n", dt);
449                 } else if (!sbi->ll_xattr_cache_set) {
450                         /* If xattr_cache was already set (to either 0 or 1)
451                          * during llog processing, it won't be enabled here. */
452                         sbi->ll_flags |= LL_SBI_XATTR_CACHE;
453                         sbi->ll_xattr_cache_enabled = 1;
454                 }
455         }
456
457         sbi->ll_dt_obd = class_name2obd(dt);
458         if (!sbi->ll_dt_obd) {
459                 CERROR("DT %s: not setup or attached\n", dt);
460                 GOTO(out_md_fid, err = -ENODEV);
461         }
462
463         /* Pass the client page size via ocd_grant_blkbits; the server should
464          * report back its backend blocksize for grant calculation purposes */
465         data->ocd_grant_blkbits = PAGE_SHIFT;
466
467         /* indicate OST features supported by this client */
468         data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
469                                   OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
470                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
471                                   OBD_CONNECT_SRVLOCK | OBD_CONNECT_TRUNCLOCK|
472                                   OBD_CONNECT_AT | OBD_CONNECT_OSS_CAPA |
473                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
474                                   OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
475                                   OBD_CONNECT_EINPROGRESS |
476                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
477                                   OBD_CONNECT_LAYOUTLOCK |
478                                   OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK |
479                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_SHORTIO |
480                                   OBD_CONNECT_FLAGS2 | OBD_CONNECT_GRANT_SHRINK;
481         data->ocd_connect_flags2 = OBD_CONNECT2_LOCKAHEAD |
482                                    OBD_CONNECT2_INC_XID | OBD_CONNECT2_LSEEK;
483
484         if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_GRANT_PARAM))
485                 data->ocd_connect_flags |= OBD_CONNECT_GRANT_PARAM;
486
487         /* OBD_CONNECT_CKSUM should always be set, even if checksums are
488          * disabled by default, because it can still be enabled on the
489          * fly via /sys. As a consequence, we still need to come to an
490          * agreement on the supported algorithms at connect time
491          */
492         data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
493
494         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
495                 data->ocd_cksum_types = OBD_CKSUM_ADLER;
496         else
497                 data->ocd_cksum_types = obd_cksum_types_supported_client();
498
499 #ifdef HAVE_LRU_RESIZE_SUPPORT
500         data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
501 #endif
502         /* always ping even if server suppress_pings */
503         if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
504                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
505
506         if (ll_sbi_has_encrypt(sbi))
507                 obd_connect_set_enc(data);
508
509         CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d "
510                "ocd_grant: %d\n", data->ocd_connect_flags,
511                data->ocd_version, data->ocd_grant);
512
513         sbi->ll_dt_obd->obd_upcall.onu_owner = &sbi->ll_lco;
514         sbi->ll_dt_obd->obd_upcall.onu_upcall = cl_ocd_update;
515
516         data->ocd_brw_size = DT_MAX_BRW_SIZE;
517
518         err = obd_connect(NULL, &sbi->ll_dt_exp, sbi->ll_dt_obd,
519                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
520         if (err == -EBUSY) {
521                 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
522                                    "recovery, of which this client is not a "
523                                    "part.  Please wait for recovery to "
524                                    "complete, abort, or time out.\n", dt);
525                 GOTO(out_md, err);
526         } else if (err) {
527                 CERROR("%s: Cannot connect to %s: rc = %d\n",
528                        sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
529                 GOTO(out_md, err);
530         }
531
532         if (ll_sbi_has_encrypt(sbi) &&
533             !obd_connect_has_enc(&sbi->ll_dt_obd->u.lov.lov_ocd)) {
534                 if (ll_sbi_has_test_dummy_encryption(sbi))
535                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
536                                       sbi->ll_fsname, dt);
537                 ll_sbi_set_encrypt(sbi, false);
538         } else if (ll_sbi_has_test_dummy_encryption(sbi)) {
539                 LCONSOLE_WARN("Test dummy encryption mode enabled\n");
540         }
541
542         sbi->ll_dt_exp->exp_connect_data = *data;
543
544         /* Don't change value if it was specified in the config log */
545         if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages == -1) {
546                 sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
547                         max_t(unsigned long, SBI_DEFAULT_READ_AHEAD_WHOLE_MAX,
548                               (data->ocd_brw_size >> PAGE_SHIFT));
549                 if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages >
550                     sbi->ll_ra_info.ra_max_pages_per_file)
551                         sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
552                                 sbi->ll_ra_info.ra_max_pages_per_file;
553         }
554
555         err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
556                            LUSTRE_SEQ_METADATA);
557         if (err) {
558                 CERROR("%s: Can't init data layer FID infrastructure, "
559                        "rc = %d\n", sbi->ll_dt_exp->exp_obd->obd_name, err);
560                 GOTO(out_dt, err);
561         }
562
563         mutex_lock(&sbi->ll_lco.lco_lock);
564         sbi->ll_lco.lco_flags = data->ocd_connect_flags;
565         sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
566         sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
567         mutex_unlock(&sbi->ll_lco.lco_lock);
568
569         fid_zero(&sbi->ll_root_fid);
570         err = md_get_root(sbi->ll_md_exp, get_mount_fileset(sb),
571                            &sbi->ll_root_fid);
572         if (err) {
573                 CERROR("cannot mds_connect: rc = %d\n", err);
574                 GOTO(out_lock_cn_cb, err);
575         }
576         if (!fid_is_sane(&sbi->ll_root_fid)) {
577                 CERROR("%s: Invalid root fid "DFID" during mount\n",
578                        sbi->ll_md_exp->exp_obd->obd_name,
579                        PFID(&sbi->ll_root_fid));
580                 GOTO(out_lock_cn_cb, err = -EINVAL);
581         }
582         CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
583
584         sb->s_op = &lustre_super_operations;
585         sb->s_xattr = ll_xattr_handlers;
586 #if THREAD_SIZE >= 8192 /*b=17630*/
587         sb->s_export_op = &lustre_export_operations;
588 #endif
589 #ifdef HAVE_LUSTRE_CRYPTO
590         llcrypt_set_ops(sb, &lustre_cryptops);
591 #endif
592
593         /* make root inode
594          * XXX: move this to after cbd setup? */
595         valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE;
596         if (sbi->ll_flags & LL_SBI_ACL)
597                 valid |= OBD_MD_FLACL;
598
599         OBD_ALLOC_PTR(op_data);
600         if (op_data == NULL)
601                 GOTO(out_lock_cn_cb, err = -ENOMEM);
602
603         op_data->op_fid1 = sbi->ll_root_fid;
604         op_data->op_mode = 0;
605         op_data->op_valid = valid;
606
607         err = md_getattr(sbi->ll_md_exp, op_data, &request);
608
609         OBD_FREE_PTR(op_data);
610         if (err) {
611                 CERROR("%s: md_getattr failed for root: rc = %d\n",
612                        sbi->ll_md_exp->exp_obd->obd_name, err);
613                 GOTO(out_lock_cn_cb, err);
614         }
615
616         err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
617                                sbi->ll_md_exp, &lmd);
618         if (err) {
619                 CERROR("failed to understand root inode md: rc = %d\n", err);
620                 ptlrpc_req_finished(request);
621                 GOTO(out_lock_cn_cb, err);
622         }
623
624         LASSERT(fid_is_sane(&sbi->ll_root_fid));
625         root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid,
626                                             sbi->ll_flags & LL_SBI_32BIT_API),
627                        &lmd);
628         md_free_lustre_md(sbi->ll_md_exp, &lmd);
629         ptlrpc_req_finished(request);
630
631         if (IS_ERR(root)) {
632                 lmd_clear_acl(&lmd);
633                 err = IS_ERR(root) ? PTR_ERR(root) : -EBADF;
634                 root = NULL;
635                 CERROR("%s: bad ll_iget() for root: rc = %d\n",
636                        sbi->ll_fsname, err);
637                 GOTO(out_root, err);
638         }
639
640         checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
641         if (sbi->ll_checksum_set) {
642                 err = obd_set_info_async(NULL, sbi->ll_dt_exp,
643                                          sizeof(KEY_CHECKSUM), KEY_CHECKSUM,
644                                          sizeof(checksum), &checksum, NULL);
645                 if (err) {
646                         CERROR("%s: Set checksum failed: rc = %d\n",
647                                sbi->ll_dt_exp->exp_obd->obd_name, err);
648                         GOTO(out_root, err);
649                 }
650         }
651         cl_sb_init(sb);
652
653         sb->s_root = d_make_root(root);
654         if (sb->s_root == NULL) {
655                 err = -ENOMEM;
656                 CERROR("%s: can't make root dentry: rc = %d\n",
657                        sbi->ll_fsname, err);
658                 GOTO(out_root, err);
659         }
660
661         sbi->ll_sdev_orig = sb->s_dev;
662
663         /* We set sb->s_dev to the same value on all Lustre clients in order
664          * to support NFS export clustering.  NFSD requires that the FSID be
665          * the same on all clients. */
666         /* s_dev is also used in lt_compare() to compare two filesystems, but
667          * that is only a node-local comparison. */
668         uuid = obd_get_uuid(sbi->ll_md_exp);
669         if (uuid != NULL)
670                 sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
671
672         if (data != NULL)
673                 OBD_FREE_PTR(data);
674         if (osfs != NULL)
675                 OBD_FREE_PTR(osfs);
676
677         if (sbi->ll_dt_obd) {
678                 err = sysfs_create_link(&sbi->ll_kset.kobj,
679                                         &sbi->ll_dt_obd->obd_kset.kobj,
680                                         sbi->ll_dt_obd->obd_type->typ_name);
681                 if (err < 0) {
682                         CERROR("%s: could not register %s in llite: rc = %d\n",
683                                dt, sbi->ll_fsname, err);
684                         err = 0;
685                 }
686         }
687
688         if (sbi->ll_md_obd) {
689                 err = sysfs_create_link(&sbi->ll_kset.kobj,
690                                         &sbi->ll_md_obd->obd_kset.kobj,
691                                         sbi->ll_md_obd->obd_type->typ_name);
692                 if (err < 0) {
693                         CERROR("%s: could not register %s in llite: rc = %d\n",
694                                md, sbi->ll_fsname, err);
695                         err = 0;
696                 }
697         }
698
699         RETURN(err);
700 out_root:
701         if (root)
702                 iput(root);
703 out_lock_cn_cb:
704         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
705 out_dt:
706         obd_disconnect(sbi->ll_dt_exp);
707         sbi->ll_dt_exp = NULL;
708         sbi->ll_dt_obd = NULL;
709 out_md_fid:
710         obd_fid_fini(sbi->ll_md_exp->exp_obd);
711 out_md:
712         obd_disconnect(sbi->ll_md_exp);
713         sbi->ll_md_exp = NULL;
714         sbi->ll_md_obd = NULL;
715 out:
716         if (data != NULL)
717                 OBD_FREE_PTR(data);
718         if (osfs != NULL)
719                 OBD_FREE_PTR(osfs);
720         return err;
721 }
722
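/**
 * Get the maximum supported EA size from the servers.
 *
 * Queries KEY_MAX_EASIZE on the data export (max LOV EA size) and then
 * on the metadata export (max LMV EA size); the latter value is what
 * \a lmmsize holds on successful return.
 *
 * \param[in] sbi       superblock info for this filesystem
 * \param[out] lmmsize  pointer to storage location for the value
 *
 * \retval 0            on success
 * \retval negative     negated errno on failure
 */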
723 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
724 {
725         int size, rc;
726
727         size = sizeof(*lmmsize);
728         rc = obd_get_info(NULL, sbi->ll_dt_exp, sizeof(KEY_MAX_EASIZE),
729                           KEY_MAX_EASIZE, &size, lmmsize);
730         if (rc != 0) {
731                 CERROR("%s: cannot get max LOV EA size: rc = %d\n",
732                        sbi->ll_dt_exp->exp_obd->obd_name, rc);
733                 RETURN(rc);
734         }
735
736         CDEBUG(D_INFO, "max LOV ea size: %d\n", *lmmsize);
737
738         size = sizeof(int);
739         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
740                           KEY_MAX_EASIZE, &size, lmmsize);
741         if (rc)
742                 CERROR("Get max mdsize error rc %d\n", rc);
743
744         CDEBUG(D_INFO, "max LMV ea size: %d\n", *lmmsize);
745
746         RETURN(rc);
747 }
748
749 /**
750  * Get the value of the default_easize parameter.
751  *
752  * \see client_obd::cl_default_mds_easize
753  *
754  * \param[in] sbi       superblock info for this filesystem
755  * \param[out] lmmsize  pointer to storage location for value
756  *
757  * \retval 0            on success
758  * \retval negative     negated errno on failure
759  */
760 int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
761 {
762         int size, rc;
763
764         size = sizeof(int);
765         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
766                          KEY_DEFAULT_EASIZE, &size, lmmsize);
767         if (rc)
768                 CERROR("Get default mdsize error rc %d\n", rc);
769
770         RETURN(rc);
771 }
772
773 /**
774  * Set the default_easize parameter to the given value.
775  *
776  * \see client_obd::cl_default_mds_easize
777  *
778  * \param[in] sbi       superblock info for this filesystem
779  * \param[in] lmmsize   the size to set
780  *
781  * \retval 0            on success
782  * \retval negative     negated errno on failure
783  */
784 int ll_set_default_mdsize(struct ll_sb_info *sbi, int lmmsize)
785 {
786         int rc;
787
788         if (lmmsize < sizeof(struct lov_mds_md) ||
789             lmmsize > OBD_MAX_DEFAULT_EA_SIZE)
790                 return -EINVAL;
791
792         rc = obd_set_info_async(NULL, sbi->ll_md_exp,
793                                 sizeof(KEY_DEFAULT_EASIZE), KEY_DEFAULT_EASIZE,
794                                 sizeof(int), &lmmsize, NULL);
795
796         RETURN(rc);
797 }
798
799 static void client_common_put_super(struct super_block *sb)
800 {
801         struct ll_sb_info *sbi = ll_s2sbi(sb);
802         ENTRY;
803
804         cl_sb_fini(sb);
805
806         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
807         obd_disconnect(sbi->ll_dt_exp);
808         sbi->ll_dt_exp = NULL;
809
810         ll_debugfs_unregister_super(sb);
811
812         obd_fid_fini(sbi->ll_md_exp->exp_obd);
813         obd_disconnect(sbi->ll_md_exp);
814         sbi->ll_md_exp = NULL;
815
816         EXIT;
817 }
818
819 void ll_kill_super(struct super_block *sb)
820 {
821         struct ll_sb_info *sbi;
822         ENTRY;
823
824         /* sb not initialized? */
825         if (!(sb->s_flags & SB_ACTIVE))
826                 return;
827
828         sbi = ll_s2sbi(sb);
829         /* We need to restore the original s_dev (changed for clustered NFS)
830          * before put_super, because newer kernels cache s_dev and changing
831          * sb->s_dev in put_super does not affect the real device removal */
832         if (sbi) {
833                 sb->s_dev = sbi->ll_sdev_orig;
834
835                 /* wait for running statahead threads to quit */
836                 while (atomic_read(&sbi->ll_sa_running) > 0)
837                         schedule_timeout_uninterruptible(
838                                 cfs_time_seconds(1) >> 3);
839         }
840
841         EXIT;
842 }
843
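/* Return fl if the option string in data starts with opt, otherwise 0;
 * e.g. ll_set_opt("flock", s1, LL_SBI_FLOCK) yields LL_SBI_FLOCK when
 * s1 begins with "flock" (including "flock,..." option lists). */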
844 static inline int ll_set_opt(const char *opt, char *data, int fl)
845 {
846         if (strncmp(opt, data, strlen(opt)) != 0)
847                 return 0;
848         else
849                 return fl;
850 }
851
852 /* non-client-specific mount options are parsed in lmd_parse */
853 static int ll_options(char *options, struct ll_sb_info *sbi)
854 {
855         int tmp;
856         char *s1 = options, *s2;
857         int *flags = &sbi->ll_flags;
858         ENTRY;
859
860         if (!options)
861                 RETURN(0);
862
863         CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
864
865         while (*s1) {
866                 CDEBUG(D_SUPER, "next opt=%s\n", s1);
867                 tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
868                 if (tmp) {
869                         *flags |= tmp;
870                         goto next;
871                 }
872                 tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
873                 if (tmp) {
874                         *flags = (*flags & ~LL_SBI_LOCALFLOCK) | tmp;
875                         goto next;
876                 }
877                 tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
878                 if (tmp) {
879                         *flags = (*flags & ~LL_SBI_FLOCK) | tmp;
880                         goto next;
881                 }
882                 tmp = ll_set_opt("noflock", s1, LL_SBI_FLOCK|LL_SBI_LOCALFLOCK);
883                 if (tmp) {
884                         *flags &= ~tmp;
885                         goto next;
886                 }
887                 tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
888                 if (tmp) {
889                         *flags |= tmp;
890                         goto next;
891                 }
892                 tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
893                 if (tmp) {
894                         *flags &= ~tmp;
895                         goto next;
896                 }
897                 tmp = ll_set_opt("context", s1, 1);
898                 if (tmp)
899                         goto next;
900                 tmp = ll_set_opt("fscontext", s1, 1);
901                 if (tmp)
902                         goto next;
903                 tmp = ll_set_opt("defcontext", s1, 1);
904                 if (tmp)
905                         goto next;
906                 tmp = ll_set_opt("rootcontext", s1, 1);
907                 if (tmp)
908                         goto next;
909                 tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
910                 if (tmp) {
911                         *flags |= tmp;
912                         goto next;
913                 }
914                 tmp = ll_set_opt("nouser_fid2path", s1, LL_SBI_USER_FID2PATH);
915                 if (tmp) {
916                         *flags &= ~tmp;
917                         goto next;
918                 }
919
920                 tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
921                 if (tmp) {
922                         *flags |= tmp;
923                         sbi->ll_checksum_set = 1;
924                         goto next;
925                 }
926                 tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
927                 if (tmp) {
928                         *flags &= ~tmp;
929                         sbi->ll_checksum_set = 1;
930                         goto next;
931                 }
932                 tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
933                 if (tmp) {
934                         *flags |= tmp;
935                         goto next;
936                 }
937                 tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
938                 if (tmp) {
939                         *flags &= ~tmp;
940                         goto next;
941                 }
942                 tmp = ll_set_opt("lazystatfs", s1, LL_SBI_LAZYSTATFS);
943                 if (tmp) {
944                         *flags |= tmp;
945                         goto next;
946                 }
947                 tmp = ll_set_opt("nolazystatfs", s1, LL_SBI_LAZYSTATFS);
948                 if (tmp) {
949                         *flags &= ~tmp;
950                         goto next;
951                 }
952                 tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
953                 if (tmp) {
954                         *flags |= tmp;
955                         goto next;
956                 }
957                 tmp = ll_set_opt("verbose", s1, LL_SBI_VERBOSE);
958                 if (tmp) {
959                         *flags |= tmp;
960                         goto next;
961                 }
962                 tmp = ll_set_opt("noverbose", s1, LL_SBI_VERBOSE);
963                 if (tmp) {
964                         *flags &= ~tmp;
965                         goto next;
966                 }
967                 tmp = ll_set_opt("always_ping", s1, LL_SBI_ALWAYS_PING);
968                 if (tmp) {
969                         *flags |= tmp;
970                         goto next;
971                 }
972                 tmp = ll_set_opt("test_dummy_encryption", s1,
973                                  LL_SBI_TEST_DUMMY_ENCRYPTION);
974                 if (tmp) {
975 #ifdef HAVE_LUSTRE_CRYPTO
976                         *flags |= tmp;
977 #else
978                         LCONSOLE_WARN("Test dummy encryption mount option ignored: encryption not supported\n");
979 #endif
980                         goto next;
981                 }
982                 tmp = ll_set_opt("noencrypt", s1, LL_SBI_ENCRYPT);
983                 if (tmp) {
984 #ifdef HAVE_LUSTRE_CRYPTO
985                         *flags &= ~tmp;
986 #else
987                         LCONSOLE_WARN("noencrypt mount option ignored: encryption not supported\n");
988 #endif
989                         goto next;
990                 }
991                 LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
992                                    s1);
993                 RETURN(-EINVAL);
994
995 next:
996                 /* Find next opt */
997                 s2 = strchr(s1, ',');
998                 if (s2 == NULL)
999                         break;
1000                 s1 = s2 + 1;
1001         }
1002         RETURN(0);
1003 }
1004
1005 void ll_lli_init(struct ll_inode_info *lli)
1006 {
1007         lli->lli_inode_magic = LLI_INODE_MAGIC;
1008         lli->lli_flags = 0;
1009         spin_lock_init(&lli->lli_lock);
1010         lli->lli_posix_acl = NULL;
1011         /* Do not set lli_fid, it has been initialized already. */
1012         fid_zero(&lli->lli_pfid);
1013         lli->lli_mds_read_och = NULL;
1014         lli->lli_mds_write_och = NULL;
1015         lli->lli_mds_exec_och = NULL;
1016         lli->lli_open_fd_read_count = 0;
1017         lli->lli_open_fd_write_count = 0;
1018         lli->lli_open_fd_exec_count = 0;
1019         mutex_init(&lli->lli_och_mutex);
1020         spin_lock_init(&lli->lli_agl_lock);
1021         spin_lock_init(&lli->lli_layout_lock);
1022         ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
1023         lli->lli_clob = NULL;
1024
1025         init_rwsem(&lli->lli_xattrs_list_rwsem);
1026         mutex_init(&lli->lli_xattrs_enq_lock);
1027
1028         LASSERT(lli->lli_vfs_inode.i_mode != 0);
1029         if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
1030                 lli->lli_opendir_key = NULL;
1031                 lli->lli_sai = NULL;
1032                 spin_lock_init(&lli->lli_sa_lock);
1033                 lli->lli_opendir_pid = 0;
1034                 lli->lli_sa_enabled = 0;
1035                 init_rwsem(&lli->lli_lsm_sem);
1036         } else {
1037                 mutex_init(&lli->lli_size_mutex);
1038                 mutex_init(&lli->lli_setattr_mutex);
1039                 lli->lli_symlink_name = NULL;
1040                 ll_trunc_sem_init(&lli->lli_trunc_sem);
1041                 range_lock_tree_init(&lli->lli_write_tree);
1042                 init_rwsem(&lli->lli_glimpse_sem);
1043                 lli->lli_glimpse_time = ktime_set(0, 0);
1044                 INIT_LIST_HEAD(&lli->lli_agl_list);
1045                 lli->lli_agl_index = 0;
1046                 lli->lli_async_rc = 0;
1047                 spin_lock_init(&lli->lli_heat_lock);
1048                 obd_heat_clear(lli->lli_heat_instances, OBD_HEAT_COUNT);
1049                 lli->lli_heat_flags = 0;
1050                 mutex_init(&lli->lli_pcc_lock);
1051                 lli->lli_pcc_state = PCC_STATE_FL_NONE;
1052                 lli->lli_pcc_inode = NULL;
1053                 lli->lli_pcc_dsflags = PCC_DATASET_INVALID;
1054                 lli->lli_pcc_generation = 0;
1055                 mutex_init(&lli->lli_group_mutex);
1056                 lli->lli_group_users = 0;
1057                 lli->lli_group_gid = 0;
1058         }
1059         mutex_init(&lli->lli_layout_mutex);
1060         memset(lli->lli_jobid, 0, sizeof(lli->lli_jobid));
1061 }
1062
1063 #define MAX_STRING_SIZE 128
1064
1065 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1066
1067 #define LSI_BDI_INITIALIZED     0x00400000
1068
1069 #ifndef HAVE_BDI_CAP_MAP_COPY
1070 # define BDI_CAP_MAP_COPY       0
1071 #endif
1072
1073 static int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
1074 {
1075         struct  lustre_sb_info *lsi = s2lsi(sb);
1076         char buf[MAX_STRING_SIZE];
1077         va_list args;
1078         int err;
1079
1080         err = bdi_init(&lsi->lsi_bdi);
1081         if (err)
1082                 return err;
1083
1084         lsi->lsi_flags |= LSI_BDI_INITIALIZED;
1085         lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY;
1086         lsi->lsi_bdi.name = "lustre";
1087         va_start(args, fmt);
1088         vsnprintf(buf, MAX_STRING_SIZE, fmt, args);
1089         va_end(args);
1090
1091         err = bdi_register(&lsi->lsi_bdi, NULL, "%s", buf);
1092         if (!err)
1093                 sb->s_bdi = &lsi->lsi_bdi;
1094
1095         return err;
1096 }
1097 #endif /* !HAVE_SUPER_SETUP_BDI_NAME */
1098
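/**
 * Fill in the client superblock at mount time: parse the client mount
 * options, generate the superblock UUID, derive the fsname from the
 * profile name, set up the BDI and debugfs entries, process the client
 * config log from the MGS, and finally call client_common_fill_super()
 * to connect to the metadata (MDC/LMV) and data (OSC/LOV) devices it
 * created.
 */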
1099 int ll_fill_super(struct super_block *sb)
1100 {
1101         struct  lustre_profile *lprof = NULL;
1102         struct  lustre_sb_info *lsi = s2lsi(sb);
1103         struct  ll_sb_info *sbi = NULL;
1104         char    *dt = NULL, *md = NULL;
1105         char    *profilenm = get_profile_name(sb);
1106         struct config_llog_instance *cfg;
1107         /* %p for void* in printf needs 16+2 characters: 0xffffffffffffffff */
1108         const int instlen = LUSTRE_MAXINSTANCE + 2;
1109         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1110         char name[MAX_STRING_SIZE];
1111         int md_len = 0;
1112         int dt_len = 0;
1113         uuid_t uuid;
1114         char *ptr;
1115         int len;
1116         int err;
1117
1118         ENTRY;
1119         /* for ASLR, to map between cfg_instance and hashed ptr */
1120         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1121                profilenm, cfg_instance, sb);
1122
1123         OBD_ALLOC_PTR(cfg);
1124         if (cfg == NULL)
1125                 GOTO(out_free_cfg, err = -ENOMEM);
1126
1127         /* client additional sb info */
1128         lsi->lsi_llsbi = sbi = ll_init_sbi();
1129         if (IS_ERR(sbi))
1130                 GOTO(out_free_cfg, err = PTR_ERR(sbi));
1131
1132         err = ll_options(lsi->lsi_lmd->lmd_opts, sbi);
1133         if (err)
1134                 GOTO(out_free_cfg, err);
1135
1136         /* Kernels >= 2.6.38 store dentry operations in sb->s_d_op. */
1137         sb->s_d_op = &ll_d_ops;
1138
1139         /* UUID handling */
1140         generate_random_uuid(uuid.b);
1141         snprintf(sbi->ll_sb_uuid.uuid, sizeof(sbi->ll_sb_uuid), "%pU", uuid.b);
1142
1143         CDEBUG(D_CONFIG, "llite sb uuid: %s\n", sbi->ll_sb_uuid.uuid);
1144
1145         /* Get fsname */
1146         len = strlen(profilenm);
1147         ptr = strrchr(profilenm, '-');
1148         if (ptr && (strcmp(ptr, "-client") == 0))
1149                 len -= 7;
1150
1151         if (len > LUSTRE_MAXFSNAME) {
1152                 if (unlikely(len >= MAX_STRING_SIZE))
1153                         len = MAX_STRING_SIZE - 1;
1154                 strncpy(name, profilenm, len);
1155                 name[len] = '\0';
1156                 err = -ENAMETOOLONG;
1157                 CERROR("%s: fsname longer than %u characters: rc = %d\n",
1158                        name, LUSTRE_MAXFSNAME, err);
1159                 GOTO(out_free_cfg, err);
1160         }
1161         strncpy(sbi->ll_fsname, profilenm, len);
1162         sbi->ll_fsname[len] = '\0';
1163
1164         /* Mount info */
1165         snprintf(name, sizeof(name), "%.*s-%016lx", len,
1166                  profilenm, cfg_instance);
1167
1168         err = super_setup_bdi_name(sb, "%s", name);
1169         if (err)
1170                 GOTO(out_free_cfg, err);
1171
1172         /* disable kernel readahead */
1173         sb->s_bdi->ra_pages = 0;
1174
1175         /* Call ll_debugfs_register_super() before lustre_process_log()
1176          * so that "llite.*.*" params can be processed correctly.
1177          */
1178         err = ll_debugfs_register_super(sb, name);
1179         if (err < 0) {
1180                 CERROR("%s: could not register mountpoint in llite: rc = %d\n",
1181                        sbi->ll_fsname, err);
1182                 err = 0;
1183         }
1184
1185         /* The cfg_instance is a value unique to this super, in case some
1186          * joker tries to mount the same fs at two mount points.
1187          */
1188         cfg->cfg_instance = cfg_instance;
1189         cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
1190         cfg->cfg_callback = class_config_llog_handler;
1191         cfg->cfg_sub_clds = CONFIG_SUB_CLIENT;
1192         /* set up client obds */
1193         err = lustre_process_log(sb, profilenm, cfg);
1194         if (err < 0)
1195                 GOTO(out_debugfs, err);
1196
1197         /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
1198         lprof = class_get_profile(profilenm);
1199         if (lprof == NULL) {
1200                 LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be"
1201                                    " read from the MGS.  Does that filesystem "
1202                                    "exist?\n", profilenm);
1203                 GOTO(out_debugfs, err = -EINVAL);
1204         }
1205         CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
1206                lprof->lp_md, lprof->lp_dt);
1207
1208         dt_len = strlen(lprof->lp_dt) + instlen + 2;
1209         OBD_ALLOC(dt, dt_len);
1210         if (!dt)
1211                 GOTO(out_profile, err = -ENOMEM);
1212         snprintf(dt, dt_len - 1, "%s-%016lx", lprof->lp_dt, cfg_instance);
1213
1214         md_len = strlen(lprof->lp_md) + instlen + 2;
1215         OBD_ALLOC(md, md_len);
1216         if (!md)
1217                 GOTO(out_free_dt, err = -ENOMEM);
1218         snprintf(md, md_len - 1, "%s-%016lx", lprof->lp_md, cfg_instance);
1219
1220         /* connections, registrations, sb setup */
1221         err = client_common_fill_super(sb, md, dt);
1222         if (err < 0)
1223                 GOTO(out_free_md, err);
1224
1225         sbi->ll_client_common_fill_super_succeeded = 1;
1226
1227 out_free_md:
1228         if (md)
1229                 OBD_FREE(md, md_len);
1230 out_free_dt:
1231         if (dt)
1232                 OBD_FREE(dt, dt_len);
1233 out_profile:
1234         if (lprof)
1235                 class_put_profile(lprof);
1236 out_debugfs:
1237         if (err < 0)
1238                 ll_debugfs_unregister_super(sb);
1239 out_free_cfg:
1240         if (cfg)
1241                 OBD_FREE_PTR(cfg);
1242
1243         if (err)
1244                 ll_put_super(sb);
1245         else if (sbi->ll_flags & LL_SBI_VERBOSE)
1246                 LCONSOLE_WARN("Mounted %s\n", profilenm);
1247         RETURN(err);
1248 } /* ll_fill_super */
1249
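/**
 * Tear down the client superblock at umount: end the config and params
 * llogs, wait for unstable pages unless the disconnect is forced,
 * disconnect from the metadata and data devices, clean up the obd
 * devices in this superblock's group and free the ll_sb_info.
 */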
1250 void ll_put_super(struct super_block *sb)
1251 {
1252         struct config_llog_instance cfg, params_cfg;
1253         struct obd_device *obd;
1254         struct lustre_sb_info *lsi = s2lsi(sb);
1255         struct ll_sb_info *sbi = ll_s2sbi(sb);
1256         char *profilenm = get_profile_name(sb);
1257         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1258         long ccc_count;
1259         int next, force = 1, rc = 0;
1260         ENTRY;
1261
1262         if (IS_ERR(sbi))
1263                 GOTO(out_no_sbi, 0);
1264
1265         /* Should replace instance_id with something better for ASLR */
1266         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1267                profilenm, cfg_instance, sb);
1268
1269         cfg.cfg_instance = cfg_instance;
1270         lustre_end_log(sb, profilenm, &cfg);
1271
1272         params_cfg.cfg_instance = cfg_instance;
1273         lustre_end_log(sb, PARAMS_FILENAME, &params_cfg);
1274
1275         if (sbi->ll_md_exp) {
1276                 obd = class_exp2obd(sbi->ll_md_exp);
1277                 if (obd)
1278                         force = obd->obd_force;
1279         }
1280
1281         /* Wait for unstable pages to be committed to stable storage */
1282         if (force == 0) {
1283                 rc = l_wait_event_abortable(
1284                         sbi->ll_cache->ccc_unstable_waitq,
1285                         atomic_long_read(&sbi->ll_cache->ccc_unstable_nr) == 0);
1286         }
1287
1288         ccc_count = atomic_long_read(&sbi->ll_cache->ccc_unstable_nr);
1289         if (force == 0 && rc != -ERESTARTSYS)
1290                 LASSERTF(ccc_count == 0, "count: %li\n", ccc_count);
1291
1292         /* We need to set force before the lov_disconnect in
1293          * lustre_common_put_super, since l_d cleans up osc's as well.
1294          */
1295         if (force) {
1296                 next = 0;
1297                 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
1298                                                      &next)) != NULL) {
1299                         obd->obd_force = force;
1300                 }
1301         }
1302
1303         if (sbi->ll_client_common_fill_super_succeeded) {
1304                 /* Only if client_common_fill_super succeeded */
1305                 client_common_put_super(sb);
1306         }
1307
1308         next = 0;
1309         while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)))
1310                 class_manual_cleanup(obd);
1311
1312         if (sbi->ll_flags & LL_SBI_VERBOSE)
1313                 LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");
1314
1315         if (profilenm)
1316                 class_del_profile(profilenm);
1317
1318 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1319         if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
1320                 bdi_destroy(&lsi->lsi_bdi);
1321                 lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
1322         }
1323 #endif
1324
1325         ll_free_sbi(sb);
1326         lsi->lsi_llsbi = NULL;
1327 out_no_sbi:
1328         lustre_common_put_super(sb);
1329
1330         cl_env_cache_purge(~0);
1331
1332         module_put(THIS_MODULE);
1333
1334         EXIT;
1335 } /* client_put_super */
1336
1337 struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
1338 {
1339         struct inode *inode = NULL;
1340
1341         /* NOTE: we depend on atomic igrab() -bzzz */
1342         lock_res_and_lock(lock);
1343         if (lock->l_resource->lr_lvb_inode) {
1344                 struct ll_inode_info * lli;
1345                 lli = ll_i2info(lock->l_resource->lr_lvb_inode);
1346                 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1347                         inode = igrab(lock->l_resource->lr_lvb_inode);
1348                 } else {
1349                         inode = lock->l_resource->lr_lvb_inode;
1350                         LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ?  D_INFO :
1351                                          D_WARNING, lock, "lr_lvb_inode %p is "
1352                                          "bogus: magic %08x",
1353                                          lock->l_resource->lr_lvb_inode,
1354                                          lli->lli_inode_magic);
1355                         inode = NULL;
1356                 }
1357         }
1358         unlock_res_and_lock(lock);
1359         return inode;
1360 }
1361
1362 void ll_dir_clear_lsm_md(struct inode *inode)
1363 {
1364         struct ll_inode_info *lli = ll_i2info(inode);
1365
1366         LASSERT(S_ISDIR(inode->i_mode));
1367
1368         if (lli->lli_lsm_md) {
1369                 lmv_free_memmd(lli->lli_lsm_md);
1370                 lli->lli_lsm_md = NULL;
1371         }
1372
1373         if (lli->lli_default_lsm_md) {
1374                 lmv_free_memmd(lli->lli_default_lsm_md);
1375                 lli->lli_default_lsm_md = NULL;
1376         }
1377 }
1378
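/**
 * Build an inode for a slave stripe of a striped directory, keyed by
 * the stripe FID; used by ll_init_lsm_md() below to populate
 * lsm_md_oinfo[].lmo_root for each stripe.
 */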
1379 static struct inode *ll_iget_anon_dir(struct super_block *sb,
1380                                       const struct lu_fid *fid,
1381                                       struct lustre_md *md)
1382 {
1383         struct ll_sb_info       *sbi = ll_s2sbi(sb);
1384         struct mdt_body         *body = md->body;
1385         struct inode            *inode;
1386         ino_t                   ino;
1387         ENTRY;
1388
1389         ino = cl_fid_build_ino(fid, sbi->ll_flags & LL_SBI_32BIT_API);
1390         inode = iget_locked(sb, ino);
1391         if (inode == NULL) {
1392                 CERROR("%s: failed get simple inode "DFID": rc = -ENOENT\n",
1393                        sbi->ll_fsname, PFID(fid));
1394                 RETURN(ERR_PTR(-ENOENT));
1395         }
1396
1397         if (inode->i_state & I_NEW) {
1398                 struct ll_inode_info *lli = ll_i2info(inode);
1399                 struct lmv_stripe_md *lsm = md->lmv;
1400
1401                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
1402                                 (body->mbo_mode & S_IFMT);
1403                 LASSERTF(S_ISDIR(inode->i_mode), "Not slave inode "DFID"\n",
1404                          PFID(fid));
1405
1406                 inode->i_mtime.tv_sec = 0;
1407                 inode->i_atime.tv_sec = 0;
1408                 inode->i_ctime.tv_sec = 0;
1409                 inode->i_rdev = 0;
1410
1411 #ifdef HAVE_BACKING_DEV_INFO
1412                 /* initializing backing dev info. */
1413                 inode->i_mapping->backing_dev_info =
1414                                                 &s2lsi(inode->i_sb)->lsi_bdi;
1415 #endif
1416                 inode->i_op = &ll_dir_inode_operations;
1417                 inode->i_fop = &ll_dir_operations;
1418                 lli->lli_fid = *fid;
1419                 ll_lli_init(lli);
1420
1421                 LASSERT(lsm != NULL);
1422                 /* master object FID */
1423                 lli->lli_pfid = body->mbo_fid1;
1424                 CDEBUG(D_INODE, "lli %p slave "DFID" master "DFID"\n",
1425                        lli, PFID(fid), PFID(&lli->lli_pfid));
1426                 unlock_new_inode(inode);
1427         }
1428
1429         RETURN(inode);
1430 }
1431
1432 static int ll_init_lsm_md(struct inode *inode, struct lustre_md *md)
1433 {
1434         struct lu_fid *fid;
1435         struct lmv_stripe_md *lsm = md->lmv;
1436         struct ll_inode_info *lli = ll_i2info(inode);
1437         int i;
1438
1439         LASSERT(lsm != NULL);
1440
1441         CDEBUG(D_INODE, "%s: "DFID" set dir layout:\n",
1442                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1443         lsm_md_dump(D_INODE, lsm);
1444
1445         if (!lmv_dir_striped(lsm))
1446                 goto out;
1447
1448         /* XXX sigh, this lsm_root initialization should be in the
1449          * LMV layer, but it needs ll_iget, so we keep it
1450          * here for now. */
1451         for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
1452                 fid = &lsm->lsm_md_oinfo[i].lmo_fid;
1453                 LASSERT(lsm->lsm_md_oinfo[i].lmo_root == NULL);
1454
1455                 if (!fid_is_sane(fid))
1456                         continue;
1457
1458                 /* Unfortunately ll_iget will call ll_update_inode,
1459                  * where the initialization of the slave inode is slightly
1460                  * different, so it resets lsm_md to NULL to avoid
1461                  * initializing the lsm for the slave inode. */
1462                 lsm->lsm_md_oinfo[i].lmo_root =
1463                                 ll_iget_anon_dir(inode->i_sb, fid, md);
1464                 if (IS_ERR(lsm->lsm_md_oinfo[i].lmo_root)) {
1465                         int rc = PTR_ERR(lsm->lsm_md_oinfo[i].lmo_root);
1466
1467                         lsm->lsm_md_oinfo[i].lmo_root = NULL;
1468                         while (i-- > 0) {
1469                                 iput(lsm->lsm_md_oinfo[i].lmo_root);
1470                                 lsm->lsm_md_oinfo[i].lmo_root = NULL;
1471                         }
1472                         return rc;
1473                 }
1474         }
1475 out:
1476         lli->lli_lsm_md = lsm;
1477
1478         return 0;
1479 }
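
/*
 * A minimal sketch, not part of the original source, of the partial-failure
 * unwind used in ll_init_lsm_md() above: if initializing the i-th element
 * fails, release elements 0..i-1 in reverse order before returning the
 * error.  The helper name and the allocation are hypothetical; assumes
 * <linux/slab.h> is available.
 */
static int __maybe_unused ll_example_init_array(void **objs, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                objs[i] = kzalloc(sizeof(struct lu_fid), GFP_KERNEL);
                if (objs[i] == NULL) {
                        while (i-- > 0) {
                                kfree(objs[i]);
                                objs[i] = NULL;
                        }
                        return -ENOMEM;
                }
        }

        return 0;
}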
1480
1481 static void ll_update_default_lsm_md(struct inode *inode, struct lustre_md *md)
1482 {
1483         struct ll_inode_info *lli = ll_i2info(inode);
1484
1485         if (!md->default_lmv) {
1486                 /* clear default lsm */
1487                 if (lli->lli_default_lsm_md) {
1488                         down_write(&lli->lli_lsm_sem);
1489                         if (lli->lli_default_lsm_md) {
1490                                 lmv_free_memmd(lli->lli_default_lsm_md);
1491                                 lli->lli_default_lsm_md = NULL;
1492                         }
1493                         up_write(&lli->lli_lsm_sem);
1494                 }
1495         } else if (lli->lli_default_lsm_md) {
1496                 /* update default lsm if it changes */
1497                 down_read(&lli->lli_lsm_sem);
1498                 if (lli->lli_default_lsm_md &&
1499                     !lsm_md_eq(lli->lli_default_lsm_md, md->default_lmv)) {
1500                         up_read(&lli->lli_lsm_sem);
1501                         down_write(&lli->lli_lsm_sem);
1502                         if (lli->lli_default_lsm_md)
1503                                 lmv_free_memmd(lli->lli_default_lsm_md);
1504                         lli->lli_default_lsm_md = md->default_lmv;
1505                         lsm_md_dump(D_INODE, md->default_lmv);
1506                         md->default_lmv = NULL;
1507                         up_write(&lli->lli_lsm_sem);
1508                 } else {
1509                         up_read(&lli->lli_lsm_sem);
1510                 }
1511         } else {
1512                 /* init default lsm */
1513                 down_write(&lli->lli_lsm_sem);
1514                 lli->lli_default_lsm_md = md->default_lmv;
1515                 lsm_md_dump(D_INODE, md->default_lmv);
1516                 md->default_lmv = NULL;
1517                 up_write(&lli->lli_lsm_sem);
1518         }
1519 }
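
/*
 * A minimal sketch, not part of the original source, of the lock-upgrade
 * pattern used in ll_update_default_lsm_md() above: peek under the read
 * lock, and only when an update is really needed drop it, take the write
 * lock and re-check, because another thread may have updated the cache in
 * the unlocked window.  The md_cache structure and helper are hypothetical;
 * assumes <linux/rwsem.h> and <linux/slab.h>.
 */
struct md_cache {
        struct rw_semaphore      mc_sem;
        void                    *mc_md;         /* kmalloc'ed cached copy */
        size_t                   mc_len;
};

static void __maybe_unused md_cache_update(struct md_cache *mc,
                                           const void *new_md, size_t len)
{
        void *copy;

        down_read(&mc->mc_sem);
        if (mc->mc_md && mc->mc_len == len &&
            memcmp(mc->mc_md, new_md, len) == 0) {
                up_read(&mc->mc_sem);           /* unchanged, fast path */
                return;
        }
        up_read(&mc->mc_sem);

        copy = kmemdup(new_md, len, GFP_KERNEL);
        if (copy == NULL)
                return;

        down_write(&mc->mc_sem);
        /* re-check: another thread may have installed the same data */
        if (mc->mc_md && mc->mc_len == len &&
            memcmp(mc->mc_md, new_md, len) == 0) {
                kfree(copy);
        } else {
                kfree(mc->mc_md);
                mc->mc_md = copy;
                mc->mc_len = len;
        }
        up_write(&mc->mc_sem);
}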
1520
1521 static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
1522 {
1523         struct ll_inode_info *lli = ll_i2info(inode);
1524         struct lmv_stripe_md *lsm = md->lmv;
1525         struct cl_attr  *attr;
1526         int rc = 0;
1527
1528         ENTRY;
1529
1530         LASSERT(S_ISDIR(inode->i_mode));
1531         CDEBUG(D_INODE, "update lsm %p of "DFID"\n", lli->lli_lsm_md,
1532                PFID(ll_inode2fid(inode)));
1533
1534         /* update default LMV */
1535         if (md->default_lmv)
1536                 ll_update_default_lsm_md(inode, md);
1537
1538         /*
1539          * No stripe information from the request; the lustre_md from the
1540          * request does not include the stripeEA, see ll_md_setattr().
1541          */
1542         if (!lsm)
1543                 RETURN(0);
1544
1545         /*
1546          * Normally the dir layout doesn't change, so only take the read
1547          * lock to check it, to avoid blocking other MD operations.
1548          */
1549         down_read(&lli->lli_lsm_sem);
1550
1551         /* a concurrent lookup initialized the lsm, and it is unchanged */
1552         if (lli->lli_lsm_md && lsm_md_eq(lli->lli_lsm_md, lsm))
1553                 GOTO(unlock, rc = 0);
1554
1555         /* If the dir layout doesn't match, check whether the layout version
1556          * has increased, which means the layout changed; this happens in dir
1557          * split/merge and lfsck.
1558          *
1559          * foreign LMV should not change.
1560          */
1561         if (lli->lli_lsm_md && lmv_dir_striped(lli->lli_lsm_md) &&
1562             lsm->lsm_md_layout_version <=
1563             lli->lli_lsm_md->lsm_md_layout_version) {
1564                 CERROR("%s: "DFID" dir layout mismatch:\n",
1565                        ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1566                 lsm_md_dump(D_ERROR, lli->lli_lsm_md);
1567                 lsm_md_dump(D_ERROR, lsm);
1568                 GOTO(unlock, rc = -EINVAL);
1569         }
1570
1571         up_read(&lli->lli_lsm_sem);
1572         down_write(&lli->lli_lsm_sem);
1573         /* clear existing lsm */
1574         if (lli->lli_lsm_md) {
1575                 lmv_free_memmd(lli->lli_lsm_md);
1576                 lli->lli_lsm_md = NULL;
1577         }
1578
1579         rc = ll_init_lsm_md(inode, md);
1580         up_write(&lli->lli_lsm_sem);
1581
1582         if (rc)
1583                 RETURN(rc);
1584
1585         /* Set md->lmv to NULL so that the subsequent freeing of lustre_md
1586          * will not free this lsm.
1587          */
1588         md->lmv = NULL;
1589
1590         /* md_merge_attr() may take a long time; since the lsm is already
1591          * set, switch to a read lock.
1592          */
1593         down_read(&lli->lli_lsm_sem);
1594
1595         if (!lmv_dir_striped(lli->lli_lsm_md))
1596                 GOTO(unlock, rc = 0);
1597
1598         OBD_ALLOC_PTR(attr);
1599         if (!attr)
1600                 GOTO(unlock, rc = -ENOMEM);
1601
1602         /* validate the lsm */
1603         rc = md_merge_attr(ll_i2mdexp(inode), lli->lli_lsm_md, attr,
1604                            ll_md_blocking_ast);
1605         if (!rc) {
1606                 if (md->body->mbo_valid & OBD_MD_FLNLINK)
1607                         md->body->mbo_nlink = attr->cat_nlink;
1608                 if (md->body->mbo_valid & OBD_MD_FLSIZE)
1609                         md->body->mbo_size = attr->cat_size;
1610                 if (md->body->mbo_valid & OBD_MD_FLATIME)
1611                         md->body->mbo_atime = attr->cat_atime;
1612                 if (md->body->mbo_valid & OBD_MD_FLCTIME)
1613                         md->body->mbo_ctime = attr->cat_ctime;
1614                 if (md->body->mbo_valid & OBD_MD_FLMTIME)
1615                         md->body->mbo_mtime = attr->cat_mtime;
1616         }
1617
1618         OBD_FREE_PTR(attr);
1619         GOTO(unlock, rc);
1620 unlock:
1621         up_read(&lli->lli_lsm_sem);
1622
1623         return rc;
1624 }
1625
1626 void ll_clear_inode(struct inode *inode)
1627 {
1628         struct ll_inode_info *lli = ll_i2info(inode);
1629         struct ll_sb_info *sbi = ll_i2sbi(inode);
1630
1631         ENTRY;
1632
1633         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
1634                PFID(ll_inode2fid(inode)), inode);
1635
1636         if (S_ISDIR(inode->i_mode)) {
1637                 /* these should have been cleared in ll_file_release */
1638                 LASSERT(lli->lli_opendir_key == NULL);
1639                 LASSERT(lli->lli_sai == NULL);
1640                 LASSERT(lli->lli_opendir_pid == 0);
1641         } else {
1642                 pcc_inode_free(inode);
1643         }
1644
1645         md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
1646
1647         LASSERT(!lli->lli_open_fd_write_count);
1648         LASSERT(!lli->lli_open_fd_read_count);
1649         LASSERT(!lli->lli_open_fd_exec_count);
1650
1651         if (lli->lli_mds_write_och)
1652                 ll_md_real_close(inode, FMODE_WRITE);
1653         if (lli->lli_mds_exec_och)
1654                 ll_md_real_close(inode, FMODE_EXEC);
1655         if (lli->lli_mds_read_och)
1656                 ll_md_real_close(inode, FMODE_READ);
1657
1658         if (S_ISLNK(inode->i_mode) && lli->lli_symlink_name) {
1659                 OBD_FREE(lli->lli_symlink_name,
1660                          strlen(lli->lli_symlink_name) + 1);
1661                 lli->lli_symlink_name = NULL;
1662         }
1663
1664         ll_xattr_cache_destroy(inode);
1665
1666         forget_all_cached_acls(inode);
1667         lli_clear_acl(lli);
1668         lli->lli_inode_magic = LLI_INODE_DEAD;
1669
1670         if (S_ISDIR(inode->i_mode))
1671                 ll_dir_clear_lsm_md(inode);
1672         else if (S_ISREG(inode->i_mode) && !is_bad_inode(inode))
1673                 LASSERT(list_empty(&lli->lli_agl_list));
1674
1675         /*
1676          * XXX This has to be done before lsm is freed below, because
1677          * cl_object still uses inode lsm.
1678          */
1679         cl_inode_fini(inode);
1680
1681         llcrypt_put_encryption_info(inode);
1682
1683         EXIT;
1684 }
1685
1686 static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data)
1687 {
1688         struct lustre_md md;
1689         struct inode *inode = dentry->d_inode;
1690         struct ll_sb_info *sbi = ll_i2sbi(inode);
1691         struct ptlrpc_request *request = NULL;
1692         int rc, ia_valid;
1693         ENTRY;
1694
1695         op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
1696                                      LUSTRE_OPC_ANY, NULL);
1697         if (IS_ERR(op_data))
1698                 RETURN(PTR_ERR(op_data));
1699
1700         rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &request);
1701         if (rc) {
1702                 ptlrpc_req_finished(request);
1703                 if (rc == -ENOENT) {
1704                         clear_nlink(inode);
1705                         /* Unlinked special device node? Or just a race?
1706                          * Pretend we have done everything. */
1707                         if (!S_ISREG(inode->i_mode) &&
1708                             !S_ISDIR(inode->i_mode)) {
1709                                 ia_valid = op_data->op_attr.ia_valid;
1710                                 op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
1711                                 rc = simple_setattr(dentry, &op_data->op_attr);
1712                                 op_data->op_attr.ia_valid = ia_valid;
1713                         }
1714                 } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
1715                         CERROR("md_setattr fails: rc = %d\n", rc);
1716                 }
1717                 RETURN(rc);
1718         }
1719
1720         rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
1721                               sbi->ll_md_exp, &md);
1722         if (rc) {
1723                 ptlrpc_req_finished(request);
1724                 RETURN(rc);
1725         }
1726
1727         ia_valid = op_data->op_attr.ia_valid;
1728         /* inode size will be set in ll_setattr_ost; we can't do it now since
1729          * the dirty cache is not cleared yet. */
1730         op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
1731         if (S_ISREG(inode->i_mode))
1732                 inode_lock(inode);
1733         rc = simple_setattr(dentry, &op_data->op_attr);
1734         if (S_ISREG(inode->i_mode))
1735                 inode_unlock(inode);
1736         op_data->op_attr.ia_valid = ia_valid;
1737
1738         rc = ll_update_inode(inode, &md);
1739         ptlrpc_req_finished(request);
1740
1741         RETURN(rc);
1742 }
1743
1744 /**
1745  * Zero a portion of the page that is part of @inode.
1746  * This implies, if necessary:
1747  * - taking cl_lock on range corresponding to concerned page
1748  * - grabbing vm page
1749  * - associating cl_page
1750  * - proceeding to clio read
1751  * - zeroing range in page
1752  * - proceeding to cl_page flush
1753  * - releasing cl_lock
1754  *
1755  * \param[in] inode     inode
1756  * \param[in] index     page index
1757  * \param[in] offset    offset in page to start zero from
1758  * \param[in] len       length to zero
1759  *
1760  * \retval 0            on success
1761  * \retval negative     errno on failure
1762  */
1763 int ll_io_zero_page(struct inode *inode, pgoff_t index, pgoff_t offset,
1764                     unsigned len)
1765 {
1766         struct ll_inode_info *lli = ll_i2info(inode);
1767         struct cl_object *clob = lli->lli_clob;
1768         __u16 refcheck;
1769         struct lu_env *env = NULL;
1770         struct cl_io *io = NULL;
1771         struct cl_page *clpage = NULL;
1772         struct page *vmpage = NULL;
1773         unsigned from = index << PAGE_SHIFT;
1774         struct cl_lock *lock = NULL;
1775         struct cl_lock_descr *descr = NULL;
1776         struct cl_2queue *queue = NULL;
1777         struct cl_sync_io *anchor = NULL;
1778         bool holdinglock = false;
1779         bool lockedbymyself = true;
1780         int rc;
1781
1782         ENTRY;
1783
1784         env = cl_env_get(&refcheck);
1785         if (IS_ERR(env))
1786                 RETURN(PTR_ERR(env));
1787
1788         io = vvp_env_thread_io(env);
1789         io->ci_obj = clob;
1790         rc = cl_io_rw_init(env, io, CIT_WRITE, from, PAGE_SIZE);
1791         if (rc)
1792                 GOTO(putenv, rc);
1793
1794         lock = vvp_env_lock(env);
1795         descr = &lock->cll_descr;
1796         descr->cld_obj   = io->ci_obj;
1797         descr->cld_start = cl_index(io->ci_obj, from);
1798         descr->cld_end   = cl_index(io->ci_obj, from + PAGE_SIZE - 1);
1799         descr->cld_mode  = CLM_WRITE;
1800         descr->cld_enq_flags = CEF_MUST | CEF_NONBLOCK;
1801
1802         /* request lock for page */
1803         rc = cl_lock_request(env, io, lock);
1804         /* -ECANCELED indicates a matching lock with a different extent
1805          * was already present, and -EEXIST indicates a matching lock
1806          * on exactly the same extent was already present.
1807          * In both cases it means we are covered.
1808          */
1809         if (rc == -ECANCELED || rc == -EEXIST)
1810                 rc = 0;
1811         else if (rc < 0)
1812                 GOTO(iofini, rc);
1813         else
1814                 holdinglock = true;
1815
1816         /* grab page */
1817         vmpage = grab_cache_page_nowait(inode->i_mapping, index);
1818         if (vmpage == NULL)
1819                 GOTO(rellock, rc = -EOPNOTSUPP);
1820
1821         if (!PageDirty(vmpage)) {
1822                 /* associate cl_page */
1823                 clpage = cl_page_find(env, clob, vmpage->index,
1824                                       vmpage, CPT_CACHEABLE);
1825                 if (IS_ERR(clpage))
1826                         GOTO(pagefini, rc = PTR_ERR(clpage));
1827
1828                 cl_page_assume(env, io, clpage);
1829         }
1830
1831         if (!PageUptodate(vmpage) && !PageDirty(vmpage) &&
1832             !PageWriteback(vmpage)) {
1833                 /* read page */
1834                 /* set PagePrivate2 to detect special case of empty page
1835                  * in osc_brw_fini_request()
1836                  */
1837                 SetPagePrivate2(vmpage);
1838                 rc = ll_io_read_page(env, io, clpage, NULL);
1839                 if (!PagePrivate2(vmpage))
1840                         /* PagePrivate2 was cleared in osc_brw_fini_request()
1841                          * meaning we read an empty page. In this case, in order
1842                          * to avoid allocating unnecessary block in truncated
1843                          * file, we must not zero and write as below. Subsequent
1844                          * server-side truncate will handle things correctly.
1845                          */
1846                         GOTO(clpfini, rc = 0);
1847                 ClearPagePrivate2(vmpage);
1848                 if (rc)
1849                         GOTO(clpfini, rc);
1850                 lockedbymyself = trylock_page(vmpage);
1851                 cl_page_assume(env, io, clpage);
1852         }
1853
1854         /* zero range in page */
1855         zero_user(vmpage, offset, len);
1856
1857         if (holdinglock && clpage) {
1858                 /* explicitly write newly modified page */
1859                 queue = &io->ci_queue;
1860                 cl_2queue_init(queue);
1861                 anchor = &vvp_env_info(env)->vti_anchor;
1862                 cl_sync_io_init(anchor, 1);
1863                 clpage->cp_sync_io = anchor;
1864                 cl_2queue_add(queue, clpage);
1865                 rc = cl_io_submit_rw(env, io, CRT_WRITE, queue);
1866                 if (rc)
1867                         GOTO(queuefini1, rc);
1868                 rc = cl_sync_io_wait(env, anchor, 0);
1869                 if (rc)
1870                         GOTO(queuefini2, rc);
1871                 cl_page_assume(env, io, clpage);
1872
1873 queuefini2:
1874                 cl_2queue_discard(env, io, queue);
1875 queuefini1:
1876                 cl_2queue_disown(env, io, queue);
1877                 cl_2queue_fini(env, queue);
1878         }
1879
1880 clpfini:
1881         if (clpage)
1882                 cl_page_put(env, clpage);
1883 pagefini:
1884         if (lockedbymyself) {
1885                 unlock_page(vmpage);
1886                 put_page(vmpage);
1887         }
1888 rellock:
1889         if (holdinglock)
1890                 cl_lock_release(env, lock);
1891 iofini:
1892         cl_io_fini(env, io);
1893 putenv:
1894         if (env)
1895                 cl_env_put(env, &refcheck);
1896
1897         RETURN(rc);
1898 }
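
/*
 * A minimal usage sketch (not part of the original source): zero the tail
 * of the page containing 'new_size', e.g. when a file is truncated to a
 * size that is not page aligned.  The helper name is hypothetical; it only
 * illustrates how the (index, offset, len) arguments above are derived.
 */
static int __maybe_unused ll_example_zero_tail(struct inode *inode,
                                               loff_t new_size)
{
        pgoff_t index = new_size >> PAGE_SHIFT;
        unsigned int offset = new_size & (PAGE_SIZE - 1);

        if (offset == 0)
                return 0;       /* page aligned, whole pages are wiped */

        return ll_io_zero_page(inode, index, offset, PAGE_SIZE - offset);
}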
1899
1900 /* If this inode has objects allocated to it (lsm != NULL), then the OST
1901  * object(s) determine the file size and mtime.  Otherwise, the MDS will
1902  * keep these values until such a time that objects are allocated for it.
1903  * We do the MDS operations first, as it checks permissions for us.
1904  * We don't do the MDS RPC if there is nothing that we want to store there,
1905  * but otherwise there is no harm in updating mtime/atime on the MDS if we
1906  * are going to do an RPC anyway.
1907  *
1908  * If we are doing a truncate, we will send the mtime and ctime updates
1909  * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
1910  * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
1911  * at the same time.
1912  *
1913  * In the case of HSM import, we only set attributes on the MDS.
1914  */
1915 int ll_setattr_raw(struct dentry *dentry, struct iattr *attr,
1916                    enum op_xvalid xvalid, bool hsm_import)
1917 {
1918         struct inode *inode = dentry->d_inode;
1919         struct ll_inode_info *lli = ll_i2info(inode);
1920         struct md_op_data *op_data = NULL;
1921         ktime_t kstart = ktime_get();
1922         int rc = 0;
1923
1924         ENTRY;
1925
1926         CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, "
1927                "valid %x, hsm_import %d\n",
1928                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid),
1929                inode, i_size_read(inode), attr->ia_size, attr->ia_valid,
1930                hsm_import);
1931
1932         if (attr->ia_valid & ATTR_SIZE) {
1933                 /* Check new size against VFS/VM file size limit and rlimit */
1934                 rc = inode_newsize_ok(inode, attr->ia_size);
1935                 if (rc)
1936                         RETURN(rc);
1937
1938                 /* The maximum Lustre file size is variable, based on the
1939                  * OST maximum object size and number of stripes.  This
1940                  * needs another check in addition to the VFS check above. */
1941                 if (attr->ia_size > ll_file_maxbytes(inode)) {
1942                         CDEBUG(D_INODE,"file "DFID" too large %llu > %llu\n",
1943                                PFID(&lli->lli_fid), attr->ia_size,
1944                                ll_file_maxbytes(inode));
1945                         RETURN(-EFBIG);
1946                 }
1947
1948                 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
1949         }
1950
1951         /* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
1952         if (attr->ia_valid & TIMES_SET_FLAGS) {
1953                 if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
1954                     !cfs_capable(CFS_CAP_FOWNER))
1955                         RETURN(-EPERM);
1956         }
1957
1958         /* We mark all of the fields "set" so MDS/OST does not re-set them */
1959         if (!(xvalid & OP_XVALID_CTIME_SET) &&
1960              (attr->ia_valid & ATTR_CTIME)) {
1961                 attr->ia_ctime = current_time(inode);
1962                 xvalid |= OP_XVALID_CTIME_SET;
1963         }
1964         if (!(attr->ia_valid & ATTR_ATIME_SET) &&
1965             (attr->ia_valid & ATTR_ATIME)) {
1966                 attr->ia_atime = current_time(inode);
1967                 attr->ia_valid |= ATTR_ATIME_SET;
1968         }
1969         if (!(attr->ia_valid & ATTR_MTIME_SET) &&
1970             (attr->ia_valid & ATTR_MTIME)) {
1971                 attr->ia_mtime = current_time(inode);
1972                 attr->ia_valid |= ATTR_MTIME_SET;
1973         }
1974
1975         if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
1976                 CDEBUG(D_INODE, "setting mtime %lld, ctime %lld, now = %lld\n",
1977                        (s64)attr->ia_mtime.tv_sec, (s64)attr->ia_ctime.tv_sec,
1978                        ktime_get_real_seconds());
1979
1980         if (S_ISREG(inode->i_mode))
1981                 inode_unlock(inode);
1982
1983         /* We always do an MDS RPC, even if we're only changing the size;
1984          * only the MDS knows whether truncate() should fail with -ETXTBSY */
1985
1986         OBD_ALLOC_PTR(op_data);
1987         if (op_data == NULL)
1988                 GOTO(out, rc = -ENOMEM);
1989
1990         if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
1991                 /* If we are changing the file size, the file content is
1992                  * modified; flag it.
1993                  */
1994                 xvalid |= OP_XVALID_OWNEROVERRIDE;
1995                 op_data->op_bias |= MDS_DATA_MODIFIED;
1996                 ll_file_clear_flag(lli, LLIF_DATA_MODIFIED);
1997         }
1998
1999         if (attr->ia_valid & ATTR_FILE) {
2000                 struct ll_file_data *fd = attr->ia_file->private_data;
2001
2002                 if (fd->fd_lease_och)
2003                         op_data->op_bias |= MDS_TRUNC_KEEP_LEASE;
2004         }
2005
2006         op_data->op_attr = *attr;
2007         op_data->op_xvalid = xvalid;
2008
2009         rc = ll_md_setattr(dentry, op_data);
2010         if (rc)
2011                 GOTO(out, rc);
2012
2013         if (!S_ISREG(inode->i_mode) || hsm_import)
2014                 GOTO(out, rc = 0);
2015
2016         if (attr->ia_valid & (ATTR_SIZE | ATTR_ATIME | ATTR_ATIME_SET |
2017                               ATTR_MTIME | ATTR_MTIME_SET | ATTR_CTIME) ||
2018             xvalid & OP_XVALID_CTIME_SET) {
2019                 bool cached = false;
2020
2021                 rc = pcc_inode_setattr(inode, attr, &cached);
2022                 if (cached) {
2023                         if (rc) {
2024                                 CERROR("%s: PCC inode "DFID" setattr failed: "
2025                                        "rc = %d\n",
2026                                        ll_i2sbi(inode)->ll_fsname,
2027                                        PFID(&lli->lli_fid), rc);
2028                                 GOTO(out, rc);
2029                         }
2030                 } else {
2031                         unsigned int flags = 0;
2032
2033                         /* For truncate and for utimes that send attributes
2034                          * to OSTs, setting mtime/atime to the past is done
2035                          * under a PW [0:EOF] extent lock (new_size:EOF for
2036                          * truncate). It may seem excessive to send mtime/atime
2037                          * updates to OSTs when not setting times to the past,
2038                          * but it is necessary due to possible time
2039                          * de-synchronization between MDT inode and OST objects.
2040                          */
2041                         if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode) &&
2042                             attr->ia_valid & ATTR_SIZE) {
2043                                 xvalid |= OP_XVALID_FLAGS;
2044                                 flags = LUSTRE_ENCRYPT_FL;
2045                                 /* Call to ll_io_zero_page is not necessary if
2046                                  * truncating on PAGE_SIZE boundary, because
2047                                  * whole pages will be wiped.
2048                                  * In case of Direct IO, all we need is to set
2049                                  * new size.
2050                                  */
2051                                 if (attr->ia_size & ~PAGE_MASK &&
2052                                     !(attr->ia_valid & ATTR_FILE &&
2053                                       attr->ia_file->f_flags & O_DIRECT)) {
2054                                         pgoff_t offset =
2055                                                 attr->ia_size & (PAGE_SIZE - 1);
2056
2057                                         rc = ll_io_zero_page(inode,
2058                                                     attr->ia_size >> PAGE_SHIFT,
2059                                                     offset, PAGE_SIZE - offset);
2060                                         if (rc)
2061                                                 GOTO(out, rc);
2062                                 }
2063                         }
2064                         rc = cl_setattr_ost(lli->lli_clob, attr, xvalid, flags);
2065                 }
2066         }
2067
2068         /* If the file was restored, it needs the dirty flag set.
2069          *
2070          * We've already sent the MDS_DATA_MODIFIED flag in
2071          * ll_md_setattr() for truncate. However, the MDT refuses to
2072          * set the HS_DIRTY flag on released files, so we have to set
2073          * it again if the file has been restored. Please check how
2074          * LLIF_DATA_MODIFIED is set in vvp_io_setattr_fini().
2075          *
2076          * Note that if the file is not released, the previous
2077          * MDS_DATA_MODIFIED has taken effect and usually
2078          * LLIF_DATA_MODIFIED is not set (see vvp_io_setattr_fini()).
2079          * This way we can save an RPC for the common open + truncate
2080          * operation. */
2081         if (ll_file_test_and_clear_flag(lli, LLIF_DATA_MODIFIED)) {
2082                 struct hsm_state_set hss = {
2083                         .hss_valid = HSS_SETMASK,
2084                         .hss_setmask = HS_DIRTY,
2085                 };
2086                 int rc2;
2087
2088                 rc2 = ll_hsm_state_set(inode, &hss);
2089                 /* Truncate and write can happen at the same time, so the
2090                  * file can be marked modified even though it was not
2091                  * restored from the released state; in that case
2092                  * ll_hsm_state_set() is not applicable for the file and
2093                  * rc2 < 0 is normal. */
2094                 if (rc2 < 0)
2095                         CDEBUG(D_INFO, DFID "HSM set dirty failed: rc2 = %d\n",
2096                                PFID(ll_inode2fid(inode)), rc2);
2097         }
2098
2099         EXIT;
2100 out:
2101         if (op_data != NULL)
2102                 ll_finish_md_op_data(op_data);
2103
2104         if (S_ISREG(inode->i_mode)) {
2105                 inode_lock(inode);
2106                 if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
2107                         inode_dio_wait(inode);
2108                 /* Once we've got the i_mutex, it's safe to set the S_NOSEC
2109                  * flag.  ll_update_inode (called from ll_md_setattr) clears
2110                  * inode flags, so there is a gap where S_NOSEC is not set.
2111                  * This can cause a writer to take the i_mutex unnecessarily,
2112                  * but this is safe to do and should be rare. */
2113                 inode_has_no_xattr(inode);
2114         }
2115
2116         if (!rc)
2117                 ll_stats_ops_tally(ll_i2sbi(inode), attr->ia_valid & ATTR_SIZE ?
2118                                         LPROC_LL_TRUNC : LPROC_LL_SETATTR,
2119                                    ktime_us_delta(ktime_get(), kstart));
2120
2121         return rc;
2122 }
2123
2124 int ll_setattr(struct dentry *de, struct iattr *attr)
2125 {
2126         int mode = de->d_inode->i_mode;
2127         enum op_xvalid xvalid = 0;
2128         int rc;
2129
2130         rc = llcrypt_prepare_setattr(de, attr);
2131         if (rc)
2132                 return rc;
2133
2134         if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
2135                               (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
2136                 xvalid |= OP_XVALID_OWNEROVERRIDE;
2137
2138         if (((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
2139                                (ATTR_SIZE|ATTR_MODE)) &&
2140             (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
2141              (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2142               !(attr->ia_mode & S_ISGID))))
2143                 attr->ia_valid |= ATTR_FORCE;
2144
2145         if ((attr->ia_valid & ATTR_MODE) &&
2146             (mode & S_ISUID) &&
2147             !(attr->ia_mode & S_ISUID) &&
2148             !(attr->ia_valid & ATTR_KILL_SUID))
2149                 attr->ia_valid |= ATTR_KILL_SUID;
2150
2151         if ((attr->ia_valid & ATTR_MODE) &&
2152             ((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2153             !(attr->ia_mode & S_ISGID) &&
2154             !(attr->ia_valid & ATTR_KILL_SGID))
2155                 attr->ia_valid |= ATTR_KILL_SGID;
2156
2157         return ll_setattr_raw(de, attr, xvalid, false);
2158 }
2159
2160 int ll_statfs_internal(struct ll_sb_info *sbi, struct obd_statfs *osfs,
2161                        u32 flags)
2162 {
2163         struct obd_statfs obd_osfs = { 0 };
2164         time64_t max_age;
2165         int rc;
2166
2167         ENTRY;
2168         max_age = ktime_get_seconds() - sbi->ll_statfs_max_age;
2169
2170         if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
2171                 flags |= OBD_STATFS_NODELAY;
2172
2173         rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
2174         if (rc)
2175                 RETURN(rc);
2176
2177         osfs->os_type = LL_SUPER_MAGIC;
2178
2179         CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n",
2180               osfs->os_bavail, osfs->os_blocks, osfs->os_ffree, osfs->os_files);
2181
2182         if (osfs->os_state & OS_STATFS_SUM)
2183                 GOTO(out, rc);
2184
2185         rc = obd_statfs(NULL, sbi->ll_dt_exp, &obd_osfs, max_age, flags);
2186         if (rc) /* Possibly a filesystem with no OSTs.  Report MDT totals. */
2187                 GOTO(out, rc = 0);
2188
2189         CDEBUG(D_SUPER, "OSC blocks %llu/%llu objects %llu/%llu\n",
2190                obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
2191                obd_osfs.os_files);
2192
2193         osfs->os_bsize = obd_osfs.os_bsize;
2194         osfs->os_blocks = obd_osfs.os_blocks;
2195         osfs->os_bfree = obd_osfs.os_bfree;
2196         osfs->os_bavail = obd_osfs.os_bavail;
2197
2198         /* If we have _some_ OSTs, but don't have as many free objects on the
2199          * OSTs as inodes on the MDTs, reduce the reported number of inodes
2200          * to compensate, so that the "inodes in use" number is correct.
2201          * This should be kept in sync with lod_statfs() behaviour.
2202          */
2203         if (obd_osfs.os_files && obd_osfs.os_ffree < osfs->os_ffree) {
2204                 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
2205                                  obd_osfs.os_ffree;
2206                 osfs->os_ffree = obd_osfs.os_ffree;
2207         }
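
        /*
         * Worked example (hypothetical numbers): if the MDT reports
         * os_files = 1000 and os_ffree = 900 (100 inodes in use) while the
         * OSTs only have obd_osfs.os_ffree = 50 free objects, the reported
         * values become os_files = (1000 - 900) + 50 = 150 and
         * os_ffree = 50, so "inodes in use" still reads as 100.
         */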
2208
2209 out:
2210         RETURN(rc);
2211 }
2212
2213 static int ll_statfs_project(struct inode *inode, struct kstatfs *sfs)
2214 {
2215         struct if_quotactl qctl = {
2216                 .qc_cmd = LUSTRE_Q_GETQUOTA,
2217                 .qc_type = PRJQUOTA,
2218                 .qc_valid = QC_GENERAL,
2219         };
2220         u64 limit, curblock;
2221         int ret;
2222
2223         qctl.qc_id = ll_i2info(inode)->lli_projid;
2224         ret = quotactl_ioctl(ll_i2sbi(inode), &qctl);
2225         if (ret) {
2226                 /* ignore errors if project ID does not have
2227                  * a quota limit or the feature is unsupported.
2228                  */
2229                 if (ret == -ESRCH || ret == -EOPNOTSUPP)
2230                         ret = 0;
2231                 return ret;
2232         }
2233
2234         limit = ((qctl.qc_dqblk.dqb_bsoftlimit ?
2235                  qctl.qc_dqblk.dqb_bsoftlimit :
2236                  qctl.qc_dqblk.dqb_bhardlimit) * 1024) / sfs->f_bsize;
2237         if (limit && sfs->f_blocks > limit) {
2238                 curblock = (qctl.qc_dqblk.dqb_curspace +
2239                                 sfs->f_bsize - 1) / sfs->f_bsize;
2240                 sfs->f_blocks = limit;
2241                 sfs->f_bfree = sfs->f_bavail =
2242                         (sfs->f_blocks > curblock) ?
2243                         (sfs->f_blocks - curblock) : 0;
2244         }
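
        /*
         * Worked example (hypothetical numbers): with a block soft limit of
         * 1048576 KiB (1 GiB) and f_bsize = 4096,
         * limit = 1048576 * 1024 / 4096 = 262144 blocks.  If the filesystem
         * reports more than that and dqb_curspace = 536870912 bytes
         * (512 MiB), then curblock = 131072, so f_blocks is clamped to
         * 262144 and f_bfree = f_bavail = 131072.
         */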
2245
2246         limit = qctl.qc_dqblk.dqb_isoftlimit ?
2247                 qctl.qc_dqblk.dqb_isoftlimit :
2248                 qctl.qc_dqblk.dqb_ihardlimit;
2249         if (limit && sfs->f_files > limit) {
2250                 sfs->f_files = limit;
2251                 sfs->f_ffree = (sfs->f_files >
2252                         qctl.qc_dqblk.dqb_curinodes) ?
2253                         (sfs->f_files - qctl.qc_dqblk.dqb_curinodes) : 0;
2254         }
2255
2256         return 0;
2257 }
2258
2259 int ll_statfs(struct dentry *de, struct kstatfs *sfs)
2260 {
2261         struct super_block *sb = de->d_sb;
2262         struct obd_statfs osfs;
2263         __u64 fsid = huge_encode_dev(sb->s_dev);
2264         ktime_t kstart = ktime_get();
2265         int rc;
2266
2267         CDEBUG(D_VFSTRACE, "VFS Op:sb=%s (%p)\n", sb->s_id, sb);
2268
2269         /* Some amount of caching on the client is allowed */
2270         rc = ll_statfs_internal(ll_s2sbi(sb), &osfs, OBD_STATFS_SUM);
2271         if (rc)
2272                 return rc;
2273
2274         statfs_unpack(sfs, &osfs);
2275
2276         /* We need to downshift for all 32-bit kernels, because we can't
2277          * tell if the kernel is being called via sys_statfs64() or not.
2278          * Stop before overflowing f_bsize - in which case it is better
2279          * to just risk EOVERFLOW if the caller is using the old sys_statfs(). */
2280         if (sizeof(long) < 8) {
2281                 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
2282                         sfs->f_bsize <<= 1;
2283
2284                         osfs.os_blocks >>= 1;
2285                         osfs.os_bfree >>= 1;
2286                         osfs.os_bavail >>= 1;
2287                 }
2288         }
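
        /*
         * Worked example (hypothetical numbers): on a 32-bit kernel a 16 TiB
         * filesystem with 4 KiB blocks reports os_blocks = 2^32, which does
         * not fit in an unsigned long.  One pass of the loop doubles f_bsize
         * to 8 KiB and halves the block counts to 2^31, so the reported
         * capacity (f_blocks * f_bsize) is unchanged.
         */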
2289
2290         sfs->f_blocks = osfs.os_blocks;
2291         sfs->f_bfree = osfs.os_bfree;
2292         sfs->f_bavail = osfs.os_bavail;
2293         sfs->f_fsid.val[0] = (__u32)fsid;
2294         sfs->f_fsid.val[1] = (__u32)(fsid >> 32);
2295         if (ll_i2info(de->d_inode)->lli_projid)
2296                 return ll_statfs_project(de->d_inode, sfs);
2297
2298         ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STATFS,
2299                            ktime_us_delta(ktime_get(), kstart));
2300
2301         return 0;
2302 }
2303
2304 void ll_inode_size_lock(struct inode *inode)
2305 {
2306         struct ll_inode_info *lli;
2307
2308         LASSERT(!S_ISDIR(inode->i_mode));
2309
2310         lli = ll_i2info(inode);
2311         mutex_lock(&lli->lli_size_mutex);
2312 }
2313
2314 void ll_inode_size_unlock(struct inode *inode)
2315 {
2316         struct ll_inode_info *lli;
2317
2318         lli = ll_i2info(inode);
2319         mutex_unlock(&lli->lli_size_mutex);
2320 }
2321
2322 void ll_update_inode_flags(struct inode *inode, int ext_flags)
2323 {
2324         /* do not clear encryption flag */
2325         ext_flags |= ll_inode_to_ext_flags(inode->i_flags) & LUSTRE_ENCRYPT_FL;
2326         inode->i_flags = ll_ext_to_inode_flags(ext_flags);
2327         if (ext_flags & LUSTRE_PROJINHERIT_FL)
2328                 ll_file_set_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT);
2329         else
2330                 ll_file_clear_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT);
2331 }
2332
2333 int ll_update_inode(struct inode *inode, struct lustre_md *md)
2334 {
2335         struct ll_inode_info *lli = ll_i2info(inode);
2336         struct mdt_body *body = md->body;
2337         struct ll_sb_info *sbi = ll_i2sbi(inode);
2338         int rc = 0;
2339
2340         if (body->mbo_valid & OBD_MD_FLEASIZE) {
2341                 rc = cl_file_inode_init(inode, md);
2342                 if (rc)
2343                         return rc;
2344         }
2345
2346         if (S_ISDIR(inode->i_mode)) {
2347                 rc = ll_update_lsm_md(inode, md);
2348                 if (rc != 0)
2349                         return rc;
2350         }
2351
2352         if (body->mbo_valid & OBD_MD_FLACL)
2353                 lli_replace_acl(lli, md);
2354
2355         inode->i_ino = cl_fid_build_ino(&body->mbo_fid1,
2356                                         sbi->ll_flags & LL_SBI_32BIT_API);
2357         inode->i_generation = cl_fid_build_gen(&body->mbo_fid1);
2358
2359         if (body->mbo_valid & OBD_MD_FLATIME) {
2360                 if (body->mbo_atime > inode->i_atime.tv_sec)
2361                         inode->i_atime.tv_sec = body->mbo_atime;
2362                 lli->lli_atime = body->mbo_atime;
2363         }
2364
2365         if (body->mbo_valid & OBD_MD_FLMTIME) {
2366                 if (body->mbo_mtime > inode->i_mtime.tv_sec) {
2367                         CDEBUG(D_INODE,
2368                                "setting ino %lu mtime from %lld to %llu\n",
2369                                inode->i_ino, (s64)inode->i_mtime.tv_sec,
2370                                body->mbo_mtime);
2371                         inode->i_mtime.tv_sec = body->mbo_mtime;
2372                 }
2373                 lli->lli_mtime = body->mbo_mtime;
2374         }
2375
2376         if (body->mbo_valid & OBD_MD_FLCTIME) {
2377                 if (body->mbo_ctime > inode->i_ctime.tv_sec)
2378                         inode->i_ctime.tv_sec = body->mbo_ctime;
2379                 lli->lli_ctime = body->mbo_ctime;
2380         }
2381
2382         if (body->mbo_valid & OBD_MD_FLBTIME)
2383                 lli->lli_btime = body->mbo_btime;
2384
2385         /* Clear i_flags to remove S_NOSEC before permissions are updated */
2386         if (body->mbo_valid & OBD_MD_FLFLAGS)
2387                 ll_update_inode_flags(inode, body->mbo_flags);
2388         if (body->mbo_valid & OBD_MD_FLMODE)
2389                 inode->i_mode = (inode->i_mode & S_IFMT) |
2390                                 (body->mbo_mode & ~S_IFMT);
2391
2392         if (body->mbo_valid & OBD_MD_FLTYPE)
2393                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
2394                                 (body->mbo_mode & S_IFMT);
2395
2396         LASSERT(inode->i_mode != 0);
2397         if (body->mbo_valid & OBD_MD_FLUID)
2398                 inode->i_uid = make_kuid(&init_user_ns, body->mbo_uid);
2399         if (body->mbo_valid & OBD_MD_FLGID)
2400                 inode->i_gid = make_kgid(&init_user_ns, body->mbo_gid);
2401         if (body->mbo_valid & OBD_MD_FLPROJID)
2402                 lli->lli_projid = body->mbo_projid;
2403         if (body->mbo_valid & OBD_MD_FLNLINK)
2404                 set_nlink(inode, body->mbo_nlink);
2405         if (body->mbo_valid & OBD_MD_FLRDEV)
2406                 inode->i_rdev = old_decode_dev(body->mbo_rdev);
2407
2408         if (body->mbo_valid & OBD_MD_FLID) {
2409                 /* FID shouldn't be changed! */
2410                 if (fid_is_sane(&lli->lli_fid)) {
2411                         LASSERTF(lu_fid_eq(&lli->lli_fid, &body->mbo_fid1),
2412                                  "Trying to change FID "DFID
2413                                  " to the "DFID", inode "DFID"(%p)\n",
2414                                  PFID(&lli->lli_fid), PFID(&body->mbo_fid1),
2415                                  PFID(ll_inode2fid(inode)), inode);
2416                 } else {
2417                         lli->lli_fid = body->mbo_fid1;
2418                 }
2419         }
2420
2421         LASSERT(fid_seq(&lli->lli_fid) != 0);
2422
2423         lli->lli_attr_valid = body->mbo_valid;
2424         if (body->mbo_valid & OBD_MD_FLSIZE) {
2425                 i_size_write(inode, body->mbo_size);
2426
2427                 CDEBUG(D_VFSTRACE, "inode="DFID", updating i_size %llu\n",
2428                        PFID(ll_inode2fid(inode)),
2429                        (unsigned long long)body->mbo_size);
2430
2431                 if (body->mbo_valid & OBD_MD_FLBLOCKS)
2432                         inode->i_blocks = body->mbo_blocks;
2433         } else {
2434                 if (body->mbo_valid & OBD_MD_FLLAZYSIZE)
2435                         lli->lli_lazysize = body->mbo_size;
2436                 if (body->mbo_valid & OBD_MD_FLLAZYBLOCKS)
2437                         lli->lli_lazyblocks = body->mbo_blocks;
2438         }
2439
2440         if (body->mbo_valid & OBD_MD_TSTATE) {
2441                 /* Set LLIF_FILE_RESTORING if a restore is ongoing and
2442                  * clear it when done, to ensure we start glimpsing
2443                  * updated attrs again.
2444                  */
2445                 if (body->mbo_t_state & MS_RESTORE)
2446                         ll_file_set_flag(lli, LLIF_FILE_RESTORING);
2447                 else
2448                         ll_file_clear_flag(lli, LLIF_FILE_RESTORING);
2449         }
2450
2451         return 0;
2452 }
2453
2454 int ll_read_inode2(struct inode *inode, void *opaque)
2455 {
2456         struct lustre_md *md = opaque;
2457         struct ll_inode_info *lli = ll_i2info(inode);
2458         int     rc;
2459         ENTRY;
2460
2461         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
2462                PFID(&lli->lli_fid), inode);
2463
2464         /* Core attributes from the MDS first.  This is a new inode, and
2465          * the VFS doesn't zero times in the core inode so we have to do
2466          * it ourselves.  They will be overwritten by either MDS or OST
2467          * attributes - we just need to make sure they aren't newer.
2468          */
2469         inode->i_mtime.tv_sec = 0;
2470         inode->i_atime.tv_sec = 0;
2471         inode->i_ctime.tv_sec = 0;
2472         inode->i_rdev = 0;
2473         rc = ll_update_inode(inode, md);
2474         if (rc != 0)
2475                 RETURN(rc);
2476
2477         /* OIDEBUG(inode); */
2478
2479 #ifdef HAVE_BACKING_DEV_INFO
2480         /* initializing backing dev info. */
2481         inode->i_mapping->backing_dev_info = &s2lsi(inode->i_sb)->lsi_bdi;
2482 #endif
2483         if (S_ISREG(inode->i_mode)) {
2484                 struct ll_sb_info *sbi = ll_i2sbi(inode);
2485                 inode->i_op = &ll_file_inode_operations;
2486                 inode->i_fop = sbi->ll_fop;
2487                 inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
2488                 EXIT;
2489         } else if (S_ISDIR(inode->i_mode)) {
2490                 inode->i_op = &ll_dir_inode_operations;
2491                 inode->i_fop = &ll_dir_operations;
2492                 EXIT;
2493         } else if (S_ISLNK(inode->i_mode)) {
2494                 inode->i_op = &ll_fast_symlink_inode_operations;
2495                 EXIT;
2496         } else {
2497                 inode->i_op = &ll_special_inode_operations;
2498
2499                 init_special_inode(inode, inode->i_mode,
2500                                    inode->i_rdev);
2501
2502                 EXIT;
2503         }
2504
2505         return 0;
2506 }
2507
2508 void ll_delete_inode(struct inode *inode)
2509 {
2510         struct ll_inode_info *lli = ll_i2info(inode);
2511         struct address_space *mapping = &inode->i_data;
2512         unsigned long nrpages;
2513         unsigned long flags;
2514
2515         ENTRY;
2516
2517         if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL) {
2518                 /* This is the last chance to write out dirty pages;
2519                  * otherwise we may lose data during umount.
2520                  *
2521                  * If i_nlink is 0 then just discard the data. This is safe
2522                  * because the local inode gets i_nlink 0 from the server
2523                  * only on the last unlink, so the file is not open anywhere else.
2524                  */
2525                 cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, inode->i_nlink ?
2526                                    CL_FSYNC_LOCAL : CL_FSYNC_DISCARD, 1);
2527         }
2528         truncate_inode_pages_final(mapping);
2529
2530         /* Workaround for LU-118: Note nrpages may not be totally updated when
2531          * truncate_inode_pages() returns, as there can be a page in the process
2532          * of deletion (inside __delete_from_page_cache()) in the specified
2533          * range. Thus mapping->nrpages can be non-zero when this function
2534          * returns even after truncation of the whole mapping.  Only do this if
2535          * nrpages isn't already zero.
2536          */
2537         nrpages = mapping->nrpages;
2538         if (nrpages) {
2539                 ll_xa_lock_irqsave(&mapping->i_pages, flags);
2540                 nrpages = mapping->nrpages;
2541                 ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
2542         } /* Workaround end */
2543
2544         LASSERTF(nrpages == 0, "%s: inode="DFID"(%p) nrpages=%lu, "
2545                  "see https://jira.whamcloud.com/browse/LU-118\n",
2546                  ll_i2sbi(inode)->ll_fsname,
2547                  PFID(ll_inode2fid(inode)), inode, nrpages);
2548
2549         ll_clear_inode(inode);
2550         clear_inode(inode);
2551
2552         EXIT;
2553 }
2554
2555 int ll_iocontrol(struct inode *inode, struct file *file,
2556                  unsigned int cmd, unsigned long arg)
2557 {
2558         struct ll_sb_info *sbi = ll_i2sbi(inode);
2559         struct ptlrpc_request *req = NULL;
2560         int rc, flags = 0;
2561         ENTRY;
2562
2563         switch (cmd) {
2564         case FS_IOC_GETFLAGS: {
2565                 struct mdt_body *body;
2566                 struct md_op_data *op_data;
2567
2568                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
2569                                              0, 0, LUSTRE_OPC_ANY,
2570                                              NULL);
2571                 if (IS_ERR(op_data))
2572                         RETURN(PTR_ERR(op_data));
2573
2574                 op_data->op_valid = OBD_MD_FLFLAGS;
2575                 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
2576                 ll_finish_md_op_data(op_data);
2577                 if (rc) {
2578                         CERROR("%s: failure inode "DFID": rc = %d\n",
2579                                sbi->ll_md_exp->exp_obd->obd_name,
2580                                PFID(ll_inode2fid(inode)), rc);
2581                         RETURN(-abs(rc));
2582                 }
2583
2584                 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
2585
2586                 flags = body->mbo_flags;
2587
2588                 ptlrpc_req_finished(req);
2589
2590                 RETURN(put_user(flags, (int __user *)arg));
2591         }
2592         case FS_IOC_SETFLAGS: {
2593                 struct iattr *attr;
2594                 struct md_op_data *op_data;
2595                 struct cl_object *obj;
2596                 struct fsxattr fa = { 0 };
2597
2598                 if (get_user(flags, (int __user *)arg))
2599                         RETURN(-EFAULT);
2600
2601                 fa.fsx_projid = ll_i2info(inode)->lli_projid;
2602                 if (flags & LUSTRE_PROJINHERIT_FL)
2603                         fa.fsx_xflags = FS_XFLAG_PROJINHERIT;
2604
2605                 rc = ll_ioctl_check_project(inode, &fa);
2606                 if (rc)
2607                         RETURN(rc);
2608
2609                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
2610                                              LUSTRE_OPC_ANY, NULL);
2611                 if (IS_ERR(op_data))
2612                         RETURN(PTR_ERR(op_data));
2613
2614                 op_data->op_attr_flags = flags;
2615                 op_data->op_xvalid |= OP_XVALID_FLAGS;
2616                 rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &req);
2617                 ll_finish_md_op_data(op_data);
2618                 ptlrpc_req_finished(req);
2619                 if (rc)
2620                         RETURN(rc);
2621
2622                 ll_update_inode_flags(inode, flags);
2623
2624                 obj = ll_i2info(inode)->lli_clob;
2625                 if (obj == NULL)
2626                         RETURN(0);
2627
2628                 OBD_ALLOC_PTR(attr);
2629                 if (attr == NULL)
2630                         RETURN(-ENOMEM);
2631
2632                 rc = cl_setattr_ost(obj, attr, OP_XVALID_FLAGS, flags);
2633
2634                 OBD_FREE_PTR(attr);
2635                 RETURN(rc);
2636         }
2637         default:
2638                 RETURN(-ENOSYS);
2639         }
2640
2641         RETURN(0);
2642 }
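
/*
 * Userspace view of the FS_IOC_GETFLAGS/FS_IOC_SETFLAGS ioctls handled
 * above (a minimal sketch shown as a comment, since it is a separate
 * userspace program rather than kernel code; which flags are honoured
 * depends on the server and filesystem).  The handler above reads and
 * writes the flags as an int.
 *
 *      #include <errno.h>
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *      #include <sys/ioctl.h>
 *      #include <linux/fs.h>
 *
 *      int toggle_noatime(const char *path, int enable)
 *      {
 *              int fd, flags, rc;
 *
 *              fd = open(path, O_RDONLY);
 *              if (fd < 0)
 *                      return -errno;
 *              rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
 *              if (rc == 0) {
 *                      if (enable)
 *                              flags |= FS_NOATIME_FL;
 *                      else
 *                              flags &= ~FS_NOATIME_FL;
 *                      rc = ioctl(fd, FS_IOC_SETFLAGS, &flags);
 *              }
 *              rc = rc < 0 ? -errno : 0;
 *              close(fd);
 *              return rc;
 *      }
 */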
2643
2644 int ll_flush_ctx(struct inode *inode)
2645 {
2646         struct ll_sb_info  *sbi = ll_i2sbi(inode);
2647
2648         CDEBUG(D_SEC, "flush context for user %d\n",
2649                from_kuid(&init_user_ns, current_uid()));
2650
2651         obd_set_info_async(NULL, sbi->ll_md_exp,
2652                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2653                            0, NULL, NULL);
2654         obd_set_info_async(NULL, sbi->ll_dt_exp,
2655                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2656                            0, NULL, NULL);
2657         return 0;
2658 }
2659
2660 /* umount -f client means force down, don't save state */
2661 void ll_umount_begin(struct super_block *sb)
2662 {
2663         struct ll_sb_info *sbi = ll_s2sbi(sb);
2664         struct obd_device *obd;
2665         struct obd_ioctl_data *ioc_data;
2666         int cnt;
2667         ENTRY;
2668
2669         CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
2670                sb->s_count, atomic_read(&sb->s_active));
2671
2672         obd = class_exp2obd(sbi->ll_md_exp);
2673         if (obd == NULL) {
2674                 CERROR("Invalid MDC connection handle %#llx\n",
2675                        sbi->ll_md_exp->exp_handle.h_cookie);
2676                 EXIT;
2677                 return;
2678         }
2679         obd->obd_force = 1;
2680
2681         obd = class_exp2obd(sbi->ll_dt_exp);
2682         if (obd == NULL) {
2683                 CERROR("Invalid LOV connection handle %#llx\n",
2684                        sbi->ll_dt_exp->exp_handle.h_cookie);
2685                 EXIT;
2686                 return;
2687         }
2688         obd->obd_force = 1;
2689
2690         OBD_ALLOC_PTR(ioc_data);
2691         if (ioc_data) {
2692                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
2693                               sizeof *ioc_data, ioc_data, NULL);
2694
2695                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
2696                               sizeof *ioc_data, ioc_data, NULL);
2697
2698                 OBD_FREE_PTR(ioc_data);
2699         }
2700
2701         /* Really, we'd like to wait until there are no requests outstanding,
2702          * and then continue.  For now, we just periodically check for the VFS
2703          * to decrement mnt_cnt and hope to finish within 10 seconds.
2704          */
2705         cnt = 10;
2706         while (cnt > 0 &&
2707                !may_umount(sbi->ll_mnt.mnt)) {
2708                 ssleep(1);
2709                 cnt -= 1;
2710         }
2711
2712         EXIT;
2713 }
2714
2715 int ll_remount_fs(struct super_block *sb, int *flags, char *data)
2716 {
2717         struct ll_sb_info *sbi = ll_s2sbi(sb);
2718         char *profilenm = get_profile_name(sb);
2719         int err;
2720         __u32 read_only;
2721
2722         if ((*flags & MS_RDONLY) != (sb->s_flags & SB_RDONLY)) {
2723                 read_only = *flags & MS_RDONLY;
2724                 err = obd_set_info_async(NULL, sbi->ll_md_exp,
2725                                          sizeof(KEY_READ_ONLY),
2726                                          KEY_READ_ONLY, sizeof(read_only),
2727                                          &read_only, NULL);
2728                 if (err) {
2729                         LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
2730                                       profilenm, read_only ?
2731                                       "read-only" : "read-write", err);
2732                         return err;
2733                 }
2734
2735                 if (read_only)
2736                         sb->s_flags |= SB_RDONLY;
2737                 else
2738                         sb->s_flags &= ~SB_RDONLY;
2739
2740                 if (sbi->ll_flags & LL_SBI_VERBOSE)
2741                         LCONSOLE_WARN("Remounted %s %s\n", profilenm,
2742                                       read_only ?  "read-only" : "read-write");
2743         }
2744         return 0;
2745 }
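/*
 * Illustrative sketch (assumed usage, not part of the original source):
 * ll_remount_fs() is reached when the read-only setting changes on a
 * remount, e.g.:
 *
 *     mount -o remount,ro /mnt/lustre
 *
 * in which case MS_RDONLY differs from sb->s_flags, KEY_READ_ONLY is sent
 * to the MDC export, and sb->s_flags is updated to match.
 */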
2746
2747 /**
2748  * Clean up the open handle that is cached on the MDT side.
2749  *
2750  * In the open case, the client-side open handling thread may hit an error
2751  * after the MDT has granted the open. In such a case, the client should
2752  * send a close RPC to the MDT as cleanup; otherwise, the open handle is
2753  * leaked on the MDT until the client unmounts or is evicted.
2754  *
2755  * Furthermore, if someone unlinked the file, then because the open handle
2756  * holds a reference on that file/object, it will block subsequent threads
2757  * that want to locate the object via FID.
2758  *
2759  * \param[in] sb        super block for this file-system
2760  * \param[in] open_req  pointer to the original open request
2761  */
2762 void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req)
2763 {
2764         struct mdt_body                 *body;
2765         struct md_op_data               *op_data;
2766         struct ptlrpc_request           *close_req = NULL;
2767         struct obd_export               *exp       = ll_s2sbi(sb)->ll_md_exp;
2768         ENTRY;
2769
2770         body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
2771         OBD_ALLOC_PTR(op_data);
2772         if (op_data == NULL) {
2773                 CWARN("%s: cannot allocate op_data to release open handle for "
2774                       DFID"\n", ll_s2sbi(sb)->ll_fsname, PFID(&body->mbo_fid1));
2775
2776                 RETURN_EXIT;
2777         }
2778
2779         op_data->op_fid1 = body->mbo_fid1;
2780         op_data->op_open_handle = body->mbo_open_handle;
2781         op_data->op_mod_time = ktime_get_real_seconds();
2782         md_close(exp, op_data, NULL, &close_req);
2783         ptlrpc_req_finished(close_req);
2784         ll_finish_md_op_data(op_data);
2785
2786         EXIT;
2787 }
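/*
 * Illustrative note (based on the caller below, not part of the original
 * source): ll_prep_inode() is one such user; when it fails after an IT_OPEN
 * intent has succeeded on the MDT, it drops the intent lock and calls
 * ll_open_cleanup() so the cached open handle is closed:
 *
 *     if (rc != 0 && it != NULL && it->it_op & IT_OPEN) {
 *             ll_intent_drop_lock(it);
 *             ll_open_cleanup(sb != NULL ? sb : (*inode)->i_sb, req);
 *     }
 */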
2788
2789 int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
2790                   struct super_block *sb, struct lookup_intent *it)
2791 {
2792         struct ll_sb_info *sbi = NULL;
2793         struct lustre_md md = { NULL };
2794         bool default_lmv_deleted = false;
2795         int rc;
2796
2797         ENTRY;
2798
2799         LASSERT(*inode || sb);
2800         sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
2801         rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
2802                               sbi->ll_md_exp, &md);
2803         if (rc != 0)
2804                 GOTO(out, rc);
2805
2806         /*
2807          * Clear default_lmv only if the intent_getattr reply doesn't contain
2808          * it, but that needs to be done after iget; check this early because
2809          * ll_update_lsm_md() may change md.
2810          */
2811         if (it && (it->it_op & (IT_LOOKUP | IT_GETATTR)) &&
2812             S_ISDIR(md.body->mbo_mode) && !md.default_lmv)
2813                 default_lmv_deleted = true;
2814
2815         if (*inode) {
2816                 rc = ll_update_inode(*inode, &md);
2817                 if (rc != 0)
2818                         GOTO(out, rc);
2819         } else {
2820                 LASSERT(sb != NULL);
2821
2822                 /*
2823                  * At this point the server returns to the client the same FID
2824                  * the client generated for the create, so using ->fid1 is okay.
2825                  */
2826                 if (!fid_is_sane(&md.body->mbo_fid1)) {
2827                         CERROR("%s: Fid is insane "DFID"\n",
2828                                 sbi->ll_fsname,
2829                                 PFID(&md.body->mbo_fid1));
2830                         GOTO(out, rc = -EINVAL);
2831                 }
2832
2833                 *inode = ll_iget(sb, cl_fid_build_ino(&md.body->mbo_fid1,
2834                                              sbi->ll_flags & LL_SBI_32BIT_API),
2835                                  &md);
2836                 if (IS_ERR(*inode)) {
2837                         lmd_clear_acl(&md);
2838                         rc = PTR_ERR(*inode);
2839                         *inode = NULL;
2840                         CERROR("new_inode -fatal: rc %d\n", rc);
2841                         GOTO(out, rc);
2842                 }
2843         }
2844
2845         /* Handle a piggybacked layout lock.
2846          * A layout lock can be piggybacked on getattr and open requests.
2847          * The lsm can be applied to the inode only if it comes with a layout
2848          * lock, otherwise the correct layout may be overwritten, for example:
2849          * 1. proc1: MDT returns an lsm but does not grant the layout lock
2850          * 2. the layout is changed by another client
2851          * 3. proc2: refreshes the layout and the layout lock is granted
2852          * 4. proc1: applies the now-stale layout */
2853         if (it != NULL && it->it_lock_mode != 0) {
2854                 struct lustre_handle lockh;
2855                 struct ldlm_lock *lock;
2856
2857                 lockh.cookie = it->it_lock_handle;
2858                 lock = ldlm_handle2lock(&lockh);
2859                 LASSERT(lock != NULL);
2860                 if (ldlm_has_layout(lock)) {
2861                         struct cl_object_conf conf;
2862
2863                         memset(&conf, 0, sizeof(conf));
2864                         conf.coc_opc = OBJECT_CONF_SET;
2865                         conf.coc_inode = *inode;
2866                         conf.coc_lock = lock;
2867                         conf.u.coc_layout = md.layout;
2868                         (void)ll_layout_conf(*inode, &conf);
2869                 }
2870                 LDLM_LOCK_PUT(lock);
2871         }
2872
2873         if (default_lmv_deleted)
2874                 ll_update_default_lsm_md(*inode, &md);
2875
2876         GOTO(out, rc = 0);
2877
2878 out:
2879         /* cleanup will be done if necessary */
2880         md_free_lustre_md(sbi->ll_md_exp, &md);
2881
2882         if (rc != 0 && it != NULL && it->it_op & IT_OPEN) {
2883                 ll_intent_drop_lock(it);
2884                 ll_open_cleanup(sb != NULL ? sb : (*inode)->i_sb, req);
2885         }
2886
2887         return rc;
2888 }
2889
2890 int ll_obd_statfs(struct inode *inode, void __user *arg)
2891 {
2892         struct ll_sb_info *sbi = NULL;
2893         struct obd_export *exp;
2894         char *buf = NULL;
2895         struct obd_ioctl_data *data = NULL;
2896         __u32 type;
2897         int len = 0, rc;
2898
2899         if (!inode || !(sbi = ll_i2sbi(inode)))
2900                 GOTO(out_statfs, rc = -EINVAL);
2901
2902         rc = obd_ioctl_getdata(&buf, &len, arg);
2903         if (rc)
2904                 GOTO(out_statfs, rc);
2905
2906         data = (void *)buf;
2907         if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
2908             !data->ioc_pbuf1 || !data->ioc_pbuf2)
2909                 GOTO(out_statfs, rc = -EINVAL);
2910
2911         if (data->ioc_inllen1 != sizeof(__u32) ||
2912             data->ioc_inllen2 != sizeof(__u32) ||
2913             data->ioc_plen1 != sizeof(struct obd_statfs) ||
2914             data->ioc_plen2 != sizeof(struct obd_uuid))
2915                 GOTO(out_statfs, rc = -EINVAL);
2916
2917         memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
2918         if (type & LL_STATFS_LMV)
2919                 exp = sbi->ll_md_exp;
2920         else if (type & LL_STATFS_LOV)
2921                 exp = sbi->ll_dt_exp;
2922         else
2923                 GOTO(out_statfs, rc = -ENODEV);
2924
2925         rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, NULL);
2926         if (rc)
2927                 GOTO(out_statfs, rc);
2928 out_statfs:
2929         OBD_FREE_LARGE(buf, len);
2930         return rc;
2931 }
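/*
 * Illustrative sketch (hypothetical caller; names other than the ioc_*
 * fields are assumptions) of the obd_ioctl_data layout that ll_obd_statfs()
 * expects from user space:
 *
 *     __u32 type = LL_STATFS_LOV;        // or LL_STATFS_LMV for the MDC
 *     __u32 index = 0;                   // target index chosen by the caller
 *     struct obd_statfs osfs;
 *     struct obd_uuid uuid;
 *
 *     data.ioc_inlbuf1 = (char *)&type;   data.ioc_inllen1 = sizeof(type);
 *     data.ioc_inlbuf2 = (char *)&index;  data.ioc_inllen2 = sizeof(index);
 *     data.ioc_pbuf1   = (char *)&osfs;   data.ioc_plen1   = sizeof(osfs);
 *     data.ioc_pbuf2   = (char *)&uuid;   data.ioc_plen2   = sizeof(uuid);
 *
 * The lengths must match exactly, otherwise the checks above return -EINVAL.
 */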
2932
2933 /*
2934  * this is normally called in ll_fini_md_op_data(), but sometimes it needs to
2935  * be called early to avoid deadlock.
2936  */
2937 void ll_unlock_md_op_lsm(struct md_op_data *op_data)
2938 {
2939         if (op_data->op_mea2_sem) {
2940                 up_read_non_owner(op_data->op_mea2_sem);
2941                 op_data->op_mea2_sem = NULL;
2942         }
2943
2944         if (op_data->op_mea1_sem) {
2945                 up_read_non_owner(op_data->op_mea1_sem);
2946                 op_data->op_mea1_sem = NULL;
2947         }
2948 }
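/*
 * Added note (inference, not part of the original source): the semaphores
 * released here are the ones taken with down_read_non_owner() in
 * ll_prep_md_op_data() below; the *_non_owner variants are presumably used
 * because the release may happen from a different context than the acquire.
 */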
2949
2950 /* This function prepares the md_op_data hint for passing down to the MD stack. */
2951 struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
2952                                       struct inode *i1, struct inode *i2,
2953                                       const char *name, size_t namelen,
2954                                       __u32 mode, enum md_op_code opc,
2955                                       void *data)
2956 {
2957         LASSERT(i1 != NULL);
2958
2959         if (name == NULL) {
2960                 /* Do not reuse namelen for something else. */
2961                 if (namelen != 0)
2962                         return ERR_PTR(-EINVAL);
2963         } else {
2964                 if (namelen > ll_i2sbi(i1)->ll_namelen)
2965                         return ERR_PTR(-ENAMETOOLONG);
2966
2967                 /* "/" is not a valid name, but it is allowed */
2968                 if (!lu_name_is_valid_2(name, namelen) &&
2969                     strncmp("/", name, namelen) != 0)
2970                         return ERR_PTR(-EINVAL);
2971         }
2972
2973         if (op_data == NULL)
2974                 OBD_ALLOC_PTR(op_data);
2975
2976         if (op_data == NULL)
2977                 return ERR_PTR(-ENOMEM);
2978
2979         ll_i2gids(op_data->op_suppgids, i1, i2);
2980         op_data->op_fid1 = *ll_inode2fid(i1);
2981         op_data->op_code = opc;
2982
2983         if (S_ISDIR(i1->i_mode)) {
2984                 down_read_non_owner(&ll_i2info(i1)->lli_lsm_sem);
2985                 op_data->op_mea1_sem = &ll_i2info(i1)->lli_lsm_sem;
2986                 op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
2987                 op_data->op_default_mea1 = ll_i2info(i1)->lli_default_lsm_md;
2988         }
2989
2990         if (i2) {
2991                 op_data->op_fid2 = *ll_inode2fid(i2);
2992                 if (S_ISDIR(i2->i_mode)) {
2993                         if (i2 != i1) {
2994                                 /* i2 is typically a child of i1, and MUST be
2995                                  * further from the root to avoid deadlocks.
2996                                  */
2997                                 down_read_non_owner(&ll_i2info(i2)->lli_lsm_sem);
2998                                 op_data->op_mea2_sem =
2999                                                 &ll_i2info(i2)->lli_lsm_sem;
3000                         }
3001                         op_data->op_mea2 = ll_i2info(i2)->lli_lsm_md;
3002                 }
3003         } else {
3004                 fid_zero(&op_data->op_fid2);
3005         }
3006
3007         if (ll_i2sbi(i1)->ll_flags & LL_SBI_64BIT_HASH)
3008                 op_data->op_cli_flags |= CLI_HASH64;
3009
3010         if (ll_need_32bit_api(ll_i2sbi(i1)))
3011                 op_data->op_cli_flags |= CLI_API32;
3012
3013         op_data->op_name = name;
3014         op_data->op_namelen = namelen;
3015         op_data->op_mode = mode;
3016         op_data->op_mod_time = ktime_get_real_seconds();
3017         op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
3018         op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
3019         op_data->op_cap = cfs_curproc_cap_pack();
3020         op_data->op_mds = 0;
3021         if ((opc == LUSTRE_OPC_CREATE) && (name != NULL) &&
3022              filename_is_volatile(name, namelen, &op_data->op_mds)) {
3023                 op_data->op_bias |= MDS_CREATE_VOLATILE;
3024         }
3025         op_data->op_data = data;
3026
3027         return op_data;
3028 }
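/*
 * Illustrative sketch (hypothetical caller; md_getattr_name() and
 * LUSTRE_OPC_ANY are assumptions) of the usual prepare/finish pairing:
 *
 *     op_data = ll_prep_md_op_data(NULL, dir, NULL, name, namelen, 0,
 *                                  LUSTRE_OPC_ANY, NULL);
 *     if (IS_ERR(op_data))
 *             RETURN(PTR_ERR(op_data));
 *
 *     rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
 *
 *     ll_finish_md_op_data(op_data);
 *
 * Passing NULL as op_data asks this function to allocate one;
 * ll_finish_md_op_data() below releases the LSM semaphores and frees it.
 */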
3029
3030 void ll_finish_md_op_data(struct md_op_data *op_data)
3031 {
3032         ll_unlock_md_op_lsm(op_data);
3033         security_release_secctx(op_data->op_file_secctx,
3034                                 op_data->op_file_secctx_size);
3035         llcrypt_free_ctx(op_data->op_file_encctx, op_data->op_file_encctx_size);
3036         OBD_FREE_PTR(op_data);
3037 }
3038
3039 int ll_show_options(struct seq_file *seq, struct dentry *dentry)
3040 {
3041         struct ll_sb_info *sbi;
3042
3043         LASSERT(seq && dentry);
3044         sbi = ll_s2sbi(dentry->d_sb);
3045
3046         if (sbi->ll_flags & LL_SBI_NOLCK)
3047                 seq_puts(seq, ",nolock");
3048
3049         /* "flock" is the default since 2.13, but it wasn't for many years,
3050          * so it is still useful to print this to show it is enabled.
3051          * Start to print "noflock" so it is now clear when flock is disabled.
3052          */
3053         if (sbi->ll_flags & LL_SBI_FLOCK)
3054                 seq_puts(seq, ",flock");
3055         else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
3056                 seq_puts(seq, ",localflock");
3057         else
3058                 seq_puts(seq, ",noflock");
3059
3060         if (sbi->ll_flags & LL_SBI_USER_XATTR)
3061                 seq_puts(seq, ",user_xattr");
3062
3063         if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
3064                 seq_puts(seq, ",lazystatfs");
3065
3066         if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
3067                 seq_puts(seq, ",user_fid2path");
3068
3069         if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
3070                 seq_puts(seq, ",always_ping");
3071
3072         if (ll_sbi_has_test_dummy_encryption(sbi))
3073                 seq_puts(seq, ",test_dummy_encryption");
3074
3075         if (ll_sbi_has_encrypt(sbi))
3076                 seq_puts(seq, ",encrypt");
3077         else
3078                 seq_puts(seq, ",noencrypt");
3079
3080         RETURN(0);
3081 }
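/*
 * Illustrative note (example output, not part of the original source): with
 * a typical configuration the options emitted above show up in /proc/mounts
 * as something like:
 *
 *     ...,flock,user_xattr,lazystatfs,noencrypt
 *
 * depending on which LL_SBI_* flags are set.
 */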
3082
3083 /**
3084  * Get the obd name for the given cmd and copy it out to user space.
3085  */
3086 int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
3087 {
3088         struct ll_sb_info *sbi = ll_i2sbi(inode);
3089         struct obd_device *obd;
3090         ENTRY;
3091
3092         if (cmd == OBD_IOC_GETDTNAME)
3093                 obd = class_exp2obd(sbi->ll_dt_exp);
3094         else if (cmd == OBD_IOC_GETMDNAME)
3095                 obd = class_exp2obd(sbi->ll_md_exp);
3096         else
3097                 RETURN(-EINVAL);
3098
3099         if (!obd)
3100                 RETURN(-ENOENT);
3101
3102         if (copy_to_user((void __user *)arg, obd->obd_name,
3103                          strlen(obd->obd_name) + 1))
3104                 RETURN(-EFAULT);
3105
3106         RETURN(0);
3107 }
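/*
 * Illustrative sketch (hypothetical user-space caller; the buffer size is
 * an assumption) of retrieving the obd name through this ioctl:
 *
 *     char name[128];
 *
 *     if (ioctl(fd, OBD_IOC_GETDTNAME, name) == 0)
 *             printf("data obd: %s\n", name);
 *
 * The kernel side copies obd_name including its trailing NUL, so the
 * buffer must be large enough to hold the full name.
 */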
3108
3109 static char *ll_d_path(struct dentry *dentry, char *buf, int bufsize)
3110 {
3111         char *path = NULL;
3112
3113         struct path p;
3114
3115         p.dentry = dentry;
3116         p.mnt = current->fs->root.mnt;
3117         path_get(&p);
3118         path = d_path(&p, buf, bufsize);
3119         path_put(&p);
3120         return path;
3121 }
3122
3123 void ll_dirty_page_discard_warn(struct page *page, int ioret)
3124 {
3125         char *buf, *path = NULL;
3126         struct dentry *dentry = NULL;
3127         struct inode *inode = page->mapping->host;
3128
3129         /* this can be called inside a spin lock so use GFP_ATOMIC. */
3130         buf = (char *)__get_free_page(GFP_ATOMIC);
3131         if (buf != NULL) {
3132                 dentry = d_find_alias(page->mapping->host);
3133                 if (dentry != NULL)
3134                         path = ll_d_path(dentry, buf, PAGE_SIZE);
3135         }
3136
3137         /* The below message is checked in recovery-small.sh test_24b */
3138         CDEBUG(D_WARNING,
3139                "%s: dirty page discard: %s/fid: "DFID"/%s may get corrupted "
3140                "(rc %d)\n", ll_i2sbi(inode)->ll_fsname,
3141                s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
3142                PFID(ll_inode2fid(inode)),
3143                (path && !IS_ERR(path)) ? path : "", ioret);
3144
3145         if (dentry != NULL)
3146                 dput(dentry);
3147
3148         if (buf != NULL)
3149                 free_page((unsigned long)buf);
3150 }
3151
3152 ssize_t ll_copy_user_md(const struct lov_user_md __user *md,
3153                         struct lov_user_md **kbuf)
3154 {
3155         struct lov_user_md      lum;
3156         ssize_t                 lum_size;
3157         ENTRY;
3158
3159         if (copy_from_user(&lum, md, sizeof(lum)))
3160                 RETURN(-EFAULT);
3161
3162         lum_size = ll_lov_user_md_size(&lum);
3163         if (lum_size < 0)
3164                 RETURN(lum_size);
3165
3166         OBD_ALLOC_LARGE(*kbuf, lum_size);
3167         if (*kbuf == NULL)
3168                 RETURN(-ENOMEM);
3169
3170         if (copy_from_user(*kbuf, md, lum_size) != 0) {
3171                 OBD_FREE_LARGE(*kbuf, lum_size);
3172                 RETURN(-EFAULT);
3173         }
3174
3175         RETURN(lum_size);
3176 }
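/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * source) of the expected usage of ll_copy_user_md():
 *
 *     struct lov_user_md *klum;
 *     ssize_t size = ll_copy_user_md(ulum, &klum);
 *
 *     if (size < 0)
 *             RETURN(size);
 *     // ... use klum ...
 *     OBD_FREE_LARGE(klum, size);
 *
 * The returned size comes from ll_lov_user_md_size() and must be passed
 * back when freeing the buffer.
 */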
3177
3178 /*
3179  * Compute the llite root squash state after a change of the root squash
3180  * configuration setting or the addition/removal of an LNet NID.
3181  */
3182 void ll_compute_rootsquash_state(struct ll_sb_info *sbi)
3183 {
3184         struct root_squash_info *squash = &sbi->ll_squash;
3185         int i;
3186         bool matched;
3187         struct lnet_process_id id;
3188
3189         /* Update norootsquash flag */
3190         spin_lock(&squash->rsi_lock);
3191         if (list_empty(&squash->rsi_nosquash_nids))
3192                 sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
3193         else {
3194                 /* Do not apply root squash as soon as one of our NIDs is
3195                  * in the nosquash_nids list */
3196                 matched = false;
3197                 i = 0;
3198                 while (LNetGetId(i++, &id) != -ENOENT) {
3199                         if (id.nid == LNET_NID_LO_0)
3200                                 continue;
3201                         if (cfs_match_nid(id.nid, &squash->rsi_nosquash_nids)) {
3202                                 matched = true;
3203                                 break;
3204                         }
3205                 }
3206                 if (matched)
3207                         sbi->ll_flags |= LL_SBI_NOROOTSQUASH;
3208                 else
3209                         sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
3210         }
3211         spin_unlock(&squash->rsi_lock);
3212 }
3213
3214 /**
3215  * Parse linkea content to extract information about a given hardlink
3216  *
3217  * \param[in]   ldata      - Initialized linkea data
3218  * \param[in]   linkno     - Link identifier
3219  * \param[out]  parent_fid - The entry's parent FID
3220  * \param[out]  ln         - Entry name destination buffer
3221  *
3222  * \retval 0 on success
3223  * \retval Appropriate negative error code on failure
3224  */
3225 static int ll_linkea_decode(struct linkea_data *ldata, unsigned int linkno,
3226                             struct lu_fid *parent_fid, struct lu_name *ln)
3227 {
3228         unsigned int    idx;
3229         int             rc;
3230         ENTRY;
3231
3232         rc = linkea_init_with_rec(ldata);
3233         if (rc < 0)
3234                 RETURN(rc);
3235
3236         if (linkno >= ldata->ld_leh->leh_reccount)
3237                 /* beyond last link */
3238                 RETURN(-ENODATA);
3239
3240         linkea_first_entry(ldata);
3241         for (idx = 0; ldata->ld_lee != NULL; idx++) {
3242                 linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen, ln,
3243                                     parent_fid);
3244                 if (idx == linkno)
3245                         break;
3246
3247                 linkea_next_entry(ldata);
3248         }
3249
3250         if (idx < linkno)
3251                 RETURN(-ENODATA);
3252
3253         RETURN(0);
3254 }
3255
3256 /**
3257  * Get parent FID and name of an identified link. Operation is performed for
3258  * a given link number, letting the caller iterate over linkno to list one or
3259  * all links of an entry.
3260  *
3261  * \param[in]     file - File descriptor against which to perform the operation
3262  * \param[in,out] arg  - User-filled structure containing the linkno to operate
3263  *                       on and the available size. It is eventually filled with
3264  *                       the requested information or left untouched on error
3265  *
3266  * \retval - 0 on success
3267  * \retval - Appropriate negative error code on failure
3268  */
3269 int ll_getparent(struct file *file, struct getparent __user *arg)
3270 {
3271         struct inode            *inode = file_inode(file);
3272         struct linkea_data      *ldata;
3273         struct lu_buf            buf = LU_BUF_NULL;
3274         struct lu_name           ln;
3275         struct lu_fid            parent_fid;
3276         __u32                    linkno;
3277         __u32                    name_size;
3278         int                      rc;
3279
3280         ENTRY;
3281
3282         if (!cfs_capable(CFS_CAP_DAC_READ_SEARCH) &&
3283             !(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
3284                 RETURN(-EPERM);
3285
3286         if (get_user(name_size, &arg->gp_name_size))
3287                 RETURN(-EFAULT);
3288
3289         if (get_user(linkno, &arg->gp_linkno))
3290                 RETURN(-EFAULT);
3291
3292         if (name_size > PATH_MAX)
3293                 RETURN(-EINVAL);
3294
3295         OBD_ALLOC(ldata, sizeof(*ldata));
3296         if (ldata == NULL)
3297                 RETURN(-ENOMEM);
3298
3299         rc = linkea_data_new(ldata, &buf);
3300         if (rc < 0)
3301                 GOTO(ldata_free, rc);
3302
3303         rc = ll_xattr_list(inode, XATTR_NAME_LINK, XATTR_TRUSTED_T, buf.lb_buf,
3304                            buf.lb_len, OBD_MD_FLXATTR);
3305         if (rc < 0)
3306                 GOTO(lb_free, rc);
3307
3308         rc = ll_linkea_decode(ldata, linkno, &parent_fid, &ln);
3309         if (rc < 0)
3310                 GOTO(lb_free, rc);
3311
3312         if (ln.ln_namelen >= name_size)
3313                 GOTO(lb_free, rc = -EOVERFLOW);
3314
3315         if (copy_to_user(&arg->gp_fid, &parent_fid, sizeof(arg->gp_fid)))
3316                 GOTO(lb_free, rc = -EFAULT);
3317
3318         if (copy_to_user(&arg->gp_name, ln.ln_name, ln.ln_namelen))
3319                 GOTO(lb_free, rc = -EFAULT);
3320
3321         if (put_user('\0', arg->gp_name + ln.ln_namelen))
3322                 GOTO(lb_free, rc = -EFAULT);
3323
3324 lb_free:
3325         lu_buf_free(&buf);
3326 ldata_free:
3327         OBD_FREE(ldata, sizeof(*ldata));
3328
3329         RETURN(rc);
3330 }
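/*
 * Illustrative sketch (hypothetical user-space caller; assumes gp_name is a
 * flexible array at the end of struct getparent) of the payload consumed by
 * ll_getparent():
 *
 *     size_t name_size = NAME_MAX + 1;
 *     struct getparent *gp = malloc(sizeof(*gp) + name_size);
 *
 *     gp->gp_linkno = 0;            // first hard link
 *     gp->gp_name_size = name_size;
 *     // issue the ioctl that lands in ll_getparent(); on success read
 *     // gp->gp_fid and the NUL-terminated gp->gp_name.
 */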