LU-13467 llite: truncate deadlock with DoM files
fs/lustre-release.git: lustre/llite/llite_lib.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/llite/llite_lib.c
33  *
34  * Lustre Light Super operations
35  */
36
37 #define DEBUG_SUBSYSTEM S_LLITE
38
39 #include <linux/cpu.h>
40 #include <linux/module.h>
41 #include <linux/random.h>
42 #include <linux/statfs.h>
43 #include <linux/time.h>
44 #include <linux/types.h>
45 #include <libcfs/linux/linux-uuid.h>
46 #include <linux/version.h>
47 #include <linux/mm.h>
48 #include <linux/user_namespace.h>
49 #include <linux/delay.h>
50 #include <linux/uidgid.h>
51 #include <linux/security.h>
52
53 #ifndef HAVE_CPUS_READ_LOCK
54 #include <libcfs/linux/linux-cpu.h>
55 #endif
56 #include <uapi/linux/lustre/lustre_ioctl.h>
57 #ifdef HAVE_UAPI_LINUX_MOUNT_H
58 #include <uapi/linux/mount.h>
59 #endif
60
61 #include <lustre_ha.h>
62 #include <lustre_dlm.h>
63 #include <lprocfs_status.h>
64 #include <lustre_disk.h>
65 #include <uapi/linux/lustre/lustre_param.h>
66 #include <lustre_log.h>
67 #include <cl_object.h>
68 #include <obd_cksum.h>
69 #include "llite_internal.h"
70
71 struct kmem_cache *ll_file_data_slab;
72
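/* for a power-of-two value n, ffz(~n) is the index of its set bit, i.e. log2(n) */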
73 #ifndef log2
74 #define log2(n) ffz(~(n))
75 #endif
76
77 /**
78  * If only one CPU core is visible to Lustre, async readahead is
79  * disabled.  To avoid massive oversubscription, use 1/2 of the
80  * active cores as the default maximum number of async readahead
81  * requests.
82  */
83 static inline unsigned int ll_get_ra_async_max_active(void)
84 {
85         return cfs_cpt_weight(cfs_cpt_tab, CFS_CPT_ANY) >> 1;
86 }
87
88 static struct ll_sb_info *ll_init_sbi(void)
89 {
90         struct ll_sb_info *sbi = NULL;
91         unsigned long pages;
92         unsigned long lru_page_max;
93         struct sysinfo si;
94         int rc;
95         int i;
96
97         ENTRY;
98
99         OBD_ALLOC_PTR(sbi);
100         if (sbi == NULL)
101                 RETURN(ERR_PTR(-ENOMEM));
102
103         rc = pcc_super_init(&sbi->ll_pcc_super);
104         if (rc < 0)
105                 GOTO(out_sbi, rc);
106
107         spin_lock_init(&sbi->ll_lock);
108         mutex_init(&sbi->ll_lco.lco_lock);
109         spin_lock_init(&sbi->ll_pp_extent_lock);
110         spin_lock_init(&sbi->ll_process_lock);
111         sbi->ll_rw_stats_on = 0;
112         sbi->ll_statfs_max_age = OBD_STATFS_CACHE_SECONDS;
113
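        /* size the client page cache LRU from low memory: by default allow
         * at most half of it to be used for cached pages */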
114         si_meminfo(&si);
115         pages = si.totalram - si.totalhigh;
116         lru_page_max = pages / 2;
117
118         sbi->ll_ra_info.ra_async_max_active = ll_get_ra_async_max_active();
119         sbi->ll_ra_info.ll_readahead_wq =
120                 cfs_cpt_bind_workqueue("ll-readahead-wq", cfs_cpt_tab,
121                                        0, CFS_CPT_ANY,
122                                        sbi->ll_ra_info.ra_async_max_active);
123         if (IS_ERR(sbi->ll_ra_info.ll_readahead_wq))
124                 GOTO(out_pcc, rc = PTR_ERR(sbi->ll_ra_info.ll_readahead_wq));
125
126         /* initialize ll_cache data */
127         sbi->ll_cache = cl_cache_init(lru_page_max);
128         if (sbi->ll_cache == NULL)
129                 GOTO(out_destroy_ra, rc = -ENOMEM);
130
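        /* readahead defaults: cap per-file readahead at 1/32 of RAM
         * (bounded by SBI_DEFAULT_READ_AHEAD_MAX) and reuse that value for
         * the per-file async threshold and the global readahead limit */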
131         sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
132                                                     SBI_DEFAULT_READ_AHEAD_MAX);
133         sbi->ll_ra_info.ra_async_pages_per_file_threshold =
134                                 sbi->ll_ra_info.ra_max_pages_per_file;
135         sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
136         sbi->ll_ra_info.ra_max_read_ahead_whole_pages = -1;
137         atomic_set(&sbi->ll_ra_info.ra_async_inflight, 0);
138
139         sbi->ll_flags |= LL_SBI_VERBOSE;
140 #ifdef ENABLE_CHECKSUM
141         sbi->ll_flags |= LL_SBI_CHECKSUM;
142 #endif
143 #ifdef ENABLE_FLOCK
144         sbi->ll_flags |= LL_SBI_FLOCK;
145 #endif
146
147 #ifdef HAVE_LRU_RESIZE_SUPPORT
148         sbi->ll_flags |= LL_SBI_LRU_RESIZE;
149 #endif
150         sbi->ll_flags |= LL_SBI_LAZYSTATFS;
151
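        /* initialize the per-process read/write extent histogram locks */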
152         for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
153                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
154                                pp_r_hist.oh_lock);
155                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
156                                pp_w_hist.oh_lock);
157         }
158
159         /* metadata statahead is enabled by default */
160         sbi->ll_sa_running_max = LL_SA_RUNNING_DEF;
161         sbi->ll_sa_max = LL_SA_RPC_DEF;
162         atomic_set(&sbi->ll_sa_total, 0);
163         atomic_set(&sbi->ll_sa_wrong, 0);
164         atomic_set(&sbi->ll_sa_running, 0);
165         atomic_set(&sbi->ll_agl_total, 0);
166         sbi->ll_flags |= LL_SBI_AGL_ENABLED;
167         sbi->ll_flags |= LL_SBI_FAST_READ;
168         sbi->ll_flags |= LL_SBI_TINY_WRITE;
169         ll_sbi_set_encrypt(sbi, true);
170
171         /* root squash */
172         sbi->ll_squash.rsi_uid = 0;
173         sbi->ll_squash.rsi_gid = 0;
174         INIT_LIST_HEAD(&sbi->ll_squash.rsi_nosquash_nids);
175         spin_lock_init(&sbi->ll_squash.rsi_lock);
176
177         /* Per-filesystem file heat */
178         sbi->ll_heat_decay_weight = SBI_DEFAULT_HEAT_DECAY_WEIGHT;
179         sbi->ll_heat_period_second = SBI_DEFAULT_HEAT_PERIOD_SECOND;
180         RETURN(sbi);
181 out_destroy_ra:
182         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
183 out_pcc:
184         pcc_super_fini(&sbi->ll_pcc_super);
185 out_sbi:
186         OBD_FREE_PTR(sbi);
187         RETURN(ERR_PTR(rc));
188 }
189
190 static void ll_free_sbi(struct super_block *sb)
191 {
192         struct ll_sb_info *sbi = ll_s2sbi(sb);
193         ENTRY;
194
195         if (sbi != NULL) {
196                 if (!list_empty(&sbi->ll_squash.rsi_nosquash_nids))
197                         cfs_free_nidlist(&sbi->ll_squash.rsi_nosquash_nids);
198                 if (sbi->ll_ra_info.ll_readahead_wq)
199                         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
200                 if (sbi->ll_cache != NULL) {
201                         cl_cache_decref(sbi->ll_cache);
202                         sbi->ll_cache = NULL;
203                 }
204                 pcc_super_fini(&sbi->ll_pcc_super);
205                 OBD_FREE(sbi, sizeof(*sbi));
206         }
207         EXIT;
208 }
209
210 static int client_common_fill_super(struct super_block *sb, char *md, char *dt)
211 {
212         struct inode *root = NULL;
213         struct ll_sb_info *sbi = ll_s2sbi(sb);
214         struct obd_statfs *osfs = NULL;
215         struct ptlrpc_request *request = NULL;
216         struct obd_connect_data *data = NULL;
217         struct obd_uuid *uuid;
218         struct md_op_data *op_data;
219         struct lustre_md lmd;
220         u64 valid;
221         int size, err, checksum;
222
223         ENTRY;
224         sbi->ll_md_obd = class_name2obd(md);
225         if (!sbi->ll_md_obd) {
226                 CERROR("MD %s: not setup or attached\n", md);
227                 RETURN(-EINVAL);
228         }
229
230         OBD_ALLOC_PTR(data);
231         if (data == NULL)
232                 RETURN(-ENOMEM);
233
234         OBD_ALLOC_PTR(osfs);
235         if (osfs == NULL) {
236                 OBD_FREE_PTR(data);
237                 RETURN(-ENOMEM);
238         }
239
240         /* pass the client page size via ocd_grant_blkbits; the server will
241          * report back its backend blocksize for grant calculation purposes */
242         data->ocd_grant_blkbits = PAGE_SHIFT;
243
244         /* indicate MDT features supported by this client */
245         data->ocd_connect_flags = OBD_CONNECT_IBITS    | OBD_CONNECT_NODEVOH  |
246                                   OBD_CONNECT_ATTRFID  | OBD_CONNECT_GRANT |
247                                   OBD_CONNECT_VERSION  | OBD_CONNECT_BRW_SIZE |
248                                   OBD_CONNECT_SRVLOCK  | OBD_CONNECT_TRUNCLOCK|
249                                   OBD_CONNECT_MDS_CAPA | OBD_CONNECT_OSS_CAPA |
250                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID     |
251                                   OBD_CONNECT_AT       | OBD_CONNECT_LOV_V3   |
252                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
253                                   OBD_CONNECT_64BITHASH |
254                                   OBD_CONNECT_EINPROGRESS |
255                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
256                                   OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS|
257                                   OBD_CONNECT_MAX_EASIZE |
258                                   OBD_CONNECT_FLOCK_DEAD |
259                                   OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK |
260                                   OBD_CONNECT_OPEN_BY_FID |
261                                   OBD_CONNECT_DIR_STRIPE |
262                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_CKSUM |
263                                   OBD_CONNECT_SUBTREE |
264                                   OBD_CONNECT_MULTIMODRPCS |
265                                   OBD_CONNECT_GRANT_PARAM |
266                                   OBD_CONNECT_SHORTIO | OBD_CONNECT_FLAGS2;
267
268         data->ocd_connect_flags2 = OBD_CONNECT2_DIR_MIGRATE |
269                                    OBD_CONNECT2_SUM_STATFS |
270                                    OBD_CONNECT2_OVERSTRIPING |
271                                    OBD_CONNECT2_FLR |
272                                    OBD_CONNECT2_LOCK_CONVERT |
273                                    OBD_CONNECT2_ARCHIVE_ID_ARRAY |
274                                    OBD_CONNECT2_INC_XID |
275                                    OBD_CONNECT2_LSOM |
276                                    OBD_CONNECT2_ASYNC_DISCARD |
277                                    OBD_CONNECT2_PCC |
278                                    OBD_CONNECT2_CRUSH;
279
280 #ifdef HAVE_LRU_RESIZE_SUPPORT
281         if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
282                 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
283 #endif
284 #ifdef CONFIG_LUSTRE_FS_POSIX_ACL
285         data->ocd_connect_flags |= OBD_CONNECT_ACL | OBD_CONNECT_UMASK |
286                                    OBD_CONNECT_LARGE_ACL;
287 #endif
288
289         data->ocd_cksum_types = obd_cksum_types_supported_client();
290
291         if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
292                 /* flag the MDC connection as lightweight; only used for
293                  * test purposes, use with care */
294                 data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
295
296         data->ocd_ibits_known = MDS_INODELOCK_FULL;
297         data->ocd_version = LUSTRE_VERSION_CODE;
298
299         if (sb->s_flags & SB_RDONLY)
300                 data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
301         if (sbi->ll_flags & LL_SBI_USER_XATTR)
302                 data->ocd_connect_flags |= OBD_CONNECT_XATTR;
303
304 #ifdef SB_NOSEC
305         /* Setting this indicates we correctly support S_NOSEC (See kernel
306          * commit 9e1f1de02c2275d7172e18dc4e7c2065777611bf)
307          */
308         sb->s_flags |= SB_NOSEC;
309 #endif
310
311         if (sbi->ll_flags & LL_SBI_FLOCK)
312                 sbi->ll_fop = &ll_file_operations_flock;
313         else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
314                 sbi->ll_fop = &ll_file_operations;
315         else
316                 sbi->ll_fop = &ll_file_operations_noflock;
317
318         /* always ping even if server suppress_pings */
319         if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
320                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
321
322         obd_connect_set_secctx(data);
323         if (ll_sbi_has_encrypt(sbi))
324                 obd_connect_set_enc(data);
325
326 #if defined(CONFIG_SECURITY)
327         data->ocd_connect_flags2 |= OBD_CONNECT2_SELINUX_POLICY;
328 #endif
329
330         data->ocd_brw_size = MD_MAX_BRW_SIZE;
331
332         err = obd_connect(NULL, &sbi->ll_md_exp, sbi->ll_md_obd,
333                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
334         if (err == -EBUSY) {
335                 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
336                                    "recovery, of which this client is not a "
337                                    "part. Please wait for recovery to complete,"
338                                    " abort, or time out.\n", md);
339                 GOTO(out, err);
340         } else if (err) {
341                 CERROR("cannot connect to %s: rc = %d\n", md, err);
342                 GOTO(out, err);
343         }
344
345         sbi->ll_md_exp->exp_connect_data = *data;
346
347         err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
348                            LUSTRE_SEQ_METADATA);
349         if (err) {
350                 CERROR("%s: Can't init metadata layer FID infrastructure, "
351                        "rc = %d\n", sbi->ll_md_exp->exp_obd->obd_name, err);
352                 GOTO(out_md, err);
353         }
354
355         /* For mount, we only need fs info from MDT0; in DNE this also
356          * ensures that the client can be mounted as long as MDT0 is
357          * available */
358         err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
359                         ktime_get_seconds() - sbi->ll_statfs_max_age,
360                         OBD_STATFS_FOR_MDT0);
361         if (err)
362                 GOTO(out_md_fid, err);
363
364         /* This needs to be after statfs to ensure connect has finished.
365          * Note that "data" does NOT contain the valid connect reply.
366          * If connecting to a 1.8 server there will be no LMV device, so
367          * we can access the MDC export directly and exp_connect_flags will
368          * be non-zero, but if accessing an upgraded 2.1 server it will
369          * have the correct flags filled in.
370          * XXX: fill in the LMV exp_connect_flags from MDC(s). */
371         valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
372         if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
373             valid != CLIENT_CONNECT_MDT_REQD) {
374                 char *buf;
375
376                 OBD_ALLOC_WAIT(buf, PAGE_SIZE);
377                 obd_connect_flags2str(buf, PAGE_SIZE,
378                                       valid ^ CLIENT_CONNECT_MDT_REQD, 0, ",");
379                 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support "
380                                    "feature(s) needed for correct operation "
381                                    "of this client (%s). Please upgrade "
382                                    "server or downgrade client.\n",
383                                    sbi->ll_md_exp->exp_obd->obd_name, buf);
384                 OBD_FREE(buf, PAGE_SIZE);
385                 GOTO(out_md_fid, err = -EPROTO);
386         }
387
388         size = sizeof(*data);
389         err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
390                            KEY_CONN_DATA,  &size, data);
391         if (err) {
392                 CERROR("%s: Get connect data failed: rc = %d\n",
393                        sbi->ll_md_exp->exp_obd->obd_name, err);
394                 GOTO(out_md_fid, err);
395         }
396
397         LASSERT(osfs->os_bsize);
398         sb->s_blocksize = osfs->os_bsize;
399         sb->s_blocksize_bits = log2(osfs->os_bsize);
400         sb->s_magic = LL_SUPER_MAGIC;
401         sb->s_maxbytes = MAX_LFS_FILESIZE;
402         sbi->ll_namelen = osfs->os_namelen;
403         sbi->ll_mnt.mnt = current->fs->root.mnt;
404
405         if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
406             !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
407                 LCONSOLE_INFO("Disabling user_xattr feature because "
408                               "it is not supported on the server\n");
409                 sbi->ll_flags &= ~LL_SBI_USER_XATTR;
410         }
411
412         if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
413 #ifdef SB_POSIXACL
414                 sb->s_flags |= SB_POSIXACL;
415 #endif
416                 sbi->ll_flags |= LL_SBI_ACL;
417         } else {
418                 LCONSOLE_INFO("client wants to enable acl, but mdt does not!\n");
419 #ifdef SB_POSIXACL
420                 sb->s_flags &= ~SB_POSIXACL;
421 #endif
422                 sbi->ll_flags &= ~LL_SBI_ACL;
423         }
424
425         if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
426                 sbi->ll_flags |= LL_SBI_64BIT_HASH;
427
428         if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK)
429                 sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;
430
431         if (obd_connect_has_secctx(data))
432                 sbi->ll_flags |= LL_SBI_FILE_SECCTX;
433
434         if (ll_sbi_has_encrypt(sbi) && !obd_connect_has_enc(data)) {
435                 if (ll_sbi_has_test_dummy_encryption(sbi))
436                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
437                                       sbi->ll_fsname,
438                                       sbi->ll_md_exp->exp_obd->obd_name);
439                 ll_sbi_set_encrypt(sbi, false);
440         }
441
442         if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
443                 if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
444                         LCONSOLE_INFO("%s: disabling xattr cache due to "
445                                       "unknown maximum xattr size.\n", dt);
446                 } else if (!sbi->ll_xattr_cache_set) {
447                         /* If xattr_cache was already set (to either 0 or 1)
448                          * while processing the llog, don't override it here. */
449                         sbi->ll_flags |= LL_SBI_XATTR_CACHE;
450                         sbi->ll_xattr_cache_enabled = 1;
451                 }
452         }
453
454         sbi->ll_dt_obd = class_name2obd(dt);
455         if (!sbi->ll_dt_obd) {
456                 CERROR("DT %s: not setup or attached\n", dt);
457                 GOTO(out_md_fid, err = -ENODEV);
458         }
459
460         /* pass client page size via ocd_grant_blkbits, the server should report
461          * back its backend blocksize for grant calculation purpose */
462         data->ocd_grant_blkbits = PAGE_SHIFT;
463
464         /* indicate OST features supported by this client */
465         data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
466                                   OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
467                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
468                                   OBD_CONNECT_SRVLOCK | OBD_CONNECT_TRUNCLOCK|
469                                   OBD_CONNECT_AT | OBD_CONNECT_OSS_CAPA |
470                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
471                                   OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
472                                   OBD_CONNECT_EINPROGRESS |
473                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
474                                   OBD_CONNECT_LAYOUTLOCK |
475                                   OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK |
476                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_SHORTIO |
477                                   OBD_CONNECT_FLAGS2 | OBD_CONNECT_GRANT_SHRINK;
478         data->ocd_connect_flags2 = OBD_CONNECT2_LOCKAHEAD |
479                                    OBD_CONNECT2_INC_XID;
480
481         if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_GRANT_PARAM))
482                 data->ocd_connect_flags |= OBD_CONNECT_GRANT_PARAM;
483
484         /* OBD_CONNECT_CKSUM should always be set, even if checksums are
485          * disabled by default, because it can still be enabled on the
486          * fly via /sys. As a consequence, we still need to come to an
487          * agreement on the supported algorithms at connect time
488          */
489         data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
490
491         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
492                 data->ocd_cksum_types = OBD_CKSUM_ADLER;
493         else
494                 data->ocd_cksum_types = obd_cksum_types_supported_client();
495
496 #ifdef HAVE_LRU_RESIZE_SUPPORT
497         data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
498 #endif
499         /* always ping even if server suppress_pings */
500         if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
501                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
502
503         if (ll_sbi_has_encrypt(sbi))
504                 obd_connect_set_enc(data);
505
506         CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d "
507                "ocd_grant: %d\n", data->ocd_connect_flags,
508                data->ocd_version, data->ocd_grant);
509
510         sbi->ll_dt_obd->obd_upcall.onu_owner = &sbi->ll_lco;
511         sbi->ll_dt_obd->obd_upcall.onu_upcall = cl_ocd_update;
512
513         data->ocd_brw_size = DT_MAX_BRW_SIZE;
514
515         err = obd_connect(NULL, &sbi->ll_dt_exp, sbi->ll_dt_obd,
516                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
517         if (err == -EBUSY) {
518                 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
519                                    "recovery, of which this client is not a "
520                                    "part.  Please wait for recovery to "
521                                    "complete, abort, or time out.\n", dt);
522                 GOTO(out_md, err);
523         } else if (err) {
524                 CERROR("%s: Cannot connect to %s: rc = %d\n",
525                        sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
526                 GOTO(out_md, err);
527         }
528
529         if (ll_sbi_has_encrypt(sbi) &&
530             !obd_connect_has_enc(&sbi->ll_dt_obd->u.lov.lov_ocd)) {
531                 if (ll_sbi_has_test_dummy_encryption(sbi))
532                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
533                                       sbi->ll_fsname, dt);
534                 ll_sbi_set_encrypt(sbi, false);
535         } else if (ll_sbi_has_test_dummy_encryption(sbi)) {
536                 LCONSOLE_WARN("Test dummy encryption mode enabled\n");
537         }
538
539         sbi->ll_dt_exp->exp_connect_data = *data;
540
541         /* Don't change value if it was specified in the config log */
542         if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages == -1) {
543                 sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
544                         max_t(unsigned long, SBI_DEFAULT_READ_AHEAD_WHOLE_MAX,
545                               (data->ocd_brw_size >> PAGE_SHIFT));
546                 if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages >
547                     sbi->ll_ra_info.ra_max_pages_per_file)
548                         sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
549                                 sbi->ll_ra_info.ra_max_pages_per_file;
550         }
551
552         err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
553                            LUSTRE_SEQ_METADATA);
554         if (err) {
555                 CERROR("%s: Can't init data layer FID infrastructure, "
556                        "rc = %d\n", sbi->ll_dt_exp->exp_obd->obd_name, err);
557                 GOTO(out_dt, err);
558         }
559
560         mutex_lock(&sbi->ll_lco.lco_lock);
561         sbi->ll_lco.lco_flags = data->ocd_connect_flags;
562         sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
563         sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
564         mutex_unlock(&sbi->ll_lco.lco_lock);
565
566         fid_zero(&sbi->ll_root_fid);
567         err = md_get_root(sbi->ll_md_exp, get_mount_fileset(sb),
568                            &sbi->ll_root_fid);
569         if (err) {
570                 CERROR("cannot mds_connect: rc = %d\n", err);
571                 GOTO(out_lock_cn_cb, err);
572         }
573         if (!fid_is_sane(&sbi->ll_root_fid)) {
574                 CERROR("%s: Invalid root fid "DFID" during mount\n",
575                        sbi->ll_md_exp->exp_obd->obd_name,
576                        PFID(&sbi->ll_root_fid));
577                 GOTO(out_lock_cn_cb, err = -EINVAL);
578         }
579         CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
580
581         sb->s_op = &lustre_super_operations;
582         sb->s_xattr = ll_xattr_handlers;
583 #if THREAD_SIZE >= 8192 /*b=17630*/
584         sb->s_export_op = &lustre_export_operations;
585 #endif
586 #ifdef HAVE_LUSTRE_CRYPTO
587         llcrypt_set_ops(sb, &lustre_cryptops);
588 #endif
589
590         /* make root inode
591          * XXX: move this to after cbd setup? */
592         valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE;
593         if (sbi->ll_flags & LL_SBI_ACL)
594                 valid |= OBD_MD_FLACL;
595
596         OBD_ALLOC_PTR(op_data);
597         if (op_data == NULL)
598                 GOTO(out_lock_cn_cb, err = -ENOMEM);
599
600         op_data->op_fid1 = sbi->ll_root_fid;
601         op_data->op_mode = 0;
602         op_data->op_valid = valid;
603
604         err = md_getattr(sbi->ll_md_exp, op_data, &request);
605
606         OBD_FREE_PTR(op_data);
607         if (err) {
608                 CERROR("%s: md_getattr failed for root: rc = %d\n",
609                        sbi->ll_md_exp->exp_obd->obd_name, err);
610                 GOTO(out_lock_cn_cb, err);
611         }
612
613         err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
614                                sbi->ll_md_exp, &lmd);
615         if (err) {
616                 CERROR("failed to understand root inode md: rc = %d\n", err);
617                 ptlrpc_req_finished(request);
618                 GOTO(out_lock_cn_cb, err);
619         }
620
621         LASSERT(fid_is_sane(&sbi->ll_root_fid));
622         root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid,
623                                             sbi->ll_flags & LL_SBI_32BIT_API),
624                        &lmd);
625         md_free_lustre_md(sbi->ll_md_exp, &lmd);
626         ptlrpc_req_finished(request);
627
628         if (IS_ERR(root)) {
629 #ifdef CONFIG_LUSTRE_FS_POSIX_ACL
630                 if (lmd.posix_acl) {
631                         posix_acl_release(lmd.posix_acl);
632                         lmd.posix_acl = NULL;
633                 }
634 #endif
635                 err = IS_ERR(root) ? PTR_ERR(root) : -EBADF;
636                 root = NULL;
637                 CERROR("%s: bad ll_iget() for root: rc = %d\n",
638                        sbi->ll_fsname, err);
639                 GOTO(out_root, err);
640         }
641
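        /* push the checksum setting down to the OSC layer, but only if it
         * was explicitly set (e.g. via the checksum/nochecksum mount option
         * or the config log) */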
642         checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
643         if (sbi->ll_checksum_set) {
644                 err = obd_set_info_async(NULL, sbi->ll_dt_exp,
645                                          sizeof(KEY_CHECKSUM), KEY_CHECKSUM,
646                                          sizeof(checksum), &checksum, NULL);
647                 if (err) {
648                         CERROR("%s: Set checksum failed: rc = %d\n",
649                                sbi->ll_dt_exp->exp_obd->obd_name, err);
650                         GOTO(out_root, err);
651                 }
652         }
653         cl_sb_init(sb);
654
655         sb->s_root = d_make_root(root);
656         if (sb->s_root == NULL) {
657                 err = -ENOMEM;
658                 CERROR("%s: can't make root dentry: rc = %d\n",
659                        sbi->ll_fsname, err);
660                 GOTO(out_root, err);
661         }
662
663         sbi->ll_sdev_orig = sb->s_dev;
664
665         /* We set sb->s_dev equal on all lustre clients in order to support
666          * NFS export clustering.  NFSD requires that the FSID be the same
667          * on all clients. */
668         /* s_dev is also used in lt_compare() to compare two fs, but that is
669          * only a node-local comparison. */
670         uuid = obd_get_uuid(sbi->ll_md_exp);
671         if (uuid != NULL)
672                 sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
673
674         if (data != NULL)
675                 OBD_FREE_PTR(data);
676         if (osfs != NULL)
677                 OBD_FREE_PTR(osfs);
678
679         if (sbi->ll_dt_obd) {
680                 err = sysfs_create_link(&sbi->ll_kset.kobj,
681                                         &sbi->ll_dt_obd->obd_kset.kobj,
682                                         sbi->ll_dt_obd->obd_type->typ_name);
683                 if (err < 0) {
684                         CERROR("%s: could not register %s in llite: rc = %d\n",
685                                dt, sbi->ll_fsname, err);
686                         err = 0;
687                 }
688         }
689
690         if (sbi->ll_md_obd) {
691                 err = sysfs_create_link(&sbi->ll_kset.kobj,
692                                         &sbi->ll_md_obd->obd_kset.kobj,
693                                         sbi->ll_md_obd->obd_type->typ_name);
694                 if (err < 0) {
695                         CERROR("%s: could not register %s in llite: rc = %d\n",
696                                md, sbi->ll_fsname, err);
697                         err = 0;
698                 }
699         }
700
701         RETURN(err);
702 out_root:
703         if (root)
704                 iput(root);
705 out_lock_cn_cb:
706         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
707 out_dt:
708         obd_disconnect(sbi->ll_dt_exp);
709         sbi->ll_dt_exp = NULL;
710         sbi->ll_dt_obd = NULL;
711 out_md_fid:
712         obd_fid_fini(sbi->ll_md_exp->exp_obd);
713 out_md:
714         obd_disconnect(sbi->ll_md_exp);
715         sbi->ll_md_exp = NULL;
716         sbi->ll_md_obd = NULL;
717 out:
718         if (data != NULL)
719                 OBD_FREE_PTR(data);
720         if (osfs != NULL)
721                 OBD_FREE_PTR(osfs);
722         return err;
723 }
724
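/**
 * Get the maximum EA size from the data (LOV) and metadata (LMV) exports;
 * the LMV value, queried last, is what is returned in \a lmmsize.
 */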
725 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
726 {
727         int size, rc;
728
729         size = sizeof(*lmmsize);
730         rc = obd_get_info(NULL, sbi->ll_dt_exp, sizeof(KEY_MAX_EASIZE),
731                           KEY_MAX_EASIZE, &size, lmmsize);
732         if (rc != 0) {
733                 CERROR("%s: cannot get max LOV EA size: rc = %d\n",
734                        sbi->ll_dt_exp->exp_obd->obd_name, rc);
735                 RETURN(rc);
736         }
737
738         CDEBUG(D_INFO, "max LOV ea size: %d\n", *lmmsize);
739
740         size = sizeof(int);
741         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
742                           KEY_MAX_EASIZE, &size, lmmsize);
743         if (rc)
744                 CERROR("Get max mdsize error rc %d\n", rc);
745
746         CDEBUG(D_INFO, "max LMV ea size: %d\n", *lmmsize);
747
748         RETURN(rc);
749 }
750
751 /**
752  * Get the value of the default_easize parameter.
753  *
754  * \see client_obd::cl_default_mds_easize
755  *
756  * \param[in] sbi       superblock info for this filesystem
757  * \param[out] lmmsize  pointer to storage location for value
758  *
759  * \retval 0            on success
760  * \retval negative     negated errno on failure
761  */
762 int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
763 {
764         int size, rc;
765
766         size = sizeof(int);
767         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
768                          KEY_DEFAULT_EASIZE, &size, lmmsize);
769         if (rc)
770                 CERROR("Get default mdsize error rc %d\n", rc);
771
772         RETURN(rc);
773 }
774
775 /**
776  * Set the default_easize parameter to the given value.
777  *
778  * \see client_obd::cl_default_mds_easize
779  *
780  * \param[in] sbi       superblock info for this filesystem
781  * \param[in] lmmsize   the size to set
782  *
783  * \retval 0            on success
784  * \retval negative     negated errno on failure
785  */
786 int ll_set_default_mdsize(struct ll_sb_info *sbi, int lmmsize)
787 {
788         int rc;
789
790         if (lmmsize < sizeof(struct lov_mds_md) ||
791             lmmsize > OBD_MAX_DEFAULT_EA_SIZE)
792                 return -EINVAL;
793
794         rc = obd_set_info_async(NULL, sbi->ll_md_exp,
795                                 sizeof(KEY_DEFAULT_EASIZE), KEY_DEFAULT_EASIZE,
796                                 sizeof(int), &lmmsize, NULL);
797
798         RETURN(rc);
799 }
800
801 static void client_common_put_super(struct super_block *sb)
802 {
803         struct ll_sb_info *sbi = ll_s2sbi(sb);
804         ENTRY;
805
806         cl_sb_fini(sb);
807
808         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
809         obd_disconnect(sbi->ll_dt_exp);
810         sbi->ll_dt_exp = NULL;
811
812         ll_debugfs_unregister_super(sb);
813
814         obd_fid_fini(sbi->ll_md_exp->exp_obd);
815         obd_disconnect(sbi->ll_md_exp);
816         sbi->ll_md_exp = NULL;
817
818         EXIT;
819 }
820
821 void ll_kill_super(struct super_block *sb)
822 {
823         struct ll_sb_info *sbi;
824         ENTRY;
825
826         /* sb not initialized yet? */
827         if (!(sb->s_flags & SB_ACTIVE))
828                 return;
829
830         sbi = ll_s2sbi(sb);
831         /* restore s_dev, which was changed for clustered NFS, before
832          * put_super: new kernels cache s_dev, so changing sb->s_dev in
833          * put_super does not affect the real devices being removed */
834         if (sbi) {
835                 sb->s_dev = sbi->ll_sdev_orig;
836
837                 /* wait for running statahead threads to quit */
838                 while (atomic_read(&sbi->ll_sa_running) > 0)
839                         schedule_timeout_uninterruptible(
840                                 cfs_time_seconds(1) >> 3);
841         }
842
843         EXIT;
844 }
845
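/* return \a fl if the option string \a data starts with \a opt, otherwise 0 */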
846 static inline int ll_set_opt(const char *opt, char *data, int fl)
847 {
848         if (strncmp(opt, data, strlen(opt)) != 0)
849                 return 0;
850         else
851                 return fl;
852 }
853
854 /* non-client-specific mount options are parsed in lmd_parse */
855 static int ll_options(char *options, struct ll_sb_info *sbi)
856 {
857         int tmp;
858         char *s1 = options, *s2;
859         int *flags = &sbi->ll_flags;
860         ENTRY;
861
862         if (!options)
863                 RETURN(0);
864
865         CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
866
867         while (*s1) {
868                 CDEBUG(D_SUPER, "next opt=%s\n", s1);
869                 tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
870                 if (tmp) {
871                         *flags |= tmp;
872                         goto next;
873                 }
874                 tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
875                 if (tmp) {
876                         *flags = (*flags & ~LL_SBI_LOCALFLOCK) | tmp;
877                         goto next;
878                 }
879                 tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
880                 if (tmp) {
881                         *flags = (*flags & ~LL_SBI_FLOCK) | tmp;
882                         goto next;
883                 }
884                 tmp = ll_set_opt("noflock", s1, LL_SBI_FLOCK|LL_SBI_LOCALFLOCK);
885                 if (tmp) {
886                         *flags &= ~tmp;
887                         goto next;
888                 }
889                 tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
890                 if (tmp) {
891                         *flags |= tmp;
892                         goto next;
893                 }
894                 tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
895                 if (tmp) {
896                         *flags &= ~tmp;
897                         goto next;
898                 }
899                 tmp = ll_set_opt("context", s1, 1);
900                 if (tmp)
901                         goto next;
902                 tmp = ll_set_opt("fscontext", s1, 1);
903                 if (tmp)
904                         goto next;
905                 tmp = ll_set_opt("defcontext", s1, 1);
906                 if (tmp)
907                         goto next;
908                 tmp = ll_set_opt("rootcontext", s1, 1);
909                 if (tmp)
910                         goto next;
911                 tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
912                 if (tmp) {
913                         *flags |= tmp;
914                         goto next;
915                 }
916                 tmp = ll_set_opt("nouser_fid2path", s1, LL_SBI_USER_FID2PATH);
917                 if (tmp) {
918                         *flags &= ~tmp;
919                         goto next;
920                 }
921
922                 tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
923                 if (tmp) {
924                         *flags |= tmp;
925                         sbi->ll_checksum_set = 1;
926                         goto next;
927                 }
928                 tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
929                 if (tmp) {
930                         *flags &= ~tmp;
931                         sbi->ll_checksum_set = 1;
932                         goto next;
933                 }
934                 tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
935                 if (tmp) {
936                         *flags |= tmp;
937                         goto next;
938                 }
939                 tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
940                 if (tmp) {
941                         *flags &= ~tmp;
942                         goto next;
943                 }
944                 tmp = ll_set_opt("lazystatfs", s1, LL_SBI_LAZYSTATFS);
945                 if (tmp) {
946                         *flags |= tmp;
947                         goto next;
948                 }
949                 tmp = ll_set_opt("nolazystatfs", s1, LL_SBI_LAZYSTATFS);
950                 if (tmp) {
951                         *flags &= ~tmp;
952                         goto next;
953                 }
954                 tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
955                 if (tmp) {
956                         *flags |= tmp;
957                         goto next;
958                 }
959                 tmp = ll_set_opt("verbose", s1, LL_SBI_VERBOSE);
960                 if (tmp) {
961                         *flags |= tmp;
962                         goto next;
963                 }
964                 tmp = ll_set_opt("noverbose", s1, LL_SBI_VERBOSE);
965                 if (tmp) {
966                         *flags &= ~tmp;
967                         goto next;
968                 }
969                 tmp = ll_set_opt("always_ping", s1, LL_SBI_ALWAYS_PING);
970                 if (tmp) {
971                         *flags |= tmp;
972                         goto next;
973                 }
974                 tmp = ll_set_opt("test_dummy_encryption", s1,
975                                  LL_SBI_TEST_DUMMY_ENCRYPTION);
976                 if (tmp) {
977 #ifdef HAVE_LUSTRE_CRYPTO
978                         *flags |= tmp;
979 #else
980                         LCONSOLE_WARN("Test dummy encryption mount option ignored: encryption not supported\n");
981 #endif
982                         goto next;
983                 }
984                 tmp = ll_set_opt("noencrypt", s1, LL_SBI_ENCRYPT);
985                 if (tmp) {
986 #ifdef HAVE_LUSTRE_CRYPTO
987                         *flags &= ~tmp;
988 #else
989                         LCONSOLE_WARN("noencrypt mount option ignored: encryption not supported\n");
990 #endif
991                         goto next;
992                 }
993                 LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
994                                    s1);
995                 RETURN(-EINVAL);
996
997 next:
998                 /* Find next opt */
999                 s2 = strchr(s1, ',');
1000                 if (s2 == NULL)
1001                         break;
1002                 s1 = s2 + 1;
1003         }
1004         RETURN(0);
1005 }
1006
1007 void ll_lli_init(struct ll_inode_info *lli)
1008 {
1009         lli->lli_inode_magic = LLI_INODE_MAGIC;
1010         lli->lli_flags = 0;
1011         spin_lock_init(&lli->lli_lock);
1012         lli->lli_posix_acl = NULL;
1013         /* Do not set lli_fid, it has been initialized already. */
1014         fid_zero(&lli->lli_pfid);
1015         lli->lli_mds_read_och = NULL;
1016         lli->lli_mds_write_och = NULL;
1017         lli->lli_mds_exec_och = NULL;
1018         lli->lli_open_fd_read_count = 0;
1019         lli->lli_open_fd_write_count = 0;
1020         lli->lli_open_fd_exec_count = 0;
1021         mutex_init(&lli->lli_och_mutex);
1022         spin_lock_init(&lli->lli_agl_lock);
1023         spin_lock_init(&lli->lli_layout_lock);
1024         ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
1025         lli->lli_clob = NULL;
1026
1027         init_rwsem(&lli->lli_xattrs_list_rwsem);
1028         mutex_init(&lli->lli_xattrs_enq_lock);
1029
1030         LASSERT(lli->lli_vfs_inode.i_mode != 0);
1031         if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
1032                 lli->lli_opendir_key = NULL;
1033                 lli->lli_sai = NULL;
1034                 spin_lock_init(&lli->lli_sa_lock);
1035                 lli->lli_opendir_pid = 0;
1036                 lli->lli_sa_enabled = 0;
1037                 init_rwsem(&lli->lli_lsm_sem);
1038         } else {
1039                 mutex_init(&lli->lli_size_mutex);
1040                 mutex_init(&lli->lli_setattr_mutex);
1041                 lli->lli_symlink_name = NULL;
1042                 ll_trunc_sem_init(&lli->lli_trunc_sem);
1043                 range_lock_tree_init(&lli->lli_write_tree);
1044                 init_rwsem(&lli->lli_glimpse_sem);
1045                 lli->lli_glimpse_time = ktime_set(0, 0);
1046                 INIT_LIST_HEAD(&lli->lli_agl_list);
1047                 lli->lli_agl_index = 0;
1048                 lli->lli_async_rc = 0;
1049                 spin_lock_init(&lli->lli_heat_lock);
1050                 obd_heat_clear(lli->lli_heat_instances, OBD_HEAT_COUNT);
1051                 lli->lli_heat_flags = 0;
1052                 mutex_init(&lli->lli_pcc_lock);
1053                 lli->lli_pcc_state = PCC_STATE_FL_NONE;
1054                 lli->lli_pcc_inode = NULL;
1055                 lli->lli_pcc_dsflags = PCC_DATASET_INVALID;
1056                 lli->lli_pcc_generation = 0;
1057                 mutex_init(&lli->lli_group_mutex);
1058                 lli->lli_group_users = 0;
1059                 lli->lli_group_gid = 0;
1060         }
1061         mutex_init(&lli->lli_layout_mutex);
1062         memset(lli->lli_jobid, 0, sizeof(lli->lli_jobid));
1063 }
1064
1065 #define MAX_STRING_SIZE 128
1066
1067 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1068
1069 #define LSI_BDI_INITIALIZED     0x00400000
1070
1071 #ifndef HAVE_BDI_CAP_MAP_COPY
1072 # define BDI_CAP_MAP_COPY       0
1073 #endif
1074
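/* compatibility version for kernels that lack super_setup_bdi_name():
 * set up and register a per-superblock backing_dev_info by hand */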
1075 static int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
1076 {
1077         struct  lustre_sb_info *lsi = s2lsi(sb);
1078         char buf[MAX_STRING_SIZE];
1079         va_list args;
1080         int err;
1081
1082         err = bdi_init(&lsi->lsi_bdi);
1083         if (err)
1084                 return err;
1085
1086         lsi->lsi_flags |= LSI_BDI_INITIALIZED;
1087         lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY;
1088         lsi->lsi_bdi.name = "lustre";
1089         va_start(args, fmt);
1090         vsnprintf(buf, MAX_STRING_SIZE, fmt, args);
1091         va_end(args);
1092         err = bdi_register(&lsi->lsi_bdi, NULL, "%s", buf);
1094         if (!err)
1095                 sb->s_bdi = &lsi->lsi_bdi;
1096
1097         return err;
1098 }
1099 #endif /* !HAVE_SUPER_SETUP_BDI_NAME */
1100
1101 int ll_fill_super(struct super_block *sb)
1102 {
1103         struct  lustre_profile *lprof = NULL;
1104         struct  lustre_sb_info *lsi = s2lsi(sb);
1105         struct  ll_sb_info *sbi = NULL;
1106         char    *dt = NULL, *md = NULL;
1107         char    *profilenm = get_profile_name(sb);
1108         struct config_llog_instance *cfg;
1109         /* %p for void* in printf needs 16+2 characters: 0xffffffffffffffff */
1110         const int instlen = LUSTRE_MAXINSTANCE + 2;
1111         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1112         char name[MAX_STRING_SIZE];
1113         int md_len = 0;
1114         int dt_len = 0;
1115         uuid_t uuid;
1116         char *ptr;
1117         int len;
1118         int err;
1119
1120         ENTRY;
1121         /* for ASLR, to map between cfg_instance and hashed ptr */
1122         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1123                profilenm, cfg_instance, sb);
1124
1125         OBD_ALLOC_PTR(cfg);
1126         if (cfg == NULL)
1127                 GOTO(out_free_cfg, err = -ENOMEM);
1128
1129         /* client additional sb info */
1130         lsi->lsi_llsbi = sbi = ll_init_sbi();
1131         if (IS_ERR(sbi))
1132                 GOTO(out_free_cfg, err = PTR_ERR(sbi));
1133
1134         err = ll_options(lsi->lsi_lmd->lmd_opts, sbi);
1135         if (err)
1136                 GOTO(out_free_cfg, err);
1137
1138         /* kernels >= 2.6.38 store dentry operations in sb->s_d_op. */
1139         sb->s_d_op = &ll_d_ops;
1140
1141         /* UUID handling */
1142         generate_random_uuid(uuid.b);
1143         snprintf(sbi->ll_sb_uuid.uuid, sizeof(sbi->ll_sb_uuid), "%pU", uuid.b);
1144
1145         CDEBUG(D_CONFIG, "llite sb uuid: %s\n", sbi->ll_sb_uuid.uuid);
1146
1147         /* Get fsname */
1148         len = strlen(profilenm);
1149         ptr = strrchr(profilenm, '-');
1150         if (ptr && (strcmp(ptr, "-client") == 0))
1151                 len -= 7;
1152
1153         if (len > LUSTRE_MAXFSNAME) {
1154                 if (unlikely(len >= MAX_STRING_SIZE))
1155                         len = MAX_STRING_SIZE - 1;
1156                 strncpy(name, profilenm, len);
1157                 name[len] = '\0';
1158                 err = -ENAMETOOLONG;
1159                 CERROR("%s: fsname longer than %u characters: rc = %d\n",
1160                        name, LUSTRE_MAXFSNAME, err);
1161                 GOTO(out_free_cfg, err);
1162         }
1163         strncpy(sbi->ll_fsname, profilenm, len);
1164         sbi->ll_fsname[len] = '\0';
1165
1166         /* Mount info */
1167         snprintf(name, sizeof(name), "%.*s-%016lx", len,
1168                  profilenm, cfg_instance);
1169
1170         err = super_setup_bdi_name(sb, "%s", name);
1171         if (err)
1172                 GOTO(out_free_cfg, err);
1173
1174         /* Call ll_debugfs_register_super() before lustre_process_log()
1175          * so that "llite.*.*" params can be processed correctly.
1176          */
1177         err = ll_debugfs_register_super(sb, name);
1178         if (err < 0) {
1179                 CERROR("%s: could not register mountpoint in llite: rc = %d\n",
1180                        sbi->ll_fsname, err);
1181                 err = 0;
1182         }
1183
1184         /* The cfg_instance is a value unique to this super, in case some
1185          * joker tries to mount the same fs at two mount points.
1186          */
1187         cfg->cfg_instance = cfg_instance;
1188         cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
1189         cfg->cfg_callback = class_config_llog_handler;
1190         cfg->cfg_sub_clds = CONFIG_SUB_CLIENT;
1191         /* set up client obds */
1192         err = lustre_process_log(sb, profilenm, cfg);
1193         if (err < 0)
1194                 GOTO(out_debugfs, err);
1195
1196         /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
1197         lprof = class_get_profile(profilenm);
1198         if (lprof == NULL) {
1199                 LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be"
1200                                    " read from the MGS.  Does that filesystem "
1201                                    "exist?\n", profilenm);
1202                 GOTO(out_debugfs, err = -EINVAL);
1203         }
1204         CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
1205                lprof->lp_md, lprof->lp_dt);
1206
1207         dt_len = strlen(lprof->lp_dt) + instlen + 2;
1208         OBD_ALLOC(dt, dt_len);
1209         if (!dt)
1210                 GOTO(out_profile, err = -ENOMEM);
1211         snprintf(dt, dt_len - 1, "%s-%016lx", lprof->lp_dt, cfg_instance);
1212
1213         md_len = strlen(lprof->lp_md) + instlen + 2;
1214         OBD_ALLOC(md, md_len);
1215         if (!md)
1216                 GOTO(out_free_dt, err = -ENOMEM);
1217         snprintf(md, md_len - 1, "%s-%016lx", lprof->lp_md, cfg_instance);
1218
1219         /* connections, registrations, sb setup */
1220         err = client_common_fill_super(sb, md, dt);
1221         if (err < 0)
1222                 GOTO(out_free_md, err);
1223
1224         sbi->ll_client_common_fill_super_succeeded = 1;
1225
1226 out_free_md:
1227         if (md)
1228                 OBD_FREE(md, md_len);
1229 out_free_dt:
1230         if (dt)
1231                 OBD_FREE(dt, dt_len);
1232 out_profile:
1233         if (lprof)
1234                 class_put_profile(lprof);
1235 out_debugfs:
1236         if (err < 0)
1237                 ll_debugfs_unregister_super(sb);
1238 out_free_cfg:
1239         if (cfg)
1240                 OBD_FREE_PTR(cfg);
1241
1242         if (err)
1243                 ll_put_super(sb);
1244         else if (sbi->ll_flags & LL_SBI_VERBOSE)
1245                 LCONSOLE_WARN("Mounted %s\n", profilenm);
1246         RETURN(err);
1247 } /* ll_fill_super */
1248
1249 void ll_put_super(struct super_block *sb)
1250 {
1251         struct config_llog_instance cfg, params_cfg;
1252         struct obd_device *obd;
1253         struct lustre_sb_info *lsi = s2lsi(sb);
1254         struct ll_sb_info *sbi = ll_s2sbi(sb);
1255         char *profilenm = get_profile_name(sb);
1256         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1257         long ccc_count;
1258         int next, force = 1, rc = 0;
1259         ENTRY;
1260
1261         if (IS_ERR(sbi))
1262                 GOTO(out_no_sbi, 0);
1263
1264         /* Should replace instance_id with something better for ASLR */
1265         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1266                profilenm, cfg_instance, sb);
1267
1268         cfg.cfg_instance = cfg_instance;
1269         lustre_end_log(sb, profilenm, &cfg);
1270
1271         params_cfg.cfg_instance = cfg_instance;
1272         lustre_end_log(sb, PARAMS_FILENAME, &params_cfg);
1273
1274         if (sbi->ll_md_exp) {
1275                 obd = class_exp2obd(sbi->ll_md_exp);
1276                 if (obd)
1277                         force = obd->obd_force;
1278         }
1279
1280         /* Wait for unstable pages to be committed to stable storage */
1281         if (force == 0) {
1282                 rc = l_wait_event_abortable(
1283                         sbi->ll_cache->ccc_unstable_waitq,
1284                         atomic_long_read(&sbi->ll_cache->ccc_unstable_nr) == 0);
1285         }
1286
1287         ccc_count = atomic_long_read(&sbi->ll_cache->ccc_unstable_nr);
1288         if (force == 0 && rc != -ERESTARTSYS)
1289                 LASSERTF(ccc_count == 0, "count: %li\n", ccc_count);
1290
1291         /* We need to set force before the lov_disconnect in
1292          * lustre_common_put_super, since l_d cleans up osc's as well.
1293          */
1294         if (force) {
1295                 next = 0;
1296                 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
1297                                                      &next)) != NULL) {
1298                         obd->obd_force = force;
1299                 }
1300         }
1301
1302         if (sbi->ll_client_common_fill_super_succeeded) {
1303                 /* Only if client_common_fill_super succeeded */
1304                 client_common_put_super(sb);
1305         }
1306
1307         next = 0;
1308         while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)))
1309                 class_manual_cleanup(obd);
1310
1311         if (sbi->ll_flags & LL_SBI_VERBOSE)
1312                 LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");
1313
1314         if (profilenm)
1315                 class_del_profile(profilenm);
1316
1317 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1318         if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
1319                 bdi_destroy(&lsi->lsi_bdi);
1320                 lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
1321         }
1322 #endif
1323
1324         ll_free_sbi(sb);
1325         lsi->lsi_llsbi = NULL;
1326 out_no_sbi:
1327         lustre_common_put_super(sb);
1328
1329         cl_env_cache_purge(~0);
1330
1331         module_put(THIS_MODULE);
1332
1333         EXIT;
1334 } /* client_put_super */
1335
1336 struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
1337 {
1338         struct inode *inode = NULL;
1339
1340         /* NOTE: we depend on atomic igrab() -bzzz */
1341         lock_res_and_lock(lock);
1342         if (lock->l_resource->lr_lvb_inode) {
1343                 struct ll_inode_info * lli;
1344                 lli = ll_i2info(lock->l_resource->lr_lvb_inode);
1345                 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1346                         inode = igrab(lock->l_resource->lr_lvb_inode);
1347                 } else {
1348                         inode = lock->l_resource->lr_lvb_inode;
1349                         LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ?  D_INFO :
1350                                          D_WARNING, lock, "lr_lvb_inode %p is "
1351                                          "bogus: magic %08x",
1352                                          lock->l_resource->lr_lvb_inode,
1353                                          lli->lli_inode_magic);
1354                         inode = NULL;
1355                 }
1356         }
1357         unlock_res_and_lock(lock);
1358         return inode;
1359 }
1360
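/* free both the current and the default striping (LMV) of a directory */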
1361 void ll_dir_clear_lsm_md(struct inode *inode)
1362 {
1363         struct ll_inode_info *lli = ll_i2info(inode);
1364
1365         LASSERT(S_ISDIR(inode->i_mode));
1366
1367         if (lli->lli_lsm_md) {
1368                 lmv_free_memmd(lli->lli_lsm_md);
1369                 lli->lli_lsm_md = NULL;
1370         }
1371
1372         if (lli->lli_default_lsm_md) {
1373                 lmv_free_memmd(lli->lli_default_lsm_md);
1374                 lli->lli_default_lsm_md = NULL;
1375         }
1376 }
1377
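/* get (or create) the inode for one slave stripe of a striped directory;
 * a new inode is initialized just enough to point back at its master
 * object via lli_pfid */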
1378 static struct inode *ll_iget_anon_dir(struct super_block *sb,
1379                                       const struct lu_fid *fid,
1380                                       struct lustre_md *md)
1381 {
1382         struct ll_sb_info       *sbi = ll_s2sbi(sb);
1383         struct mdt_body         *body = md->body;
1384         struct inode            *inode;
1385         ino_t                   ino;
1386         ENTRY;
1387
1388         ino = cl_fid_build_ino(fid, sbi->ll_flags & LL_SBI_32BIT_API);
1389         inode = iget_locked(sb, ino);
1390         if (inode == NULL) {
1391                 CERROR("%s: failed get simple inode "DFID": rc = -ENOENT\n",
1392                        sbi->ll_fsname, PFID(fid));
1393                 RETURN(ERR_PTR(-ENOENT));
1394         }
1395
1396         if (inode->i_state & I_NEW) {
1397                 struct ll_inode_info *lli = ll_i2info(inode);
1398                 struct lmv_stripe_md *lsm = md->lmv;
1399
1400                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
1401                                 (body->mbo_mode & S_IFMT);
1402                 LASSERTF(S_ISDIR(inode->i_mode), "Not slave inode "DFID"\n",
1403                          PFID(fid));
1404
1405                 inode->i_mtime.tv_sec = 0;
1406                 inode->i_atime.tv_sec = 0;
1407                 inode->i_ctime.tv_sec = 0;
1408                 inode->i_rdev = 0;
1409
1410 #ifdef HAVE_BACKING_DEV_INFO
1411                 /* initializing backing dev info. */
1412                 inode->i_mapping->backing_dev_info =
1413                                                 &s2lsi(inode->i_sb)->lsi_bdi;
1414 #endif
1415                 inode->i_op = &ll_dir_inode_operations;
1416                 inode->i_fop = &ll_dir_operations;
1417                 lli->lli_fid = *fid;
1418                 ll_lli_init(lli);
1419
1420                 LASSERT(lsm != NULL);
1421                 /* master object FID */
1422                 lli->lli_pfid = body->mbo_fid1;
1423                 CDEBUG(D_INODE, "lli %p slave "DFID" master "DFID"\n",
1424                        lli, PFID(fid), PFID(&lli->lli_pfid));
1425                 unlock_new_inode(inode);
1426         }
1427
1428         RETURN(inode);
1429 }
1430
1431 static int ll_init_lsm_md(struct inode *inode, struct lustre_md *md)
1432 {
1433         struct lu_fid *fid;
1434         struct lmv_stripe_md *lsm = md->lmv;
1435         struct ll_inode_info *lli = ll_i2info(inode);
1436         int i;
1437
1438         LASSERT(lsm != NULL);
1439
1440         CDEBUG(D_INODE, "%s: "DFID" set dir layout:\n",
1441                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1442         lsm_md_dump(D_INODE, lsm);
1443
1444         if (!lmv_dir_striped(lsm))
1445                 goto out;
1446
1447         /* XXX sigh, this lsm_root initialization should be in the
1448          * LMV layer, but it needs ll_iget right now, so we
1449          * keep it here for the time being. */
1450         for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
1451                 fid = &lsm->lsm_md_oinfo[i].lmo_fid;
1452                 LASSERT(lsm->lsm_md_oinfo[i].lmo_root == NULL);
1453
1454                 if (!fid_is_sane(fid))
1455                         continue;
1456
1457                 /* Unfortunately ll_iget will call ll_update_inode,
1458                  * where the initialization of a slave inode is slightly
1459                  * different, so it resets lsm_md to NULL to avoid
1460                  * initializing the lsm for the slave inode. */
1461                 lsm->lsm_md_oinfo[i].lmo_root =
1462                                 ll_iget_anon_dir(inode->i_sb, fid, md);
1463                 if (IS_ERR(lsm->lsm_md_oinfo[i].lmo_root)) {
1464                         int rc = PTR_ERR(lsm->lsm_md_oinfo[i].lmo_root);
1465
1466                         lsm->lsm_md_oinfo[i].lmo_root = NULL;
1467                         while (i-- > 0) {
1468                                 iput(lsm->lsm_md_oinfo[i].lmo_root);
1469                                 lsm->lsm_md_oinfo[i].lmo_root = NULL;
1470                         }
1471                         return rc;
1472                 }
1473         }
1474 out:
1475         lli->lli_lsm_md = lsm;
1476
1477         return 0;
1478 }
1479
1480 static void ll_update_default_lsm_md(struct inode *inode, struct lustre_md *md)
1481 {
1482         struct ll_inode_info *lli = ll_i2info(inode);
1483
1484         if (!md->default_lmv) {
1485                 /* clear default lsm */
1486                 if (lli->lli_default_lsm_md) {
1487                         down_write(&lli->lli_lsm_sem);
1488                         if (lli->lli_default_lsm_md) {
1489                                 lmv_free_memmd(lli->lli_default_lsm_md);
1490                                 lli->lli_default_lsm_md = NULL;
1491                         }
1492                         up_write(&lli->lli_lsm_sem);
1493                 }
1494         } else if (lli->lli_default_lsm_md) {
1495                 /* update default lsm if it changes */
1496                 down_read(&lli->lli_lsm_sem);
1497                 if (lli->lli_default_lsm_md &&
1498                     !lsm_md_eq(lli->lli_default_lsm_md, md->default_lmv)) {
1499                         up_read(&lli->lli_lsm_sem);
1500                         down_write(&lli->lli_lsm_sem);
1501                         if (lli->lli_default_lsm_md)
1502                                 lmv_free_memmd(lli->lli_default_lsm_md);
1503                         lli->lli_default_lsm_md = md->default_lmv;
1504                         lsm_md_dump(D_INODE, md->default_lmv);
1505                         md->default_lmv = NULL;
1506                         up_write(&lli->lli_lsm_sem);
1507                 } else {
1508                         up_read(&lli->lli_lsm_sem);
1509                 }
1510         } else {
1511                 /* init default lsm */
1512                 down_write(&lli->lli_lsm_sem);
1513                 lli->lli_default_lsm_md = md->default_lmv;
1514                 lsm_md_dump(D_INODE, md->default_lmv);
1515                 md->default_lmv = NULL;
1516                 up_write(&lli->lli_lsm_sem);
1517         }
1518 }
1519
1520 static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
1521 {
1522         struct ll_inode_info *lli = ll_i2info(inode);
1523         struct lmv_stripe_md *lsm = md->lmv;
1524         struct cl_attr  *attr;
1525         int rc = 0;
1526
1527         ENTRY;
1528
1529         LASSERT(S_ISDIR(inode->i_mode));
1530         CDEBUG(D_INODE, "update lsm %p of "DFID"\n", lli->lli_lsm_md,
1531                PFID(ll_inode2fid(inode)));
1532
1533         /* update default LMV */
1534         if (md->default_lmv)
1535                 ll_update_default_lsm_md(inode, md);
1536
1537         /*
1538          * No striping information in the request: lustre_md from the request
1539          * does not include the stripe EA, see ll_md_setattr().
1540          */
1541         if (!lsm)
1542                 RETURN(0);
1543
1544         /*
1545          * Normally the dir layout doesn't change, so only take the read lock
1546          * to check it, to avoid blocking other MD operations.
1547          */
1548         down_read(&lli->lli_lsm_sem);
1549
1550         /* a concurrent lookup already initialized the lsm, and it is unchanged */
1551         if (lli->lli_lsm_md && lsm_md_eq(lli->lli_lsm_md, lsm))
1552                 GOTO(unlock, rc = 0);
1553
1554         /* If the dir layout doesn't match, check whether the version has
1555          * increased, which means the layout has changed; this happens in dir
1556          * split/merge and lfsck.
1557          *
1558          * A foreign LMV should not change.
1559          */
1560         if (lli->lli_lsm_md && lmv_dir_striped(lli->lli_lsm_md) &&
1561             lsm->lsm_md_layout_version <=
1562             lli->lli_lsm_md->lsm_md_layout_version) {
1563                 CERROR("%s: "DFID" dir layout mismatch:\n",
1564                        ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1565                 lsm_md_dump(D_ERROR, lli->lli_lsm_md);
1566                 lsm_md_dump(D_ERROR, lsm);
1567                 GOTO(unlock, rc = -EINVAL);
1568         }
1569
1570         up_read(&lli->lli_lsm_sem);
1571         down_write(&lli->lli_lsm_sem);
1572         /* clear existing lsm */
1573         if (lli->lli_lsm_md) {
1574                 lmv_free_memmd(lli->lli_lsm_md);
1575                 lli->lli_lsm_md = NULL;
1576         }
1577
1578         rc = ll_init_lsm_md(inode, md);
1579         up_write(&lli->lli_lsm_sem);
1580
1581         if (rc)
1582                 RETURN(rc);
1583
1584         /* set md->lmv to NULL, so the following free lustre_md will not free
1585          * this lsm.
1586          */
1587         md->lmv = NULL;
1588
1589         /* md_merge_attr() may take a long time; since the lsm is already set,
1590          * switch to the read lock.
1591          */
1592         down_read(&lli->lli_lsm_sem);
1593
1594         if (!lmv_dir_striped(lli->lli_lsm_md))
1595                 GOTO(unlock, rc = 0);
1596
1597         OBD_ALLOC_PTR(attr);
1598         if (!attr)
1599                 GOTO(unlock, rc = -ENOMEM);
1600
1601         /* validate the lsm */
1602         rc = md_merge_attr(ll_i2mdexp(inode), lli->lli_lsm_md, attr,
1603                            ll_md_blocking_ast);
1604         if (!rc) {
1605                 if (md->body->mbo_valid & OBD_MD_FLNLINK)
1606                         md->body->mbo_nlink = attr->cat_nlink;
1607                 if (md->body->mbo_valid & OBD_MD_FLSIZE)
1608                         md->body->mbo_size = attr->cat_size;
1609                 if (md->body->mbo_valid & OBD_MD_FLATIME)
1610                         md->body->mbo_atime = attr->cat_atime;
1611                 if (md->body->mbo_valid & OBD_MD_FLCTIME)
1612                         md->body->mbo_ctime = attr->cat_ctime;
1613                 if (md->body->mbo_valid & OBD_MD_FLMTIME)
1614                         md->body->mbo_mtime = attr->cat_mtime;
1615         }
1616
1617         OBD_FREE_PTR(attr);
1618         GOTO(unlock, rc);
1619 unlock:
1620         up_read(&lli->lli_lsm_sem);
1621
1622         return rc;
1623 }
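/*
 * Illustrative sketch (not part of the build) of the lli_lsm_sem pattern used
 * in ll_update_lsm_md() above: the common "layout unchanged" case is checked
 * under the read lock, the write lock is taken only to swap in a new lsm, and
 * the potentially slow md_merge_attr() then runs under the read lock again:
 *
 *	down_read(&lli->lli_lsm_sem);		// fast path: lsm_md_eq() check
 *	up_read(&lli->lli_lsm_sem);
 *	down_write(&lli->lli_lsm_sem);		// free old lsm, ll_init_lsm_md()
 *	up_write(&lli->lli_lsm_sem);
 *	down_read(&lli->lli_lsm_sem);		// md_merge_attr() on striped dir
 *	up_read(&lli->lli_lsm_sem);
 */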
1624
1625 void ll_clear_inode(struct inode *inode)
1626 {
1627         struct ll_inode_info *lli = ll_i2info(inode);
1628         struct ll_sb_info *sbi = ll_i2sbi(inode);
1629
1630         ENTRY;
1631
1632         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
1633                PFID(ll_inode2fid(inode)), inode);
1634
1635         if (S_ISDIR(inode->i_mode)) {
1636                 /* these should have been cleared in ll_file_release */
1637                 LASSERT(lli->lli_opendir_key == NULL);
1638                 LASSERT(lli->lli_sai == NULL);
1639                 LASSERT(lli->lli_opendir_pid == 0);
1640         } else {
1641                 pcc_inode_free(inode);
1642         }
1643
1644         md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
1645
1646         LASSERT(!lli->lli_open_fd_write_count);
1647         LASSERT(!lli->lli_open_fd_read_count);
1648         LASSERT(!lli->lli_open_fd_exec_count);
1649
1650         if (lli->lli_mds_write_och)
1651                 ll_md_real_close(inode, FMODE_WRITE);
1652         if (lli->lli_mds_exec_och)
1653                 ll_md_real_close(inode, FMODE_EXEC);
1654         if (lli->lli_mds_read_och)
1655                 ll_md_real_close(inode, FMODE_READ);
1656
1657         if (S_ISLNK(inode->i_mode) && lli->lli_symlink_name) {
1658                 OBD_FREE(lli->lli_symlink_name,
1659                          strlen(lli->lli_symlink_name) + 1);
1660                 lli->lli_symlink_name = NULL;
1661         }
1662
1663         ll_xattr_cache_destroy(inode);
1664
1665 #ifdef CONFIG_LUSTRE_FS_POSIX_ACL
1666         forget_all_cached_acls(inode);
1667         if (lli->lli_posix_acl) {
1668                 posix_acl_release(lli->lli_posix_acl);
1669                 lli->lli_posix_acl = NULL;
1670         }
1671 #endif
1672         lli->lli_inode_magic = LLI_INODE_DEAD;
1673
1674         if (S_ISDIR(inode->i_mode))
1675                 ll_dir_clear_lsm_md(inode);
1676         else if (S_ISREG(inode->i_mode) && !is_bad_inode(inode))
1677                 LASSERT(list_empty(&lli->lli_agl_list));
1678
1679         /*
1680          * XXX This has to be done before lsm is freed below, because
1681          * cl_object still uses inode lsm.
1682          */
1683         cl_inode_fini(inode);
1684
1685         llcrypt_put_encryption_info(inode);
1686
1687         EXIT;
1688 }
1689
1690 static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data)
1691 {
1692         struct lustre_md md;
1693         struct inode *inode = dentry->d_inode;
1694         struct ll_sb_info *sbi = ll_i2sbi(inode);
1695         struct ptlrpc_request *request = NULL;
1696         int rc, ia_valid;
1697         ENTRY;
1698
1699         op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
1700                                      LUSTRE_OPC_ANY, NULL);
1701         if (IS_ERR(op_data))
1702                 RETURN(PTR_ERR(op_data));
1703
1704         rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &request);
1705         if (rc) {
1706                 ptlrpc_req_finished(request);
1707                 if (rc == -ENOENT) {
1708                         clear_nlink(inode);
1709                         /* Unlinked special device node? Or just a race?
1710                          * Pretend we did everything. */
1711                         if (!S_ISREG(inode->i_mode) &&
1712                             !S_ISDIR(inode->i_mode)) {
1713                                 ia_valid = op_data->op_attr.ia_valid;
1714                                 op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
1715                                 rc = simple_setattr(dentry, &op_data->op_attr);
1716                                 op_data->op_attr.ia_valid = ia_valid;
1717                         }
1718                 } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
1719                         CERROR("md_setattr failed: rc = %d\n", rc);
1720                 }
1721                 RETURN(rc);
1722         }
1723
1724         rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
1725                               sbi->ll_md_exp, &md);
1726         if (rc) {
1727                 ptlrpc_req_finished(request);
1728                 RETURN(rc);
1729         }
1730
1731         ia_valid = op_data->op_attr.ia_valid;
1732         /* The inode size will be set in ll_setattr_ost; it can't be done now
1733          * since the dirty cache is not cleared yet. */
1734         op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
1735         if (S_ISREG(inode->i_mode))
1736                 inode_lock(inode);
1737         rc = simple_setattr(dentry, &op_data->op_attr);
1738         if (S_ISREG(inode->i_mode))
1739                 inode_unlock(inode);
1740         op_data->op_attr.ia_valid = ia_valid;
1741
1742         rc = ll_update_inode(inode, &md);
1743         ptlrpc_req_finished(request);
1744
1745         RETURN(rc);
1746 }
1747
1748 /**
1749  * Zero a portion of a page that is part of @inode.
1750  * This implies, if necessary:
1751  * - taking cl_lock on range corresponding to concerned page
1752  * - grabbing vm page
1753  * - associating cl_page
1754  * - proceeding to clio read
1755  * - zeroing range in page
1756  * - proceeding to cl_page flush
1757  * - releasing cl_lock
1758  *
1759  * \param[in] inode     inode
1760  * \param[in] index     page index
1761  * \param[in] offset    offset in page to start zero from
1762  * \param[in] len       len to zero
1763  *
1764  * \retval 0            on success
1765  * \retval negative     errno on failure
1766  */
1767 int ll_io_zero_page(struct inode *inode, pgoff_t index, pgoff_t offset,
1768                     unsigned len)
1769 {
1770         struct ll_inode_info *lli = ll_i2info(inode);
1771         struct cl_object *clob = lli->lli_clob;
1772         __u16 refcheck;
1773         struct lu_env *env = NULL;
1774         struct cl_io *io = NULL;
1775         struct cl_page *clpage = NULL;
1776         struct page *vmpage = NULL;
1777         loff_t from = index << PAGE_SHIFT; /* byte offset, avoid 32-bit overflow */
1778         struct cl_lock *lock = NULL;
1779         struct cl_lock_descr *descr = NULL;
1780         struct cl_2queue *queue = NULL;
1781         struct cl_sync_io *anchor = NULL;
1782         bool holdinglock = false;
1783         bool lockedbymyself = true;
1784         int rc;
1785
1786         ENTRY;
1787
1788         env = cl_env_get(&refcheck);
1789         if (IS_ERR(env))
1790                 RETURN(PTR_ERR(env));
1791
1792         io = vvp_env_thread_io(env);
1793         io->ci_obj = clob;
1794         rc = cl_io_rw_init(env, io, CIT_WRITE, from, PAGE_SIZE);
1795         if (rc)
1796                 GOTO(putenv, rc);
1797
1798         lock = vvp_env_lock(env);
1799         descr = &lock->cll_descr;
1800         descr->cld_obj   = io->ci_obj;
1801         descr->cld_start = cl_index(io->ci_obj, from);
1802         descr->cld_end   = cl_index(io->ci_obj, from + PAGE_SIZE - 1);
1803         descr->cld_mode  = CLM_WRITE;
1804         descr->cld_enq_flags = CEF_MUST | CEF_NONBLOCK;
1805
1806         /* request lock for page */
1807         rc = cl_lock_request(env, io, lock);
1808         /* -ECANCELED indicates a matching lock with a different extent
1809          * was already present, and -EEXIST indicates a matching lock
1810          * on exactly the same extent was already present.
1811          * In both cases it means we are covered.
1812          */
1813         if (rc == -ECANCELED || rc == -EEXIST)
1814                 rc = 0;
1815         else if (rc < 0)
1816                 GOTO(iofini, rc);
1817         else
1818                 holdinglock = true;
1819
1820         /* grab page */
1821         vmpage = grab_cache_page_nowait(inode->i_mapping, index);
1822         if (vmpage == NULL)
1823                 GOTO(rellock, rc = -EOPNOTSUPP);
1824
1825         if (!PageDirty(vmpage)) {
1826                 /* associate cl_page */
1827                 clpage = cl_page_find(env, clob, vmpage->index,
1828                                       vmpage, CPT_CACHEABLE);
1829                 if (IS_ERR(clpage))
1830                         GOTO(pagefini, rc = PTR_ERR(clpage));
1831
1832                 cl_page_assume(env, io, clpage);
1833         }
1834
1835         if (!PageUptodate(vmpage) && !PageDirty(vmpage) &&
1836             !PageWriteback(vmpage)) {
1837                 /* read page */
1838                 /* set PagePrivate2 to detect special case of empty page
1839                  * in osc_brw_fini_request()
1840                  */
1841                 SetPagePrivate2(vmpage);
1842                 rc = ll_io_read_page(env, io, clpage, NULL);
1843                 if (!PagePrivate2(vmpage))
1844                         /* PagePrivate2 was cleared in osc_brw_fini_request()
1845                          * meaning we read an empty page. In this case, in order
1846                          * to avoid allocating unnecessary block in truncated
1847                          * file, we must not zero and write as below. Subsequent
1848                          * server-side truncate will handle things correctly.
1849                          */
1850                         GOTO(clpfini, rc = 0);
1851                 ClearPagePrivate2(vmpage);
1852                 if (rc)
1853                         GOTO(clpfini, rc);
1854                 lockedbymyself = trylock_page(vmpage);
1855                 cl_page_assume(env, io, clpage);
1856         }
1857
1858         /* zero range in page */
1859         zero_user(vmpage, offset, len);
1860
1861         if (holdinglock && clpage) {
1862                 /* explicitly write newly modified page */
1863                 queue = &io->ci_queue;
1864                 cl_2queue_init(queue);
1865                 anchor = &vvp_env_info(env)->vti_anchor;
1866                 cl_sync_io_init(anchor, 1);
1867                 clpage->cp_sync_io = anchor;
1868                 cl_2queue_add(queue, clpage);
1869                 rc = cl_io_submit_rw(env, io, CRT_WRITE, queue);
1870                 if (rc)
1871                         GOTO(queuefini1, rc);
1872                 rc = cl_sync_io_wait(env, anchor, 0);
1873                 if (rc)
1874                         GOTO(queuefini2, rc);
1875                 cl_page_assume(env, io, clpage);
1876
1877 queuefini2:
1878                 cl_2queue_discard(env, io, queue);
1879 queuefini1:
1880                 cl_2queue_disown(env, io, queue);
1881                 cl_2queue_fini(env, queue);
1882         }
1883
1884 clpfini:
1885         if (clpage)
1886                 cl_page_put(env, clpage);
1887 pagefini:
1888         if (lockedbymyself) {
1889                 unlock_page(vmpage);
1890                 put_page(vmpage);
1891         }
1892 rellock:
1893         if (holdinglock)
1894                 cl_lock_release(env, lock);
1895 iofini:
1896         cl_io_fini(env, io);
1897 putenv:
1898         if (env)
1899                 cl_env_put(env, &refcheck);
1900
1901         RETURN(rc);
1902 }
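/*
 * Illustrative usage sketch (not part of the build): for encrypted files the
 * truncate path in ll_setattr_raw() below uses ll_io_zero_page() to zero the
 * tail of the last page, along these lines ("attr" carries the new size):
 *
 *	pgoff_t off = attr->ia_size & (PAGE_SIZE - 1);
 *
 *	if (off)
 *		rc = ll_io_zero_page(inode, attr->ia_size >> PAGE_SHIFT,
 *				     off, PAGE_SIZE - off);
 */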
1903
1904 /* If this inode has objects allocated to it (lsm != NULL), then the OST
1905  * object(s) determine the file size and mtime.  Otherwise, the MDS will
1906  * keep these values until such a time that objects are allocated for it.
1907  * We do the MDS operations first, as it is checking permissions for us.
1908  * We don't do the MDS RPC if there is nothing that we want to store there,
1909  * otherwise there is no harm in updating mtime/atime on the MDS if we are
1910  * going to do an RPC anyway.
1911  *
1912  * If we are doing a truncate, we will send the mtime and ctime updates
1913  * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
1914  * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
1915  * at the same time.
1916  *
1917  * In the case of HSM import, we only set attrs on the MDS.
1918  */
1919 int ll_setattr_raw(struct dentry *dentry, struct iattr *attr,
1920                    enum op_xvalid xvalid, bool hsm_import)
1921 {
1922         struct inode *inode = dentry->d_inode;
1923         struct ll_inode_info *lli = ll_i2info(inode);
1924         struct md_op_data *op_data = NULL;
1925         ktime_t kstart = ktime_get();
1926         int rc = 0;
1927
1928         ENTRY;
1929
1930         CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, "
1931                "valid %x, hsm_import %d\n",
1932                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid),
1933                inode, i_size_read(inode), attr->ia_size, attr->ia_valid,
1934                hsm_import);
1935
1936         if (attr->ia_valid & ATTR_SIZE) {
1937                 /* Check new size against VFS/VM file size limit and rlimit */
1938                 rc = inode_newsize_ok(inode, attr->ia_size);
1939                 if (rc)
1940                         RETURN(rc);
1941
1942                 /* The maximum Lustre file size is variable, based on the
1943                  * OST maximum object size and number of stripes.  This
1944                  * needs another check in addition to the VFS check above. */
1945                 if (attr->ia_size > ll_file_maxbytes(inode)) {
1946                         CDEBUG(D_INODE, "file "DFID" too large %llu > %llu\n",
1947                                PFID(&lli->lli_fid), attr->ia_size,
1948                                ll_file_maxbytes(inode));
1949                         RETURN(-EFBIG);
1950                 }
1951
1952                 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
1953         }
1954
1955         /* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
1956         if (attr->ia_valid & TIMES_SET_FLAGS) {
1957                 if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
1958                     !cfs_capable(CFS_CAP_FOWNER))
1959                         RETURN(-EPERM);
1960         }
1961
1962         /* We mark all of the fields "set" so MDS/OST does not re-set them */
1963         if (!(xvalid & OP_XVALID_CTIME_SET) &&
1964              (attr->ia_valid & ATTR_CTIME)) {
1965                 attr->ia_ctime = current_time(inode);
1966                 xvalid |= OP_XVALID_CTIME_SET;
1967         }
1968         if (!(attr->ia_valid & ATTR_ATIME_SET) &&
1969             (attr->ia_valid & ATTR_ATIME)) {
1970                 attr->ia_atime = current_time(inode);
1971                 attr->ia_valid |= ATTR_ATIME_SET;
1972         }
1973         if (!(attr->ia_valid & ATTR_MTIME_SET) &&
1974             (attr->ia_valid & ATTR_MTIME)) {
1975                 attr->ia_mtime = current_time(inode);
1976                 attr->ia_valid |= ATTR_MTIME_SET;
1977         }
1978
1979         if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
1980                 CDEBUG(D_INODE, "setting mtime %lld, ctime %lld, now = %lld\n",
1981                        (s64)attr->ia_mtime.tv_sec, (s64)attr->ia_ctime.tv_sec,
1982                        ktime_get_real_seconds());
1983
1984         if (S_ISREG(inode->i_mode))
1985                 inode_unlock(inode);
1986
1987         /* We always do an MDS RPC, even if we're only changing the size;
1988          * only the MDS knows whether truncate() should fail with -ETXTBUSY */
1989
1990         OBD_ALLOC_PTR(op_data);
1991         if (op_data == NULL)
1992                 GOTO(out, rc = -ENOMEM);
1993
1994         if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
1995                 /* If we are changing file size, file content is
1996                  * modified, flag it.
1997                  */
1998                 xvalid |= OP_XVALID_OWNEROVERRIDE;
1999                 op_data->op_bias |= MDS_DATA_MODIFIED;
2000                 ll_file_clear_flag(lli, LLIF_DATA_MODIFIED);
2001         }
2002
2003         if (attr->ia_valid & ATTR_FILE) {
2004                 struct ll_file_data *fd = attr->ia_file->private_data;
2005
2006                 if (fd->fd_lease_och)
2007                         op_data->op_bias |= MDS_TRUNC_KEEP_LEASE;
2008         }
2009
2010         op_data->op_attr = *attr;
2011         op_data->op_xvalid = xvalid;
2012
2013         rc = ll_md_setattr(dentry, op_data);
2014         if (rc)
2015                 GOTO(out, rc);
2016
2017         if (!S_ISREG(inode->i_mode) || hsm_import)
2018                 GOTO(out, rc = 0);
2019
2020         if (attr->ia_valid & (ATTR_SIZE | ATTR_ATIME | ATTR_ATIME_SET |
2021                               ATTR_MTIME | ATTR_MTIME_SET | ATTR_CTIME) ||
2022             xvalid & OP_XVALID_CTIME_SET) {
2023                 bool cached = false;
2024
2025                 rc = pcc_inode_setattr(inode, attr, &cached);
2026                 if (cached) {
2027                         if (rc) {
2028                                 CERROR("%s: PCC inode "DFID" setattr failed: "
2029                                        "rc = %d\n",
2030                                        ll_i2sbi(inode)->ll_fsname,
2031                                        PFID(&lli->lli_fid), rc);
2032                                 GOTO(out, rc);
2033                         }
2034                 } else {
2035                         unsigned int flags = 0;
2036
2037                         /* For truncate and utimes that send attributes to OSTs,
2038                          * setting mtime/atime to the past is performed
2039                          * under a PW [0:EOF] extent lock (new_size:EOF for
2040                          * truncate). It may seem excessive to send mtime/atime
2041                          * updates to OSTs when not setting times into the past,
2042                          * but it is necessary due to possible time
2043                          * de-synchronization between the MDT inode and OST objects.
2044                          */
2045                         if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode) &&
2046                             attr->ia_valid & ATTR_SIZE) {
2047                                 xvalid |= OP_XVALID_FLAGS;
2048                                 flags = LUSTRE_ENCRYPT_FL;
2049                                 if (attr->ia_size & ~PAGE_MASK) {
2050                                         pgoff_t offset =
2051                                                 attr->ia_size & (PAGE_SIZE - 1);
2052
2053                                         rc = ll_io_zero_page(inode,
2054                                                     attr->ia_size >> PAGE_SHIFT,
2055                                                     offset, PAGE_SIZE - offset);
2056                                         if (rc)
2057                                                 GOTO(out, rc);
2058                                 }
2059                         }
2060                         rc = cl_setattr_ost(lli->lli_clob, attr, xvalid, flags);
2061                 }
2062         }
2063
2064         /* If the file was restored, it needs the dirty flag set.
2065          *
2066          * We've already sent MDS_DATA_MODIFIED flag in
2067          * ll_md_setattr() for truncate. However, the MDT refuses to
2068          * set the HS_DIRTY flag on released files, so we have to set
2069          * it again if the file has been restored. Please check how
2070          * LLIF_DATA_MODIFIED is set in vvp_io_setattr_fini().
2071          *
2072          * Please notice that if the file is not released, the previous
2073          * MDS_DATA_MODIFIED has taken effect and usually
2074          * LLIF_DATA_MODIFIED is not set (see vvp_io_setattr_fini()).
2075          * This way we can save an RPC for common open + trunc
2076          * operation. */
2077         if (ll_file_test_and_clear_flag(lli, LLIF_DATA_MODIFIED)) {
2078                 struct hsm_state_set hss = {
2079                         .hss_valid = HSS_SETMASK,
2080                         .hss_setmask = HS_DIRTY,
2081                 };
2082                 int rc2;
2083
2084                 rc2 = ll_hsm_state_set(inode, &hss);
2085                 /* Truncate and write can happen at the same time, so the
2086                  * file can be marked modified even though it was not
2087                  * restored from the released state; ll_hsm_state_set() is
2088                  * then not applicable for the file, and rc2 < 0 is normal
2089                  * in this case. */
2090                 if (rc2 < 0)
2091                         CDEBUG(D_INFO, DFID "HSM set dirty failed: rc2 = %d\n",
2092                                PFID(ll_inode2fid(inode)), rc2);
2093         }
2094
2095         EXIT;
2096 out:
2097         if (op_data != NULL)
2098                 ll_finish_md_op_data(op_data);
2099
2100         if (S_ISREG(inode->i_mode)) {
2101                 inode_lock(inode);
2102                 if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
2103                         inode_dio_wait(inode);
2104                 /* Once we've got the i_mutex, it's safe to set the S_NOSEC
2105                  * flag.  ll_update_inode (called from ll_md_setattr) clears
2106                  * inode flags, so there is a gap where S_NOSEC is not set.
2107                  * This can cause a writer to take the i_mutex unnecessarily,
2108                  * but this is safe to do and should be rare. */
2109                 inode_has_no_xattr(inode);
2110         }
2111
2112         if (!rc)
2113                 ll_stats_ops_tally(ll_i2sbi(inode), attr->ia_valid & ATTR_SIZE ?
2114                                         LPROC_LL_TRUNC : LPROC_LL_SETATTR,
2115                                    ktime_us_delta(ktime_get(), kstart));
2116
2117         return rc;
2118 }
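/*
 * Call-flow sketch (illustrative only) for a plain truncate of a regular,
 * non-PCC, non-encrypted file, assuming the VFS enters through ll_setattr()
 * below with ATTR_SIZE set:
 *
 *	ll_setattr(dentry, attr)
 *	  -> ll_setattr_raw(dentry, attr, 0, false)
 *	       -> ll_md_setattr(dentry, op_data)		// MDS RPC first
 *	       -> cl_setattr_ost(lli->lli_clob, attr, ...)	// punch RPC to OSTs
 */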
2119
2120 int ll_setattr(struct dentry *de, struct iattr *attr)
2121 {
2122         int mode = de->d_inode->i_mode;
2123         enum op_xvalid xvalid = 0;
2124         int rc;
2125
2126         rc = llcrypt_prepare_setattr(de, attr);
2127         if (rc)
2128                 return rc;
2129
2130         if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
2131                               (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
2132                 xvalid |= OP_XVALID_OWNEROVERRIDE;
2133
2134         if (((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
2135                                (ATTR_SIZE|ATTR_MODE)) &&
2136             (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
2137              (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2138               !(attr->ia_mode & S_ISGID))))
2139                 attr->ia_valid |= ATTR_FORCE;
2140
2141         if ((attr->ia_valid & ATTR_MODE) &&
2142             (mode & S_ISUID) &&
2143             !(attr->ia_mode & S_ISUID) &&
2144             !(attr->ia_valid & ATTR_KILL_SUID))
2145                 attr->ia_valid |= ATTR_KILL_SUID;
2146
2147         if ((attr->ia_valid & ATTR_MODE) &&
2148             ((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2149             !(attr->ia_mode & S_ISGID) &&
2150             !(attr->ia_valid & ATTR_KILL_SGID))
2151                 attr->ia_valid |= ATTR_KILL_SGID;
2152
2153         return ll_setattr_raw(de, attr, xvalid, false);
2154 }
2155
2156 int ll_statfs_internal(struct ll_sb_info *sbi, struct obd_statfs *osfs,
2157                        u32 flags)
2158 {
2159         struct obd_statfs obd_osfs = { 0 };
2160         time64_t max_age;
2161         int rc;
2162
2163         ENTRY;
2164         max_age = ktime_get_seconds() - sbi->ll_statfs_max_age;
2165
2166         if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
2167                 flags |= OBD_STATFS_NODELAY;
2168
2169         rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
2170         if (rc)
2171                 RETURN(rc);
2172
2173         osfs->os_type = LL_SUPER_MAGIC;
2174
2175         CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n",
2176               osfs->os_bavail, osfs->os_blocks, osfs->os_ffree, osfs->os_files);
2177
2178         if (osfs->os_state & OS_STATFS_SUM)
2179                 GOTO(out, rc);
2180
2181         rc = obd_statfs(NULL, sbi->ll_dt_exp, &obd_osfs, max_age, flags);
2182         if (rc) /* Possibly a filesystem with no OSTs.  Report MDT totals. */
2183                 GOTO(out, rc = 0);
2184
2185         CDEBUG(D_SUPER, "OSC blocks %llu/%llu objects %llu/%llu\n",
2186                obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
2187                obd_osfs.os_files);
2188
2189         osfs->os_bsize = obd_osfs.os_bsize;
2190         osfs->os_blocks = obd_osfs.os_blocks;
2191         osfs->os_bfree = obd_osfs.os_bfree;
2192         osfs->os_bavail = obd_osfs.os_bavail;
2193
2194         /* If we have _some_ OSTs, but don't have as many free objects on the
2195          * OSTs as inodes on the MDTs, reduce the reported number of inodes
2196          * to compensate, so that the "inodes in use" number is correct.
2197          * This should be kept in sync with lod_statfs() behaviour.
2198          */
2199         if (obd_osfs.os_files && obd_osfs.os_ffree < osfs->os_ffree) {
2200                 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
2201                                  obd_osfs.os_ffree;
2202                 osfs->os_ffree = obd_osfs.os_ffree;
2203         }
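        /* Worked example with hypothetical numbers: MDT os_files = 1000 and
         * os_ffree = 900 (100 inodes in use), OST os_ffree = 500.  The
         * adjustment above reports os_files = (1000 - 900) + 500 = 600 and
         * os_ffree = 500, so "inodes in use" is still shown as 100.
         */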
2204
2205 out:
2206         RETURN(rc);
2207 }
2208
2209 int ll_statfs(struct dentry *de, struct kstatfs *sfs)
2210 {
2211         struct super_block *sb = de->d_sb;
2212         struct obd_statfs osfs;
2213         __u64 fsid = huge_encode_dev(sb->s_dev);
2214         ktime_t kstart = ktime_get();
2215         int rc;
2216
2217         CDEBUG(D_VFSTRACE, "VFS Op:sb=%s (%p)\n", sb->s_id, sb);
2218
2219         /* Some amount of caching on the client is allowed */
2220         rc = ll_statfs_internal(ll_s2sbi(sb), &osfs, OBD_STATFS_SUM);
2221         if (rc)
2222                 return rc;
2223
2224         statfs_unpack(sfs, &osfs);
2225
2226         /* We need to downshift for all 32-bit kernels, because we can't
2227          * tell if the kernel is being called via sys_statfs64() or not.
2228          * Stop before overflowing f_bsize - in which case it is better
2229          * to just risk EOVERFLOW if caller is using old sys_statfs(). */
2230         if (sizeof(long) < 8) {
2231                 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
2232                         sfs->f_bsize <<= 1;
2233
2234                         osfs.os_blocks >>= 1;
2235                         osfs.os_bfree >>= 1;
2236                         osfs.os_bavail >>= 1;
2237                 }
2238         }
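        /* Worked example with hypothetical numbers for the loop above on a
         * 32-bit kernel (~0UL == 2^32 - 1): with f_bsize = 4096 and
         * os_blocks = 2^33, two iterations give f_bsize = 16384 and
         * os_blocks = 2^31, which now fits in an unsigned long while keeping
         * f_bsize well below the 0x40000000 cap.
         */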
2239
2240         sfs->f_blocks = osfs.os_blocks;
2241         sfs->f_bfree = osfs.os_bfree;
2242         sfs->f_bavail = osfs.os_bavail;
2243         sfs->f_fsid.val[0] = (__u32)fsid;
2244         sfs->f_fsid.val[1] = (__u32)(fsid >> 32);
2245
2246         ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STATFS,
2247                            ktime_us_delta(ktime_get(), kstart));
2248
2249         return 0;
2250 }
2251
2252 void ll_inode_size_lock(struct inode *inode)
2253 {
2254         struct ll_inode_info *lli;
2255
2256         LASSERT(!S_ISDIR(inode->i_mode));
2257
2258         lli = ll_i2info(inode);
2259         mutex_lock(&lli->lli_size_mutex);
2260 }
2261
2262 void ll_inode_size_unlock(struct inode *inode)
2263 {
2264         struct ll_inode_info *lli;
2265
2266         lli = ll_i2info(inode);
2267         mutex_unlock(&lli->lli_size_mutex);
2268 }
2269
2270 void ll_update_inode_flags(struct inode *inode, int ext_flags)
2271 {
2272         /* do not clear encryption flag */
2273         ext_flags |= ll_inode_to_ext_flags(inode->i_flags) & LUSTRE_ENCRYPT_FL;
2274         inode->i_flags = ll_ext_to_inode_flags(ext_flags);
2275         if (ext_flags & LUSTRE_PROJINHERIT_FL)
2276                 ll_file_set_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT);
2277         else
2278                 ll_file_clear_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT);
2279 }
2280
2281 int ll_update_inode(struct inode *inode, struct lustre_md *md)
2282 {
2283         struct ll_inode_info *lli = ll_i2info(inode);
2284         struct mdt_body *body = md->body;
2285         struct ll_sb_info *sbi = ll_i2sbi(inode);
2286         int rc = 0;
2287
2288         if (body->mbo_valid & OBD_MD_FLEASIZE) {
2289                 rc = cl_file_inode_init(inode, md);
2290                 if (rc)
2291                         return rc;
2292         }
2293
2294         if (S_ISDIR(inode->i_mode)) {
2295                 rc = ll_update_lsm_md(inode, md);
2296                 if (rc != 0)
2297                         return rc;
2298         }
2299
2300 #ifdef CONFIG_LUSTRE_FS_POSIX_ACL
2301         if (body->mbo_valid & OBD_MD_FLACL) {
2302                 spin_lock(&lli->lli_lock);
2303                 if (lli->lli_posix_acl)
2304                         posix_acl_release(lli->lli_posix_acl);
2305                 lli->lli_posix_acl = md->posix_acl;
2306                 spin_unlock(&lli->lli_lock);
2307         }
2308 #endif
2309         inode->i_ino = cl_fid_build_ino(&body->mbo_fid1,
2310                                         sbi->ll_flags & LL_SBI_32BIT_API);
2311         inode->i_generation = cl_fid_build_gen(&body->mbo_fid1);
2312
2313         if (body->mbo_valid & OBD_MD_FLATIME) {
2314                 if (body->mbo_atime > inode->i_atime.tv_sec)
2315                         inode->i_atime.tv_sec = body->mbo_atime;
2316                 lli->lli_atime = body->mbo_atime;
2317         }
2318
2319         if (body->mbo_valid & OBD_MD_FLMTIME) {
2320                 if (body->mbo_mtime > inode->i_mtime.tv_sec) {
2321                         CDEBUG(D_INODE,
2322                                "setting ino %lu mtime from %lld to %llu\n",
2323                                inode->i_ino, (s64)inode->i_mtime.tv_sec,
2324                                body->mbo_mtime);
2325                         inode->i_mtime.tv_sec = body->mbo_mtime;
2326                 }
2327                 lli->lli_mtime = body->mbo_mtime;
2328         }
2329
2330         if (body->mbo_valid & OBD_MD_FLCTIME) {
2331                 if (body->mbo_ctime > inode->i_ctime.tv_sec)
2332                         inode->i_ctime.tv_sec = body->mbo_ctime;
2333                 lli->lli_ctime = body->mbo_ctime;
2334         }
2335
2336         if (body->mbo_valid & OBD_MD_FLBTIME)
2337                 lli->lli_btime = body->mbo_btime;
2338
2339         /* Clear i_flags to remove S_NOSEC before permissions are updated */
2340         if (body->mbo_valid & OBD_MD_FLFLAGS)
2341                 ll_update_inode_flags(inode, body->mbo_flags);
2342         if (body->mbo_valid & OBD_MD_FLMODE)
2343                 inode->i_mode = (inode->i_mode & S_IFMT) |
2344                                 (body->mbo_mode & ~S_IFMT);
2345
2346         if (body->mbo_valid & OBD_MD_FLTYPE)
2347                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
2348                                 (body->mbo_mode & S_IFMT);
2349
2350         LASSERT(inode->i_mode != 0);
2351         if (body->mbo_valid & OBD_MD_FLUID)
2352                 inode->i_uid = make_kuid(&init_user_ns, body->mbo_uid);
2353         if (body->mbo_valid & OBD_MD_FLGID)
2354                 inode->i_gid = make_kgid(&init_user_ns, body->mbo_gid);
2355         if (body->mbo_valid & OBD_MD_FLPROJID)
2356                 lli->lli_projid = body->mbo_projid;
2357         if (body->mbo_valid & OBD_MD_FLNLINK)
2358                 set_nlink(inode, body->mbo_nlink);
2359         if (body->mbo_valid & OBD_MD_FLRDEV)
2360                 inode->i_rdev = old_decode_dev(body->mbo_rdev);
2361
2362         if (body->mbo_valid & OBD_MD_FLID) {
2363                 /* FID shouldn't be changed! */
2364                 if (fid_is_sane(&lli->lli_fid)) {
2365                         LASSERTF(lu_fid_eq(&lli->lli_fid, &body->mbo_fid1),
2366                                  "Trying to change FID "DFID
2367                                  " to the "DFID", inode "DFID"(%p)\n",
2368                                  PFID(&lli->lli_fid), PFID(&body->mbo_fid1),
2369                                  PFID(ll_inode2fid(inode)), inode);
2370                 } else {
2371                         lli->lli_fid = body->mbo_fid1;
2372                 }
2373         }
2374
2375         LASSERT(fid_seq(&lli->lli_fid) != 0);
2376
2377         lli->lli_attr_valid = body->mbo_valid;
2378         if (body->mbo_valid & OBD_MD_FLSIZE) {
2379                 i_size_write(inode, body->mbo_size);
2380
2381                 CDEBUG(D_VFSTRACE, "inode="DFID", updating i_size %llu\n",
2382                        PFID(ll_inode2fid(inode)),
2383                        (unsigned long long)body->mbo_size);
2384
2385                 if (body->mbo_valid & OBD_MD_FLBLOCKS)
2386                         inode->i_blocks = body->mbo_blocks;
2387         } else {
2388                 if (body->mbo_valid & OBD_MD_FLLAZYSIZE)
2389                         lli->lli_lazysize = body->mbo_size;
2390                 if (body->mbo_valid & OBD_MD_FLLAZYBLOCKS)
2391                         lli->lli_lazyblocks = body->mbo_blocks;
2392         }
2393
2394         if (body->mbo_valid & OBD_MD_TSTATE) {
2395                 /* Set LLIF_FILE_RESTORING if a restore is ongoing and
2396                  * clear it when done, to ensure we start glimpsing
2397                  * updated attrs again.
2398                  */
2399                 if (body->mbo_t_state & MS_RESTORE)
2400                         ll_file_set_flag(lli, LLIF_FILE_RESTORING);
2401                 else
2402                         ll_file_clear_flag(lli, LLIF_FILE_RESTORING);
2403         }
2404
2405         return 0;
2406 }
2407
2408 int ll_read_inode2(struct inode *inode, void *opaque)
2409 {
2410         struct lustre_md *md = opaque;
2411         struct ll_inode_info *lli = ll_i2info(inode);
2412         int     rc;
2413         ENTRY;
2414
2415         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
2416                PFID(&lli->lli_fid), inode);
2417
2418         /* Core attributes from the MDS first.  This is a new inode, and
2419          * the VFS doesn't zero times in the core inode so we have to do
2420          * it ourselves.  They will be overwritten by either MDS or OST
2421          * attributes - we just need to make sure they aren't newer.
2422          */
2423         inode->i_mtime.tv_sec = 0;
2424         inode->i_atime.tv_sec = 0;
2425         inode->i_ctime.tv_sec = 0;
2426         inode->i_rdev = 0;
2427         rc = ll_update_inode(inode, md);
2428         if (rc != 0)
2429                 RETURN(rc);
2430
2431         /* OIDEBUG(inode); */
2432
2433 #ifdef HAVE_BACKING_DEV_INFO
2434         /* initializing backing dev info. */
2435         inode->i_mapping->backing_dev_info = &s2lsi(inode->i_sb)->lsi_bdi;
2436 #endif
2437         if (S_ISREG(inode->i_mode)) {
2438                 struct ll_sb_info *sbi = ll_i2sbi(inode);
2439                 inode->i_op = &ll_file_inode_operations;
2440                 inode->i_fop = sbi->ll_fop;
2441                 inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
2442                 EXIT;
2443         } else if (S_ISDIR(inode->i_mode)) {
2444                 inode->i_op = &ll_dir_inode_operations;
2445                 inode->i_fop = &ll_dir_operations;
2446                 EXIT;
2447         } else if (S_ISLNK(inode->i_mode)) {
2448                 inode->i_op = &ll_fast_symlink_inode_operations;
2449                 EXIT;
2450         } else {
2451                 inode->i_op = &ll_special_inode_operations;
2452
2453                 init_special_inode(inode, inode->i_mode,
2454                                    inode->i_rdev);
2455
2456                 EXIT;
2457         }
2458
2459         return 0;
2460 }
2461
2462 void ll_delete_inode(struct inode *inode)
2463 {
2464         struct ll_inode_info *lli = ll_i2info(inode);
2465         struct address_space *mapping = &inode->i_data;
2466         unsigned long nrpages;
2467         unsigned long flags;
2468
2469         ENTRY;
2470
2471         if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL) {
2472                 /* This is the last chance to write out dirty pages,
2473                  * otherwise we may lose data during umount.
2474                  *
2475                  * If i_nlink is 0 then just discard the data. This is safe
2476                  * because the local inode gets i_nlink 0 from the server only
2477                  * for the last unlink, so the file is not open somewhere else.
2478                  */
2479                 cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, inode->i_nlink ?
2480                                    CL_FSYNC_LOCAL : CL_FSYNC_DISCARD, 1);
2481         }
2482         truncate_inode_pages_final(mapping);
2483
2484         /* Workaround for LU-118: Note nrpages may not be totally updated when
2485          * truncate_inode_pages() returns, as there can be a page in the process
2486          * of deletion (inside __delete_from_page_cache()) in the specified
2487          * range. Thus mapping->nrpages can be non-zero when this function
2488          * returns even after truncation of the whole mapping.  Only do this if
2489          * nrpages isn't already zero.
2490          */
2491         nrpages = mapping->nrpages;
2492         if (nrpages) {
2493                 ll_xa_lock_irqsave(&mapping->i_pages, flags);
2494                 nrpages = mapping->nrpages;
2495                 ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
2496         } /* Workaround end */
2497
2498         LASSERTF(nrpages == 0, "%s: inode="DFID"(%p) nrpages=%lu, "
2499                  "see https://jira.whamcloud.com/browse/LU-118\n",
2500                  ll_i2sbi(inode)->ll_fsname,
2501                  PFID(ll_inode2fid(inode)), inode, nrpages);
2502
2503         ll_clear_inode(inode);
2504         clear_inode(inode);
2505
2506         EXIT;
2507 }
2508
2509 int ll_iocontrol(struct inode *inode, struct file *file,
2510                  unsigned int cmd, unsigned long arg)
2511 {
2512         struct ll_sb_info *sbi = ll_i2sbi(inode);
2513         struct ptlrpc_request *req = NULL;
2514         int rc, flags = 0;
2515         ENTRY;
2516
2517         switch (cmd) {
2518         case FS_IOC_GETFLAGS: {
2519                 struct mdt_body *body;
2520                 struct md_op_data *op_data;
2521
2522                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
2523                                              0, 0, LUSTRE_OPC_ANY,
2524                                              NULL);
2525                 if (IS_ERR(op_data))
2526                         RETURN(PTR_ERR(op_data));
2527
2528                 op_data->op_valid = OBD_MD_FLFLAGS;
2529                 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
2530                 ll_finish_md_op_data(op_data);
2531                 if (rc) {
2532                         CERROR("%s: failure on inode "DFID": rc = %d\n",
2533                                sbi->ll_md_exp->exp_obd->obd_name,
2534                                PFID(ll_inode2fid(inode)), rc);
2535                         RETURN(-abs(rc));
2536                 }
2537
2538                 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
2539
2540                 flags = body->mbo_flags;
2541
2542                 ptlrpc_req_finished(req);
2543
2544                 RETURN(put_user(flags, (int __user *)arg));
2545         }
2546         case FS_IOC_SETFLAGS: {
2547                 struct iattr *attr;
2548                 struct md_op_data *op_data;
2549                 struct cl_object *obj;
2550                 struct fsxattr fa = { 0 };
2551
2552                 if (get_user(flags, (int __user *)arg))
2553                         RETURN(-EFAULT);
2554
2555                 fa.fsx_projid = ll_i2info(inode)->lli_projid;
2556                 if (flags & LUSTRE_PROJINHERIT_FL)
2557                         fa.fsx_xflags = FS_XFLAG_PROJINHERIT;
2558
2559                 rc = ll_ioctl_check_project(inode, &fa);
2560                 if (rc)
2561                         RETURN(rc);
2562
2563                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
2564                                              LUSTRE_OPC_ANY, NULL);
2565                 if (IS_ERR(op_data))
2566                         RETURN(PTR_ERR(op_data));
2567
2568                 op_data->op_attr_flags = flags;
2569                 op_data->op_xvalid |= OP_XVALID_FLAGS;
2570                 rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &req);
2571                 ll_finish_md_op_data(op_data);
2572                 ptlrpc_req_finished(req);
2573                 if (rc)
2574                         RETURN(rc);
2575
2576                 ll_update_inode_flags(inode, flags);
2577
2578                 obj = ll_i2info(inode)->lli_clob;
2579                 if (obj == NULL)
2580                         RETURN(0);
2581
2582                 OBD_ALLOC_PTR(attr);
2583                 if (attr == NULL)
2584                         RETURN(-ENOMEM);
2585
2586                 rc = cl_setattr_ost(obj, attr, OP_XVALID_FLAGS, flags);
2587
2588                 OBD_FREE_PTR(attr);
2589                 RETURN(rc);
2590         }
2591         default:
2592                 RETURN(-ENOSYS);
2593         }
2594
2595         RETURN(0);
2596 }
2597
2598 int ll_flush_ctx(struct inode *inode)
2599 {
2600         struct ll_sb_info  *sbi = ll_i2sbi(inode);
2601
2602         CDEBUG(D_SEC, "flush context for user %d\n",
2603                from_kuid(&init_user_ns, current_uid()));
2604
2605         obd_set_info_async(NULL, sbi->ll_md_exp,
2606                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2607                            0, NULL, NULL);
2608         obd_set_info_async(NULL, sbi->ll_dt_exp,
2609                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2610                            0, NULL, NULL);
2611         return 0;
2612 }
2613
2614 /* umount -f client means force down, don't save state */
2615 void ll_umount_begin(struct super_block *sb)
2616 {
2617         struct ll_sb_info *sbi = ll_s2sbi(sb);
2618         struct obd_device *obd;
2619         struct obd_ioctl_data *ioc_data;
2620         int cnt;
2621         ENTRY;
2622
2623         CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
2624                sb->s_count, atomic_read(&sb->s_active));
2625
2626         obd = class_exp2obd(sbi->ll_md_exp);
2627         if (obd == NULL) {
2628                 CERROR("Invalid MDC connection handle %#llx\n",
2629                        sbi->ll_md_exp->exp_handle.h_cookie);
2630                 EXIT;
2631                 return;
2632         }
2633         obd->obd_force = 1;
2634
2635         obd = class_exp2obd(sbi->ll_dt_exp);
2636         if (obd == NULL) {
2637                 CERROR("Invalid LOV connection handle %#llx\n",
2638                        sbi->ll_dt_exp->exp_handle.h_cookie);
2639                 EXIT;
2640                 return;
2641         }
2642         obd->obd_force = 1;
2643
2644         OBD_ALLOC_PTR(ioc_data);
2645         if (ioc_data) {
2646                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
2647                               sizeof *ioc_data, ioc_data, NULL);
2648
2649                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
2650                               sizeof *ioc_data, ioc_data, NULL);
2651
2652                 OBD_FREE_PTR(ioc_data);
2653         }
2654
2655         /* Really, we'd like to wait until there are no requests outstanding,
2656          * and then continue.  For now, we just periodically check for the vfs
2657          * to decrement mnt_cnt and hope to finish within 10 seconds.
2658          */
2659         cnt = 10;
2660         while (cnt > 0 &&
2661                !may_umount(sbi->ll_mnt.mnt)) {
2662                 ssleep(1);
2663                 cnt -= 1;
2664         }
2665
2666         EXIT;
2667 }
2668
2669 int ll_remount_fs(struct super_block *sb, int *flags, char *data)
2670 {
2671         struct ll_sb_info *sbi = ll_s2sbi(sb);
2672         char *profilenm = get_profile_name(sb);
2673         int err;
2674         __u32 read_only;
2675
2676         if ((*flags & MS_RDONLY) != (sb->s_flags & SB_RDONLY)) {
2677                 read_only = *flags & MS_RDONLY;
2678                 err = obd_set_info_async(NULL, sbi->ll_md_exp,
2679                                          sizeof(KEY_READ_ONLY),
2680                                          KEY_READ_ONLY, sizeof(read_only),
2681                                          &read_only, NULL);
2682                 if (err) {
2683                         LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
2684                                       profilenm, read_only ?
2685                                       "read-only" : "read-write", err);
2686                         return err;
2687                 }
2688
2689                 if (read_only)
2690                         sb->s_flags |= SB_RDONLY;
2691                 else
2692                         sb->s_flags &= ~SB_RDONLY;
2693
2694                 if (sbi->ll_flags & LL_SBI_VERBOSE)
2695                         LCONSOLE_WARN("Remounted %s %s\n", profilenm,
2696                                       read_only ?  "read-only" : "read-write");
2697         }
2698         return 0;
2699 }
2700
2701 /**
2702  * Clean up the open handle that is cached on the MDT side.
2703  *
2704  * In the open case, the client-side open handling thread may hit an error
2705  * after the MDT has granted the open. In such a case, the client should
2706  * send a close RPC to the MDT as cleanup; otherwise, the open handle
2707  * on the MDT will be leaked there until the client unmounts or is evicted.
2708  *
2709  * Furthermore, if someone unlinked the file, then because the open handle
2710  * holds a reference on that file/object, it will block subsequent
2711  * threads that want to locate the object via FID.
2712  *
2713  * \param[in] sb        super block for this file-system
2714  * \param[in] open_req  pointer to the original open request
2715  */
2716 void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req)
2717 {
2718         struct mdt_body                 *body;
2719         struct md_op_data               *op_data;
2720         struct ptlrpc_request           *close_req = NULL;
2721         struct obd_export               *exp       = ll_s2sbi(sb)->ll_md_exp;
2722         ENTRY;
2723
2724         body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
2725         OBD_ALLOC_PTR(op_data);
2726         if (op_data == NULL) {
2727                 CWARN("%s: cannot allocate op_data to release open handle for "
2728                       DFID"\n", ll_s2sbi(sb)->ll_fsname, PFID(&body->mbo_fid1));
2729
2730                 RETURN_EXIT;
2731         }
2732
2733         op_data->op_fid1 = body->mbo_fid1;
2734         op_data->op_open_handle = body->mbo_open_handle;
2735         op_data->op_mod_time = ktime_get_real_seconds();
2736         md_close(exp, op_data, NULL, &close_req);
2737         ptlrpc_req_finished(close_req);
2738         ll_finish_md_op_data(op_data);
2739
2740         EXIT;
2741 }
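/*
 * Illustrative usage sketch (not part of the build), with a hypothetical
 * helper name: if the MDT already granted the open but client-side processing
 * of the reply fails afterwards, the cached open handle must be closed:
 *
 *	rc = process_open_reply(req);		// hypothetical failing step
 *	if (rc < 0)
 *		ll_open_cleanup(sb, req);	// release the MDT open handle
 */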
2742
2743 int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
2744                   struct super_block *sb, struct lookup_intent *it)
2745 {
2746         struct ll_sb_info *sbi = NULL;
2747         struct lustre_md md = { NULL };
2748         bool default_lmv_deleted = false;
2749         int rc;
2750
2751         ENTRY;
2752
2753         LASSERT(*inode || sb);
2754         sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
2755         rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
2756                               sbi->ll_md_exp, &md);
2757         if (rc != 0)
2758                 GOTO(out, rc);
2759
2760         /*
2761          * Clear default_lmv only if the intent_getattr reply doesn't contain
2762          * it, but this must be done after iget; check it early here because
2763          * ll_update_lsm_md() may change md.
2764          */
2765         if (it && (it->it_op & (IT_LOOKUP | IT_GETATTR)) &&
2766             S_ISDIR(md.body->mbo_mode) && !md.default_lmv)
2767                 default_lmv_deleted = true;
2768
2769         if (*inode) {
2770                 rc = ll_update_inode(*inode, &md);
2771                 if (rc != 0)
2772                         GOTO(out, rc);
2773         } else {
2774                 LASSERT(sb != NULL);
2775
2776                 /*
2777                  * At this point the server returns the same FID that the
2778                  * client generated at create time, so using ->fid1 is okay here.
2779                  */
2780                 if (!fid_is_sane(&md.body->mbo_fid1)) {
2781                         CERROR("%s: Fid is insane "DFID"\n",
2782                                 sbi->ll_fsname,
2783                                 PFID(&md.body->mbo_fid1));
2784                         GOTO(out, rc = -EINVAL);
2785                 }
2786
2787                 *inode = ll_iget(sb, cl_fid_build_ino(&md.body->mbo_fid1,
2788                                              sbi->ll_flags & LL_SBI_32BIT_API),
2789                                  &md);
2790                 if (IS_ERR(*inode)) {
2791 #ifdef CONFIG_LUSTRE_FS_POSIX_ACL
2792                         if (md.posix_acl) {
2793                                 posix_acl_release(md.posix_acl);
2794                                 md.posix_acl = NULL;
2795                         }
2796 #endif
2797                         rc = IS_ERR(*inode) ? PTR_ERR(*inode) : -ENOMEM;
2798                         *inode = NULL;
2799                         CERROR("new_inode -fatal: rc %d\n", rc);
2800                         GOTO(out, rc);
2801                 }
2802         }
2803
2804         /* Handle a piggybacked layout lock.
2805          * A layout lock can be piggybacked on getattr and open requests.
2806          * The lsm can be applied to the inode only if it comes with a layout
2807          * lock, otherwise the correct layout may be overwritten, for example:
2808          * 1. proc1: the MDT returns an lsm but does not grant the layout lock
2809          * 2. the layout is changed by another client
2810          * 3. proc2: refreshes the layout, layout lock granted
2811          * 4. proc1: applies its now-stale layout */
2812         if (it != NULL && it->it_lock_mode != 0) {
2813                 struct lustre_handle lockh;
2814                 struct ldlm_lock *lock;
2815
2816                 lockh.cookie = it->it_lock_handle;
2817                 lock = ldlm_handle2lock(&lockh);
2818                 LASSERT(lock != NULL);
2819                 if (ldlm_has_layout(lock)) {
2820                         struct cl_object_conf conf;
2821
2822                         memset(&conf, 0, sizeof(conf));
2823                         conf.coc_opc = OBJECT_CONF_SET;
2824                         conf.coc_inode = *inode;
2825                         conf.coc_lock = lock;
2826                         conf.u.coc_layout = md.layout;
2827                         (void)ll_layout_conf(*inode, &conf);
2828                 }
2829                 LDLM_LOCK_PUT(lock);
2830         }
2831
2832         if (default_lmv_deleted)
2833                 ll_update_default_lsm_md(*inode, &md);
2834
2835         GOTO(out, rc = 0);
2836
2837 out:
2838         /* cleanup will be done if necessary */
2839         md_free_lustre_md(sbi->ll_md_exp, &md);
2840
2841         if (rc != 0 && it != NULL && it->it_op & IT_OPEN) {
2842                 ll_intent_drop_lock(it);
2843                 ll_open_cleanup(sb != NULL ? sb : (*inode)->i_sb, req);
2844         }
2845
2846         return rc;
2847 }
2848
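/**
 * Handle the IOC_OBD_STATFS ioctl.
 *
 * Validate the user-supplied obd_ioctl_data, select the MD or DT export
 * based on the LL_STATFS_LMV / LL_STATFS_LOV type flag, and forward the
 * request to the target device via obd_iocontrol().
 *
 * \param[in] inode  inode the ioctl was issued against
 * \param[in] arg    user space pointer to the obd_ioctl_data buffer
 *
 * \retval 0 on success
 * \retval negative error code on failure
 */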
2849 int ll_obd_statfs(struct inode *inode, void __user *arg)
2850 {
2851         struct ll_sb_info *sbi = NULL;
2852         struct obd_export *exp;
2853         char *buf = NULL;
2854         struct obd_ioctl_data *data = NULL;
2855         __u32 type;
2856         int len = 0, rc;
2857
2858         if (!inode || !(sbi = ll_i2sbi(inode)))
2859                 GOTO(out_statfs, rc = -EINVAL);
2860
2861         rc = obd_ioctl_getdata(&buf, &len, arg);
2862         if (rc)
2863                 GOTO(out_statfs, rc);
2864
2865         data = (void*)buf;
2866         if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
2867             !data->ioc_pbuf1 || !data->ioc_pbuf2)
2868                 GOTO(out_statfs, rc = -EINVAL);
2869
2870         if (data->ioc_inllen1 != sizeof(__u32) ||
2871             data->ioc_inllen2 != sizeof(__u32) ||
2872             data->ioc_plen1 != sizeof(struct obd_statfs) ||
2873             data->ioc_plen2 != sizeof(struct obd_uuid))
2874                 GOTO(out_statfs, rc = -EINVAL);
2875
2876         memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
2877         if (type & LL_STATFS_LMV)
2878                 exp = sbi->ll_md_exp;
2879         else if (type & LL_STATFS_LOV)
2880                 exp = sbi->ll_dt_exp;
2881         else
2882                 GOTO(out_statfs, rc = -ENODEV);
2883
2884         rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, NULL);
2885         if (rc)
2886                 GOTO(out_statfs, rc);
2887 out_statfs:
2888         OBD_FREE_LARGE(buf, len);
2889         return rc;
2890 }
2891
2892 /*
2893  * This is normally called in ll_finish_md_op_data(), but sometimes it
2894  * needs to be called early to avoid a deadlock.
2895  */
2896 void ll_unlock_md_op_lsm(struct md_op_data *op_data)
2897 {
2898         if (op_data->op_mea2_sem) {
2899                 up_read(op_data->op_mea2_sem);
2900                 op_data->op_mea2_sem = NULL;
2901         }
2902
2903         if (op_data->op_mea1_sem) {
2904                 up_read(op_data->op_mea1_sem);
2905                 op_data->op_mea1_sem = NULL;
2906         }
2907 }
2908
2909 /* This function prepares the md_op_data hint for passing down to the MD stack. */
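/*
 * A typical caller pattern (illustrative sketch only; the exact MD call
 * depends on the operation, see the individual call sites in llite):
 *
 *	op_data = ll_prep_md_op_data(NULL, dir, NULL, name, namelen, 0,
 *				     LUSTRE_OPC_ANY, NULL);
 *	if (IS_ERR(op_data))
 *		RETURN(PTR_ERR(op_data));
 *
 *	... issue the MD request using op_data ...
 *
 *	ll_finish_md_op_data(op_data);
 */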
2910 struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
2911                                       struct inode *i1, struct inode *i2,
2912                                       const char *name, size_t namelen,
2913                                       __u32 mode, enum md_op_code opc,
2914                                       void *data)
2915 {
2916         LASSERT(i1 != NULL);
2917
2918         if (name == NULL) {
2919                 /* Do not reuse namelen for something else. */
2920                 if (namelen != 0)
2921                         return ERR_PTR(-EINVAL);
2922         } else {
2923                 if (namelen > ll_i2sbi(i1)->ll_namelen)
2924                         return ERR_PTR(-ENAMETOOLONG);
2925
2926                 if (!lu_name_is_valid_2(name, namelen))
2927                         return ERR_PTR(-EINVAL);
2928         }
2929
2930         if (op_data == NULL)
2931                 OBD_ALLOC_PTR(op_data);
2932
2933         if (op_data == NULL)
2934                 return ERR_PTR(-ENOMEM);
2935
2936         ll_i2gids(op_data->op_suppgids, i1, i2);
2937         op_data->op_fid1 = *ll_inode2fid(i1);
2938         op_data->op_code = opc;
2939
2940         if (S_ISDIR(i1->i_mode)) {
2941                 down_read(&ll_i2info(i1)->lli_lsm_sem);
2942                 op_data->op_mea1_sem = &ll_i2info(i1)->lli_lsm_sem;
2943                 op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
2944                 op_data->op_default_mea1 = ll_i2info(i1)->lli_default_lsm_md;
2945         }
2946
2947         if (i2) {
2948                 op_data->op_fid2 = *ll_inode2fid(i2);
2949                 if (S_ISDIR(i2->i_mode)) {
2950                         if (i2 != i1) {
2951                                 down_read(&ll_i2info(i2)->lli_lsm_sem);
2952                                 op_data->op_mea2_sem =
2953                                                 &ll_i2info(i2)->lli_lsm_sem;
2954                         }
2955                         op_data->op_mea2 = ll_i2info(i2)->lli_lsm_md;
2956                 }
2957         } else {
2958                 fid_zero(&op_data->op_fid2);
2959         }
2960
2961         if (ll_i2sbi(i1)->ll_flags & LL_SBI_64BIT_HASH)
2962                 op_data->op_cli_flags |= CLI_HASH64;
2963
2964         if (ll_need_32bit_api(ll_i2sbi(i1)))
2965                 op_data->op_cli_flags |= CLI_API32;
2966
2967         op_data->op_name = name;
2968         op_data->op_namelen = namelen;
2969         op_data->op_mode = mode;
2970         op_data->op_mod_time = ktime_get_real_seconds();
2971         op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
2972         op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
2973         op_data->op_cap = cfs_curproc_cap_pack();
2974         op_data->op_mds = 0;
2975         if ((opc == LUSTRE_OPC_CREATE) && (name != NULL) &&
2976              filename_is_volatile(name, namelen, &op_data->op_mds)) {
2977                 op_data->op_bias |= MDS_CREATE_VOLATILE;
2978         }
2979         op_data->op_data = data;
2980
2981         return op_data;
2982 }
2983
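/**
 * Release an md_op_data prepared by ll_prep_md_op_data(): drop any striping
 * semaphores still held, release the file security context, and free the
 * structure.
 */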
2984 void ll_finish_md_op_data(struct md_op_data *op_data)
2985 {
2986         ll_unlock_md_op_lsm(op_data);
2987         security_release_secctx(op_data->op_file_secctx,
2988                                 op_data->op_file_secctx_size);
2989         OBD_FREE_PTR(op_data);
2990 }
2991
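/**
 * Print the client mount options (used as the super block ->show_options
 * method, e.g. for /proc/mounts).
 */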
2992 int ll_show_options(struct seq_file *seq, struct dentry *dentry)
2993 {
2994         struct ll_sb_info *sbi;
2995
2996         LASSERT(seq && dentry);
2997         sbi = ll_s2sbi(dentry->d_sb);
2998
2999         if (sbi->ll_flags & LL_SBI_NOLCK)
3000                 seq_puts(seq, ",nolock");
3001
3002         /* "flock" has been the default since 2.13, but it wasn't for many
3003          * years, so it is still useful to print it to show it is enabled.
3004          * Also print "noflock" so it is clear when flock is disabled.
3005          */
3006         if (sbi->ll_flags & LL_SBI_FLOCK)
3007                 seq_puts(seq, ",flock");
3008         else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
3009                 seq_puts(seq, ",localflock");
3010         else
3011                 seq_puts(seq, ",noflock");
3012
3013         if (sbi->ll_flags & LL_SBI_USER_XATTR)
3014                 seq_puts(seq, ",user_xattr");
3015
3016         if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
3017                 seq_puts(seq, ",lazystatfs");
3018
3019         if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
3020                 seq_puts(seq, ",user_fid2path");
3021
3022         if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
3023                 seq_puts(seq, ",always_ping");
3024
3025         if (ll_sbi_has_test_dummy_encryption(sbi))
3026                 seq_puts(seq, ",test_dummy_encryption");
3027
3028         if (ll_sbi_has_encrypt(sbi))
3029                 seq_puts(seq, ",encrypt");
3030         else
3031                 seq_puts(seq, ",noencrypt");
3032
3033         RETURN(0);
3034 }
3035
3036 /**
3037  * Get obd name by cmd, and copy out to user space
3038  */
3039 int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
3040 {
3041         struct ll_sb_info *sbi = ll_i2sbi(inode);
3042         struct obd_device *obd;
3043         ENTRY;
3044
3045         if (cmd == OBD_IOC_GETDTNAME)
3046                 obd = class_exp2obd(sbi->ll_dt_exp);
3047         else if (cmd == OBD_IOC_GETMDNAME)
3048                 obd = class_exp2obd(sbi->ll_md_exp);
3049         else
3050                 RETURN(-EINVAL);
3051
3052         if (!obd)
3053                 RETURN(-ENOENT);
3054
3055         if (copy_to_user((void __user *)arg, obd->obd_name,
3056                          strlen(obd->obd_name) + 1))
3057                 RETURN(-EFAULT);
3058
3059         RETURN(0);
3060 }
3061
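/* Build the path string for \a dentry, relative to the current root mount,
 * into \a buf of size \a bufsize. Returns the path or an ERR_PTR(). */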
3062 static char *ll_d_path(struct dentry *dentry, char *buf, int bufsize)
3063 {
3064         char *path = NULL;
3065
3066         struct path p;
3067
3068         p.dentry = dentry;
3069         p.mnt = current->fs->root.mnt;
3070         path_get(&p);
3071         path = d_path(&p, buf, bufsize);
3072         path_put(&p);
3073         return path;
3074 }
3075
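/**
 * Warn that a dirty page is being discarded because of an I/O error.
 *
 * Try to resolve a path for the page's inode (allocating with GFP_ATOMIC
 * since this may run under a spin lock) and emit a D_WARNING message with
 * the fsname, device, FID, path and error code.
 *
 * \param[in] page   the dirty page being discarded
 * \param[in] ioret  the I/O error that caused the discard
 */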
3076 void ll_dirty_page_discard_warn(struct page *page, int ioret)
3077 {
3078         char *buf, *path = NULL;
3079         struct dentry *dentry = NULL;
3080         struct inode *inode = page->mapping->host;
3081
3082         /* This can be called inside a spin lock, so use GFP_ATOMIC. */
3083         buf = (char *)__get_free_page(GFP_ATOMIC);
3084         if (buf != NULL) {
3085                 dentry = d_find_alias(page->mapping->host);
3086                 if (dentry != NULL)
3087                         path = ll_d_path(dentry, buf, PAGE_SIZE);
3088         }
3089
3090         /* The below message is checked in recovery-small.sh test_24b */
3091         CDEBUG(D_WARNING,
3092                "%s: dirty page discard: %s/fid: "DFID"/%s may get corrupted "
3093                "(rc %d)\n", ll_i2sbi(inode)->ll_fsname,
3094                s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
3095                PFID(ll_inode2fid(inode)),
3096                (path && !IS_ERR(path)) ? path : "", ioret);
3097
3098         if (dentry != NULL)
3099                 dput(dentry);
3100
3101         if (buf != NULL)
3102                 free_page((unsigned long)buf);
3103 }
3104
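/**
 * Copy a lov_user_md from user space into a freshly allocated kernel buffer.
 *
 * The buffer size is computed with ll_lov_user_md_size() from the header
 * that is copied in first. On success the caller owns *kbuf and must free
 * it with OBD_FREE_LARGE().
 *
 * \param[in]  md    user space pointer to the lov_user_md
 * \param[out] kbuf  newly allocated kernel copy
 *
 * \retval allocated size in bytes on success
 * \retval negative error code on failure
 */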
3105 ssize_t ll_copy_user_md(const struct lov_user_md __user *md,
3106                         struct lov_user_md **kbuf)
3107 {
3108         struct lov_user_md      lum;
3109         ssize_t                 lum_size;
3110         ENTRY;
3111
3112         if (copy_from_user(&lum, md, sizeof(lum)))
3113                 RETURN(-EFAULT);
3114
3115         lum_size = ll_lov_user_md_size(&lum);
3116         if (lum_size < 0)
3117                 RETURN(lum_size);
3118
3119         OBD_ALLOC_LARGE(*kbuf, lum_size);
3120         if (*kbuf == NULL)
3121                 RETURN(-ENOMEM);
3122
3123         if (copy_from_user(*kbuf, md, lum_size) != 0) {
3124                 OBD_FREE_LARGE(*kbuf, lum_size);
3125                 RETURN(-EFAULT);
3126         }
3127
3128         RETURN(lum_size);
3129 }
3130
3131 /*
3132  * Compute the llite root squash state after a change of the root squash
3133  * configuration, or after an LNet NID is added or removed.
3134  */
3135 void ll_compute_rootsquash_state(struct ll_sb_info *sbi)
3136 {
3137         struct root_squash_info *squash = &sbi->ll_squash;
3138         int i;
3139         bool matched;
3140         struct lnet_process_id id;
3141
3142         /* Update norootsquash flag */
3143         spin_lock(&squash->rsi_lock);
3144         if (list_empty(&squash->rsi_nosquash_nids))
3145                 sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
3146         else {
3147                 /* Do not apply root squash if any of our NIDs is in the
3148                  * nosquash_nids list */
3149                 matched = false;
3150                 i = 0;
3151                 while (LNetGetId(i++, &id) != -ENOENT) {
3152                         if (id.nid == LNET_NID_LO_0)
3153                                 continue;
3154                         if (cfs_match_nid(id.nid, &squash->rsi_nosquash_nids)) {
3155                                 matched = true;
3156                                 break;
3157                         }
3158                 }
3159                 if (matched)
3160                         sbi->ll_flags |= LL_SBI_NOROOTSQUASH;
3161                 else
3162                         sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
3163         }
3164         spin_unlock(&squash->rsi_lock);
3165 }
3166
3167 /**
3168  * Parse linkea content to extract information about a given hardlink
3169  *
3170  * \param[in]   ldata      - Initialized linkea data
3171  * \param[in]   linkno     - Link identifier
3172  * \param[out]  parent_fid - The entry's parent FID
3173  * \param[out]  ln         - Entry name destination buffer
3174  *
3175  * \retval 0 on success
3176  * \retval Appropriate negative error code on failure
3177  */
3178 static int ll_linkea_decode(struct linkea_data *ldata, unsigned int linkno,
3179                             struct lu_fid *parent_fid, struct lu_name *ln)
3180 {
3181         unsigned int    idx;
3182         int             rc;
3183         ENTRY;
3184
3185         rc = linkea_init_with_rec(ldata);
3186         if (rc < 0)
3187                 RETURN(rc);
3188
3189         if (linkno >= ldata->ld_leh->leh_reccount)
3190                 /* beyond last link */
3191                 RETURN(-ENODATA);
3192
3193         linkea_first_entry(ldata);
3194         for (idx = 0; ldata->ld_lee != NULL; idx++) {
3195                 linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen, ln,
3196                                     parent_fid);
3197                 if (idx == linkno)
3198                         break;
3199
3200                 linkea_next_entry(ldata);
3201         }
3202
3203         if (idx < linkno)
3204                 RETURN(-ENODATA);
3205
3206         RETURN(0);
3207 }
3208
3209 /**
3210  * Get parent FID and name of an identified link. Operation is performed for
3211  * a given link number, letting the caller iterate over linkno to list one or
3212  * all links of an entry.
3213  *
3214  * \param[in]     file - File descriptor against which to perform the operation
3215  * \param[in,out] arg  - User-filled structure containing the linkno to operate
3216  *                       on and the available size. It is eventually filled with
3217  *                       the requested information or left untouched on error
3218  *
3219  * \retval - 0 on success
3220  * \retval - Appropriate negative error code on failure
3221  */
3222 int ll_getparent(struct file *file, struct getparent __user *arg)
3223 {
3224         struct inode            *inode = file_inode(file);
3225         struct linkea_data      *ldata;
3226         struct lu_buf            buf = LU_BUF_NULL;
3227         struct lu_name           ln;
3228         struct lu_fid            parent_fid;
3229         __u32                    linkno;
3230         __u32                    name_size;
3231         int                      rc;
3232
3233         ENTRY;
3234
3235         if (!cfs_capable(CFS_CAP_DAC_READ_SEARCH) &&
3236             !(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
3237                 RETURN(-EPERM);
3238
3239         if (get_user(name_size, &arg->gp_name_size))
3240                 RETURN(-EFAULT);
3241
3242         if (get_user(linkno, &arg->gp_linkno))
3243                 RETURN(-EFAULT);
3244
3245         if (name_size > PATH_MAX)
3246                 RETURN(-EINVAL);
3247
3248         OBD_ALLOC(ldata, sizeof(*ldata));
3249         if (ldata == NULL)
3250                 RETURN(-ENOMEM);
3251
3252         rc = linkea_data_new(ldata, &buf);
3253         if (rc < 0)
3254                 GOTO(ldata_free, rc);
3255
3256         rc = ll_xattr_list(inode, XATTR_NAME_LINK, XATTR_TRUSTED_T, buf.lb_buf,
3257                            buf.lb_len, OBD_MD_FLXATTR);
3258         if (rc < 0)
3259                 GOTO(lb_free, rc);
3260
3261         rc = ll_linkea_decode(ldata, linkno, &parent_fid, &ln);
3262         if (rc < 0)
3263                 GOTO(lb_free, rc);
3264
3265         if (ln.ln_namelen >= name_size)
3266                 GOTO(lb_free, rc = -EOVERFLOW);
3267
3268         if (copy_to_user(&arg->gp_fid, &parent_fid, sizeof(arg->gp_fid)))
3269                 GOTO(lb_free, rc = -EFAULT);
3270
3271         if (copy_to_user(&arg->gp_name, ln.ln_name, ln.ln_namelen))
3272                 GOTO(lb_free, rc = -EFAULT);
3273
3274         if (put_user('\0', arg->gp_name + ln.ln_namelen))
3275                 GOTO(lb_free, rc = -EFAULT);
3276
3277 lb_free:
3278         lu_buf_free(&buf);
3279 ldata_free:
3280         OBD_FREE(ldata, sizeof(*ldata));
3281
3282         RETURN(rc);
3283 }