LU-9679 llite: annotate non-owner locking
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/llite/llite_lib.c
33  *
34  * Lustre Light Super operations
35  */
36
37 #define DEBUG_SUBSYSTEM S_LLITE
38
39 #include <linux/cpu.h>
40 #include <linux/module.h>
41 #include <linux/random.h>
42 #include <linux/statfs.h>
43 #include <linux/time.h>
44 #include <linux/types.h>
45 #include <libcfs/linux/linux-uuid.h>
46 #include <linux/version.h>
47 #include <linux/mm.h>
48 #include <linux/user_namespace.h>
49 #include <linux/delay.h>
50 #include <linux/uidgid.h>
51 #include <linux/security.h>
52 #include <linux/fs_struct.h>
53
54 #ifndef HAVE_CPUS_READ_LOCK
55 #include <libcfs/linux/linux-cpu.h>
56 #endif
57 #include <uapi/linux/lustre/lustre_ioctl.h>
58 #ifdef HAVE_UAPI_LINUX_MOUNT_H
59 #include <uapi/linux/mount.h>
60 #endif
61
62 #include <lustre_ha.h>
63 #include <lustre_dlm.h>
64 #include <lprocfs_status.h>
65 #include <lustre_disk.h>
66 #include <uapi/linux/lustre/lustre_param.h>
67 #include <lustre_log.h>
68 #include <cl_object.h>
69 #include <obd_cksum.h>
70 #include "llite_internal.h"
71
72 struct kmem_cache *ll_file_data_slab;
73
74 #ifndef log2
75 #define log2(n) ffz(~(n))
76 #endif
77
78 /**
79  * If there is only one core visible to Lustre, async readahead
80  * will be disabled. To avoid massive oversubscription, we use
81  * 1/2 of the active cores as the default maximum number of
82  * async readahead requests.
83  */
84 static inline unsigned int ll_get_ra_async_max_active(void)
85 {
86         return cfs_cpt_weight(cfs_cpt_tab, CFS_CPT_ANY) >> 1;
87 }
88
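/**
 * Allocate and initialize the ll_sb_info for a new client mount: set up the
 * PCC super, locks, the readahead workqueue, the cl_cache LRU, default
 * readahead/statahead limits, root squash and per-filesystem file heat.
 *
 * \retval pointer to the new ll_sb_info on success
 * \retval ERR_PTR(-errno) on failure
 */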
89 static struct ll_sb_info *ll_init_sbi(void)
90 {
91         struct ll_sb_info *sbi = NULL;
92         unsigned long pages;
93         unsigned long lru_page_max;
94         struct sysinfo si;
95         int rc;
96         int i;
97
98         ENTRY;
99
100         OBD_ALLOC_PTR(sbi);
101         if (sbi == NULL)
102                 RETURN(ERR_PTR(-ENOMEM));
103
104         rc = pcc_super_init(&sbi->ll_pcc_super);
105         if (rc < 0)
106                 GOTO(out_sbi, rc);
107
108         spin_lock_init(&sbi->ll_lock);
109         mutex_init(&sbi->ll_lco.lco_lock);
110         spin_lock_init(&sbi->ll_pp_extent_lock);
111         spin_lock_init(&sbi->ll_process_lock);
112         sbi->ll_rw_stats_on = 0;
113         sbi->ll_statfs_max_age = OBD_STATFS_CACHE_SECONDS;
114
115         si_meminfo(&si);
116         pages = si.totalram - si.totalhigh;
117         lru_page_max = pages / 2;
118
119         sbi->ll_ra_info.ra_async_max_active = ll_get_ra_async_max_active();
120         sbi->ll_ra_info.ll_readahead_wq =
121                 cfs_cpt_bind_workqueue("ll-readahead-wq", cfs_cpt_tab,
122                                        0, CFS_CPT_ANY,
123                                        sbi->ll_ra_info.ra_async_max_active);
124         if (IS_ERR(sbi->ll_ra_info.ll_readahead_wq))
125                 GOTO(out_pcc, rc = PTR_ERR(sbi->ll_ra_info.ll_readahead_wq));
126
127         /* initialize ll_cache data */
128         sbi->ll_cache = cl_cache_init(lru_page_max);
129         if (sbi->ll_cache == NULL)
130                 GOTO(out_destroy_ra, rc = -ENOMEM);
131
132         sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
133                                                     SBI_DEFAULT_READ_AHEAD_MAX);
134         sbi->ll_ra_info.ra_async_pages_per_file_threshold =
135                                 sbi->ll_ra_info.ra_max_pages_per_file;
136         sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
137         sbi->ll_ra_info.ra_max_read_ahead_whole_pages = -1;
138         atomic_set(&sbi->ll_ra_info.ra_async_inflight, 0);
139
140         sbi->ll_flags |= LL_SBI_VERBOSE;
141 #ifdef ENABLE_CHECKSUM
142         sbi->ll_flags |= LL_SBI_CHECKSUM;
143 #endif
144 #ifdef ENABLE_FLOCK
145         sbi->ll_flags |= LL_SBI_FLOCK;
146 #endif
147
148 #ifdef HAVE_LRU_RESIZE_SUPPORT
149         sbi->ll_flags |= LL_SBI_LRU_RESIZE;
150 #endif
151         sbi->ll_flags |= LL_SBI_LAZYSTATFS;
152
153         for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
154                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
155                                pp_r_hist.oh_lock);
156                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
157                                pp_w_hist.oh_lock);
158         }
159
160         /* metadata statahead is enabled by default */
161         sbi->ll_sa_running_max = LL_SA_RUNNING_DEF;
162         sbi->ll_sa_max = LL_SA_RPC_DEF;
163         atomic_set(&sbi->ll_sa_total, 0);
164         atomic_set(&sbi->ll_sa_wrong, 0);
165         atomic_set(&sbi->ll_sa_running, 0);
166         atomic_set(&sbi->ll_agl_total, 0);
167         sbi->ll_flags |= LL_SBI_AGL_ENABLED;
168         sbi->ll_flags |= LL_SBI_FAST_READ;
169         sbi->ll_flags |= LL_SBI_TINY_WRITE;
170         ll_sbi_set_encrypt(sbi, true);
171
172         /* root squash */
173         sbi->ll_squash.rsi_uid = 0;
174         sbi->ll_squash.rsi_gid = 0;
175         INIT_LIST_HEAD(&sbi->ll_squash.rsi_nosquash_nids);
176         spin_lock_init(&sbi->ll_squash.rsi_lock);
177
178         /* Per-filesystem file heat */
179         sbi->ll_heat_decay_weight = SBI_DEFAULT_HEAT_DECAY_WEIGHT;
180         sbi->ll_heat_period_second = SBI_DEFAULT_HEAT_PERIOD_SECOND;
181         RETURN(sbi);
182 out_destroy_ra:
183         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
184 out_pcc:
185         pcc_super_fini(&sbi->ll_pcc_super);
186 out_sbi:
187         OBD_FREE_PTR(sbi);
188         RETURN(ERR_PTR(rc));
189 }
190
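/* Release the ll_sb_info allocated by ll_init_sbi(): free the nosquash NID
 * list, destroy the readahead workqueue, drop the cl_cache reference and
 * tear down the PCC super state. */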
191 static void ll_free_sbi(struct super_block *sb)
192 {
193         struct ll_sb_info *sbi = ll_s2sbi(sb);
194         ENTRY;
195
196         if (sbi != NULL) {
197                 if (!list_empty(&sbi->ll_squash.rsi_nosquash_nids))
198                         cfs_free_nidlist(&sbi->ll_squash.rsi_nosquash_nids);
199                 if (sbi->ll_ra_info.ll_readahead_wq)
200                         destroy_workqueue(sbi->ll_ra_info.ll_readahead_wq);
201                 if (sbi->ll_cache != NULL) {
202                         cl_cache_decref(sbi->ll_cache);
203                         sbi->ll_cache = NULL;
204                 }
205                 pcc_super_fini(&sbi->ll_pcc_super);
206                 OBD_FREE(sbi, sizeof(*sbi));
207         }
208         EXIT;
209 }
210
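/**
 * Second phase of the client mount: connect to the metadata and data
 * targets named by \a md and \a dt, negotiate connect flags, initialize
 * the FID clients, fetch the root FID and instantiate the root
 * inode/dentry for \a sb.
 */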
211 static int client_common_fill_super(struct super_block *sb, char *md, char *dt)
212 {
213         struct inode *root = NULL;
214         struct ll_sb_info *sbi = ll_s2sbi(sb);
215         struct obd_statfs *osfs = NULL;
216         struct ptlrpc_request *request = NULL;
217         struct obd_connect_data *data = NULL;
218         struct obd_uuid *uuid;
219         struct md_op_data *op_data;
220         struct lustre_md lmd;
221         u64 valid;
222         int size, err, checksum;
223
224         ENTRY;
225         sbi->ll_md_obd = class_name2obd(md);
226         if (!sbi->ll_md_obd) {
227                 CERROR("MD %s: not setup or attached\n", md);
228                 RETURN(-EINVAL);
229         }
230
231         OBD_ALLOC_PTR(data);
232         if (data == NULL)
233                 RETURN(-ENOMEM);
234
235         OBD_ALLOC_PTR(osfs);
236         if (osfs == NULL) {
237                 OBD_FREE_PTR(data);
238                 RETURN(-ENOMEM);
239         }
240
241         /* pass client page size via ocd_grant_blkbits; the server should
242          * report back its backend blocksize for grant calculation purposes */
243         data->ocd_grant_blkbits = PAGE_SHIFT;
244
245         /* indicate MDT features supported by this client */
246         data->ocd_connect_flags = OBD_CONNECT_IBITS    | OBD_CONNECT_NODEVOH  |
247                                   OBD_CONNECT_ATTRFID  | OBD_CONNECT_GRANT |
248                                   OBD_CONNECT_VERSION  | OBD_CONNECT_BRW_SIZE |
249                                   OBD_CONNECT_SRVLOCK  | OBD_CONNECT_TRUNCLOCK|
250                                   OBD_CONNECT_MDS_CAPA | OBD_CONNECT_OSS_CAPA |
251                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID     |
252                                   OBD_CONNECT_AT       | OBD_CONNECT_LOV_V3   |
253                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
254                                   OBD_CONNECT_64BITHASH |
255                                   OBD_CONNECT_EINPROGRESS |
256                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
257                                   OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS|
258                                   OBD_CONNECT_MAX_EASIZE |
259                                   OBD_CONNECT_FLOCK_DEAD |
260                                   OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK |
261                                   OBD_CONNECT_OPEN_BY_FID |
262                                   OBD_CONNECT_DIR_STRIPE |
263                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_CKSUM |
264                                   OBD_CONNECT_SUBTREE |
265                                   OBD_CONNECT_MULTIMODRPCS |
266                                   OBD_CONNECT_GRANT_PARAM |
267                                   OBD_CONNECT_SHORTIO | OBD_CONNECT_FLAGS2;
268
269         data->ocd_connect_flags2 = OBD_CONNECT2_DIR_MIGRATE |
270                                    OBD_CONNECT2_SUM_STATFS |
271                                    OBD_CONNECT2_OVERSTRIPING |
272                                    OBD_CONNECT2_FLR |
273                                    OBD_CONNECT2_LOCK_CONVERT |
274                                    OBD_CONNECT2_ARCHIVE_ID_ARRAY |
275                                    OBD_CONNECT2_INC_XID |
276                                    OBD_CONNECT2_LSOM |
277                                    OBD_CONNECT2_ASYNC_DISCARD |
278                                    OBD_CONNECT2_PCC |
279                                    OBD_CONNECT2_CRUSH;
280
281 #ifdef HAVE_LRU_RESIZE_SUPPORT
282         if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
283                 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
284 #endif
285         data->ocd_connect_flags |= OBD_CONNECT_ACL_FLAGS;
286
287         data->ocd_cksum_types = obd_cksum_types_supported_client();
288
289         if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
290                 /* flag the MDC connection as lightweight; only used for
291                  * test purposes, use with care */
292                 data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
293
294         data->ocd_ibits_known = MDS_INODELOCK_FULL;
295         data->ocd_version = LUSTRE_VERSION_CODE;
296
297         if (sb->s_flags & SB_RDONLY)
298                 data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
299         if (sbi->ll_flags & LL_SBI_USER_XATTR)
300                 data->ocd_connect_flags |= OBD_CONNECT_XATTR;
301
302 #ifdef SB_NOSEC
303         /* Setting this indicates we correctly support S_NOSEC (See kernel
304          * commit 9e1f1de02c2275d7172e18dc4e7c2065777611bf)
305          */
306         sb->s_flags |= SB_NOSEC;
307 #endif
308
309         if (sbi->ll_flags & LL_SBI_FLOCK)
310                 sbi->ll_fop = &ll_file_operations_flock;
311         else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
312                 sbi->ll_fop = &ll_file_operations;
313         else
314                 sbi->ll_fop = &ll_file_operations_noflock;
315
316         /* always ping even if the server suppresses pings */
317         if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
318                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
319
320         obd_connect_set_secctx(data);
321         if (ll_sbi_has_encrypt(sbi))
322                 obd_connect_set_enc(data);
323
324 #if defined(CONFIG_SECURITY)
325         data->ocd_connect_flags2 |= OBD_CONNECT2_SELINUX_POLICY;
326 #endif
327
328         data->ocd_brw_size = MD_MAX_BRW_SIZE;
329
330         err = obd_connect(NULL, &sbi->ll_md_exp, sbi->ll_md_obd,
331                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
332         if (err == -EBUSY) {
333                 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
334                                    "recovery, of which this client is not a "
335                                    "part. Please wait for recovery to complete,"
336                                    " abort, or time out.\n", md);
337                 GOTO(out, err);
338         } else if (err) {
339                 CERROR("cannot connect to %s: rc = %d\n", md, err);
340                 GOTO(out, err);
341         }
342
343         sbi->ll_md_exp->exp_connect_data = *data;
344
345         err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
346                            LUSTRE_SEQ_METADATA);
347         if (err) {
348                 CERROR("%s: Can't init metadata layer FID infrastructure, "
349                        "rc = %d\n", sbi->ll_md_exp->exp_obd->obd_name, err);
350                 GOTO(out_md, err);
351         }
352
353         /* For mount, we only need fs info from MDT0; in DNE this also
354          * ensures the client can be mounted as long as MDT0 is
355          * available */
356         err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
357                         ktime_get_seconds() - sbi->ll_statfs_max_age,
358                         OBD_STATFS_FOR_MDT0);
359         if (err)
360                 GOTO(out_md_fid, err);
361
362         /* This needs to be after statfs to ensure connect has finished.
363          * Note that "data" does NOT contain the valid connect reply.
364          * If connecting to a 1.8 server there will be no LMV device, so
365          * we can access the MDC export directly and exp_connect_flags will
366          * be non-zero, but if accessing an upgraded 2.1 server it will
367          * have the correct flags filled in.
368          * XXX: fill in the LMV exp_connect_flags from MDC(s). */
369         valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
370         if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
371             valid != CLIENT_CONNECT_MDT_REQD) {
372                 char *buf;
373
374                 OBD_ALLOC_WAIT(buf, PAGE_SIZE);
375                 obd_connect_flags2str(buf, PAGE_SIZE,
376                                       valid ^ CLIENT_CONNECT_MDT_REQD, 0, ",");
377                 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support "
378                                    "feature(s) needed for correct operation "
379                                    "of this client (%s). Please upgrade "
380                                    "server or downgrade client.\n",
381                                    sbi->ll_md_exp->exp_obd->obd_name, buf);
382                 OBD_FREE(buf, PAGE_SIZE);
383                 GOTO(out_md_fid, err = -EPROTO);
384         }
385
386         size = sizeof(*data);
387         err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
388                            KEY_CONN_DATA,  &size, data);
389         if (err) {
390                 CERROR("%s: Get connect data failed: rc = %d\n",
391                        sbi->ll_md_exp->exp_obd->obd_name, err);
392                 GOTO(out_md_fid, err);
393         }
394
395         LASSERT(osfs->os_bsize);
396         sb->s_blocksize = osfs->os_bsize;
397         sb->s_blocksize_bits = log2(osfs->os_bsize);
398         sb->s_magic = LL_SUPER_MAGIC;
399         sb->s_maxbytes = MAX_LFS_FILESIZE;
400         sbi->ll_namelen = osfs->os_namelen;
401         sbi->ll_mnt.mnt = current->fs->root.mnt;
402
403         if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
404             !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
405                 LCONSOLE_INFO("Disabling user_xattr feature because "
406                               "it is not supported on the server\n");
407                 sbi->ll_flags &= ~LL_SBI_USER_XATTR;
408         }
409
410         if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
411 #ifdef SB_POSIXACL
412                 sb->s_flags |= SB_POSIXACL;
413 #endif
414                 sbi->ll_flags |= LL_SBI_ACL;
415         } else {
416                 LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
417 #ifdef SB_POSIXACL
418                 sb->s_flags &= ~SB_POSIXACL;
419 #endif
420                 sbi->ll_flags &= ~LL_SBI_ACL;
421         }
422
423         if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
424                 sbi->ll_flags |= LL_SBI_64BIT_HASH;
425
426         if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK)
427                 sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;
428
429         if (obd_connect_has_secctx(data))
430                 sbi->ll_flags |= LL_SBI_FILE_SECCTX;
431
432         if (ll_sbi_has_encrypt(sbi) && !obd_connect_has_enc(data)) {
433                 if (ll_sbi_has_test_dummy_encryption(sbi))
434                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
435                                       sbi->ll_fsname,
436                                       sbi->ll_md_exp->exp_obd->obd_name);
437                 ll_sbi_set_encrypt(sbi, false);
438         }
439
440         if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
441                 if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
442                         LCONSOLE_INFO("%s: disabling xattr cache due to "
443                                       "unknown maximum xattr size.\n", dt);
444                 } else if (!sbi->ll_xattr_cache_set) {
445                         /* If xattr_cache was already set (whether to 0 or 1)
446                          * while processing the llog, it won't be enabled here. */
447                         sbi->ll_flags |= LL_SBI_XATTR_CACHE;
448                         sbi->ll_xattr_cache_enabled = 1;
449                 }
450         }
451
452         sbi->ll_dt_obd = class_name2obd(dt);
453         if (!sbi->ll_dt_obd) {
454                 CERROR("DT %s: not setup or attached\n", dt);
455                 GOTO(out_md_fid, err = -ENODEV);
456         }
457
458         /* pass client page size via ocd_grant_blkbits; the server should
459          * report back its backend blocksize for grant calculation purposes */
460         data->ocd_grant_blkbits = PAGE_SHIFT;
461
462         /* indicate OST features supported by this client */
463         data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
464                                   OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
465                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
466                                   OBD_CONNECT_SRVLOCK | OBD_CONNECT_TRUNCLOCK|
467                                   OBD_CONNECT_AT | OBD_CONNECT_OSS_CAPA |
468                                   OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
469                                   OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
470                                   OBD_CONNECT_EINPROGRESS |
471                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
472                                   OBD_CONNECT_LAYOUTLOCK |
473                                   OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK |
474                                   OBD_CONNECT_BULK_MBITS | OBD_CONNECT_SHORTIO |
475                                   OBD_CONNECT_FLAGS2 | OBD_CONNECT_GRANT_SHRINK;
476         data->ocd_connect_flags2 = OBD_CONNECT2_LOCKAHEAD |
477                                    OBD_CONNECT2_INC_XID;
478
479         if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_GRANT_PARAM))
480                 data->ocd_connect_flags |= OBD_CONNECT_GRANT_PARAM;
481
482         /* OBD_CONNECT_CKSUM should always be set, even if checksums are
483          * disabled by default, because it can still be enabled on the
484          * fly via /sys. As a consequence, we still need to come to an
485          * agreement on the supported algorithms at connect time
486          */
487         data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
488
489         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
490                 data->ocd_cksum_types = OBD_CKSUM_ADLER;
491         else
492                 data->ocd_cksum_types = obd_cksum_types_supported_client();
493
494 #ifdef HAVE_LRU_RESIZE_SUPPORT
495         data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
496 #endif
497         /* always ping even if the server suppresses pings */
498         if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
499                 data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
500
501         if (ll_sbi_has_encrypt(sbi))
502                 obd_connect_set_enc(data);
503
504         CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d "
505                "ocd_grant: %d\n", data->ocd_connect_flags,
506                data->ocd_version, data->ocd_grant);
507
508         sbi->ll_dt_obd->obd_upcall.onu_owner = &sbi->ll_lco;
509         sbi->ll_dt_obd->obd_upcall.onu_upcall = cl_ocd_update;
510
511         data->ocd_brw_size = DT_MAX_BRW_SIZE;
512
513         err = obd_connect(NULL, &sbi->ll_dt_exp, sbi->ll_dt_obd,
514                           &sbi->ll_sb_uuid, data, sbi->ll_cache);
515         if (err == -EBUSY) {
516                 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
517                                    "recovery, of which this client is not a "
518                                    "part.  Please wait for recovery to "
519                                    "complete, abort, or time out.\n", dt);
520                 GOTO(out_md, err);
521         } else if (err) {
522                 CERROR("%s: Cannot connect to %s: rc = %d\n",
523                        sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
524                 GOTO(out_md, err);
525         }
526
527         if (ll_sbi_has_encrypt(sbi) &&
528             !obd_connect_has_enc(&sbi->ll_dt_obd->u.lov.lov_ocd)) {
529                 if (ll_sbi_has_test_dummy_encryption(sbi))
530                         LCONSOLE_WARN("%s: server %s does not support encryption feature, encryption deactivated.\n",
531                                       sbi->ll_fsname, dt);
532                 ll_sbi_set_encrypt(sbi, false);
533         } else if (ll_sbi_has_test_dummy_encryption(sbi)) {
534                 LCONSOLE_WARN("Test dummy encryption mode enabled\n");
535         }
536
537         sbi->ll_dt_exp->exp_connect_data = *data;
538
539         /* Don't change value if it was specified in the config log */
540         if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages == -1) {
541                 sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
542                         max_t(unsigned long, SBI_DEFAULT_READ_AHEAD_WHOLE_MAX,
543                               (data->ocd_brw_size >> PAGE_SHIFT));
544                 if (sbi->ll_ra_info.ra_max_read_ahead_whole_pages >
545                     sbi->ll_ra_info.ra_max_pages_per_file)
546                         sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
547                                 sbi->ll_ra_info.ra_max_pages_per_file;
548         }
549
550         err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
551                            LUSTRE_SEQ_METADATA);
552         if (err) {
553                 CERROR("%s: Can't init data layer FID infrastructure, "
554                        "rc = %d\n", sbi->ll_dt_exp->exp_obd->obd_name, err);
555                 GOTO(out_dt, err);
556         }
557
558         mutex_lock(&sbi->ll_lco.lco_lock);
559         sbi->ll_lco.lco_flags = data->ocd_connect_flags;
560         sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
561         sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
562         mutex_unlock(&sbi->ll_lco.lco_lock);
563
564         fid_zero(&sbi->ll_root_fid);
565         err = md_get_root(sbi->ll_md_exp, get_mount_fileset(sb),
566                            &sbi->ll_root_fid);
567         if (err) {
568                 CERROR("cannot mds_connect: rc = %d\n", err);
569                 GOTO(out_lock_cn_cb, err);
570         }
571         if (!fid_is_sane(&sbi->ll_root_fid)) {
572                 CERROR("%s: Invalid root fid "DFID" during mount\n",
573                        sbi->ll_md_exp->exp_obd->obd_name,
574                        PFID(&sbi->ll_root_fid));
575                 GOTO(out_lock_cn_cb, err = -EINVAL);
576         }
577         CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
578
579         sb->s_op = &lustre_super_operations;
580         sb->s_xattr = ll_xattr_handlers;
581 #if THREAD_SIZE >= 8192 /*b=17630*/
582         sb->s_export_op = &lustre_export_operations;
583 #endif
584 #ifdef HAVE_LUSTRE_CRYPTO
585         llcrypt_set_ops(sb, &lustre_cryptops);
586 #endif
587
588         /* make root inode
589          * XXX: move this to after cbd setup? */
590         valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE;
591         if (sbi->ll_flags & LL_SBI_ACL)
592                 valid |= OBD_MD_FLACL;
593
594         OBD_ALLOC_PTR(op_data);
595         if (op_data == NULL)
596                 GOTO(out_lock_cn_cb, err = -ENOMEM);
597
598         op_data->op_fid1 = sbi->ll_root_fid;
599         op_data->op_mode = 0;
600         op_data->op_valid = valid;
601
602         err = md_getattr(sbi->ll_md_exp, op_data, &request);
603
604         OBD_FREE_PTR(op_data);
605         if (err) {
606                 CERROR("%s: md_getattr failed for root: rc = %d\n",
607                        sbi->ll_md_exp->exp_obd->obd_name, err);
608                 GOTO(out_lock_cn_cb, err);
609         }
610
611         err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
612                                sbi->ll_md_exp, &lmd);
613         if (err) {
614                 CERROR("failed to understand root inode md: rc = %d\n", err);
615                 ptlrpc_req_finished(request);
616                 GOTO(out_lock_cn_cb, err);
617         }
618
619         LASSERT(fid_is_sane(&sbi->ll_root_fid));
620         root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid,
621                                             sbi->ll_flags & LL_SBI_32BIT_API),
622                        &lmd);
623         md_free_lustre_md(sbi->ll_md_exp, &lmd);
624         ptlrpc_req_finished(request);
625
626         if (IS_ERR(root)) {
627                 lmd_clear_acl(&lmd);
628                 err = IS_ERR(root) ? PTR_ERR(root) : -EBADF;
629                 root = NULL;
630                 CERROR("%s: bad ll_iget() for root: rc = %d\n",
631                        sbi->ll_fsname, err);
632                 GOTO(out_root, err);
633         }
634
635         checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
636         if (sbi->ll_checksum_set) {
637                 err = obd_set_info_async(NULL, sbi->ll_dt_exp,
638                                          sizeof(KEY_CHECKSUM), KEY_CHECKSUM,
639                                          sizeof(checksum), &checksum, NULL);
640                 if (err) {
641                         CERROR("%s: Set checksum failed: rc = %d\n",
642                                sbi->ll_dt_exp->exp_obd->obd_name, err);
643                         GOTO(out_root, err);
644                 }
645         }
646         cl_sb_init(sb);
647
648         sb->s_root = d_make_root(root);
649         if (sb->s_root == NULL) {
650                 err = -ENOMEM;
651                 CERROR("%s: can't make root dentry: rc = %d\n",
652                        sbi->ll_fsname, err);
653                 GOTO(out_root, err);
654         }
655
656         sbi->ll_sdev_orig = sb->s_dev;
657
658         /* We set sb->s_dev equal on all lustre clients in order to support
659          * NFS export clustering.  NFSD requires that the FSID be the same
660          * on all clients. */
661         /* s_dev is also used in lt_compare() to compare two fs, but that is
662          * only a node-local comparison. */
663         uuid = obd_get_uuid(sbi->ll_md_exp);
664         if (uuid != NULL)
665                 sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
666
667         if (data != NULL)
668                 OBD_FREE_PTR(data);
669         if (osfs != NULL)
670                 OBD_FREE_PTR(osfs);
671
672         if (sbi->ll_dt_obd) {
673                 err = sysfs_create_link(&sbi->ll_kset.kobj,
674                                         &sbi->ll_dt_obd->obd_kset.kobj,
675                                         sbi->ll_dt_obd->obd_type->typ_name);
676                 if (err < 0) {
677                         CERROR("%s: could not register %s in llite: rc = %d\n",
678                                dt, sbi->ll_fsname, err);
679                         err = 0;
680                 }
681         }
682
683         if (sbi->ll_md_obd) {
684                 err = sysfs_create_link(&sbi->ll_kset.kobj,
685                                         &sbi->ll_md_obd->obd_kset.kobj,
686                                         sbi->ll_md_obd->obd_type->typ_name);
687                 if (err < 0) {
688                         CERROR("%s: could not register %s in llite: rc = %d\n",
689                                md, sbi->ll_fsname, err);
690                         err = 0;
691                 }
692         }
693
694         RETURN(err);
695 out_root:
696         if (root)
697                 iput(root);
698 out_lock_cn_cb:
699         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
700 out_dt:
701         obd_disconnect(sbi->ll_dt_exp);
702         sbi->ll_dt_exp = NULL;
703         sbi->ll_dt_obd = NULL;
704 out_md_fid:
705         obd_fid_fini(sbi->ll_md_exp->exp_obd);
706 out_md:
707         obd_disconnect(sbi->ll_md_exp);
708         sbi->ll_md_exp = NULL;
709         sbi->ll_md_obd = NULL;
710 out:
711         if (data != NULL)
712                 OBD_FREE_PTR(data);
713         if (osfs != NULL)
714                 OBD_FREE_PTR(osfs);
715         return err;
716 }
717
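/**
 * Get the maximum metadata EA sizes from the servers: KEY_MAX_EASIZE is
 * queried from the data export (max LOV EA size, printed for debugging)
 * and then from the metadata export (max LMV EA size), which is the value
 * \a lmmsize holds on return.
 */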
718 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
719 {
720         int size, rc;
721
722         size = sizeof(*lmmsize);
723         rc = obd_get_info(NULL, sbi->ll_dt_exp, sizeof(KEY_MAX_EASIZE),
724                           KEY_MAX_EASIZE, &size, lmmsize);
725         if (rc != 0) {
726                 CERROR("%s: cannot get max LOV EA size: rc = %d\n",
727                        sbi->ll_dt_exp->exp_obd->obd_name, rc);
728                 RETURN(rc);
729         }
730
731         CDEBUG(D_INFO, "max LOV ea size: %d\n", *lmmsize);
732
733         size = sizeof(int);
734         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
735                           KEY_MAX_EASIZE, &size, lmmsize);
736         if (rc)
737                 CERROR("Get max mdsize error rc %d\n", rc);
738
739         CDEBUG(D_INFO, "max LMV ea size: %d\n", *lmmsize);
740
741         RETURN(rc);
742 }
743
744 /**
745  * Get the value of the default_easize parameter.
746  *
747  * \see client_obd::cl_default_mds_easize
748  *
749  * \param[in] sbi       superblock info for this filesystem
750  * \param[out] lmmsize  pointer to storage location for value
751  *
752  * \retval 0            on success
753  * \retval negative     negated errno on failure
754  */
755 int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
756 {
757         int size, rc;
758
759         size = sizeof(int);
760         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
761                          KEY_DEFAULT_EASIZE, &size, lmmsize);
762         if (rc)
763                 CERROR("Get default mdsize error rc %d\n", rc);
764
765         RETURN(rc);
766 }
767
768 /**
769  * Set the default_easize parameter to the given value.
770  *
771  * \see client_obd::cl_default_mds_easize
772  *
773  * \param[in] sbi       superblock info for this filesystem
774  * \param[in] lmmsize   the size to set
775  *
776  * \retval 0            on success
777  * \retval negative     negated errno on failure
778  */
779 int ll_set_default_mdsize(struct ll_sb_info *sbi, int lmmsize)
780 {
781         int rc;
782
783         if (lmmsize < sizeof(struct lov_mds_md) ||
784             lmmsize > OBD_MAX_DEFAULT_EA_SIZE)
785                 return -EINVAL;
786
787         rc = obd_set_info_async(NULL, sbi->ll_md_exp,
788                                 sizeof(KEY_DEFAULT_EASIZE), KEY_DEFAULT_EASIZE,
789                                 sizeof(int), &lmmsize, NULL);
790
791         RETURN(rc);
792 }
793
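/* Undo client_common_fill_super(): finish cl_object state for the sb,
 * shut down the FID clients and disconnect the DT and MD exports. */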
794 static void client_common_put_super(struct super_block *sb)
795 {
796         struct ll_sb_info *sbi = ll_s2sbi(sb);
797         ENTRY;
798
799         cl_sb_fini(sb);
800
801         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
802         obd_disconnect(sbi->ll_dt_exp);
803         sbi->ll_dt_exp = NULL;
804
805         ll_debugfs_unregister_super(sb);
806
807         obd_fid_fini(sbi->ll_md_exp->exp_obd);
808         obd_disconnect(sbi->ll_md_exp);
809         sbi->ll_md_exp = NULL;
810
811         EXIT;
812 }
813
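/* Called while the superblock is being killed: restore the original s_dev
 * (changed for clustered NFS) and wait for running statahead threads to
 * stop before the actual teardown proceeds. */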
814 void ll_kill_super(struct super_block *sb)
815 {
816         struct ll_sb_info *sbi;
817         ENTRY;
818
819         /* sb not initialized? */
820         if (!(sb->s_flags & SB_ACTIVE))
821                 return;
822
823         sbi = ll_s2sbi(sb);
824         /* We need to restore the original s_dev (changed for clustered NFS)
825          * before put_super, because newer kernels cache s_dev, and changing
826          * sb->s_dev in put_super does not affect the real device removal */
827         if (sbi) {
828                 sb->s_dev = sbi->ll_sdev_orig;
829
830                 /* wait for running statahead threads to quit */
831                 while (atomic_read(&sbi->ll_sa_running) > 0)
832                         schedule_timeout_uninterruptible(
833                                 cfs_time_seconds(1) >> 3);
834         }
835
836         EXIT;
837 }
838
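/* Return \a fl if the mount option string \a data starts with \a opt,
 * otherwise 0. Note this is a prefix match: for example,
 * ll_set_opt("flock", "flock,user_xattr", LL_SBI_FLOCK) returns
 * LL_SBI_FLOCK. */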
839 static inline int ll_set_opt(const char *opt, char *data, int fl)
840 {
841         if (strncmp(opt, data, strlen(opt)) != 0)
842                 return 0;
843         else
844                 return fl;
845 }
846
847 /* non-client-specific mount options are parsed in lmd_parse */
848 static int ll_options(char *options, struct ll_sb_info *sbi)
849 {
850         int tmp;
851         char *s1 = options, *s2;
852         int *flags = &sbi->ll_flags;
853         ENTRY;
854
855         if (!options)
856                 RETURN(0);
857
858         CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
859
860         while (*s1) {
861                 CDEBUG(D_SUPER, "next opt=%s\n", s1);
862                 tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
863                 if (tmp) {
864                         *flags |= tmp;
865                         goto next;
866                 }
867                 tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
868                 if (tmp) {
869                         *flags = (*flags & ~LL_SBI_LOCALFLOCK) | tmp;
870                         goto next;
871                 }
872                 tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
873                 if (tmp) {
874                         *flags = (*flags & ~LL_SBI_FLOCK) | tmp;
875                         goto next;
876                 }
877                 tmp = ll_set_opt("noflock", s1, LL_SBI_FLOCK|LL_SBI_LOCALFLOCK);
878                 if (tmp) {
879                         *flags &= ~tmp;
880                         goto next;
881                 }
882                 tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
883                 if (tmp) {
884                         *flags |= tmp;
885                         goto next;
886                 }
887                 tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
888                 if (tmp) {
889                         *flags &= ~tmp;
890                         goto next;
891                 }
892                 tmp = ll_set_opt("context", s1, 1);
893                 if (tmp)
894                         goto next;
895                 tmp = ll_set_opt("fscontext", s1, 1);
896                 if (tmp)
897                         goto next;
898                 tmp = ll_set_opt("defcontext", s1, 1);
899                 if (tmp)
900                         goto next;
901                 tmp = ll_set_opt("rootcontext", s1, 1);
902                 if (tmp)
903                         goto next;
904                 tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
905                 if (tmp) {
906                         *flags |= tmp;
907                         goto next;
908                 }
909                 tmp = ll_set_opt("nouser_fid2path", s1, LL_SBI_USER_FID2PATH);
910                 if (tmp) {
911                         *flags &= ~tmp;
912                         goto next;
913                 }
914
915                 tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
916                 if (tmp) {
917                         *flags |= tmp;
918                         sbi->ll_checksum_set = 1;
919                         goto next;
920                 }
921                 tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
922                 if (tmp) {
923                         *flags &= ~tmp;
924                         sbi->ll_checksum_set = 1;
925                         goto next;
926                 }
927                 tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
928                 if (tmp) {
929                         *flags |= tmp;
930                         goto next;
931                 }
932                 tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
933                 if (tmp) {
934                         *flags &= ~tmp;
935                         goto next;
936                 }
937                 tmp = ll_set_opt("lazystatfs", s1, LL_SBI_LAZYSTATFS);
938                 if (tmp) {
939                         *flags |= tmp;
940                         goto next;
941                 }
942                 tmp = ll_set_opt("nolazystatfs", s1, LL_SBI_LAZYSTATFS);
943                 if (tmp) {
944                         *flags &= ~tmp;
945                         goto next;
946                 }
947                 tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
948                 if (tmp) {
949                         *flags |= tmp;
950                         goto next;
951                 }
952                 tmp = ll_set_opt("verbose", s1, LL_SBI_VERBOSE);
953                 if (tmp) {
954                         *flags |= tmp;
955                         goto next;
956                 }
957                 tmp = ll_set_opt("noverbose", s1, LL_SBI_VERBOSE);
958                 if (tmp) {
959                         *flags &= ~tmp;
960                         goto next;
961                 }
962                 tmp = ll_set_opt("always_ping", s1, LL_SBI_ALWAYS_PING);
963                 if (tmp) {
964                         *flags |= tmp;
965                         goto next;
966                 }
967                 tmp = ll_set_opt("test_dummy_encryption", s1,
968                                  LL_SBI_TEST_DUMMY_ENCRYPTION);
969                 if (tmp) {
970 #ifdef HAVE_LUSTRE_CRYPTO
971                         *flags |= tmp;
972 #else
973                         LCONSOLE_WARN("Test dummy encryption mount option ignored: encryption not supported\n");
974 #endif
975                         goto next;
976                 }
977                 tmp = ll_set_opt("noencrypt", s1, LL_SBI_ENCRYPT);
978                 if (tmp) {
979 #ifdef HAVE_LUSTRE_CRYPTO
980                         *flags &= ~tmp;
981 #else
982                         LCONSOLE_WARN("noencrypt mount option ignored: encryption not supported\n");
983 #endif
984                         goto next;
985                 }
986                 LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
987                                    s1);
988                 RETURN(-EINVAL);
989
990 next:
991                 /* Find next opt */
992                 s2 = strchr(s1, ',');
993                 if (s2 == NULL)
994                         break;
995                 s1 = s2 + 1;
996         }
997         RETURN(0);
998 }
999
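/* Initialize the Lustre-private part of a newly allocated inode. Directory
 * inodes get statahead state; other inodes get size/setattr serialization,
 * truncate and range locks, glimpse, heat and PCC state. lli_fid is left
 * untouched, it has already been initialized. */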
1000 void ll_lli_init(struct ll_inode_info *lli)
1001 {
1002         lli->lli_inode_magic = LLI_INODE_MAGIC;
1003         lli->lli_flags = 0;
1004         spin_lock_init(&lli->lli_lock);
1005         lli->lli_posix_acl = NULL;
1006         /* Do not set lli_fid, it has been initialized already. */
1007         fid_zero(&lli->lli_pfid);
1008         lli->lli_mds_read_och = NULL;
1009         lli->lli_mds_write_och = NULL;
1010         lli->lli_mds_exec_och = NULL;
1011         lli->lli_open_fd_read_count = 0;
1012         lli->lli_open_fd_write_count = 0;
1013         lli->lli_open_fd_exec_count = 0;
1014         mutex_init(&lli->lli_och_mutex);
1015         spin_lock_init(&lli->lli_agl_lock);
1016         spin_lock_init(&lli->lli_layout_lock);
1017         ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
1018         lli->lli_clob = NULL;
1019
1020         init_rwsem(&lli->lli_xattrs_list_rwsem);
1021         mutex_init(&lli->lli_xattrs_enq_lock);
1022
1023         LASSERT(lli->lli_vfs_inode.i_mode != 0);
1024         if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
1025                 lli->lli_opendir_key = NULL;
1026                 lli->lli_sai = NULL;
1027                 spin_lock_init(&lli->lli_sa_lock);
1028                 lli->lli_opendir_pid = 0;
1029                 lli->lli_sa_enabled = 0;
1030                 init_rwsem(&lli->lli_lsm_sem);
1031         } else {
1032                 mutex_init(&lli->lli_size_mutex);
1033                 mutex_init(&lli->lli_setattr_mutex);
1034                 lli->lli_symlink_name = NULL;
1035                 ll_trunc_sem_init(&lli->lli_trunc_sem);
1036                 range_lock_tree_init(&lli->lli_write_tree);
1037                 init_rwsem(&lli->lli_glimpse_sem);
1038                 lli->lli_glimpse_time = ktime_set(0, 0);
1039                 INIT_LIST_HEAD(&lli->lli_agl_list);
1040                 lli->lli_agl_index = 0;
1041                 lli->lli_async_rc = 0;
1042                 spin_lock_init(&lli->lli_heat_lock);
1043                 obd_heat_clear(lli->lli_heat_instances, OBD_HEAT_COUNT);
1044                 lli->lli_heat_flags = 0;
1045                 mutex_init(&lli->lli_pcc_lock);
1046                 lli->lli_pcc_state = PCC_STATE_FL_NONE;
1047                 lli->lli_pcc_inode = NULL;
1048                 lli->lli_pcc_dsflags = PCC_DATASET_INVALID;
1049                 lli->lli_pcc_generation = 0;
1050                 mutex_init(&lli->lli_group_mutex);
1051                 lli->lli_group_users = 0;
1052                 lli->lli_group_gid = 0;
1053         }
1054         mutex_init(&lli->lli_layout_mutex);
1055         memset(lli->lli_jobid, 0, sizeof(lli->lli_jobid));
1056 }
1057
1058 #define MAX_STRING_SIZE 128
1059
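/* Fallback for kernels that do not provide super_setup_bdi_name(): register
 * a per-mount backing_dev_info by hand and remember that we did so via
 * LSI_BDI_INITIALIZED so ll_put_super() can destroy it. */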
1060 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1061
1062 #define LSI_BDI_INITIALIZED     0x00400000
1063
1064 #ifndef HAVE_BDI_CAP_MAP_COPY
1065 # define BDI_CAP_MAP_COPY       0
1066 #endif
1067
1068 static int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
1069 {
1070         struct  lustre_sb_info *lsi = s2lsi(sb);
1071         char buf[MAX_STRING_SIZE];
1072         va_list args;
1073         int err;
1074
1075         err = bdi_init(&lsi->lsi_bdi);
1076         if (err)
1077                 return err;
1078
1079         lsi->lsi_flags |= LSI_BDI_INITIALIZED;
1080         lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY;
1081         lsi->lsi_bdi.name = "lustre";
1082         va_start(args, fmt);
1083         vsnprintf(buf, MAX_STRING_SIZE, fmt, args);
1084         va_end(args);
1085         err = bdi_register(&lsi->lsi_bdi, NULL, "%s", buf);
1087         if (!err)
1088                 sb->s_bdi = &lsi->lsi_bdi;
1089
1090         return err;
1091 }
1092 #endif /* !HAVE_SUPER_SETUP_BDI_NAME */
1093
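/**
 * Fill in the client superblock: parse mount options, generate the sb UUID
 * and fsname, set up the BDI and debugfs entries, process the client
 * configuration llog from the MGS, then call client_common_fill_super()
 * with the metadata and data device names taken from the profile.
 */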
1094 int ll_fill_super(struct super_block *sb)
1095 {
1096         struct  lustre_profile *lprof = NULL;
1097         struct  lustre_sb_info *lsi = s2lsi(sb);
1098         struct  ll_sb_info *sbi = NULL;
1099         char    *dt = NULL, *md = NULL;
1100         char    *profilenm = get_profile_name(sb);
1101         struct config_llog_instance *cfg;
1102         /* %p for void* in printf needs 16+2 characters: 0xffffffffffffffff */
1103         const int instlen = LUSTRE_MAXINSTANCE + 2;
1104         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1105         char name[MAX_STRING_SIZE];
1106         int md_len = 0;
1107         int dt_len = 0;
1108         uuid_t uuid;
1109         char *ptr;
1110         int len;
1111         int err;
1112
1113         ENTRY;
1114         /* for ASLR, to map between cfg_instance and hashed ptr */
1115         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1116                profilenm, cfg_instance, sb);
1117
1118         OBD_ALLOC_PTR(cfg);
1119         if (cfg == NULL)
1120                 GOTO(out_free_cfg, err = -ENOMEM);
1121
1122         /* client additional sb info */
1123         lsi->lsi_llsbi = sbi = ll_init_sbi();
1124         if (IS_ERR(sbi))
1125                 GOTO(out_free_cfg, err = PTR_ERR(sbi));
1126
1127         err = ll_options(lsi->lsi_lmd->lmd_opts, sbi);
1128         if (err)
1129                 GOTO(out_free_cfg, err);
1130
1131         /* kernel >= 2.6.38 stores dentry operations in sb->s_d_op. */
1132         sb->s_d_op = &ll_d_ops;
1133
1134         /* UUID handling */
1135         generate_random_uuid(uuid.b);
1136         snprintf(sbi->ll_sb_uuid.uuid, sizeof(sbi->ll_sb_uuid), "%pU", uuid.b);
1137
1138         CDEBUG(D_CONFIG, "llite sb uuid: %s\n", sbi->ll_sb_uuid.uuid);
1139
1140         /* Get fsname */
1141         len = strlen(profilenm);
1142         ptr = strrchr(profilenm, '-');
1143         if (ptr && (strcmp(ptr, "-client") == 0))
1144                 len -= 7;
1145
1146         if (len > LUSTRE_MAXFSNAME) {
1147                 if (unlikely(len >= MAX_STRING_SIZE))
1148                         len = MAX_STRING_SIZE - 1;
1149                 strncpy(name, profilenm, len);
1150                 name[len] = '\0';
1151                 err = -ENAMETOOLONG;
1152                 CERROR("%s: fsname longer than %u characters: rc = %d\n",
1153                        name, LUSTRE_MAXFSNAME, err);
1154                 GOTO(out_free_cfg, err);
1155         }
1156         strncpy(sbi->ll_fsname, profilenm, len);
1157         sbi->ll_fsname[len] = '\0';
1158
1159         /* Mount info */
1160         snprintf(name, sizeof(name), "%.*s-%016lx", len,
1161                  profilenm, cfg_instance);
1162
1163         err = super_setup_bdi_name(sb, "%s", name);
1164         if (err)
1165                 GOTO(out_free_cfg, err);
1166
1167         /* Call ll_debugfs_register_super() before lustre_process_log()
1168          * so that "llite.*.*" params can be processed correctly.
1169          */
1170         err = ll_debugfs_register_super(sb, name);
1171         if (err < 0) {
1172                 CERROR("%s: could not register mountpoint in llite: rc = %d\n",
1173                        sbi->ll_fsname, err);
1174                 err = 0;
1175         }
1176
1177         /* The cfg_instance is a value unique to this super, in case some
1178          * joker tries to mount the same fs at two mount points.
1179          */
1180         cfg->cfg_instance = cfg_instance;
1181         cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
1182         cfg->cfg_callback = class_config_llog_handler;
1183         cfg->cfg_sub_clds = CONFIG_SUB_CLIENT;
1184         /* set up client obds */
1185         err = lustre_process_log(sb, profilenm, cfg);
1186         if (err < 0)
1187                 GOTO(out_debugfs, err);
1188
1189         /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
1190         lprof = class_get_profile(profilenm);
1191         if (lprof == NULL) {
1192                 LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be"
1193                                    " read from the MGS.  Does that filesystem "
1194                                    "exist?\n", profilenm);
1195                 GOTO(out_debugfs, err = -EINVAL);
1196         }
1197         CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
1198                lprof->lp_md, lprof->lp_dt);
1199
1200         dt_len = strlen(lprof->lp_dt) + instlen + 2;
1201         OBD_ALLOC(dt, dt_len);
1202         if (!dt)
1203                 GOTO(out_profile, err = -ENOMEM);
1204         snprintf(dt, dt_len - 1, "%s-%016lx", lprof->lp_dt, cfg_instance);
1205
1206         md_len = strlen(lprof->lp_md) + instlen + 2;
1207         OBD_ALLOC(md, md_len);
1208         if (!md)
1209                 GOTO(out_free_dt, err = -ENOMEM);
1210         snprintf(md, md_len - 1, "%s-%016lx", lprof->lp_md, cfg_instance);
1211
1212         /* connections, registrations, sb setup */
1213         err = client_common_fill_super(sb, md, dt);
1214         if (err < 0)
1215                 GOTO(out_free_md, err);
1216
1217         sbi->ll_client_common_fill_super_succeeded = 1;
1218
1219 out_free_md:
1220         if (md)
1221                 OBD_FREE(md, md_len);
1222 out_free_dt:
1223         if (dt)
1224                 OBD_FREE(dt, dt_len);
1225 out_profile:
1226         if (lprof)
1227                 class_put_profile(lprof);
1228 out_debugfs:
1229         if (err < 0)
1230                 ll_debugfs_unregister_super(sb);
1231 out_free_cfg:
1232         if (cfg)
1233                 OBD_FREE_PTR(cfg);
1234
1235         if (err)
1236                 ll_put_super(sb);
1237         else if (sbi->ll_flags & LL_SBI_VERBOSE)
1238                 LCONSOLE_WARN("Mounted %s\n", profilenm);
1239         RETURN(err);
1240 } /* ll_fill_super */
1241
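/* Tear down a client mount: end the config and params llog watches, wait
 * for unstable pages unless the unmount is forced, disconnect and clean up
 * all obd devices in this superblock's group, then free the sbi. */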
1242 void ll_put_super(struct super_block *sb)
1243 {
1244         struct config_llog_instance cfg, params_cfg;
1245         struct obd_device *obd;
1246         struct lustre_sb_info *lsi = s2lsi(sb);
1247         struct ll_sb_info *sbi = ll_s2sbi(sb);
1248         char *profilenm = get_profile_name(sb);
1249         unsigned long cfg_instance = ll_get_cfg_instance(sb);
1250         long ccc_count;
1251         int next, force = 1, rc = 0;
1252         ENTRY;
1253
1254         if (IS_ERR(sbi))
1255                 GOTO(out_no_sbi, 0);
1256
1257         /* Should replace instance_id with something better for ASLR */
1258         CDEBUG(D_VFSTRACE, "VFS Op: cfg_instance %s-%016lx (sb %p)\n",
1259                profilenm, cfg_instance, sb);
1260
1261         cfg.cfg_instance = cfg_instance;
1262         lustre_end_log(sb, profilenm, &cfg);
1263
1264         params_cfg.cfg_instance = cfg_instance;
1265         lustre_end_log(sb, PARAMS_FILENAME, &params_cfg);
1266
1267         if (sbi->ll_md_exp) {
1268                 obd = class_exp2obd(sbi->ll_md_exp);
1269                 if (obd)
1270                         force = obd->obd_force;
1271         }
1272
1273         /* Wait for unstable pages to be committed to stable storage */
1274         if (force == 0) {
1275                 rc = l_wait_event_abortable(
1276                         sbi->ll_cache->ccc_unstable_waitq,
1277                         atomic_long_read(&sbi->ll_cache->ccc_unstable_nr) == 0);
1278         }
1279
1280         ccc_count = atomic_long_read(&sbi->ll_cache->ccc_unstable_nr);
1281         if (force == 0 && rc != -ERESTARTSYS)
1282                 LASSERTF(ccc_count == 0, "count: %li\n", ccc_count);
1283
1284         /* We need to set force before the lov_disconnect in
1285          * lustre_common_put_super, since l_d cleans up osc's as well.
1286          */
1287         if (force) {
1288                 next = 0;
1289                 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
1290                                                      &next)) != NULL) {
1291                         obd->obd_force = force;
1292                 }
1293         }
1294
1295         if (sbi->ll_client_common_fill_super_succeeded) {
1296                 /* Only if client_common_fill_super succeeded */
1297                 client_common_put_super(sb);
1298         }
1299
1300         next = 0;
1301         while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)))
1302                 class_manual_cleanup(obd);
1303
1304         if (sbi->ll_flags & LL_SBI_VERBOSE)
1305                 LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");
1306
1307         if (profilenm)
1308                 class_del_profile(profilenm);
1309
1310 #ifndef HAVE_SUPER_SETUP_BDI_NAME
1311         if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
1312                 bdi_destroy(&lsi->lsi_bdi);
1313                 lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
1314         }
1315 #endif
1316
1317         ll_free_sbi(sb);
1318         lsi->lsi_llsbi = NULL;
1319 out_no_sbi:
1320         lustre_common_put_super(sb);
1321
1322         cl_env_cache_purge(~0);
1323
1324         module_put(THIS_MODULE);
1325
1326         EXIT;
1327 } /* client_put_super */
1328
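/* Return an igrab()'ed inode attached to the lock's resource, or NULL if
 * there is none or the cached lr_lvb_inode looks bogus. The caller must
 * iput() the result. */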
1329 struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
1330 {
1331         struct inode *inode = NULL;
1332
1333         /* NOTE: we depend on atomic igrab() -bzzz */
1334         lock_res_and_lock(lock);
1335         if (lock->l_resource->lr_lvb_inode) {
1336                 struct ll_inode_info * lli;
1337                 lli = ll_i2info(lock->l_resource->lr_lvb_inode);
1338                 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1339                         inode = igrab(lock->l_resource->lr_lvb_inode);
1340                 } else {
1341                         inode = lock->l_resource->lr_lvb_inode;
1342                         LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ?  D_INFO :
1343                                          D_WARNING, lock, "lr_lvb_inode %p is "
1344                                          "bogus: magic %08x",
1345                                          lock->l_resource->lr_lvb_inode,
1346                                          lli->lli_inode_magic);
1347                         inode = NULL;
1348                 }
1349         }
1350         unlock_res_and_lock(lock);
1351         return inode;
1352 }
1353
1354 void ll_dir_clear_lsm_md(struct inode *inode)
1355 {
1356         struct ll_inode_info *lli = ll_i2info(inode);
1357
1358         LASSERT(S_ISDIR(inode->i_mode));
1359
1360         if (lli->lli_lsm_md) {
1361                 lmv_free_memmd(lli->lli_lsm_md);
1362                 lli->lli_lsm_md = NULL;
1363         }
1364
1365         if (lli->lli_default_lsm_md) {
1366                 lmv_free_memmd(lli->lli_default_lsm_md);
1367                 lli->lli_default_lsm_md = NULL;
1368         }
1369 }
1370
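/* Get or create an inode for a slave stripe of a striped directory; only
 * the fields needed to access the stripe (mode, FID, master FID and the
 * directory operations) are initialized from \a md. */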
1371 static struct inode *ll_iget_anon_dir(struct super_block *sb,
1372                                       const struct lu_fid *fid,
1373                                       struct lustre_md *md)
1374 {
1375         struct ll_sb_info       *sbi = ll_s2sbi(sb);
1376         struct mdt_body         *body = md->body;
1377         struct inode            *inode;
1378         ino_t                   ino;
1379         ENTRY;
1380
1381         ino = cl_fid_build_ino(fid, sbi->ll_flags & LL_SBI_32BIT_API);
1382         inode = iget_locked(sb, ino);
1383         if (inode == NULL) {
1384                 CERROR("%s: failed to get simple inode "DFID": rc = -ENOENT\n",
1385                        sbi->ll_fsname, PFID(fid));
1386                 RETURN(ERR_PTR(-ENOENT));
1387         }
1388
1389         if (inode->i_state & I_NEW) {
1390                 struct ll_inode_info *lli = ll_i2info(inode);
1391                 struct lmv_stripe_md *lsm = md->lmv;
1392
1393                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
1394                                 (body->mbo_mode & S_IFMT);
1395                 LASSERTF(S_ISDIR(inode->i_mode), "Not slave inode "DFID"\n",
1396                          PFID(fid));
1397
1398                 inode->i_mtime.tv_sec = 0;
1399                 inode->i_atime.tv_sec = 0;
1400                 inode->i_ctime.tv_sec = 0;
1401                 inode->i_rdev = 0;
1402
1403 #ifdef HAVE_BACKING_DEV_INFO
1404                 /* initializing backing dev info. */
1405                 inode->i_mapping->backing_dev_info =
1406                                                 &s2lsi(inode->i_sb)->lsi_bdi;
1407 #endif
1408                 inode->i_op = &ll_dir_inode_operations;
1409                 inode->i_fop = &ll_dir_operations;
1410                 lli->lli_fid = *fid;
1411                 ll_lli_init(lli);
1412
1413                 LASSERT(lsm != NULL);
1414                 /* master object FID */
1415                 lli->lli_pfid = body->mbo_fid1;
1416                 CDEBUG(D_INODE, "lli %p slave "DFID" master "DFID"\n",
1417                        lli, PFID(fid), PFID(&lli->lli_pfid));
1418                 unlock_new_inode(inode);
1419         }
1420
1421         RETURN(inode);
1422 }
1423
1424 static int ll_init_lsm_md(struct inode *inode, struct lustre_md *md)
1425 {
1426         struct lu_fid *fid;
1427         struct lmv_stripe_md *lsm = md->lmv;
1428         struct ll_inode_info *lli = ll_i2info(inode);
1429         int i;
1430
1431         LASSERT(lsm != NULL);
1432
1433         CDEBUG(D_INODE, "%s: "DFID" set dir layout:\n",
1434                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1435         lsm_md_dump(D_INODE, lsm);
1436
1437         if (!lmv_dir_striped(lsm))
1438                 goto out;
1439
1440         /* XXX sigh, this lsm_root initialization should be in the
1441          * LMV layer, but it needs ll_iget, so we put it here
1442          * for now. */
1443         for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
1444                 fid = &lsm->lsm_md_oinfo[i].lmo_fid;
1445                 LASSERT(lsm->lsm_md_oinfo[i].lmo_root == NULL);
1446
1447                 if (!fid_is_sane(fid))
1448                         continue;
1449
1450                 /* Unfortunately ll_iget will call ll_update_inode,
1451                  * where the initialization of a slave inode is slightly
1452                  * different, so it resets lsm_md to NULL to avoid
1453                  * initializing the lsm for the slave inode. */
1454                 lsm->lsm_md_oinfo[i].lmo_root =
1455                                 ll_iget_anon_dir(inode->i_sb, fid, md);
1456                 if (IS_ERR(lsm->lsm_md_oinfo[i].lmo_root)) {
1457                         int rc = PTR_ERR(lsm->lsm_md_oinfo[i].lmo_root);
1458
1459                         lsm->lsm_md_oinfo[i].lmo_root = NULL;
1460                         while (i-- > 0) {
1461                                 iput(lsm->lsm_md_oinfo[i].lmo_root);
1462                                 lsm->lsm_md_oinfo[i].lmo_root = NULL;
1463                         }
1464                         return rc;
1465                 }
1466         }
1467 out:
1468         lli->lli_lsm_md = lsm;
1469
1470         return 0;
1471 }
1472
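/* Refresh the cached default LMV of a directory from the MD reply: clear it
 * if the reply has none, replace it if it changed, or initialize it.  On
 * replace/init, ownership of md->default_lmv moves to the inode and the
 * field is set to NULL so it is not freed with the rest of lustre_md.
 */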
1473 static void ll_update_default_lsm_md(struct inode *inode, struct lustre_md *md)
1474 {
1475         struct ll_inode_info *lli = ll_i2info(inode);
1476
1477         if (!md->default_lmv) {
1478                 /* clear default lsm */
1479                 if (lli->lli_default_lsm_md) {
1480                         down_write(&lli->lli_lsm_sem);
1481                         if (lli->lli_default_lsm_md) {
1482                                 lmv_free_memmd(lli->lli_default_lsm_md);
1483                                 lli->lli_default_lsm_md = NULL;
1484                         }
1485                         up_write(&lli->lli_lsm_sem);
1486                 }
1487         } else if (lli->lli_default_lsm_md) {
1488                 /* update default lsm if it changes */
1489                 down_read(&lli->lli_lsm_sem);
1490                 if (lli->lli_default_lsm_md &&
1491                     !lsm_md_eq(lli->lli_default_lsm_md, md->default_lmv)) {
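                        /* rwsems cannot be upgraded, so drop the read lock
                         * before taking the write lock; lli_default_lsm_md is
                         * checked again under the write lock before freeing.
                         */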
1492                         up_read(&lli->lli_lsm_sem);
1493                         down_write(&lli->lli_lsm_sem);
1494                         if (lli->lli_default_lsm_md)
1495                                 lmv_free_memmd(lli->lli_default_lsm_md);
1496                         lli->lli_default_lsm_md = md->default_lmv;
1497                         lsm_md_dump(D_INODE, md->default_lmv);
1498                         md->default_lmv = NULL;
1499                         up_write(&lli->lli_lsm_sem);
1500                 } else {
1501                         up_read(&lli->lli_lsm_sem);
1502                 }
1503         } else {
1504                 /* init default lsm */
1505                 down_write(&lli->lli_lsm_sem);
1506                 lli->lli_default_lsm_md = md->default_lmv;
1507                 lsm_md_dump(D_INODE, md->default_lmv);
1508                 md->default_lmv = NULL;
1509                 up_write(&lli->lli_lsm_sem);
1510         }
1511 }
1512
1513 static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
1514 {
1515         struct ll_inode_info *lli = ll_i2info(inode);
1516         struct lmv_stripe_md *lsm = md->lmv;
1517         struct cl_attr  *attr;
1518         int rc = 0;
1519
1520         ENTRY;
1521
1522         LASSERT(S_ISDIR(inode->i_mode));
1523         CDEBUG(D_INODE, "update lsm %p of "DFID"\n", lli->lli_lsm_md,
1524                PFID(ll_inode2fid(inode)));
1525
1526         /* update default LMV */
1527         if (md->default_lmv)
1528                 ll_update_default_lsm_md(inode, md);
1529
1530         /*
1531          * No stripe information in the request: the lustre_md from the
1532          * request does not include the stripe EA, see ll_md_setattr().
1533          */
1534         if (!lsm)
1535                 RETURN(0);
1536
1537         /*
1538          * Normally the dir layout doesn't change, so only take the read lock
1539          * to check it, to avoid blocking other MD operations.
1540          */
1541         down_read(&lli->lli_lsm_sem);
1542
1543         /* a concurrent lookup already initialized the lsm, and it is unchanged */
1544         if (lli->lli_lsm_md && lsm_md_eq(lli->lli_lsm_md, lsm))
1545                 GOTO(unlock, rc = 0);
1546
1547         /* If the dir layout doesn't match, check whether the layout version
1548          * has increased, which means the layout changed; this happens in dir
1549          * split/merge and lfsck.
1550          *
1551          * A foreign LMV should not change.
1552          */
1553         if (lli->lli_lsm_md && lmv_dir_striped(lli->lli_lsm_md) &&
1554             lsm->lsm_md_layout_version <=
1555             lli->lli_lsm_md->lsm_md_layout_version) {
1556                 CERROR("%s: "DFID" dir layout mismatch:\n",
1557                        ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid));
1558                 lsm_md_dump(D_ERROR, lli->lli_lsm_md);
1559                 lsm_md_dump(D_ERROR, lsm);
1560                 GOTO(unlock, rc = -EINVAL);
1561         }
1562
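        /* The layout differs (or is not set yet): drop the read lock and take
         * the write lock to replace it.  The upgrade is not atomic, but any
         * existing lsm is freed and rebuilt from the reply below.
         */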
1563         up_read(&lli->lli_lsm_sem);
1564         down_write(&lli->lli_lsm_sem);
1565         /* clear existing lsm */
1566         if (lli->lli_lsm_md) {
1567                 lmv_free_memmd(lli->lli_lsm_md);
1568                 lli->lli_lsm_md = NULL;
1569         }
1570
1571         rc = ll_init_lsm_md(inode, md);
1572         up_write(&lli->lli_lsm_sem);
1573
1574         if (rc)
1575                 RETURN(rc);
1576
1577         /* Set md->lmv to NULL so that the subsequent lustre_md free does not
1578          * free this lsm.
1579          */
1580         md->lmv = NULL;
1581
1582         /* md_merge_attr() may take a long time; since the lsm is already set,
1583          * switch to the read lock.
1584          */
1585         down_read(&lli->lli_lsm_sem);
1586
1587         if (!lmv_dir_striped(lli->lli_lsm_md))
1588                 GOTO(unlock, rc = 0);
1589
1590         OBD_ALLOC_PTR(attr);
1591         if (!attr)
1592                 GOTO(unlock, rc = -ENOMEM);
1593
1594         /* validate the lsm */
1595         rc = md_merge_attr(ll_i2mdexp(inode), &lli->lli_fid, lli->lli_lsm_md,
1596                            attr, ll_md_blocking_ast);
1597         if (!rc) {
1598                 if (md->body->mbo_valid & OBD_MD_FLNLINK)
1599                         md->body->mbo_nlink = attr->cat_nlink;
1600                 if (md->body->mbo_valid & OBD_MD_FLSIZE)
1601                         md->body->mbo_size = attr->cat_size;
1602                 if (md->body->mbo_valid & OBD_MD_FLATIME)
1603                         md->body->mbo_atime = attr->cat_atime;
1604                 if (md->body->mbo_valid & OBD_MD_FLCTIME)
1605                         md->body->mbo_ctime = attr->cat_ctime;
1606                 if (md->body->mbo_valid & OBD_MD_FLMTIME)
1607                         md->body->mbo_mtime = attr->cat_mtime;
1608         }
1609
1610         OBD_FREE_PTR(attr);
1611         GOTO(unlock, rc);
1612 unlock:
1613         up_read(&lli->lli_lsm_sem);
1614
1615         return rc;
1616 }
1617
1618 void ll_clear_inode(struct inode *inode)
1619 {
1620         struct ll_inode_info *lli = ll_i2info(inode);
1621         struct ll_sb_info *sbi = ll_i2sbi(inode);
1622
1623         ENTRY;
1624
1625         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
1626                PFID(ll_inode2fid(inode)), inode);
1627
1628         if (S_ISDIR(inode->i_mode)) {
1629                 /* these should have been cleared in ll_file_release */
1630                 LASSERT(lli->lli_opendir_key == NULL);
1631                 LASSERT(lli->lli_sai == NULL);
1632                 LASSERT(lli->lli_opendir_pid == 0);
1633         } else {
1634                 pcc_inode_free(inode);
1635         }
1636
1637         md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
1638
1639         LASSERT(!lli->lli_open_fd_write_count);
1640         LASSERT(!lli->lli_open_fd_read_count);
1641         LASSERT(!lli->lli_open_fd_exec_count);
1642
1643         if (lli->lli_mds_write_och)
1644                 ll_md_real_close(inode, FMODE_WRITE);
1645         if (lli->lli_mds_exec_och)
1646                 ll_md_real_close(inode, FMODE_EXEC);
1647         if (lli->lli_mds_read_och)
1648                 ll_md_real_close(inode, FMODE_READ);
1649
1650         if (S_ISLNK(inode->i_mode) && lli->lli_symlink_name) {
1651                 OBD_FREE(lli->lli_symlink_name,
1652                          strlen(lli->lli_symlink_name) + 1);
1653                 lli->lli_symlink_name = NULL;
1654         }
1655
1656         ll_xattr_cache_destroy(inode);
1657
1658         forget_all_cached_acls(inode);
1659         lli_clear_acl(lli);
1660         lli->lli_inode_magic = LLI_INODE_DEAD;
1661
1662         if (S_ISDIR(inode->i_mode))
1663                 ll_dir_clear_lsm_md(inode);
1664         else if (S_ISREG(inode->i_mode) && !is_bad_inode(inode))
1665                 LASSERT(list_empty(&lli->lli_agl_list));
1666
1667         /*
1668          * XXX This has to be done before lsm is freed below, because
1669          * cl_object still uses inode lsm.
1670          */
1671         cl_inode_fini(inode);
1672
1673         llcrypt_put_encryption_info(inode);
1674
1675         EXIT;
1676 }
1677
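/* Send the setattr RPC to the MDS and apply the reply attributes to the
 * local inode via simple_setattr() and ll_update_inode().  The size change
 * itself is deferred by the caller (see the cl_setattr_ost() call in
 * ll_setattr_raw()).
 */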
1678 static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data)
1679 {
1680         struct lustre_md md;
1681         struct inode *inode = dentry->d_inode;
1682         struct ll_sb_info *sbi = ll_i2sbi(inode);
1683         struct ptlrpc_request *request = NULL;
1684         int rc, ia_valid;
1685         ENTRY;
1686
1687         op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
1688                                      LUSTRE_OPC_ANY, NULL);
1689         if (IS_ERR(op_data))
1690                 RETURN(PTR_ERR(op_data));
1691
1692         rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &request);
1693         if (rc) {
1694                 ptlrpc_req_finished(request);
1695                 if (rc == -ENOENT) {
1696                         clear_nlink(inode);
1697                         /* Unlinked special device node? Or just a race?
1698                          * Pretend we have done everything. */
1699                         if (!S_ISREG(inode->i_mode) &&
1700                             !S_ISDIR(inode->i_mode)) {
1701                                 ia_valid = op_data->op_attr.ia_valid;
1702                                 op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
1703                                 rc = simple_setattr(dentry, &op_data->op_attr);
1704                                 op_data->op_attr.ia_valid = ia_valid;
1705                         }
1706                 } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
1707                         CERROR("md_setattr fails: rc = %d\n", rc);
1708                 }
1709                 RETURN(rc);
1710         }
1711
1712         rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
1713                               sbi->ll_md_exp, &md);
1714         if (rc) {
1715                 ptlrpc_req_finished(request);
1716                 RETURN(rc);
1717         }
1718
1719         ia_valid = op_data->op_attr.ia_valid;
1720         /* The inode size will be set in ll_setattr_ost(); we can't do it now
1721          * since the dirty cache is not cleared yet. */
1722         op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
1723         if (S_ISREG(inode->i_mode))
1724                 inode_lock(inode);
1725         rc = simple_setattr(dentry, &op_data->op_attr);
1726         if (S_ISREG(inode->i_mode))
1727                 inode_unlock(inode);
1728         op_data->op_attr.ia_valid = ia_valid;
1729
1730         rc = ll_update_inode(inode, &md);
1731         ptlrpc_req_finished(request);
1732
1733         RETURN(rc);
1734 }
1735
1736 /**
1737  * Zero a portion of a page belonging to @inode.
1738  * This implies, if necessary:
1739  * - taking a cl_lock on the range covering the page
1740  * - grabbing the VM page
1741  * - associating a cl_page
1742  * - proceeding to a clio read
1743  * - zeroing the range in the page
1744  * - proceeding to a cl_page flush
1745  * - releasing the cl_lock
1746  *
1747  * \param[in] inode     inode
1748  * \param[in] index     page index
1749  * \param[in] offset    offset in the page to start zeroing from
1750  * \param[in] len       length to zero
1751  *
1752  * \retval 0            on success
1753  * \retval negative     errno on failure
1754  */
1755 int ll_io_zero_page(struct inode *inode, pgoff_t index, pgoff_t offset,
1756                     unsigned len)
1757 {
1758         struct ll_inode_info *lli = ll_i2info(inode);
1759         struct cl_object *clob = lli->lli_clob;
1760         __u16 refcheck;
1761         struct lu_env *env = NULL;
1762         struct cl_io *io = NULL;
1763         struct cl_page *clpage = NULL;
1764         struct page *vmpage = NULL;
1765         unsigned from = index << PAGE_SHIFT;
1766         struct cl_lock *lock = NULL;
1767         struct cl_lock_descr *descr = NULL;
1768         struct cl_2queue *queue = NULL;
1769         struct cl_sync_io *anchor = NULL;
1770         bool holdinglock = false;
1771         bool lockedbymyself = true;
1772         int rc;
1773
1774         ENTRY;
1775
1776         env = cl_env_get(&refcheck);
1777         if (IS_ERR(env))
1778                 RETURN(PTR_ERR(env));
1779
1780         io = vvp_env_thread_io(env);
1781         io->ci_obj = clob;
1782         rc = cl_io_rw_init(env, io, CIT_WRITE, from, PAGE_SIZE);
1783         if (rc)
1784                 GOTO(putenv, rc);
1785
1786         lock = vvp_env_lock(env);
1787         descr = &lock->cll_descr;
1788         descr->cld_obj   = io->ci_obj;
1789         descr->cld_start = cl_index(io->ci_obj, from);
1790         descr->cld_end   = cl_index(io->ci_obj, from + PAGE_SIZE - 1);
1791         descr->cld_mode  = CLM_WRITE;
1792         descr->cld_enq_flags = CEF_MUST | CEF_NONBLOCK;
1793
1794         /* request lock for page */
1795         rc = cl_lock_request(env, io, lock);
1796         /* -ECANCELED indicates a matching lock with a different extent
1797          * was already present, and -EEXIST indicates a matching lock
1798          * on exactly the same extent was already present.
1799          * In both cases it means we are covered.
1800          */
1801         if (rc == -ECANCELED || rc == -EEXIST)
1802                 rc = 0;
1803         else if (rc < 0)
1804                 GOTO(iofini, rc);
1805         else
1806                 holdinglock = true;
1807
1808         /* grab page */
1809         vmpage = grab_cache_page_nowait(inode->i_mapping, index);
1810         if (vmpage == NULL)
1811                 GOTO(rellock, rc = -EOPNOTSUPP);
1812
1813         if (!PageDirty(vmpage)) {
1814                 /* associate cl_page */
1815                 clpage = cl_page_find(env, clob, vmpage->index,
1816                                       vmpage, CPT_CACHEABLE);
1817                 if (IS_ERR(clpage))
1818                         GOTO(pagefini, rc = PTR_ERR(clpage));
1819
1820                 cl_page_assume(env, io, clpage);
1821         }
1822
1823         if (!PageUptodate(vmpage) && !PageDirty(vmpage) &&
1824             !PageWriteback(vmpage)) {
1825                 /* read page */
1826                 /* set PagePrivate2 to detect special case of empty page
1827                  * in osc_brw_fini_request()
1828                  */
1829                 SetPagePrivate2(vmpage);
1830                 rc = ll_io_read_page(env, io, clpage, NULL);
1831                 if (!PagePrivate2(vmpage))
1832                         /* PagePrivate2 was cleared in osc_brw_fini_request(),
1833                          * meaning we read an empty page. In this case, to avoid
1834                          * allocating an unnecessary block in the truncated file,
1835                          * we must not zero and write as below. The subsequent
1836                          * server-side truncate will handle things correctly.
1837                          */
1838                         GOTO(clpfini, rc = 0);
1839                 ClearPagePrivate2(vmpage);
1840                 if (rc)
1841                         GOTO(clpfini, rc);
1842                 lockedbymyself = trylock_page(vmpage);
1843                 cl_page_assume(env, io, clpage);
1844         }
1845
1846         /* zero range in page */
1847         zero_user(vmpage, offset, len);
1848
1849         if (holdinglock && clpage) {
1850                 /* explicitly write newly modified page */
1851                 queue = &io->ci_queue;
1852                 cl_2queue_init(queue);
1853                 anchor = &vvp_env_info(env)->vti_anchor;
1854                 cl_sync_io_init(anchor, 1);
1855                 clpage->cp_sync_io = anchor;
1856                 cl_2queue_add(queue, clpage);
1857                 rc = cl_io_submit_rw(env, io, CRT_WRITE, queue);
1858                 if (rc)
1859                         GOTO(queuefini1, rc);
1860                 rc = cl_sync_io_wait(env, anchor, 0);
1861                 if (rc)
1862                         GOTO(queuefini2, rc);
1863                 cl_page_assume(env, io, clpage);
1864
1865 queuefini2:
1866                 cl_2queue_discard(env, io, queue);
1867 queuefini1:
1868                 cl_2queue_disown(env, io, queue);
1869                 cl_2queue_fini(env, queue);
1870         }
1871
1872 clpfini:
1873         if (clpage)
1874                 cl_page_put(env, clpage);
1875 pagefini:
1876         if (lockedbymyself) {
1877                 unlock_page(vmpage);
1878                 put_page(vmpage);
1879         }
1880 rellock:
1881         if (holdinglock)
1882                 cl_lock_release(env, lock);
1883 iofini:
1884         cl_io_fini(env, io);
1885 putenv:
1886         if (env)
1887                 cl_env_put(env, &refcheck);
1888
1889         RETURN(rc);
1890 }
1891
1892 /* If this inode has objects allocated to it (lsm != NULL), then the OST
1893  * object(s) determine the file size and mtime.  Otherwise, the MDS will
1894  * keep these values until such a time that objects are allocated for it.
1895  * We do the MDS operations first, as the MDS checks permissions for us.
1896  * We don't do the MDS RPC if there is nothing that we want to store there;
1897  * otherwise there is no harm in updating mtime/atime on the MDS, since we
1898  * are going to do an RPC anyway.
1899  *
1900  * If we are doing a truncate, we will send the mtime and ctime updates
1901  * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
1902  * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
1903  * at the same time.
1904  *
1905  * In the case of HSM import, we only set the attributes on the MDS.
1906  */
1907 int ll_setattr_raw(struct dentry *dentry, struct iattr *attr,
1908                    enum op_xvalid xvalid, bool hsm_import)
1909 {
1910         struct inode *inode = dentry->d_inode;
1911         struct ll_inode_info *lli = ll_i2info(inode);
1912         struct md_op_data *op_data = NULL;
1913         ktime_t kstart = ktime_get();
1914         int rc = 0;
1915
1916         ENTRY;
1917
1918         CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, "
1919                "valid %x, hsm_import %d\n",
1920                ll_i2sbi(inode)->ll_fsname, PFID(&lli->lli_fid),
1921                inode, i_size_read(inode), attr->ia_size, attr->ia_valid,
1922                hsm_import);
1923
1924         if (attr->ia_valid & ATTR_SIZE) {
1925                 /* Check new size against VFS/VM file size limit and rlimit */
1926                 rc = inode_newsize_ok(inode, attr->ia_size);
1927                 if (rc)
1928                         RETURN(rc);
1929
1930                 /* The maximum Lustre file size is variable, based on the
1931                  * OST maximum object size and number of stripes.  This
1932                  * needs another check in addition to the VFS check above. */
1933                 if (attr->ia_size > ll_file_maxbytes(inode)) {
1934                         CDEBUG(D_INODE,"file "DFID" too large %llu > %llu\n",
1935                                PFID(&lli->lli_fid), attr->ia_size,
1936                                ll_file_maxbytes(inode));
1937                         RETURN(-EFBIG);
1938                 }
1939
1940                 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
1941         }
1942
1943         /* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
1944         if (attr->ia_valid & TIMES_SET_FLAGS) {
1945                 if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
1946                     !cfs_capable(CFS_CAP_FOWNER))
1947                         RETURN(-EPERM);
1948         }
1949
1950         /* We mark all of the fields "set" so MDS/OST does not re-set them */
1951         if (!(xvalid & OP_XVALID_CTIME_SET) &&
1952              (attr->ia_valid & ATTR_CTIME)) {
1953                 attr->ia_ctime = current_time(inode);
1954                 xvalid |= OP_XVALID_CTIME_SET;
1955         }
1956         if (!(attr->ia_valid & ATTR_ATIME_SET) &&
1957             (attr->ia_valid & ATTR_ATIME)) {
1958                 attr->ia_atime = current_time(inode);
1959                 attr->ia_valid |= ATTR_ATIME_SET;
1960         }
1961         if (!(attr->ia_valid & ATTR_MTIME_SET) &&
1962             (attr->ia_valid & ATTR_MTIME)) {
1963                 attr->ia_mtime = current_time(inode);
1964                 attr->ia_valid |= ATTR_MTIME_SET;
1965         }
1966
1967         if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
1968                 CDEBUG(D_INODE, "setting mtime %lld, ctime %lld, now = %lld\n",
1969                        (s64)attr->ia_mtime.tv_sec, (s64)attr->ia_ctime.tv_sec,
1970                        ktime_get_real_seconds());
1971
1972         if (S_ISREG(inode->i_mode))
1973                 inode_unlock(inode);
1974
1975         /* We always do an MDS RPC, even if we're only changing the size;
1976          * only the MDS knows whether truncate() should fail with -ETXTBSY */
1977
1978         OBD_ALLOC_PTR(op_data);
1979         if (op_data == NULL)
1980                 GOTO(out, rc = -ENOMEM);
1981
1982         if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
1983                 /* If we are changing file size, file content is
1984                  * modified, flag it.
1985                  */
1986                 xvalid |= OP_XVALID_OWNEROVERRIDE;
1987                 op_data->op_bias |= MDS_DATA_MODIFIED;
1988                 ll_file_clear_flag(lli, LLIF_DATA_MODIFIED);
1989         }
1990
1991         if (attr->ia_valid & ATTR_FILE) {
1992                 struct ll_file_data *fd = attr->ia_file->private_data;
1993
1994                 if (fd->fd_lease_och)
1995                         op_data->op_bias |= MDS_TRUNC_KEEP_LEASE;
1996         }
1997
1998         op_data->op_attr = *attr;
1999         op_data->op_xvalid = xvalid;
2000
2001         rc = ll_md_setattr(dentry, op_data);
2002         if (rc)
2003                 GOTO(out, rc);
2004
2005         if (!S_ISREG(inode->i_mode) || hsm_import)
2006                 GOTO(out, rc = 0);
2007
2008         if (attr->ia_valid & (ATTR_SIZE | ATTR_ATIME | ATTR_ATIME_SET |
2009                               ATTR_MTIME | ATTR_MTIME_SET | ATTR_CTIME) ||
2010             xvalid & OP_XVALID_CTIME_SET) {
2011                 bool cached = false;
2012
2013                 rc = pcc_inode_setattr(inode, attr, &cached);
2014                 if (cached) {
2015                         if (rc) {
2016                                 CERROR("%s: PCC inode "DFID" setattr failed: "
2017                                        "rc = %d\n",
2018                                        ll_i2sbi(inode)->ll_fsname,
2019                                        PFID(&lli->lli_fid), rc);
2020                                 GOTO(out, rc);
2021                         }
2022                 } else {
2023                         unsigned int flags = 0;
2024
2025                         /* For truncate and utimes that send attributes to the
2026                          * OSTs, setting mtime/atime to the past is performed
2027                          * under a PW [0:EOF] extent lock (new_size:EOF for
2028                          * truncate). It may seem excessive to send mtime/atime
2029                          * updates to OSTs when not setting times to the past,
2030                          * but it is necessary due to possible time
2031                          * de-synchronization between the MDT inode and OST
2032                          * objects. */
2033                         if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode) &&
2034                             attr->ia_valid & ATTR_SIZE) {
2035                                 xvalid |= OP_XVALID_FLAGS;
2036                                 flags = LUSTRE_ENCRYPT_FL;
2037                                 if (attr->ia_size & ~PAGE_MASK) {
2038                                         pgoff_t offset =
2039                                                 attr->ia_size & (PAGE_SIZE - 1);
2040
2041                                         rc = ll_io_zero_page(inode,
2042                                                     attr->ia_size >> PAGE_SHIFT,
2043                                                     offset, PAGE_SIZE - offset);
2044                                         if (rc)
2045                                                 GOTO(out, rc);
2046                                 }
2047                         }
2048                         rc = cl_setattr_ost(lli->lli_clob, attr, xvalid, flags);
2049                 }
2050         }
2051
2052         /* If the file was restored, it needs to set the dirty flag.
2053          *
2054          * We've already sent the MDS_DATA_MODIFIED flag in
2055          * ll_md_setattr() for truncate. However, the MDT refuses to
2056          * set the HS_DIRTY flag on released files, so we have to set
2057          * it again if the file has been restored. Please check how
2058          * LLIF_DATA_MODIFIED is set in vvp_io_setattr_fini().
2059          *
2060          * Please notice that if the file is not released, the previous
2061          * MDS_DATA_MODIFIED has taken effect and usually
2062          * LLIF_DATA_MODIFIED is not set (see vvp_io_setattr_fini()).
2063          * This way we can save an RPC for the common open + truncate
2064          * operation. */
2065         if (ll_file_test_and_clear_flag(lli, LLIF_DATA_MODIFIED)) {
2066                 struct hsm_state_set hss = {
2067                         .hss_valid = HSS_SETMASK,
2068                         .hss_setmask = HS_DIRTY,
2069                 };
2070                 int rc2;
2071
2072                 rc2 = ll_hsm_state_set(inode, &hss);
2073                 /* Truncate and write can happen at the same time, so the
2074                  * file can be marked modified even though it was not
2075                  * restored from the released state; ll_hsm_state_set() is
2076                  * then not applicable for the file, and rc2 < 0 is normal
2077                  * in this case. */
2078                 if (rc2 < 0)
2079                         CDEBUG(D_INFO, DFID "HSM set dirty failed: rc2 = %d\n",
2080                                PFID(ll_inode2fid(inode)), rc2);
2081         }
2082
2083         EXIT;
2084 out:
2085         if (op_data != NULL)
2086                 ll_finish_md_op_data(op_data);
2087
2088         if (S_ISREG(inode->i_mode)) {
2089                 inode_lock(inode);
2090                 if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
2091                         inode_dio_wait(inode);
2092                 /* Once we've got the i_mutex, it's safe to set the S_NOSEC
2093                  * flag.  ll_update_inode (called from ll_md_setattr) clears
2094                  * inode flags, so there is a gap where S_NOSEC is not set.
2095                  * This can cause a writer to take the i_mutex unnecessarily,
2096                  * but this is safe to do and should be rare. */
2097                 inode_has_no_xattr(inode);
2098         }
2099
2100         if (!rc)
2101                 ll_stats_ops_tally(ll_i2sbi(inode), attr->ia_valid & ATTR_SIZE ?
2102                                         LPROC_LL_TRUNC : LPROC_LL_SETATTR,
2103                                    ktime_us_delta(ktime_get(), kstart));
2104
2105         return rc;
2106 }
2107
2108 int ll_setattr(struct dentry *de, struct iattr *attr)
2109 {
2110         int mode = de->d_inode->i_mode;
2111         enum op_xvalid xvalid = 0;
2112         int rc;
2113
2114         rc = llcrypt_prepare_setattr(de, attr);
2115         if (rc)
2116                 return rc;
2117
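        /* A combined size/mode/ctime change gets OP_XVALID_OWNEROVERRIDE.
         * When a size+mode change (without ATTR_FORCE) drops the SUID/SGID
         * bits, add ATTR_FORCE, and when a chmod drops SUID/SGID without the
         * corresponding ATTR_KILL_* flag, set that flag explicitly.
         */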
2118         if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
2119                               (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
2120                 xvalid |= OP_XVALID_OWNEROVERRIDE;
2121
2122         if (((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
2123                                (ATTR_SIZE|ATTR_MODE)) &&
2124             (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
2125              (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2126               !(attr->ia_mode & S_ISGID))))
2127                 attr->ia_valid |= ATTR_FORCE;
2128
2129         if ((attr->ia_valid & ATTR_MODE) &&
2130             (mode & S_ISUID) &&
2131             !(attr->ia_mode & S_ISUID) &&
2132             !(attr->ia_valid & ATTR_KILL_SUID))
2133                 attr->ia_valid |= ATTR_KILL_SUID;
2134
2135         if ((attr->ia_valid & ATTR_MODE) &&
2136             ((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
2137             !(attr->ia_mode & S_ISGID) &&
2138             !(attr->ia_valid & ATTR_KILL_SGID))
2139                 attr->ia_valid |= ATTR_KILL_SGID;
2140
2141         return ll_setattr_raw(de, attr, xvalid, false);
2142 }
2143
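/* Gather filesystem statistics: query the MDT first (inode counts), then the
 * OSTs for block counts, unless the MDT reply already carries summed values
 * (OS_STATFS_SUM) or there are no OSTs, in which case the MDT numbers stand.
 */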
2144 int ll_statfs_internal(struct ll_sb_info *sbi, struct obd_statfs *osfs,
2145                        u32 flags)
2146 {
2147         struct obd_statfs obd_osfs = { 0 };
2148         time64_t max_age;
2149         int rc;
2150
2151         ENTRY;
2152         max_age = ktime_get_seconds() - sbi->ll_statfs_max_age;
2153
2154         if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
2155                 flags |= OBD_STATFS_NODELAY;
2156
2157         rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
2158         if (rc)
2159                 RETURN(rc);
2160
2161         osfs->os_type = LL_SUPER_MAGIC;
2162
2163         CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n",
2164               osfs->os_bavail, osfs->os_blocks, osfs->os_ffree, osfs->os_files);
2165
2166         if (osfs->os_state & OS_STATFS_SUM)
2167                 GOTO(out, rc);
2168
2169         rc = obd_statfs(NULL, sbi->ll_dt_exp, &obd_osfs, max_age, flags);
2170         if (rc) /* Possibly a filesystem with no OSTs.  Report MDT totals. */
2171                 GOTO(out, rc = 0);
2172
2173         CDEBUG(D_SUPER, "OSC blocks %llu/%llu objects %llu/%llu\n",
2174                obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
2175                obd_osfs.os_files);
2176
2177         osfs->os_bsize = obd_osfs.os_bsize;
2178         osfs->os_blocks = obd_osfs.os_blocks;
2179         osfs->os_bfree = obd_osfs.os_bfree;
2180         osfs->os_bavail = obd_osfs.os_bavail;
2181
2182         /* If we have _some_ OSTs, but don't have as many free objects on the
2183          * OSTs as inodes on the MDTs, reduce the reported number of inodes
2184          * to compensate, so that the "inodes in use" number is correct.
2185          * This should be kept in sync with lod_statfs() behaviour.
2186          */
2187         if (obd_osfs.os_files && obd_osfs.os_ffree < osfs->os_ffree) {
2188                 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
2189                                  obd_osfs.os_ffree;
2190                 osfs->os_ffree = obd_osfs.os_ffree;
2191         }
2192
2193 out:
2194         RETURN(rc);
2195 }
2196
2197 int ll_statfs(struct dentry *de, struct kstatfs *sfs)
2198 {
2199         struct super_block *sb = de->d_sb;
2200         struct obd_statfs osfs;
2201         __u64 fsid = huge_encode_dev(sb->s_dev);
2202         ktime_t kstart = ktime_get();
2203         int rc;
2204
2205         CDEBUG(D_VFSTRACE, "VFS Op:sb=%s (%p)\n", sb->s_id, sb);
2206
2207         /* Some amount of caching on the client is allowed */
2208         rc = ll_statfs_internal(ll_s2sbi(sb), &osfs, OBD_STATFS_SUM);
2209         if (rc)
2210                 return rc;
2211
2212         statfs_unpack(sfs, &osfs);
2213
2214         /* We need to downshift for all 32-bit kernels, because we can't
2215          * tell if the kernel is being called via sys_statfs64() or not.
2216          * Stop before overflowing f_bsize, in which case it is better
2217          * to just risk EOVERFLOW if the caller is using the old sys_statfs(). */
2218         if (sizeof(long) < 8) {
2219                 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
2220                         sfs->f_bsize <<= 1;
2221
2222                         osfs.os_blocks >>= 1;
2223                         osfs.os_bfree >>= 1;
2224                         osfs.os_bavail >>= 1;
2225                 }
2226         }
2227
2228         sfs->f_blocks = osfs.os_blocks;
2229         sfs->f_bfree = osfs.os_bfree;
2230         sfs->f_bavail = osfs.os_bavail;
2231         sfs->f_fsid.val[0] = (__u32)fsid;
2232         sfs->f_fsid.val[1] = (__u32)(fsid >> 32);
2233
2234         ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STATFS,
2235                            ktime_us_delta(ktime_get(), kstart));
2236
2237         return 0;
2238 }
2239
2240 void ll_inode_size_lock(struct inode *inode)
2241 {
2242         struct ll_inode_info *lli;
2243
2244         LASSERT(!S_ISDIR(inode->i_mode));
2245
2246         lli = ll_i2info(inode);
2247         mutex_lock(&lli->lli_size_mutex);
2248 }
2249
2250 void ll_inode_size_unlock(struct inode *inode)
2251 {
2252         struct ll_inode_info *lli;
2253
2254         lli = ll_i2info(inode);
2255         mutex_unlock(&lli->lli_size_mutex);
2256 }
2257
2258 void ll_update_inode_flags(struct inode *inode, int ext_flags)
2259 {
2260         /* do not clear encryption flag */
2261         ext_flags |= ll_inode_to_ext_flags(inode->i_flags) & LUSTRE_ENCRYPT_FL;
2262         inode->i_flags = ll_ext_to_inode_flags(ext_flags);
2263         if (ext_flags & LUSTRE_PROJINHERIT_FL)
2264                 ll_file_set_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT);
2265         else
2266                 ll_file_clear_flag(ll_i2info(inode), LLIF_PROJECT_INHERIT);
2267 }
2268
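/* Apply attributes from an MDT reply (struct lustre_md) to the VFS inode and
 * ll_inode_info.  Only fields flagged in body->mbo_valid are updated; the
 * inode times are only moved forward, and the FID is never allowed to change.
 */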
2269 int ll_update_inode(struct inode *inode, struct lustre_md *md)
2270 {
2271         struct ll_inode_info *lli = ll_i2info(inode);
2272         struct mdt_body *body = md->body;
2273         struct ll_sb_info *sbi = ll_i2sbi(inode);
2274         int rc = 0;
2275
2276         if (body->mbo_valid & OBD_MD_FLEASIZE) {
2277                 rc = cl_file_inode_init(inode, md);
2278                 if (rc)
2279                         return rc;
2280         }
2281
2282         if (S_ISDIR(inode->i_mode)) {
2283                 rc = ll_update_lsm_md(inode, md);
2284                 if (rc != 0)
2285                         return rc;
2286         }
2287
2288         if (body->mbo_valid & OBD_MD_FLACL)
2289                 lli_replace_acl(lli, md);
2290
2291         inode->i_ino = cl_fid_build_ino(&body->mbo_fid1,
2292                                         sbi->ll_flags & LL_SBI_32BIT_API);
2293         inode->i_generation = cl_fid_build_gen(&body->mbo_fid1);
2294
2295         if (body->mbo_valid & OBD_MD_FLATIME) {
2296                 if (body->mbo_atime > inode->i_atime.tv_sec)
2297                         inode->i_atime.tv_sec = body->mbo_atime;
2298                 lli->lli_atime = body->mbo_atime;
2299         }
2300
2301         if (body->mbo_valid & OBD_MD_FLMTIME) {
2302                 if (body->mbo_mtime > inode->i_mtime.tv_sec) {
2303                         CDEBUG(D_INODE,
2304                                "setting ino %lu mtime from %lld to %llu\n",
2305                                inode->i_ino, (s64)inode->i_mtime.tv_sec,
2306                                body->mbo_mtime);
2307                         inode->i_mtime.tv_sec = body->mbo_mtime;
2308                 }
2309                 lli->lli_mtime = body->mbo_mtime;
2310         }
2311
2312         if (body->mbo_valid & OBD_MD_FLCTIME) {
2313                 if (body->mbo_ctime > inode->i_ctime.tv_sec)
2314                         inode->i_ctime.tv_sec = body->mbo_ctime;
2315                 lli->lli_ctime = body->mbo_ctime;
2316         }
2317
2318         if (body->mbo_valid & OBD_MD_FLBTIME)
2319                 lli->lli_btime = body->mbo_btime;
2320
2321         /* Clear i_flags to remove S_NOSEC before permissions are updated */
2322         if (body->mbo_valid & OBD_MD_FLFLAGS)
2323                 ll_update_inode_flags(inode, body->mbo_flags);
2324         if (body->mbo_valid & OBD_MD_FLMODE)
2325                 inode->i_mode = (inode->i_mode & S_IFMT) |
2326                                 (body->mbo_mode & ~S_IFMT);
2327
2328         if (body->mbo_valid & OBD_MD_FLTYPE)
2329                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
2330                                 (body->mbo_mode & S_IFMT);
2331
2332         LASSERT(inode->i_mode != 0);
2333         if (body->mbo_valid & OBD_MD_FLUID)
2334                 inode->i_uid = make_kuid(&init_user_ns, body->mbo_uid);
2335         if (body->mbo_valid & OBD_MD_FLGID)
2336                 inode->i_gid = make_kgid(&init_user_ns, body->mbo_gid);
2337         if (body->mbo_valid & OBD_MD_FLPROJID)
2338                 lli->lli_projid = body->mbo_projid;
2339         if (body->mbo_valid & OBD_MD_FLNLINK)
2340                 set_nlink(inode, body->mbo_nlink);
2341         if (body->mbo_valid & OBD_MD_FLRDEV)
2342                 inode->i_rdev = old_decode_dev(body->mbo_rdev);
2343
2344         if (body->mbo_valid & OBD_MD_FLID) {
2345                 /* FID shouldn't be changed! */
2346                 if (fid_is_sane(&lli->lli_fid)) {
2347                         LASSERTF(lu_fid_eq(&lli->lli_fid, &body->mbo_fid1),
2348                                  "Trying to change FID "DFID
2349                                  " to the "DFID", inode "DFID"(%p)\n",
2350                                  PFID(&lli->lli_fid), PFID(&body->mbo_fid1),
2351                                  PFID(ll_inode2fid(inode)), inode);
2352                 } else {
2353                         lli->lli_fid = body->mbo_fid1;
2354                 }
2355         }
2356
2357         LASSERT(fid_seq(&lli->lli_fid) != 0);
2358
2359         lli->lli_attr_valid = body->mbo_valid;
2360         if (body->mbo_valid & OBD_MD_FLSIZE) {
2361                 i_size_write(inode, body->mbo_size);
2362
2363                 CDEBUG(D_VFSTRACE, "inode="DFID", updating i_size %llu\n",
2364                        PFID(ll_inode2fid(inode)),
2365                        (unsigned long long)body->mbo_size);
2366
2367                 if (body->mbo_valid & OBD_MD_FLBLOCKS)
2368                         inode->i_blocks = body->mbo_blocks;
2369         } else {
2370                 if (body->mbo_valid & OBD_MD_FLLAZYSIZE)
2371                         lli->lli_lazysize = body->mbo_size;
2372                 if (body->mbo_valid & OBD_MD_FLLAZYBLOCKS)
2373                         lli->lli_lazyblocks = body->mbo_blocks;
2374         }
2375
2376         if (body->mbo_valid & OBD_MD_TSTATE) {
2377                 /* Set LLIF_FILE_RESTORING if a restore is ongoing and
2378                  * clear it when done, to ensure glimpsing of the
2379                  * updated attrs starts again.
2380                  */
2381                 if (body->mbo_t_state & MS_RESTORE)
2382                         ll_file_set_flag(lli, LLIF_FILE_RESTORING);
2383                 else
2384                         ll_file_clear_flag(lli, LLIF_FILE_RESTORING);
2385         }
2386
2387         return 0;
2388 }
2389
2390 int ll_read_inode2(struct inode *inode, void *opaque)
2391 {
2392         struct lustre_md *md = opaque;
2393         struct ll_inode_info *lli = ll_i2info(inode);
2394         int     rc;
2395         ENTRY;
2396
2397         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
2398                PFID(&lli->lli_fid), inode);
2399
2400         /* Core attributes from the MDS first.  This is a new inode, and
2401          * the VFS doesn't zero times in the core inode so we have to do
2402          * it ourselves.  They will be overwritten by either MDS or OST
2403          * attributes - we just need to make sure they aren't newer.
2404          */
2405         inode->i_mtime.tv_sec = 0;
2406         inode->i_atime.tv_sec = 0;
2407         inode->i_ctime.tv_sec = 0;
2408         inode->i_rdev = 0;
2409         rc = ll_update_inode(inode, md);
2410         if (rc != 0)
2411                 RETURN(rc);
2412
2413         /* OIDEBUG(inode); */
2414
2415 #ifdef HAVE_BACKING_DEV_INFO
2416         /* initializing backing dev info. */
2417         inode->i_mapping->backing_dev_info = &s2lsi(inode->i_sb)->lsi_bdi;
2418 #endif
2419         if (S_ISREG(inode->i_mode)) {
2420                 struct ll_sb_info *sbi = ll_i2sbi(inode);
2421                 inode->i_op = &ll_file_inode_operations;
2422                 inode->i_fop = sbi->ll_fop;
2423                 inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
2424                 EXIT;
2425         } else if (S_ISDIR(inode->i_mode)) {
2426                 inode->i_op = &ll_dir_inode_operations;
2427                 inode->i_fop = &ll_dir_operations;
2428                 EXIT;
2429         } else if (S_ISLNK(inode->i_mode)) {
2430                 inode->i_op = &ll_fast_symlink_inode_operations;
2431                 EXIT;
2432         } else {
2433                 inode->i_op = &ll_special_inode_operations;
2434
2435                 init_special_inode(inode, inode->i_mode,
2436                                    inode->i_rdev);
2437
2438                 EXIT;
2439         }
2440
2441         return 0;
2442 }
2443
2444 void ll_delete_inode(struct inode *inode)
2445 {
2446         struct ll_inode_info *lli = ll_i2info(inode);
2447         struct address_space *mapping = &inode->i_data;
2448         unsigned long nrpages;
2449         unsigned long flags;
2450
2451         ENTRY;
2452
2453         if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL) {
2454                 /* This is the last chance to write out dirty pages;
2455                  * otherwise we may lose data during umount.
2456                  *
2457                  * If i_nlink is 0 then just discard the data. This is safe
2458                  * because the local inode gets i_nlink 0 from the server only
2459                  * for the last unlink, so the file is not open anywhere else.
2460                  */
2461                 cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, inode->i_nlink ?
2462                                    CL_FSYNC_LOCAL : CL_FSYNC_DISCARD, 1);
2463         }
2464         truncate_inode_pages_final(mapping);
2465
2466         /* Workaround for LU-118: Note nrpages may not be totally updated when
2467          * truncate_inode_pages() returns, as there can be a page in the process
2468          * of deletion (inside __delete_from_page_cache()) in the specified
2469          * range. Thus mapping->nrpages can be non-zero when this function
2470          * returns even after truncation of the whole mapping.  Only do this if
2471          * nrpages isn't already zero.
2472          */
2473         nrpages = mapping->nrpages;
2474         if (nrpages) {
2475                 ll_xa_lock_irqsave(&mapping->i_pages, flags);
2476                 nrpages = mapping->nrpages;
2477                 ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
2478         } /* Workaround end */
2479
2480         LASSERTF(nrpages == 0, "%s: inode="DFID"(%p) nrpages=%lu, "
2481                  "see https://jira.whamcloud.com/browse/LU-118\n",
2482                  ll_i2sbi(inode)->ll_fsname,
2483                  PFID(ll_inode2fid(inode)), inode, nrpages);
2484
2485         ll_clear_inode(inode);
2486         clear_inode(inode);
2487
2488         EXIT;
2489 }
2490
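/* Handle inode flag ioctls: FS_IOC_GETFLAGS fetches the flags from the MDS,
 * FS_IOC_SETFLAGS updates them on the MDS and then mirrors them to the OST
 * objects via cl_setattr_ost().
 */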
2491 int ll_iocontrol(struct inode *inode, struct file *file,
2492                  unsigned int cmd, unsigned long arg)
2493 {
2494         struct ll_sb_info *sbi = ll_i2sbi(inode);
2495         struct ptlrpc_request *req = NULL;
2496         int rc, flags = 0;
2497         ENTRY;
2498
2499         switch (cmd) {
2500         case FS_IOC_GETFLAGS: {
2501                 struct mdt_body *body;
2502                 struct md_op_data *op_data;
2503
2504                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
2505                                              0, 0, LUSTRE_OPC_ANY,
2506                                              NULL);
2507                 if (IS_ERR(op_data))
2508                         RETURN(PTR_ERR(op_data));
2509
2510                 op_data->op_valid = OBD_MD_FLFLAGS;
2511                 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
2512                 ll_finish_md_op_data(op_data);
2513                 if (rc) {
2514                         CERROR("%s: failure inode "DFID": rc = %d\n",
2515                                sbi->ll_md_exp->exp_obd->obd_name,
2516                                PFID(ll_inode2fid(inode)), rc);
2517                         RETURN(-abs(rc));
2518                 }
2519
2520                 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
2521
2522                 flags = body->mbo_flags;
2523
2524                 ptlrpc_req_finished(req);
2525
2526                 RETURN(put_user(flags, (int __user *)arg));
2527         }
2528         case FS_IOC_SETFLAGS: {
2529                 struct iattr *attr;
2530                 struct md_op_data *op_data;
2531                 struct cl_object *obj;
2532                 struct fsxattr fa = { 0 };
2533
2534                 if (get_user(flags, (int __user *)arg))
2535                         RETURN(-EFAULT);
2536
2537                 fa.fsx_projid = ll_i2info(inode)->lli_projid;
2538                 if (flags & LUSTRE_PROJINHERIT_FL)
2539                         fa.fsx_xflags = FS_XFLAG_PROJINHERIT;
2540
2541                 rc = ll_ioctl_check_project(inode, &fa);
2542                 if (rc)
2543                         RETURN(rc);
2544
2545                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
2546                                              LUSTRE_OPC_ANY, NULL);
2547                 if (IS_ERR(op_data))
2548                         RETURN(PTR_ERR(op_data));
2549
2550                 op_data->op_attr_flags = flags;
2551                 op_data->op_xvalid |= OP_XVALID_FLAGS;
2552                 rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &req);
2553                 ll_finish_md_op_data(op_data);
2554                 ptlrpc_req_finished(req);
2555                 if (rc)
2556                         RETURN(rc);
2557
2558                 ll_update_inode_flags(inode, flags);
2559
2560                 obj = ll_i2info(inode)->lli_clob;
2561                 if (obj == NULL)
2562                         RETURN(0);
2563
2564                 OBD_ALLOC_PTR(attr);
2565                 if (attr == NULL)
2566                         RETURN(-ENOMEM);
2567
2568                 rc = cl_setattr_ost(obj, attr, OP_XVALID_FLAGS, flags);
2569
2570                 OBD_FREE_PTR(attr);
2571                 RETURN(rc);
2572         }
2573         default:
2574                 RETURN(-ENOSYS);
2575         }
2576
2577         RETURN(0);
2578 }
2579
2580 int ll_flush_ctx(struct inode *inode)
2581 {
2582         struct ll_sb_info  *sbi = ll_i2sbi(inode);
2583
2584         CDEBUG(D_SEC, "flush context for user %d\n",
2585                from_kuid(&init_user_ns, current_uid()));
2586
2587         obd_set_info_async(NULL, sbi->ll_md_exp,
2588                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2589                            0, NULL, NULL);
2590         obd_set_info_async(NULL, sbi->ll_dt_exp,
2591                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2592                            0, NULL, NULL);
2593         return 0;
2594 }
2595
2596 /* umount -f client means force down, don't save state */
2597 void ll_umount_begin(struct super_block *sb)
2598 {
2599         struct ll_sb_info *sbi = ll_s2sbi(sb);
2600         struct obd_device *obd;
2601         struct obd_ioctl_data *ioc_data;
2602         int cnt;
2603         ENTRY;
2604
2605         CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
2606                sb->s_count, atomic_read(&sb->s_active));
2607
2608         obd = class_exp2obd(sbi->ll_md_exp);
2609         if (obd == NULL) {
2610                 CERROR("Invalid MDC connection handle %#llx\n",
2611                        sbi->ll_md_exp->exp_handle.h_cookie);
2612                 EXIT;
2613                 return;
2614         }
2615         obd->obd_force = 1;
2616
2617         obd = class_exp2obd(sbi->ll_dt_exp);
2618         if (obd == NULL) {
2619                 CERROR("Invalid LOV connection handle %#llx\n",
2620                        sbi->ll_dt_exp->exp_handle.h_cookie);
2621                 EXIT;
2622                 return;
2623         }
2624         obd->obd_force = 1;
2625
2626         OBD_ALLOC_PTR(ioc_data);
2627         if (ioc_data) {
2628                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
2629                               sizeof *ioc_data, ioc_data, NULL);
2630
2631                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
2632                               sizeof *ioc_data, ioc_data, NULL);
2633
2634                 OBD_FREE_PTR(ioc_data);
2635         }
2636
2637         /* Really, we'd like to wait until there are no requests outstanding,
2638          * and then continue.  For now, we just periodically check for the VFS
2639          * to decrement mnt_cnt and hope to finish within 10 seconds.
2640          */
2641         cnt = 10;
2642         while (cnt > 0 &&
2643                !may_umount(sbi->ll_mnt.mnt)) {
2644                 ssleep(1);
2645                 cnt -= 1;
2646         }
2647
2648         EXIT;
2649 }
2650
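/* Only the read-only/read-write transition is handled on remount: the new
 * state is pushed to the MDS via KEY_READ_ONLY and the superblock flags are
 * updated to match.
 */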
2651 int ll_remount_fs(struct super_block *sb, int *flags, char *data)
2652 {
2653         struct ll_sb_info *sbi = ll_s2sbi(sb);
2654         char *profilenm = get_profile_name(sb);
2655         int err;
2656         __u32 read_only;
2657
2658         if ((*flags & MS_RDONLY) != (sb->s_flags & SB_RDONLY)) {
2659                 read_only = *flags & MS_RDONLY;
2660                 err = obd_set_info_async(NULL, sbi->ll_md_exp,
2661                                          sizeof(KEY_READ_ONLY),
2662                                          KEY_READ_ONLY, sizeof(read_only),
2663                                          &read_only, NULL);
2664                 if (err) {
2665                         LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
2666                                       profilenm, read_only ?
2667                                       "read-only" : "read-write", err);
2668                         return err;
2669                 }
2670
2671                 if (read_only)
2672                         sb->s_flags |= SB_RDONLY;
2673                 else
2674                         sb->s_flags &= ~SB_RDONLY;
2675
2676                 if (sbi->ll_flags & LL_SBI_VERBOSE)
2677                         LCONSOLE_WARN("Remounted %s %s\n", profilenm,
2678                                       read_only ?  "read-only" : "read-write");
2679         }
2680         return 0;
2681 }
2682
2683 /**
2684  * Clean up the open handle that is cached on the MDT side.
2685  *
2686  * For the open case, the client-side open handling thread may hit an
2687  * error after the MDT has granted the open. In such a case, the client
2688  * should send a close RPC to the MDT as cleanup; otherwise, the open
2689  * handle is leaked on the MDT until the client unmounts or is evicted.
2690  *
2691  * Furthermore, if someone unlinks the file, the open handle holds a
2692  * reference on the file/object, so it will block subsequent threads
2693  * that want to locate the object via FID.
2694  *
2695  * \param[in] sb        super block for this file-system
2696  * \param[in] open_req  pointer to the original open request
2697  */
2698 void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req)
2699 {
2700         struct mdt_body                 *body;
2701         struct md_op_data               *op_data;
2702         struct ptlrpc_request           *close_req = NULL;
2703         struct obd_export               *exp       = ll_s2sbi(sb)->ll_md_exp;
2704         ENTRY;
2705
2706         body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
2707         OBD_ALLOC_PTR(op_data);
2708         if (op_data == NULL) {
2709                 CWARN("%s: cannot allocate op_data to release open handle for "
2710                       DFID"\n", ll_s2sbi(sb)->ll_fsname, PFID(&body->mbo_fid1));
2711
2712                 RETURN_EXIT;
2713         }
2714
2715         op_data->op_fid1 = body->mbo_fid1;
2716         op_data->op_open_handle = body->mbo_open_handle;
2717         op_data->op_mod_time = ktime_get_real_seconds();
2718         md_close(exp, op_data, NULL, &close_req);
2719         ptlrpc_req_finished(close_req);
2720         ll_finish_md_op_data(op_data);
2721
2722         EXIT;
2723 }
2724
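/* Unpack the lustre_md from an RPC reply and instantiate or refresh the
 * corresponding inode: an existing *inode is updated in place, otherwise a
 * new one is created via ll_iget().  A layout lock piggybacked on the reply
 * is applied, and on failure of an open intent the MDT-side open handle is
 * released via ll_open_cleanup().
 */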
2725 int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
2726                   struct super_block *sb, struct lookup_intent *it)
2727 {
2728         struct ll_sb_info *sbi = NULL;
2729         struct lustre_md md = { NULL };
2730         bool default_lmv_deleted = false;
2731         int rc;
2732
2733         ENTRY;
2734
2735         LASSERT(*inode || sb);
2736         sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
2737         rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
2738                               sbi->ll_md_exp, &md);
2739         if (rc != 0)
2740                 GOTO(out, rc);
2741
2742         /*
2743          * Clear default_lmv only if the intent_getattr reply doesn't contain
2744          * it, but this needs to be done after iget; check it early here
2745          * because ll_update_lsm_md() may change md.
2746          */
2747         if (it && (it->it_op & (IT_LOOKUP | IT_GETATTR)) &&
2748             S_ISDIR(md.body->mbo_mode) && !md.default_lmv)
2749                 default_lmv_deleted = true;
2750
2751         if (*inode) {
2752                 rc = ll_update_inode(*inode, &md);
2753                 if (rc != 0)
2754                         GOTO(out, rc);
2755         } else {
2756                 LASSERT(sb != NULL);
2757
2758                 /*
2759                  * At this point the server returns the same FID that the
2760                  * client generated at create time, so using ->fid1 is okay here.
2761                  */
2762                 if (!fid_is_sane(&md.body->mbo_fid1)) {
2763                         CERROR("%s: Fid is insane "DFID"\n",
2764                                 sbi->ll_fsname,
2765                                 PFID(&md.body->mbo_fid1));
2766                         GOTO(out, rc = -EINVAL);
2767                 }
2768
2769                 *inode = ll_iget(sb, cl_fid_build_ino(&md.body->mbo_fid1,
2770                                              sbi->ll_flags & LL_SBI_32BIT_API),
2771                                  &md);
2772                 if (IS_ERR(*inode)) {
2773                         lmd_clear_acl(&md);
2774                         rc = PTR_ERR(*inode);
2775                         *inode = NULL;
2776                         CERROR("new_inode -fatal: rc %d\n", rc);
2777                         GOTO(out, rc);
2778                 }
2779         }
2780
2781         /* Handle a piggybacked layout lock.
2782          * A layout lock can be piggybacked on getattr and open requests.
2783          * The LSM can be applied to the inode only if it comes with a layout
2784          * lock; otherwise the correct layout may be overwritten, for example:
2785          * 1. proc1: MDT returns an LSM but does not grant a layout lock
2786          * 2. the layout is changed by another client
2787          * 3. proc2: refreshes the layout, layout lock granted
2788          * 4. proc1: applies the now-stale layout */
2789         if (it != NULL && it->it_lock_mode != 0) {
2790                 struct lustre_handle lockh;
2791                 struct ldlm_lock *lock;
2792
2793                 lockh.cookie = it->it_lock_handle;
2794                 lock = ldlm_handle2lock(&lockh);
2795                 LASSERT(lock != NULL);
2796                 if (ldlm_has_layout(lock)) {
2797                         struct cl_object_conf conf;
2798
2799                         memset(&conf, 0, sizeof(conf));
2800                         conf.coc_opc = OBJECT_CONF_SET;
2801                         conf.coc_inode = *inode;
2802                         conf.coc_lock = lock;
2803                         conf.u.coc_layout = md.layout;
2804                         (void)ll_layout_conf(*inode, &conf);
2805                 }
2806                 LDLM_LOCK_PUT(lock);
2807         }
2808
2809         if (default_lmv_deleted)
2810                 ll_update_default_lsm_md(*inode, &md);
2811
2812         GOTO(out, rc = 0);
2813
2814 out:
2815         /* cleanup will be done if necessary */
2816         md_free_lustre_md(sbi->ll_md_exp, &md);
2817
2818         if (rc != 0 && it != NULL && it->it_op & IT_OPEN) {
2819                 ll_intent_drop_lock(it);
2820                 ll_open_cleanup(sb != NULL ? sb : (*inode)->i_sb, req);
2821         }
2822
2823         return rc;
2824 }
2825
2826 int ll_obd_statfs(struct inode *inode, void __user *arg)
2827 {
2828         struct ll_sb_info *sbi = NULL;
2829         struct obd_export *exp;
2830         char *buf = NULL;
2831         struct obd_ioctl_data *data = NULL;
2832         __u32 type;
2833         int len = 0, rc;
2834
2835         if (!inode || !(sbi = ll_i2sbi(inode)))
2836                 GOTO(out_statfs, rc = -EINVAL);
2837
2838         rc = obd_ioctl_getdata(&buf, &len, arg);
2839         if (rc)
2840                 GOTO(out_statfs, rc);
2841
2842         data = (void*)buf;
2843         if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
2844             !data->ioc_pbuf1 || !data->ioc_pbuf2)
2845                 GOTO(out_statfs, rc = -EINVAL);
2846
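        /*
         * Expected ioctl layout (from the checks below): inlbuf1 holds a __u32
         * type (LL_STATFS_LMV or LL_STATFS_LOV), inlbuf2 a second __u32 (the
         * target index), and pbuf1/pbuf2 receive a struct obd_statfs and a
         * struct obd_uuid respectively.
         */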
2847         if (data->ioc_inllen1 != sizeof(__u32) ||
2848             data->ioc_inllen2 != sizeof(__u32) ||
2849             data->ioc_plen1 != sizeof(struct obd_statfs) ||
2850             data->ioc_plen2 != sizeof(struct obd_uuid))
2851                 GOTO(out_statfs, rc = -EINVAL);
2852
2853         memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
2854         if (type & LL_STATFS_LMV)
2855                 exp = sbi->ll_md_exp;
2856         else if (type & LL_STATFS_LOV)
2857                 exp = sbi->ll_dt_exp;
2858         else
2859                 GOTO(out_statfs, rc = -ENODEV);
2860
2861         rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, NULL);
2862         if (rc)
2863                 GOTO(out_statfs, rc);
2864 out_statfs:
2865         OBD_FREE_LARGE(buf, len);
2866         return rc;
2867 }
2868
2869 /*
2870  * This is normally called from ll_finish_md_op_data(), but sometimes it
2871  * needs to be called earlier to avoid a deadlock.
2872  */
2873 void ll_unlock_md_op_lsm(struct md_op_data *op_data)
2874 {
2875         if (op_data->op_mea2_sem) {
2876                 up_read_non_owner(op_data->op_mea2_sem);
2877                 op_data->op_mea2_sem = NULL;
2878         }
2879
2880         if (op_data->op_mea1_sem) {
2881                 up_read_non_owner(op_data->op_mea1_sem);
2882                 op_data->op_mea1_sem = NULL;
2883         }
2884 }
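
/*
 * Illustrative sketch (not from the original source): because the semaphore
 * fields are cleared here, a caller that must drop the LSM semaphores early
 * can do so and still call ll_finish_md_op_data() later without a double
 * unlock:
 *
 *	ll_unlock_md_op_lsm(op_data);
 *	...				// work that would otherwise deadlock
 *	ll_finish_md_op_data(op_data);	// sems already NULL, nothing re-released
 */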
2885
2886 /* This function prepares the md_op_data hint passed down to the MD stack. */
2887 struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
2888                                       struct inode *i1, struct inode *i2,
2889                                       const char *name, size_t namelen,
2890                                       __u32 mode, enum md_op_code opc,
2891                                       void *data)
2892 {
2893         LASSERT(i1 != NULL);
2894
2895         if (name == NULL) {
2896                 /* Do not reuse namelen for something else. */
2897                 if (namelen != 0)
2898                         return ERR_PTR(-EINVAL);
2899         } else {
2900                 if (namelen > ll_i2sbi(i1)->ll_namelen)
2901                         return ERR_PTR(-ENAMETOOLONG);
2902
2903                 if (!lu_name_is_valid_2(name, namelen))
2904                         return ERR_PTR(-EINVAL);
2905         }
2906
2907         if (op_data == NULL)
2908                 OBD_ALLOC_PTR(op_data);
2909
2910         if (op_data == NULL)
2911                 return ERR_PTR(-ENOMEM);
2912
2913         ll_i2gids(op_data->op_suppgids, i1, i2);
2914         op_data->op_fid1 = *ll_inode2fid(i1);
2915         op_data->op_code = opc;
2916
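        /*
         * For a directory parent, take lli_lsm_sem and snapshot the striping
         * under it. The non-owner rwsem primitives are used because the
         * semaphore may be released by a different task than the one that
         * acquired it (see ll_unlock_md_op_lsm()).
         */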
2917         if (S_ISDIR(i1->i_mode)) {
2918                 down_read_non_owner(&ll_i2info(i1)->lli_lsm_sem);
2919                 op_data->op_mea1_sem = &ll_i2info(i1)->lli_lsm_sem;
2920                 op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
2921                 op_data->op_default_mea1 = ll_i2info(i1)->lli_default_lsm_md;
2922         }
2923
2924         if (i2) {
2925                 op_data->op_fid2 = *ll_inode2fid(i2);
2926                 if (S_ISDIR(i2->i_mode)) {
2927                         if (i2 != i1) {
2928                                 /* i2 is typically a child of i1, and MUST be
2929                                  * further from the root to avoid deadlocks.
2930                                  */
2931                                 down_read_non_owner(&ll_i2info(i2)->lli_lsm_sem);
2932                                 op_data->op_mea2_sem =
2933                                                 &ll_i2info(i2)->lli_lsm_sem;
2934                         }
2935                         op_data->op_mea2 = ll_i2info(i2)->lli_lsm_md;
2936                 }
2937         } else {
2938                 fid_zero(&op_data->op_fid2);
2939         }
2940
2941         if (ll_i2sbi(i1)->ll_flags & LL_SBI_64BIT_HASH)
2942                 op_data->op_cli_flags |= CLI_HASH64;
2943
2944         if (ll_need_32bit_api(ll_i2sbi(i1)))
2945                 op_data->op_cli_flags |= CLI_API32;
2946
2947         op_data->op_name = name;
2948         op_data->op_namelen = namelen;
2949         op_data->op_mode = mode;
2950         op_data->op_mod_time = ktime_get_real_seconds();
2951         op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
2952         op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
2953         op_data->op_cap = cfs_curproc_cap_pack();
2954         op_data->op_mds = 0;
2955         if ((opc == LUSTRE_OPC_CREATE) && (name != NULL) &&
2956              filename_is_volatile(name, namelen, &op_data->op_mds)) {
2957                 op_data->op_bias |= MDS_CREATE_VOLATILE;
2958         }
2959         op_data->op_data = data;
2960
2961         return op_data;
2962 }
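
/*
 * Illustrative sketch (the MD call name is a generic placeholder, not from
 * this file): callers pair ll_prep_md_op_data() with ll_finish_md_op_data():
 *
 *	op_data = ll_prep_md_op_data(NULL, dir, NULL, name, namelen, 0,
 *				     LUSTRE_OPC_ANY, NULL);
 *	if (IS_ERR(op_data))
 *		RETURN(PTR_ERR(op_data));
 *	rc = md_<operation>(sbi->ll_md_exp, op_data, ...);
 *	ll_finish_md_op_data(op_data);
 */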
2963
2964 void ll_finish_md_op_data(struct md_op_data *op_data)
2965 {
2966         ll_unlock_md_op_lsm(op_data);
2967         security_release_secctx(op_data->op_file_secctx,
2968                                 op_data->op_file_secctx_size);
2969         OBD_FREE_PTR(op_data);
2970 }
2971
2972 int ll_show_options(struct seq_file *seq, struct dentry *dentry)
2973 {
2974         struct ll_sb_info *sbi;
2975
2976         LASSERT(seq && dentry);
2977         sbi = ll_s2sbi(dentry->d_sb);
2978
2979         if (sbi->ll_flags & LL_SBI_NOLCK)
2980                 seq_puts(seq, ",nolock");
2981
2982         /* "flock" is the default since 2.13, but it wasn't for many years,
2983          * so it is still useful to print it to show that it is enabled.
2984          * Also print "noflock" so it is clear when flock is disabled.
2985          */
2986         if (sbi->ll_flags & LL_SBI_FLOCK)
2987                 seq_puts(seq, ",flock");
2988         else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
2989                 seq_puts(seq, ",localflock");
2990         else
2991                 seq_puts(seq, ",noflock");
2992
2993         if (sbi->ll_flags & LL_SBI_USER_XATTR)
2994                 seq_puts(seq, ",user_xattr");
2995
2996         if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
2997                 seq_puts(seq, ",lazystatfs");
2998
2999         if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
3000                 seq_puts(seq, ",user_fid2path");
3001
3002         if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
3003                 seq_puts(seq, ",always_ping");
3004
3005         if (ll_sbi_has_test_dummy_encryption(sbi))
3006                 seq_puts(seq, ",test_dummy_encryption");
3007
3008         if (ll_sbi_has_encrypt(sbi))
3009                 seq_puts(seq, ",encrypt");
3010         else
3011                 seq_puts(seq, ",noencrypt");
3012
3013         RETURN(0);
3014 }
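
/*
 * Illustrative only: depending on which LL_SBI_* flags are set, the options
 * shown in /proc/mounts look like ",flock,user_xattr,lazystatfs,noencrypt"
 * appended after the generic mount flags.
 */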
3015
3016 /**
3017  * Get the OBD device name selected by cmd and copy it out to user space
3018  */
3019 int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
3020 {
3021         struct ll_sb_info *sbi = ll_i2sbi(inode);
3022         struct obd_device *obd;
3023         ENTRY;
3024
3025         if (cmd == OBD_IOC_GETDTNAME)
3026                 obd = class_exp2obd(sbi->ll_dt_exp);
3027         else if (cmd == OBD_IOC_GETMDNAME)
3028                 obd = class_exp2obd(sbi->ll_md_exp);
3029         else
3030                 RETURN(-EINVAL);
3031
3032         if (!obd)
3033                 RETURN(-ENOENT);
3034
3035         if (copy_to_user((void __user *)arg, obd->obd_name,
3036                          strlen(obd->obd_name) + 1))
3037                 RETURN(-EFAULT);
3038
3039         RETURN(0);
3040 }
3041
3042 static char *ll_d_path(struct dentry *dentry, char *buf, int bufsize)
3043 {
3044         char *path = NULL;
3045
3046         struct path p;
3047
3048         p.dentry = dentry;
3049         p.mnt = current->fs->root.mnt;
3050         path_get(&p);
3051         path = d_path(&p, buf, bufsize);
3052         path_put(&p);
3053         return path;
3054 }
3055
3056 void ll_dirty_page_discard_warn(struct page *page, int ioret)
3057 {
3058         char *buf, *path = NULL;
3059         struct dentry *dentry = NULL;
3060         struct inode *inode = page->mapping->host;
3061
3062         /* This can be called inside a spinlock, so use GFP_ATOMIC. */
3063         buf = (char *)__get_free_page(GFP_ATOMIC);
3064         if (buf != NULL) {
3065                 dentry = d_find_alias(page->mapping->host);
3066                 if (dentry != NULL)
3067                         path = ll_d_path(dentry, buf, PAGE_SIZE);
3068         }
3069
3070         /* The below message is checked in recovery-small.sh test_24b */
3071         CDEBUG(D_WARNING,
3072                "%s: dirty page discard: %s/fid: "DFID"/%s may get corrupted "
3073                "(rc %d)\n", ll_i2sbi(inode)->ll_fsname,
3074                s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
3075                PFID(ll_inode2fid(inode)),
3076                (path && !IS_ERR(path)) ? path : "", ioret);
3077
3078         if (dentry != NULL)
3079                 dput(dentry);
3080
3081         if (buf != NULL)
3082                 free_page((unsigned long)buf);
3083 }
3084
3085 ssize_t ll_copy_user_md(const struct lov_user_md __user *md,
3086                         struct lov_user_md **kbuf)
3087 {
3088         struct lov_user_md      lum;
3089         ssize_t                 lum_size;
3090         ENTRY;
3091
3092         if (copy_from_user(&lum, md, sizeof(lum)))
3093                 RETURN(-EFAULT);
3094
3095         lum_size = ll_lov_user_md_size(&lum);
3096         if (lum_size < 0)
3097                 RETURN(lum_size);
3098
3099         OBD_ALLOC_LARGE(*kbuf, lum_size);
3100         if (*kbuf == NULL)
3101                 RETURN(-ENOMEM);
3102
3103         if (copy_from_user(*kbuf, md, lum_size) != 0) {
3104                 OBD_FREE_LARGE(*kbuf, lum_size);
3105                 RETURN(-EFAULT);
3106         }
3107
3108         RETURN(lum_size);
3109 }
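
/*
 * Illustrative sketch (not from the original source): callers free the
 * returned buffer with the size this function returns, e.g.:
 *
 *	lum_size = ll_copy_user_md(user_md, &klum);
 *	if (lum_size < 0)
 *		RETURN(lum_size);
 *	... use klum ...
 *	OBD_FREE_LARGE(klum, lum_size);
 */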
3110
3111 /*
3112  * Compute the llite root squash state after a change of the root squash
3113  * configuration setting or the addition/removal of an LNet NID
3114  */
3115 void ll_compute_rootsquash_state(struct ll_sb_info *sbi)
3116 {
3117         struct root_squash_info *squash = &sbi->ll_squash;
3118         int i;
3119         bool matched;
3120         struct lnet_process_id id;
3121
3122         /* Update norootsquash flag */
3123         spin_lock(&squash->rsi_lock);
3124         if (list_empty(&squash->rsi_nosquash_nids))
3125                 sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
3126         else {
3127                 /* Do not apply root squash if any one of our NIDs is
3128                  * in the nosquash_nids list */
3129                 matched = false;
3130                 i = 0;
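                /* Walk all local NIDs, skipping the loopback NID. */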
3131                 while (LNetGetId(i++, &id) != -ENOENT) {
3132                         if (id.nid == LNET_NID_LO_0)
3133                                 continue;
3134                         if (cfs_match_nid(id.nid, &squash->rsi_nosquash_nids)) {
3135                                 matched = true;
3136                                 break;
3137                         }
3138                 }
3139                 if (matched)
3140                         sbi->ll_flags |= LL_SBI_NOROOTSQUASH;
3141                 else
3142                         sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
3143         }
3144         spin_unlock(&squash->rsi_lock);
3145 }
3146
3147 /**
3148  * Parse linkea content to extract information about a given hardlink
3149  *
3150  * \param[in]   ldata      - Initialized linkea data
3151  * \param[in]   linkno     - Link identifier
3152  * \param[out]  parent_fid - The entry's parent FID
3153  * \param[out]  ln         - Entry name destination buffer
3154  *
3155  * \retval 0 on success
3156  * \retval Appropriate negative error code on failure
3157  */
3158 static int ll_linkea_decode(struct linkea_data *ldata, unsigned int linkno,
3159                             struct lu_fid *parent_fid, struct lu_name *ln)
3160 {
3161         unsigned int    idx;
3162         int             rc;
3163         ENTRY;
3164
3165         rc = linkea_init_with_rec(ldata);
3166         if (rc < 0)
3167                 RETURN(rc);
3168
3169         if (linkno >= ldata->ld_leh->leh_reccount)
3170                 /* beyond last link */
3171                 RETURN(-ENODATA);
3172
3173         linkea_first_entry(ldata);
3174         for (idx = 0; ldata->ld_lee != NULL; idx++) {
3175                 linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen, ln,
3176                                     parent_fid);
3177                 if (idx == linkno)
3178                         break;
3179
3180                 linkea_next_entry(ldata);
3181         }
3182
3183         if (idx < linkno)
3184                 RETURN(-ENODATA);
3185
3186         RETURN(0);
3187 }
3188
3189 /**
3190  * Get the parent FID and name of a given hard link. The operation is
3191  * performed for a given link number, letting the caller iterate over
3192  * linkno to list one or all links of an entry.
3193  *
3194  * \param[in]     file - File descriptor against which to perform the operation
3195  * \param[in,out] arg  - User-filled structure containing the linkno to operate
3196  *                       on and the available size. It is eventually filled with
3197  *                       the requested information or left untouched on error
3198  *
3199  * \retval 0 on success
3200  * \retval Appropriate negative error code on failure
3201  */
3202 int ll_getparent(struct file *file, struct getparent __user *arg)
3203 {
3204         struct inode            *inode = file_inode(file);
3205         struct linkea_data      *ldata;
3206         struct lu_buf            buf = LU_BUF_NULL;
3207         struct lu_name           ln;
3208         struct lu_fid            parent_fid;
3209         __u32                    linkno;
3210         __u32                    name_size;
3211         int                      rc;
3212
3213         ENTRY;
3214
3215         if (!cfs_capable(CFS_CAP_DAC_READ_SEARCH) &&
3216             !(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
3217                 RETURN(-EPERM);
3218
3219         if (get_user(name_size, &arg->gp_name_size))
3220                 RETURN(-EFAULT);
3221
3222         if (get_user(linkno, &arg->gp_linkno))
3223                 RETURN(-EFAULT);
3224
3225         if (name_size > PATH_MAX)
3226                 RETURN(-EINVAL);
3227
3228         OBD_ALLOC(ldata, sizeof(*ldata));
3229         if (ldata == NULL)
3230                 RETURN(-ENOMEM);
3231
3232         rc = linkea_data_new(ldata, &buf);
3233         if (rc < 0)
3234                 GOTO(ldata_free, rc);
3235
3236         rc = ll_xattr_list(inode, XATTR_NAME_LINK, XATTR_TRUSTED_T, buf.lb_buf,
3237                            buf.lb_len, OBD_MD_FLXATTR);
3238         if (rc < 0)
3239                 GOTO(lb_free, rc);
3240
3241         rc = ll_linkea_decode(ldata, linkno, &parent_fid, &ln);
3242         if (rc < 0)
3243                 GOTO(lb_free, rc);
3244
3245         if (ln.ln_namelen >= name_size)
3246                 GOTO(lb_free, rc = -EOVERFLOW);
3247
3248         if (copy_to_user(&arg->gp_fid, &parent_fid, sizeof(arg->gp_fid)))
3249                 GOTO(lb_free, rc = -EFAULT);
3250
3251         if (copy_to_user(&arg->gp_name, ln.ln_name, ln.ln_namelen))
3252                 GOTO(lb_free, rc = -EFAULT);
3253
3254         if (put_user('\0', arg->gp_name + ln.ln_namelen))
3255                 GOTO(lb_free, rc = -EFAULT);
3256
3257 lb_free:
3258         lu_buf_free(&buf);
3259 ldata_free:
3260         OBD_FREE(ldata, sizeof(*ldata));
3261
3262         RETURN(rc);
3263 }
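
/*
 * Illustrative user-space sketch (assumes the LL_IOC_GETPARENT ioctl number
 * and the struct getparent definition from the Lustre user headers): iterate
 * gp_linkno until -ENODATA to list every hard link of an open file:
 *
 *	struct getparent *gp = malloc(sizeof(*gp) + NAME_MAX + 1);
 *
 *	gp->gp_linkno = 0;
 *	gp->gp_name_size = NAME_MAX + 1;
 *	while (ioctl(fd, LL_IOC_GETPARENT, gp) == 0) {
 *		printf("%s\n", gp->gp_name);
 *		gp->gp_linkno++;
 *	}
 */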